diff --git a/contrib/HSDF-Net/ChamferDistancePytorch/.gitignore b/contrib/HSDF-Net/ChamferDistancePytorch/.gitignore new file mode 100644 index 00000000..ca5b6a7b --- /dev/null +++ b/contrib/HSDF-Net/ChamferDistancePytorch/.gitignore @@ -0,0 +1,2 @@ +*__pycache__* +/tmp diff --git a/contrib/HSDF-Net/ChamferDistancePytorch/LICENSE b/contrib/HSDF-Net/ChamferDistancePytorch/LICENSE new file mode 100644 index 00000000..794e2dfb --- /dev/null +++ b/contrib/HSDF-Net/ChamferDistancePytorch/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019 ThibaultGROUEIX + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/contrib/HSDF-Net/ChamferDistancePytorch/README.md b/contrib/HSDF-Net/ChamferDistancePytorch/README.md new file mode 100644 index 00000000..c3ad6619 --- /dev/null +++ b/contrib/HSDF-Net/ChamferDistancePytorch/README.md @@ -0,0 +1,103 @@ +`pip install torch ninja` + +# Pytorch Chamfer Distance. + +Include a **CUDA** version, and a **PYTHON** version with pytorch standard operations. +NB : In this depo, dist1 and dist2 are squared pointcloud euclidean distances, so you should adapt thresholds accordingly. + +- [x] F - Score + + + +### CUDA VERSION + +- [x] JIT compilation +- [x] Supports multi-gpu +- [x] 2D point clouds. +- [x] 3D point clouds. +- [x] 5D point clouds. +- [x] Contiguous() safe. + + + +### Python Version + +- [x] Supports any dimension + + + +### Usage + +```python +import torch, chamfer3D.dist_chamfer_3D, fscore +chamLoss = chamfer3D.dist_chamfer_3D.chamfer_3DDist() +points1 = torch.rand(32, 1000, 3).cuda() +points2 = torch.rand(32, 2000, 3, requires_grad=True).cuda() +dist1, dist2, idx1, idx2 = chamLoss(points1, points2) +f_score, precision, recall = fscore.fscore(dist1, dist2) +``` + + + +### Add it to your project as a submodule + +```shell +git submodule add https://github.com/ThibaultGROUEIX/ChamferDistancePytorch +``` + + + +### Benchmark: [forward + backward] pass +- [x] CUDA 10.1, NVIDIA 435, Pytorch 1.4 +- [x] p1 : 32 x 2000 x dim +- [x] p2 : 32 x 1000 x dim + +| *Timing (sec * 1000)* | 2D | 3D | 5D | +| ---------- | -------- | ------- | ------- | +| **Cuda Compiled** | **1.2** | 1.4 |1.8 | +| **Cuda JIT** | 1.3 | **1.4** |**1.5** | +| **Python** | 37 | 37 | 37 | + + +| *Memory (MB)* | 2D | 3D | 5D | +| ---------- | -------- | ------- | ------- | +| **Cuda Compiled** | 529 | 529 | 549 | +| **Cuda JIT** | **520** | **529** |**549** | +| **Python** | 2495 | 2495 | 2495 | + + + +### What is the chamfer distance ? 
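The chamfer distance between two point clouds is the sum of two directional terms: for every point of one cloud, take the (squared, in this repo) distance to its nearest neighbour in the other cloud, and average; then do the same in the opposite direction. The following is a minimal pure-PyTorch sketch of the quantities involved, equivalent in spirit to the `chamfer_python.distChamfer` fallback shipped in this repo (which additionally returns the nearest-neighbour indices); the CUDA kernels above are the fast path.

```python
import torch

def chamfer_squared(p1, p2):
    # p1: (B, N, D) and p2: (B, M, D) batched point clouds.
    d = torch.cdist(p1, p2) ** 2   # (B, N, M) squared pairwise distances
    dist1 = d.min(dim=2).values    # per point of p1: squared distance to nearest neighbour in p2
    dist2 = d.min(dim=1).values    # per point of p2: squared distance to nearest neighbour in p1
    # A common chamfer loss is then dist1.mean(1) + dist2.mean(1).
    return dist1, dist2
```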
+ +[Stanford course](http://graphics.stanford.edu/courses/cs468-17-spring/LectureSlides/L14%20-%203d%20deep%20learning%20on%20point%20cloud%20representation%20(analysis).pdf) on 3D deep Learning + + + +### Aknowledgment + +Original backbone from [Fei Xia](https://github.com/fxia22/pointGAN/blob/master/nndistance/src/nnd_cuda.cu). + +JIT cool trick from [Christian Diller](https://github.com/chrdiller) + +### Troubleshoot + +- `Undefined symbol: Zxxxxxxxxxxxxxxxxx `: + +--> Fix: Make sure to `import torch` before you `import chamfer`. +--> Use pytorch.version >= 1.1.0 + +- [RuntimeError: Ninja is required to load C++ extension](https://github.com/zhanghang1989/PyTorch-Encoding/issues/167) + +```shell +wget https://github.com/ninja-build/ninja/releases/download/v1.8.2/ninja-linux.zip +sudo unzip ninja-linux.zip -d /usr/local/bin/ +sudo update-alternatives --install /usr/bin/ninja ninja /usr/local/bin/ninja 1 --force +``` + + + + + +#### TODO: + +* Discuss behaviour of torch.min() and tensor.min() which causes issues in some pytorch versions diff --git a/contrib/HSDF-Net/ChamferDistancePytorch/chamfer2D/chamfer2D.cu b/contrib/HSDF-Net/ChamferDistancePytorch/chamfer2D/chamfer2D.cu new file mode 100644 index 00000000..567dd1a0 --- /dev/null +++ b/contrib/HSDF-Net/ChamferDistancePytorch/chamfer2D/chamfer2D.cu @@ -0,0 +1,182 @@ + +#include +#include + +#include +#include + +#include + + + +__global__ void NmDistanceKernel(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i){ + const int batch=512; + __shared__ float buf[batch*2]; + for (int i=blockIdx.x;ibest){ + result[(i*n+j)]=best; + result_i[(i*n+j)]=best_i; + } + } + __syncthreads(); + } + } +} +// int chamfer_cuda_forward(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i,float * result2,int * result2_i, cudaStream_t stream){ +int chamfer_cuda_forward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor dist1, at::Tensor dist2, at::Tensor idx1, at::Tensor idx2){ + + const auto batch_size = xyz1.size(0); + const auto n = xyz1.size(1); //num_points point cloud A + const auto m = xyz2.size(1); //num_points point cloud B + + NmDistanceKernel<<>>(batch_size, n, xyz1.data(), m, xyz2.data(), dist1.data(), idx1.data()); + NmDistanceKernel<<>>(batch_size, m, xyz2.data(), n, xyz1.data(), dist2.data(), idx2.data()); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) { + printf("error in nnd updateOutput: %s\n", cudaGetErrorString(err)); + //THError("aborting"); + return 0; + } + return 1; + + +} +__global__ void NmDistanceGradKernel(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,float * grad_xyz1,float * grad_xyz2){ + for (int i=blockIdx.x;i>>(batch_size,n,xyz1.data(),m,xyz2.data(),graddist1.data(),idx1.data(),gradxyz1.data(),gradxyz2.data()); + NmDistanceGradKernel<<>>(batch_size,m,xyz2.data(),n,xyz1.data(),graddist2.data(),idx2.data(),gradxyz2.data(),gradxyz1.data()); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) { + printf("error in nnd get grad: %s\n", cudaGetErrorString(err)); + //THError("aborting"); + return 0; + } + return 1; + +} + diff --git a/contrib/HSDF-Net/ChamferDistancePytorch/chamfer2D/chamfer_cuda.cpp b/contrib/HSDF-Net/ChamferDistancePytorch/chamfer2D/chamfer_cuda.cpp new file mode 100644 index 00000000..67574e21 --- /dev/null +++ b/contrib/HSDF-Net/ChamferDistancePytorch/chamfer2D/chamfer_cuda.cpp @@ -0,0 +1,33 @@ +#include +#include + +///TMP +//#include "common.h" +/// NOT TMP + + 
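// chamfer_cuda_forward / chamfer_cuda_backward declared below are implemented in chamfer2D.cu;
// the thin chamfer_forward / chamfer_backward wrappers further down simply forward to them
// and are exposed to Python through the PYBIND11_MODULE block at the end of this file.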
+int chamfer_cuda_forward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor dist1, at::Tensor dist2, at::Tensor idx1, at::Tensor idx2); + + +int chamfer_cuda_backward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor gradxyz1, at::Tensor gradxyz2, at::Tensor graddist1, at::Tensor graddist2, at::Tensor idx1, at::Tensor idx2); + + + + +int chamfer_forward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor dist1, at::Tensor dist2, at::Tensor idx1, at::Tensor idx2) { + return chamfer_cuda_forward(xyz1, xyz2, dist1, dist2, idx1, idx2); +} + + +int chamfer_backward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor gradxyz1, at::Tensor gradxyz2, at::Tensor graddist1, + at::Tensor graddist2, at::Tensor idx1, at::Tensor idx2) { + + return chamfer_cuda_backward(xyz1, xyz2, gradxyz1, gradxyz2, graddist1, graddist2, idx1, idx2); +} + + + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("forward", &chamfer_forward, "chamfer forward (CUDA)"); + m.def("backward", &chamfer_backward, "chamfer backward (CUDA)"); +} \ No newline at end of file diff --git a/contrib/HSDF-Net/ChamferDistancePytorch/chamfer2D/dist_chamfer_2D.py b/contrib/HSDF-Net/ChamferDistancePytorch/chamfer2D/dist_chamfer_2D.py new file mode 100644 index 00000000..b013642c --- /dev/null +++ b/contrib/HSDF-Net/ChamferDistancePytorch/chamfer2D/dist_chamfer_2D.py @@ -0,0 +1,80 @@ +from torch import nn +from torch.autograd import Function +import torch +import importlib +import os +chamfer_found = importlib.find_loader("chamfer_2D") is not None +if not chamfer_found: + ## Cool trick from https://github.com/chrdiller + print("Jitting Chamfer 2D") + cur_path = os.path.dirname(os.path.abspath(__file__)) + build_path = cur_path.replace('chamfer2D', 'tmp') + os.makedirs(build_path, exist_ok=True) + + from torch.utils.cpp_extension import load + chamfer_2D = load(name="chamfer_2D", + sources=[ + "/".join(os.path.abspath(__file__).split('/')[:-1] + ["chamfer_cuda.cpp"]), + "/".join(os.path.abspath(__file__).split('/')[:-1] + ["chamfer2D.cu"]), + ], build_directory=build_path) + print("Loaded JIT 2D CUDA chamfer distance") + +else: + import chamfer_2D + print("Loaded compiled 2D CUDA chamfer distance") + +# Chamfer's distance module @thibaultgroueix +# GPU tensors only +class chamfer_2DFunction(Function): + @staticmethod + def forward(ctx, xyz1, xyz2): + batchsize, n, dim = xyz1.size() + assert dim==2, "Wrong last dimension for the chamfer distance 's input! Check with .size()" + _, m, dim = xyz2.size() + assert dim==2, "Wrong last dimension for the chamfer distance 's input! 
Check with .size()" + device = xyz1.device + + device = xyz1.device + + dist1 = torch.zeros(batchsize, n) + dist2 = torch.zeros(batchsize, m) + + idx1 = torch.zeros(batchsize, n).type(torch.IntTensor) + idx2 = torch.zeros(batchsize, m).type(torch.IntTensor) + + dist1 = dist1.to(device) + dist2 = dist2.to(device) + idx1 = idx1.to(device) + idx2 = idx2.to(device) + torch.cuda.set_device(device) + + chamfer_2D.forward(xyz1, xyz2, dist1, dist2, idx1, idx2) + ctx.save_for_backward(xyz1, xyz2, idx1, idx2) + return dist1, dist2, idx1, idx2 + + @staticmethod + def backward(ctx, graddist1, graddist2, gradidx1, gradidx2): + xyz1, xyz2, idx1, idx2 = ctx.saved_tensors + graddist1 = graddist1.contiguous() + graddist2 = graddist2.contiguous() + device = graddist1.device + + gradxyz1 = torch.zeros(xyz1.size()) + gradxyz2 = torch.zeros(xyz2.size()) + + gradxyz1 = gradxyz1.to(device) + gradxyz2 = gradxyz2.to(device) + chamfer_2D.backward( + xyz1, xyz2, gradxyz1, gradxyz2, graddist1, graddist2, idx1, idx2 + ) + return gradxyz1, gradxyz2 + + +class chamfer_2DDist(nn.Module): + def __init__(self): + super(chamfer_2DDist, self).__init__() + + def forward(self, input1, input2): + input1 = input1.contiguous() + input2 = input2.contiguous() + return chamfer_2DFunction.apply(input1, input2) diff --git a/contrib/HSDF-Net/ChamferDistancePytorch/chamfer2D/setup.py b/contrib/HSDF-Net/ChamferDistancePytorch/chamfer2D/setup.py new file mode 100644 index 00000000..11d01237 --- /dev/null +++ b/contrib/HSDF-Net/ChamferDistancePytorch/chamfer2D/setup.py @@ -0,0 +1,14 @@ +from setuptools import setup +from torch.utils.cpp_extension import BuildExtension, CUDAExtension + +setup( + name='chamfer_2D', + ext_modules=[ + CUDAExtension('chamfer_2D', [ + "/".join(__file__.split('/')[:-1] + ['chamfer_cuda.cpp']), + "/".join(__file__.split('/')[:-1] + ['chamfer2D.cu']), + ]), + ], + cmdclass={ + 'build_ext': BuildExtension + }) \ No newline at end of file diff --git a/contrib/HSDF-Net/ChamferDistancePytorch/chamfer3D/chamfer3D.cu b/contrib/HSDF-Net/ChamferDistancePytorch/chamfer3D/chamfer3D.cu new file mode 100644 index 00000000..d5b886df --- /dev/null +++ b/contrib/HSDF-Net/ChamferDistancePytorch/chamfer3D/chamfer3D.cu @@ -0,0 +1,196 @@ + +#include +#include + +#include +#include + +#include + + + +__global__ void NmDistanceKernel(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i){ + const int batch=512; + __shared__ float buf[batch*3]; + for (int i=blockIdx.x;ibest){ + result[(i*n+j)]=best; + result_i[(i*n+j)]=best_i; + } + } + __syncthreads(); + } + } +} +// int chamfer_cuda_forward(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i,float * result2,int * result2_i, cudaStream_t stream){ +int chamfer_cuda_forward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor dist1, at::Tensor dist2, at::Tensor idx1, at::Tensor idx2){ + + const auto batch_size = xyz1.size(0); + const auto n = xyz1.size(1); //num_points point cloud A + const auto m = xyz2.size(1); //num_points point cloud B + + NmDistanceKernel<<>>(batch_size, n, xyz1.data(), m, xyz2.data(), dist1.data(), idx1.data()); + NmDistanceKernel<<>>(batch_size, m, xyz2.data(), n, xyz1.data(), dist2.data(), idx2.data()); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) { + printf("error in nnd updateOutput: %s\n", cudaGetErrorString(err)); + //THError("aborting"); + return 0; + } + return 1; + + +} +__global__ void NmDistanceGradKernel(int b,int n,const float * xyz1,int m,const float * xyz2,const float * 
grad_dist1,const int * idx1,float * grad_xyz1,float * grad_xyz2){ + for (int i=blockIdx.x;i>>(batch_size,n,xyz1.data(),m,xyz2.data(),graddist1.data(),idx1.data(),gradxyz1.data(),gradxyz2.data()); + NmDistanceGradKernel<<>>(batch_size,m,xyz2.data(),n,xyz1.data(),graddist2.data(),idx2.data(),gradxyz2.data(),gradxyz1.data()); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) { + printf("error in nnd get grad: %s\n", cudaGetErrorString(err)); + //THError("aborting"); + return 0; + } + return 1; + +} + diff --git a/contrib/HSDF-Net/ChamferDistancePytorch/chamfer3D/chamfer_cuda.cpp b/contrib/HSDF-Net/ChamferDistancePytorch/chamfer3D/chamfer_cuda.cpp new file mode 100644 index 00000000..67574e21 --- /dev/null +++ b/contrib/HSDF-Net/ChamferDistancePytorch/chamfer3D/chamfer_cuda.cpp @@ -0,0 +1,33 @@ +#include +#include + +///TMP +//#include "common.h" +/// NOT TMP + + +int chamfer_cuda_forward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor dist1, at::Tensor dist2, at::Tensor idx1, at::Tensor idx2); + + +int chamfer_cuda_backward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor gradxyz1, at::Tensor gradxyz2, at::Tensor graddist1, at::Tensor graddist2, at::Tensor idx1, at::Tensor idx2); + + + + +int chamfer_forward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor dist1, at::Tensor dist2, at::Tensor idx1, at::Tensor idx2) { + return chamfer_cuda_forward(xyz1, xyz2, dist1, dist2, idx1, idx2); +} + + +int chamfer_backward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor gradxyz1, at::Tensor gradxyz2, at::Tensor graddist1, + at::Tensor graddist2, at::Tensor idx1, at::Tensor idx2) { + + return chamfer_cuda_backward(xyz1, xyz2, gradxyz1, gradxyz2, graddist1, graddist2, idx1, idx2); +} + + + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("forward", &chamfer_forward, "chamfer forward (CUDA)"); + m.def("backward", &chamfer_backward, "chamfer backward (CUDA)"); +} \ No newline at end of file diff --git a/contrib/HSDF-Net/ChamferDistancePytorch/chamfer3D/dist_chamfer_3D.py b/contrib/HSDF-Net/ChamferDistancePytorch/chamfer3D/dist_chamfer_3D.py new file mode 100644 index 00000000..de26d2c2 --- /dev/null +++ b/contrib/HSDF-Net/ChamferDistancePytorch/chamfer3D/dist_chamfer_3D.py @@ -0,0 +1,81 @@ +from torch import nn +from torch.autograd import Function +import torch +import importlib +import os +chamfer_found = importlib.find_loader("chamfer_3D") is not None +if not chamfer_found: + ## Cool trick from https://github.com/chrdiller + print("Jitting Chamfer 3D") + cur_path = os.path.dirname(os.path.abspath(__file__)) + build_path = cur_path.replace('chamfer3D', 'tmp') + os.makedirs(build_path, exist_ok=True) + + from torch.utils.cpp_extension import load + chamfer_3D = load(name="chamfer_3D", + sources=[ + "/".join(os.path.abspath(__file__).split('/')[:-1] + ["chamfer_cuda.cpp"]), + "/".join(os.path.abspath(__file__).split('/')[:-1] + ["chamfer3D.cu"]), + ], build_directory=build_path) + print("Loaded JIT 3D CUDA chamfer distance") + +else: + import chamfer_3D + print("Loaded compiled 3D CUDA chamfer distance") + + +# Chamfer's distance module @thibaultgroueix +# GPU tensors only +class chamfer_3DFunction(Function): + @staticmethod + def forward(ctx, xyz1, xyz2): + batchsize, n, dim = xyz1.size() + assert dim==3, "Wrong last dimension for the chamfer distance 's input! Check with .size()" + _, m, dim = xyz2.size() + assert dim==3, "Wrong last dimension for the chamfer distance 's input! 
Check with .size()" + device = xyz1.device + + device = xyz1.device + + dist1 = torch.zeros(batchsize, n) + dist2 = torch.zeros(batchsize, m) + + idx1 = torch.zeros(batchsize, n).type(torch.IntTensor) + idx2 = torch.zeros(batchsize, m).type(torch.IntTensor) + + dist1 = dist1.to(device) + dist2 = dist2.to(device) + idx1 = idx1.to(device) + idx2 = idx2.to(device) + torch.cuda.set_device(device) + + chamfer_3D.forward(xyz1, xyz2, dist1, dist2, idx1, idx2) + ctx.save_for_backward(xyz1, xyz2, idx1, idx2) + return dist1, dist2, idx1, idx2 + + @staticmethod + def backward(ctx, graddist1, graddist2, gradidx1, gradidx2): + xyz1, xyz2, idx1, idx2 = ctx.saved_tensors + graddist1 = graddist1.contiguous() + graddist2 = graddist2.contiguous() + device = graddist1.device + + gradxyz1 = torch.zeros(xyz1.size()) + gradxyz2 = torch.zeros(xyz2.size()) + + gradxyz1 = gradxyz1.to(device) + gradxyz2 = gradxyz2.to(device) + chamfer_3D.backward( + xyz1, xyz2, gradxyz1, gradxyz2, graddist1, graddist2, idx1, idx2 + ) + return gradxyz1, gradxyz2 + + +class chamfer_3DDist(nn.Module): + def __init__(self): + super(chamfer_3DDist, self).__init__() + + def forward(self, input1, input2): + input1 = input1.contiguous() + input2 = input2.contiguous() + return chamfer_3DFunction.apply(input1, input2) diff --git a/contrib/HSDF-Net/ChamferDistancePytorch/chamfer3D/setup.py b/contrib/HSDF-Net/ChamferDistancePytorch/chamfer3D/setup.py new file mode 100644 index 00000000..9a23aada --- /dev/null +++ b/contrib/HSDF-Net/ChamferDistancePytorch/chamfer3D/setup.py @@ -0,0 +1,14 @@ +from setuptools import setup +from torch.utils.cpp_extension import BuildExtension, CUDAExtension + +setup( + name='chamfer_3D', + ext_modules=[ + CUDAExtension('chamfer_3D', [ + "/".join(__file__.split('/')[:-1] + ['chamfer_cuda.cpp']), + "/".join(__file__.split('/')[:-1] + ['chamfer3D.cu']), + ]), + ], + cmdclass={ + 'build_ext': BuildExtension + }) \ No newline at end of file diff --git a/contrib/HSDF-Net/ChamferDistancePytorch/chamfer5D/chamfer5D.cu b/contrib/HSDF-Net/ChamferDistancePytorch/chamfer5D/chamfer5D.cu new file mode 100644 index 00000000..650e8890 --- /dev/null +++ b/contrib/HSDF-Net/ChamferDistancePytorch/chamfer5D/chamfer5D.cu @@ -0,0 +1,223 @@ + +#include +#include + +#include +#include + +#include + + + +__global__ void NmDistanceKernel(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i){ + const int batch=2048; + __shared__ float buf[batch*5]; + for (int i=blockIdx.x;ibest){ + result[(i*n+j)]=best; + result_i[(i*n+j)]=best_i; + } + } + __syncthreads(); + } + } +} +// int chamfer_cuda_forward(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i,float * result2,int * result2_i, cudaStream_t stream){ +int chamfer_cuda_forward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor dist1, at::Tensor dist2, at::Tensor idx1, at::Tensor idx2){ + + const auto batch_size = xyz1.size(0); + const auto n = xyz1.size(1); //num_points point cloud A + const auto m = xyz2.size(1); //num_points point cloud B + + NmDistanceKernel<<>>(batch_size, n, xyz1.data(), m, xyz2.data(), dist1.data(), idx1.data()); + NmDistanceKernel<<>>(batch_size, m, xyz2.data(), n, xyz1.data(), dist2.data(), idx2.data()); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) { + printf("error in nnd updateOutput: %s\n", cudaGetErrorString(err)); + //THError("aborting"); + return 0; + } + return 1; + + +} +__global__ void NmDistanceGradKernel(int b,int n,const float * xyz1,int m,const float * xyz2,const float * 
grad_dist1,const int * idx1,float * grad_xyz1,float * grad_xyz2){ + for (int i=blockIdx.x;i>>(batch_size,n,xyz1.data(),m,xyz2.data(),graddist1.data(),idx1.data(),gradxyz1.data(),gradxyz2.data()); + NmDistanceGradKernel<<>>(batch_size,m,xyz2.data(),n,xyz1.data(),graddist2.data(),idx2.data(),gradxyz2.data(),gradxyz1.data()); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) { + printf("error in nnd get grad: %s\n", cudaGetErrorString(err)); + //THError("aborting"); + return 0; + } + return 1; + +} diff --git a/contrib/HSDF-Net/ChamferDistancePytorch/chamfer5D/chamfer_cuda.cpp b/contrib/HSDF-Net/ChamferDistancePytorch/chamfer5D/chamfer_cuda.cpp new file mode 100644 index 00000000..67574e21 --- /dev/null +++ b/contrib/HSDF-Net/ChamferDistancePytorch/chamfer5D/chamfer_cuda.cpp @@ -0,0 +1,33 @@ +#include +#include + +///TMP +//#include "common.h" +/// NOT TMP + + +int chamfer_cuda_forward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor dist1, at::Tensor dist2, at::Tensor idx1, at::Tensor idx2); + + +int chamfer_cuda_backward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor gradxyz1, at::Tensor gradxyz2, at::Tensor graddist1, at::Tensor graddist2, at::Tensor idx1, at::Tensor idx2); + + + + +int chamfer_forward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor dist1, at::Tensor dist2, at::Tensor idx1, at::Tensor idx2) { + return chamfer_cuda_forward(xyz1, xyz2, dist1, dist2, idx1, idx2); +} + + +int chamfer_backward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor gradxyz1, at::Tensor gradxyz2, at::Tensor graddist1, + at::Tensor graddist2, at::Tensor idx1, at::Tensor idx2) { + + return chamfer_cuda_backward(xyz1, xyz2, gradxyz1, gradxyz2, graddist1, graddist2, idx1, idx2); +} + + + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("forward", &chamfer_forward, "chamfer forward (CUDA)"); + m.def("backward", &chamfer_backward, "chamfer backward (CUDA)"); +} \ No newline at end of file diff --git a/contrib/HSDF-Net/ChamferDistancePytorch/chamfer5D/dist_chamfer_5D.py b/contrib/HSDF-Net/ChamferDistancePytorch/chamfer5D/dist_chamfer_5D.py new file mode 100644 index 00000000..9cf749d7 --- /dev/null +++ b/contrib/HSDF-Net/ChamferDistancePytorch/chamfer5D/dist_chamfer_5D.py @@ -0,0 +1,82 @@ +from torch import nn +from torch.autograd import Function +import torch +import importlib +import os + +chamfer_found = importlib.find_loader("chamfer_5D") is not None +if not chamfer_found: + ## Cool trick from https://github.com/chrdiller + print("Jitting Chamfer 5D") + cur_path = os.path.dirname(os.path.abspath(__file__)) + build_path = cur_path.replace('chamfer5D', 'tmp') + os.makedirs(build_path, exist_ok=True) + + from torch.utils.cpp_extension import load + chamfer_5D = load(name="chamfer_5D", + sources=[ + "/".join(os.path.abspath(__file__).split('/')[:-1] + ["chamfer_cuda.cpp"]), + "/".join(os.path.abspath(__file__).split('/')[:-1] + ["chamfer5D.cu"]), + ], build_directory=build_path) + print("Loaded JIT 5D CUDA chamfer distance") + +else: + import chamfer_5D + print("Loaded compiled 5D CUDA chamfer distance") + + +# Chamfer's distance module @thibaultgroueix +# GPU tensors only +class chamfer_5DFunction(Function): + @staticmethod + def forward(ctx, xyz1, xyz2): + batchsize, n, dim = xyz1.size() + assert dim==5, "Wrong last dimension for the chamfer distance 's input! Check with .size()" + _, m, dim = xyz2.size() + assert dim==5, "Wrong last dimension for the chamfer distance 's input! 
Check with .size()" + device = xyz1.device + + device = xyz1.device + + dist1 = torch.zeros(batchsize, n) + dist2 = torch.zeros(batchsize, m) + + idx1 = torch.zeros(batchsize, n).type(torch.IntTensor) + idx2 = torch.zeros(batchsize, m).type(torch.IntTensor) + + dist1 = dist1.to(device) + dist2 = dist2.to(device) + idx1 = idx1.to(device) + idx2 = idx2.to(device) + torch.cuda.set_device(device) + + chamfer_5D.forward(xyz1, xyz2, dist1, dist2, idx1, idx2) + ctx.save_for_backward(xyz1, xyz2, idx1, idx2) + return dist1, dist2, idx1, idx2 + + @staticmethod + def backward(ctx, graddist1, graddist2, gradidx1, gradidx2): + xyz1, xyz2, idx1, idx2 = ctx.saved_tensors + graddist1 = graddist1.contiguous() + graddist2 = graddist2.contiguous() + device = graddist1.device + + gradxyz1 = torch.zeros(xyz1.size()) + gradxyz2 = torch.zeros(xyz2.size()) + + gradxyz1 = gradxyz1.to(device) + gradxyz2 = gradxyz2.to(device) + chamfer_5D.backward( + xyz1, xyz2, gradxyz1, gradxyz2, graddist1, graddist2, idx1, idx2 + ) + return gradxyz1, gradxyz2 + + +class chamfer_5DDist(nn.Module): + def __init__(self): + super(chamfer_5DDist, self).__init__() + + def forward(self, input1, input2): + input1 = input1.contiguous() + input2 = input2.contiguous() + return chamfer_5DFunction.apply(input1, input2) diff --git a/contrib/HSDF-Net/ChamferDistancePytorch/chamfer5D/setup.py b/contrib/HSDF-Net/ChamferDistancePytorch/chamfer5D/setup.py new file mode 100644 index 00000000..24292354 --- /dev/null +++ b/contrib/HSDF-Net/ChamferDistancePytorch/chamfer5D/setup.py @@ -0,0 +1,14 @@ +from setuptools import setup +from torch.utils.cpp_extension import BuildExtension, CUDAExtension + +setup( + name='chamfer_5D', + ext_modules=[ + CUDAExtension('chamfer_5D', [ + "/".join(__file__.split('/')[:-1] + ['chamfer_cuda.cpp']), + "/".join(__file__.split('/')[:-1] + ['chamfer5D.cu']), + ]), + ], + cmdclass={ + 'build_ext': BuildExtension + }) \ No newline at end of file diff --git a/contrib/HSDF-Net/ChamferDistancePytorch/chamfer6D/chamfer6D.cu b/contrib/HSDF-Net/ChamferDistancePytorch/chamfer6D/chamfer6D.cu new file mode 100644 index 00000000..bb515303 --- /dev/null +++ b/contrib/HSDF-Net/ChamferDistancePytorch/chamfer6D/chamfer6D.cu @@ -0,0 +1,237 @@ + +#include +#include + +#include +#include + +#include + + + +__global__ void NmDistanceKernel(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i){ + const int batch=2048; + __shared__ float buf[batch*6]; + for (int i=blockIdx.x;ibest){ + result[(i*n+j)]=best; + result_i[(i*n+j)]=best_i; + } + } + __syncthreads(); + } + } +} +// int chamfer_cuda_forward(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i,float * result2,int * result2_i, cudaStream_t stream){ +int chamfer_cuda_forward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor dist1, at::Tensor dist2, at::Tensor idx1, at::Tensor idx2){ + + const auto batch_size = xyz1.size(0); + const auto n = xyz1.size(1); //num_points point cloud A + const auto m = xyz2.size(1); //num_points point cloud B + + NmDistanceKernel<<>>(batch_size, n, xyz1.data(), m, xyz2.data(), dist1.data(), idx1.data()); + NmDistanceKernel<<>>(batch_size, m, xyz2.data(), n, xyz1.data(), dist2.data(), idx2.data()); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) { + printf("error in nnd updateOutput: %s\n", cudaGetErrorString(err)); + //THError("aborting"); + return 0; + } + return 1; + + +} +__global__ void NmDistanceGradKernel(int b,int n,const float * xyz1,int m,const float * xyz2,const float * 
grad_dist1,const int * idx1,float * grad_xyz1,float * grad_xyz2){ + for (int i=blockIdx.x;i>>(batch_size,n,xyz1.data(),m,xyz2.data(),graddist1.data(),idx1.data(),gradxyz1.data(),gradxyz2.data()); + NmDistanceGradKernel<<>>(batch_size,m,xyz2.data(),n,xyz1.data(),graddist2.data(),idx2.data(),gradxyz2.data(),gradxyz1.data()); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) { + printf("error in nnd get grad: %s\n", cudaGetErrorString(err)); + //THError("aborting"); + return 0; + } + return 1; + +} diff --git a/contrib/HSDF-Net/ChamferDistancePytorch/chamfer6D/chamfer_cuda.cpp b/contrib/HSDF-Net/ChamferDistancePytorch/chamfer6D/chamfer_cuda.cpp new file mode 100644 index 00000000..67574e21 --- /dev/null +++ b/contrib/HSDF-Net/ChamferDistancePytorch/chamfer6D/chamfer_cuda.cpp @@ -0,0 +1,33 @@ +#include +#include + +///TMP +//#include "common.h" +/// NOT TMP + + +int chamfer_cuda_forward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor dist1, at::Tensor dist2, at::Tensor idx1, at::Tensor idx2); + + +int chamfer_cuda_backward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor gradxyz1, at::Tensor gradxyz2, at::Tensor graddist1, at::Tensor graddist2, at::Tensor idx1, at::Tensor idx2); + + + + +int chamfer_forward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor dist1, at::Tensor dist2, at::Tensor idx1, at::Tensor idx2) { + return chamfer_cuda_forward(xyz1, xyz2, dist1, dist2, idx1, idx2); +} + + +int chamfer_backward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor gradxyz1, at::Tensor gradxyz2, at::Tensor graddist1, + at::Tensor graddist2, at::Tensor idx1, at::Tensor idx2) { + + return chamfer_cuda_backward(xyz1, xyz2, gradxyz1, gradxyz2, graddist1, graddist2, idx1, idx2); +} + + + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("forward", &chamfer_forward, "chamfer forward (CUDA)"); + m.def("backward", &chamfer_backward, "chamfer backward (CUDA)"); +} \ No newline at end of file diff --git a/contrib/HSDF-Net/ChamferDistancePytorch/chamfer6D/dist_chamfer_6D.py b/contrib/HSDF-Net/ChamferDistancePytorch/chamfer6D/dist_chamfer_6D.py new file mode 100644 index 00000000..0f073f83 --- /dev/null +++ b/contrib/HSDF-Net/ChamferDistancePytorch/chamfer6D/dist_chamfer_6D.py @@ -0,0 +1,82 @@ +from torch import nn +from torch.autograd import Function +import torch +import importlib +import os + +chamfer_found = importlib.find_loader("chamfer_6D") is not None +if not chamfer_found: + ## Cool trick from https://github.com/chrdiller + print("Jitting Chamfer 6D") + cur_path = os.path.dirname(os.path.abspath(__file__)) + build_path = cur_path.replace('chamfer6D', 'tmp') + os.makedirs(build_path, exist_ok=True) + + from torch.utils.cpp_extension import load + chamfer_6D = load(name="chamfer_6D", + sources=[ + "/".join(os.path.abspath(__file__).split('/')[:-1] + ["chamfer_cuda.cpp"]), + "/".join(os.path.abspath(__file__).split('/')[:-1] + ["chamfer6D.cu"]), + ], build_directory=build_path) + print("Loaded JIT 6D CUDA chamfer distance") + +else: + import chamfer_6D + print("Loaded compiled 6D CUDA chamfer distance") + + +# Chamfer's distance module @thibaultgroueix +# GPU tensors only +class chamfer_6DFunction(Function): + @staticmethod + def forward(ctx, xyz1, xyz2): + batchsize, n, dim = xyz1.size() + assert dim==6, "Wrong last dimension for the chamfer distance 's input! Check with .size()" + _, m, dim = xyz2.size() + assert dim==6, "Wrong last dimension for the chamfer distance 's input! 
Check with .size()" + device = xyz1.device + + device = xyz1.device + + dist1 = torch.zeros(batchsize, n) + dist2 = torch.zeros(batchsize, m) + + idx1 = torch.zeros(batchsize, n).type(torch.IntTensor) + idx2 = torch.zeros(batchsize, m).type(torch.IntTensor) + + dist1 = dist1.to(device) + dist2 = dist2.to(device) + idx1 = idx1.to(device) + idx2 = idx2.to(device) + torch.cuda.set_device(device) + + chamfer_6D.forward(xyz1, xyz2, dist1, dist2, idx1, idx2) + ctx.save_for_backward(xyz1, xyz2, idx1, idx2) + return dist1, dist2, idx1, idx2 + + @staticmethod + def backward(ctx, graddist1, graddist2, gradidx1, gradidx2): + xyz1, xyz2, idx1, idx2 = ctx.saved_tensors + graddist1 = graddist1.contiguous() + graddist2 = graddist2.contiguous() + device = graddist1.device + + gradxyz1 = torch.zeros(xyz1.size()) + gradxyz2 = torch.zeros(xyz2.size()) + + gradxyz1 = gradxyz1.to(device) + gradxyz2 = gradxyz2.to(device) + chamfer_6D.backward( + xyz1, xyz2, gradxyz1, gradxyz2, graddist1, graddist2, idx1, idx2 + ) + return gradxyz1, gradxyz2 + + +class chamfer_6DDist(nn.Module): + def __init__(self): + super(chamfer_6DDist, self).__init__() + + def forward(self, input1, input2): + input1 = input1.contiguous() + input2 = input2.contiguous() + return chamfer_6DFunction.apply(input1, input2) diff --git a/contrib/HSDF-Net/ChamferDistancePytorch/chamfer6D/setup.py b/contrib/HSDF-Net/ChamferDistancePytorch/chamfer6D/setup.py new file mode 100644 index 00000000..4b9044c3 --- /dev/null +++ b/contrib/HSDF-Net/ChamferDistancePytorch/chamfer6D/setup.py @@ -0,0 +1,14 @@ +from setuptools import setup +from torch.utils.cpp_extension import BuildExtension, CUDAExtension + +setup( + name='chamfer_6D', + ext_modules=[ + CUDAExtension('chamfer_6D', [ + "/".join(__file__.split('/')[:-1] + ['chamfer_cuda.cpp']), + "/".join(__file__.split('/')[:-1] + ['chamfer6D.cu']), + ]), + ], + cmdclass={ + 'build_ext': BuildExtension + }) \ No newline at end of file diff --git a/contrib/HSDF-Net/ChamferDistancePytorch/chamfer_python.py b/contrib/HSDF-Net/ChamferDistancePytorch/chamfer_python.py new file mode 100644 index 00000000..9c3bc1d9 --- /dev/null +++ b/contrib/HSDF-Net/ChamferDistancePytorch/chamfer_python.py @@ -0,0 +1,44 @@ +import torch + + +def pairwise_dist(x, y): + xx, yy, zz = torch.mm(x, x.t()), torch.mm(y, y.t()), torch.mm(x, y.t()) + rx = xx.diag().unsqueeze(0).expand_as(xx) + ry = yy.diag().unsqueeze(0).expand_as(yy) + P = rx.t() + ry - 2 * zz + return P + + +def NN_loss(x, y, dim=0): + dist = pairwise_dist(x, y) + values, indices = dist.min(dim=dim) + return values.mean() + + +def batched_pairwise_dist(a, b): + x, y = a.double(), b.double() + bs, num_points_x, points_dim = x.size() + bs, num_points_y, points_dim = y.size() + + xx = torch.pow(x, 2).sum(2) + yy = torch.pow(y, 2).sum(2) + zz = torch.bmm(x, y.transpose(2, 1)) + rx = xx.unsqueeze(1).expand(bs, num_points_y, num_points_x) # Diagonal elements xx + ry = yy.unsqueeze(1).expand(bs, num_points_x, num_points_y) # Diagonal elements yy + P = rx.transpose(2, 1) + ry - 2 * zz + return P + +def distChamfer(a, b): + """ + :param a: Pointclouds Batch x nul_points x dim + :param b: Pointclouds Batch x nul_points x dim + :return: + -closest point on b of points from a + -closest point on a of points from b + -idx of closest point on b of points from a + -idx of closest point on a of points from b + Works for pointcloud of any dimension + """ + P = batched_pairwise_dist(a, b) + return torch.min(P, 2)[0].float(), torch.min(P, 1)[0].float(), torch.min(P, 2)[1].int(), torch.min(P, 
1)[1].int() + diff --git a/contrib/HSDF-Net/ChamferDistancePytorch/fscore.py b/contrib/HSDF-Net/ChamferDistancePytorch/fscore.py new file mode 100644 index 00000000..265378b1 --- /dev/null +++ b/contrib/HSDF-Net/ChamferDistancePytorch/fscore.py @@ -0,0 +1,17 @@ +import torch + +def fscore(dist1, dist2, threshold=0.001): + """ + Calculates the F-score between two point clouds with the corresponding threshold value. + :param dist1: Batch, N-Points + :param dist2: Batch, N-Points + :param th: float + :return: fscore, precision, recall + """ + # NB : In this depo, dist1 and dist2 are squared pointcloud euclidean distances, so you should adapt the threshold accordingly. + precision_1 = torch.mean((dist1 < threshold).float(), dim=1) + precision_2 = torch.mean((dist2 < threshold).float(), dim=1) + fscore = 2 * precision_1 * precision_2 / (precision_1 + precision_2) + fscore[torch.isnan(fscore)] = 0 + return fscore, precision_1, precision_2 + diff --git a/contrib/HSDF-Net/ChamferDistancePytorch/unit_test.py b/contrib/HSDF-Net/ChamferDistancePytorch/unit_test.py new file mode 100644 index 00000000..13af6a3a --- /dev/null +++ b/contrib/HSDF-Net/ChamferDistancePytorch/unit_test.py @@ -0,0 +1,69 @@ +import torch, time +import chamfer2D.dist_chamfer_2D +import chamfer3D.dist_chamfer_3D +import chamfer5D.dist_chamfer_5D +import chamfer_python + +cham2D = chamfer2D.dist_chamfer_2D.chamfer_2DDist() +cham3D = chamfer3D.dist_chamfer_3D.chamfer_3DDist() +cham5D = chamfer5D.dist_chamfer_5D.chamfer_5DDist() + +from torch.autograd import Variable +from fscore import fscore + +def test_chamfer(distChamfer, dim): + points1 = torch.rand(4, 100, dim).cuda() + points2 = torch.rand(4, 200, dim, requires_grad=True).cuda() + dist1, dist2, idx1, idx2= distChamfer(points1, points2) + + loss = torch.sum(dist1) + loss.backward() + + mydist1, mydist2, myidx1, myidx2 = chamfer_python.distChamfer(points1, points2) + d1 = (dist1 - mydist1) ** 2 + d2 = (dist2 - mydist2) ** 2 + assert ( + torch.mean(d1) + torch.mean(d2) < 0.00000001 + ), "chamfer cuda and chamfer normal are not giving the same results" + + xd1 = idx1 - myidx1 + xd2 = idx2 - myidx2 + assert ( + torch.norm(xd1.float()) + torch.norm(xd2.float()) == 0 + ), "chamfer cuda and chamfer normal are not giving the same results" + print(f"fscore :", fscore(dist1, dist2)) + print("Unit test passed") + + +def timings(distChamfer, dim): + p1 = torch.rand(32, 2000, dim).cuda() + p2 = torch.rand(32, 1000, dim).cuda() + print("Timings : Start CUDA version") + start = time.time() + num_it = 100 + for i in range(num_it): + points1 = Variable(p1, requires_grad=True) + points2 = Variable(p2) + mydist1, mydist2, idx1, idx2 = distChamfer(points1, points2) + loss = torch.sum(mydist1) + loss.backward() + print(f"Ellapsed time forward backward is {(time.time() - start)/num_it} seconds.") + + + print("Timings : Start Pythonic version") + start = time.time() + for i in range(num_it): + points1 = Variable(p1, requires_grad=True) + points2 = Variable(p2) + mydist1, mydist2, idx1, idx2 = chamfer_python.distChamfer(points1, points2) + loss = torch.sum(mydist1) + loss.backward() + print(f"Ellapsed time forward backward is {(time.time() - start)/num_it} seconds.") + + + +dims = [2,3,5] +for i,cham in enumerate([cham2D, cham3D, cham5D]): + print(f"testing Chamfer {dims[i]}D") + test_chamfer(cham, dims[i]) + timings(cham, dims[i]) diff --git a/contrib/HSDF-Net/LICENSE b/contrib/HSDF-Net/LICENSE new file mode 100644 index 00000000..552fd220 --- /dev/null +++ b/contrib/HSDF-Net/LICENSE @@ -0,0 +1,21 
@@ +MIT License + +Copyright (c) 2023 IGLICT + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/contrib/HSDF-Net/NDF_combine.py b/contrib/HSDF-Net/NDF_combine.py new file mode 100644 index 00000000..df2f9207 --- /dev/null +++ b/contrib/HSDF-Net/NDF_combine.py @@ -0,0 +1,192 @@ +from torch.distributions.utils import probs_to_logits +from trimesh.util import sigfig_round +import models.local_model as model +from models.data import dataloader_garments, voxelized_data_shapenet + +from models import generation +import torch +from torch.nn import functional as F +import mesh_to_sdf +import numpy as np + +chunk_num = 50 +cls_threshold=0.2 +cls_logits_threshold = np.log(cls_threshold) - np.log(1. - cls_threshold) +usesign_threshold = 0.005 + +def rot_YZ(points): + points_rot = points.copy() + points_rot[:, 1], points_rot[:, 2] = points[:, 2], points[:, 1] + return points_rot + +def to_grid(points): + grid_points = points.copy() + grid_points[:, 0], grid_points[:, 2] = points[:, 2], points[:, 0] + + return 2 * grid_points + +def from_grid(grid_points): + points = grid_points.copy() + points[:, 0], points[:, 2] = grid_points[:, 2], grid_points[:, 0] + + return 0.5 * points + +# 'test', 'val', 'train' +def loadNDF(index, pointcloud_samples, exp_name, data_dir, split_file, sample_distribution, sample_sigmas, res, mode = 'test'): + + global encoding + global net + global device + + net = model.NDF() + + device = torch.device("cuda") + + ''' + if 'garments' in exp_name.lower() : + + dataset = dataloader_garments.VoxelizedDataset(mode = mode, data_path = data_dir, split_file = split_file, + res = res, density =0, pointcloud_samples = pointcloud_samples, + sample_distribution=sample_distribution, + sample_sigmas=sample_sigmas, + ) + + + + checkpoint = 'checkpoint_127h:6m:33s_457593.9149734974' + + generator = generation.Generator(net,exp_name, checkpoint = checkpoint, device = device) + + if 'cars' in exp_name.lower() : + + dataset = voxelized_data_shapenet.VoxelizedDataset( mode = mode, res = res, pointcloud_samples = pointcloud_samples, + data_path = data_dir, split_file = split_file, + sample_distribution = sample_distribution, sample_sigmas = sample_sigmas, + batch_size = 1, num_sample_points = 1024, num_workers = 1 + ) + + + + checkpoint = 'checkpoint_108h:5m:50s_389150.3971107006' + + generator = generation.Generator(net, exp_name, checkpoint=checkpoint, device=device) + ''' + + dataset = voxelized_data_shapenet.VoxelizedDataset( mode = mode, res = res, pointcloud_samples = pointcloud_samples, + data_path = 
data_dir, split_file = split_file, + sample_distribution = sample_distribution, sample_sigmas = sample_sigmas, + batch_size = 1, num_sample_points = 1024, num_workers = 1 + ) + + + + generator = generation.Generator(net, exp_name, device=device) + + example = dataset[index] + + print('Object: ',example['path']) + inputs = torch.from_numpy(example['inputs']).unsqueeze(0).to(device) # lead inputs and samples including one batch channel + + for param in net.parameters(): + param.requires_grad = False + + encoding = net.encoder(inputs) + + return example['path'] + + + +def predictRotNDF(points): + + points = rot_YZ(points) + points = to_grid(points) + points = torch.from_numpy(points).unsqueeze(0).float().to(device) + + points_chunk = torch.chunk(points, chunk_num, dim=1) + + ndf = np.zeros((0)) + + for point in points_chunk: + point = point.detach() + point.requires_grad = True + + dist = net.decoder(point,*encoding)[0] + dist = torch.clamp(dist, max=0.1) + #logits = p_r.logits + + ''' + mask = distcls_logits_threshold).float()*2-1 + #sign_selected = ((grid_logit_grad * grid_dis_grad).sum(-1)>0).float()*2-1 + + dist[mask] = dist_s*sign_selected + + #sign = torch.ones_like(sign_all) + + #sign[dist + +Recent advances in neural implicit representation have set a new state of the art in 3D modeling and reconstruction by breaking the previous barrier in resolution and topology. However, as such approaches rely on the signed distance function (SDF) which divides the space into inside and outside of the object, they are limited to representing closed shapes. To lift the limitation, methods based on unsigned distance function (UDF) are proposed such that a much broader class of shapes containing open surfaces can be effectively represented and learned via deep neural networks. +However, as UDF is signless, directly applying the iso-surface extracting technique, e.g. the Marching Cubes algorithm, would convert all open surfaces into the closed mesh. To generate open structures, these approaches have to convert the resulting UDF field into discrete points and then apply the Ball-Pivoting algorithm (BPA) to obtain the meshing result. Nonetheless, the BPA technique is prone to introduce self-intersections and disconnected surface patches with inconsistent normals. In addition, BPA is highly sensitive to the input parameters and often requires per-shape parameter tuning in order to generate a complete meshing result. This hinders UDF-based approaches from being practically used in real-world applications as mesh remains the prominent standard for modeling and rendering in both industry and academia. +To address the above issue, we present a novel learnable implicit representation, named Hybrid Sign and Distance Function (HSDF), that can faithfully represent complex geometry containing both closed and open surfaces, while being compatible with off-the-shelf iso-surface extraction methods, e.g. the Marching Cubes algorithm, for easy and high-quality field-to-mesh conversion. The key idea of HSDF is to integrate the advantages of both SDF and UDF while avoiding their adversarial impacts. We empirically find that the learning of UDF is quite robust and can generalize well to novel data. Therefore, to inherit the benefit of UDF and overcome its limitation, we propose to learn an additional sign field in addition to UDF via a sign predictor. Unlike traditional SDF, HSDF is able to locate the surface of interest before performing level surface extraction. 
We achieve this by generating surface points via the gradient field of the unsigned distance function, following NDF. Hence, we can create local SDFs by multiplying the UDF with the sign field, and mesh complex shapes containing both closed and open surfaces by incorporating an adaptive meshing algorithm that only instantiates the regions containing surface points into a polygon mesh.
+
+## Installation
+
+The code is tested with Ubuntu 18.04, Python 3.8.12, Jittor 1.3.5.21, CUDA 11.1 and cuDNN 8005.
+
+Set Up Environment
+
+ Install Anaconda, then run `conda env create -f environment.yml` to build the environment and `conda activate HSDF_env` to activate it.
+
+For the Jittor installation, please refer to [this link](https://cg.cs.tsinghua.edu.cn/jittor/download).
+
+## Experiment Preparation
+
+First, create a configuration file in the folder `configs/`, using `configs/example*.txt` as a reference; see `configs/config_loader.py` for a detailed explanation of all configuration options.
+
+Next, download the MGN data from the official website: https://datasets.d2.mpi-inf.mpg.de/MultiGarmentNetwork/Multi-Garmentdataset.zip
+
+Next, prepare the data for HSDF using
+```
+python dataprocessing/preprocess.py --config configs/example.txt
+```
+
+and generate a random test/training/validation split of the data using
+```
+python dataprocessing/create_split.py --config configs/example.txt
+```
+
+replacing `configs/example.txt` in the commands with the desired configuration.
+
+> Note: The preprocessing with `dataprocessing/preprocess.py` can be time-intensive. In case multiple compute machines are
+> available, the script can be run in parallel on those machines by splitting the overall files to preprocess into
+> chunks. For this, use: \
+> `python dataprocessing/preprocess.py --num_chunks X --current_chunk Y` \
+> where X is the desired number of chunks (e.g. the number of available machines) and Y is the chunk to be processed
+> by this execution of the command. Y needs to be an integer between 0 and X-1, inclusive. In case you have SLURM
+> available you can adapt `slurm_scripts/run_preprocessing.sh` and use it via
+> ```
+> sbatch slurm_scripts/run_preprocessing.sh
+> ```
+
+## Training
+
+ Train the model with `python train.py --config ./configs/example.txt`.
+
+## Generate model
+
+ Download pretrained models from [this link](https://mailsucaseducn-my.sharepoint.com/:u:/g/personal/yangjie161_mails_ucas_edu_cn/Ee6jnWgGLrpOru32pOSOHQoBRTHBfSnnB5KoPkrgtuBW3Q?e=u6LqZd),
+ then run `python generate.py --config ./configs/example.txt`.
+
+If you prefer the PyTorch version, please switch to [this branch](https://github.com/IGLICT/HSDF-Net/tree/pytorch).
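## Sign and distance fusion (illustrative sketch)

For intuition about the method described in the abstract, the key step is to turn the unsigned distance field into a locally signed one by combining it with the predicted sign field; the thresholds below mirror the constants defined in `NDF_combine.py` (`cls_threshold`, `usesign_threshold`). This is only a hedged, illustrative sketch: the function name, signature, and exact fusion rule are assumptions for exposition, not the repository's actual API.

```python
import numpy as np

def fuse_sign_and_udf(udf, sign_logits, cls_threshold=0.2, near_band=0.005):
    """Illustrative HSDF-style fusion. `udf` and `sign_logits` are arrays of the
    same shape sampled on a query grid; names and the exact rule are assumptions."""
    # Probability threshold -> logit threshold, as in NDF_combine.py: log(t) - log(1 - t).
    logit_thr = np.log(cls_threshold) - np.log(1.0 - cls_threshold)
    sign = np.where(sign_logits > logit_thr, 1.0, -1.0)
    hsdf = np.asarray(udf, dtype=np.float64).copy()
    # Only apply the predicted sign close to the surface; far from it the unsigned
    # magnitude is kept, so no spurious zero crossings are introduced.
    near = hsdf < near_band
    hsdf[near] = hsdf[near] * sign[near]
    return hsdf
```

A field assembled this way has zero crossings only near observed surface points, so an off-the-shelf iso-surface extractor such as Marching Cubes at level zero, restricted to the regions that actually contain surface points, yields a mesh in which open boundaries are preserved.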
+ +## Citation + +If you find our work useful in your research, please consider citing: + + @inproceedings{Wang22HSDF, + author = {Wang, Li and Yang, Jie and Chen, Wei-Kai and Meng, Xiao-Xu and Yang, Bo and Li, Jin-Tao and Gao, Lin}, + title = {HSDF: Hybrid Sign and Distance Field for Modeling Surfaces with Arbitrary Topologies }, + booktitle={Neural Information Processing Systems (NeurIPS)}, + year = {2022}, + } + + @article{hu2020jittor, + title={Jittor: a novel deep learning framework with meta-operators and unified graph execution}, + author={Hu, Shi-Min and Liang, Dun and Yang, Guo-Ye and Yang, Guo-Wei and Zhou, Wen-Yang}, + journal={Science China Information Sciences}, + volume={63}, + number={222103}, + pages={1--21}, + year={2020} + } diff --git a/contrib/HSDF-Net/configs/config_loader.py b/contrib/HSDF-Net/configs/config_loader.py new file mode 100644 index 00000000..c0ad9bad --- /dev/null +++ b/contrib/HSDF-Net/configs/config_loader.py @@ -0,0 +1,156 @@ +from ast import parse +import configargparse +import numpy as np +import os + +def str2bool(inp): + return inp.lower() in 'true' + +def config_parser(): + parser = configargparse.ArgumentParser() + + # Experiment Setup + parser.add_argument('--config', is_config_file=True, default='configs/shapenet_cars.txt', + help='config file path') + parser.add_argument("--exp_name", type=str, default=None, + help='Experiment name, used as folder name for the experiment. If left blank, a \ + name will be auto generated based on the configuration settings.') + parser.add_argument("--data_dir", type=str, + help='input data directory') + parser.add_argument("--input_data_glob", type=str, + help='glob expression to find raw input files') + parser.add_argument("--split_file", type=str, + help='Path to read and write the data split file. Needs to end with ".npz"') + + # Training Data Parameters + parser.add_argument("--sample_std_dev", action='append', type=float, + help='Standard deviations of gaussian samples. \ + Used for displacing surface points to sample the distance field.') + parser.add_argument("--sample_ratio", action='append', type=float, + help='Ratio of standard deviations for samples used for training. \ + Needs to have the same len as sample_std with floats between 0-1 \ + and summing to 1.') + parser.add_argument("--bb_min", default=-0.5, type=float, + help='Training and testing shapes are normalized to be in a common bounding box.\ + This value defines the min value in x,y and z for the bounding box.') + parser.add_argument("--bb_max", default=0.5, type=float, + help='Training and testing shapes are normalized to be in a common bounding box.\ + This value defines the max value in x,y and z for the bounding box.') + parser.add_argument("--input_res", type=int, default=256, + help='Training and testing shapes are normalized to be in a common bounding box.\ + This value defines the max value in x,y and z for the bounding box.') + parser.add_argument("--num_points", type=int, default=10000, + help='Number of points sampled from each ground truth shape.') + + # apex + parser.add_argument("--local_rank", type=int, default=0) + + # Preprocessing - Multiprocessing + parser.add_argument("--num_chunks", type=int, default=1, + help='The preprocessing can be distributed over num_chunks multiple machines.\ + For this the raw files are split into num_chunks chunks. \ + Default is preprocessing on a single machine.') + parser.add_argument("--current_chunk", type=int, default=0, + help='Tells the script which chunk it should process. 
\ + Value between 0 till num_chunks-1.') + parser.add_argument("--num_cpus", type=int, default=-1, + help='Number of cpu cores to use for running the script. \ + Default is -1, that is, using all available cpus.') + + # Creating a data test/train/validation split + parser.add_argument('--class_folders', type=str, default=None, + help='If set to None, the split is created by creating a random sample from all input files. ' + 'If not None, the split is created per class of objects. Objects of the same class need to ' + 'be in a common parent folder for this. Variable class_folder is interpreted as glob ' + 'pattern, suffix of data_dir - i.e. data_dir + class_folder, e.g. class_folder="/*/".') + + parser_nval = parser.add_mutually_exclusive_group() + parser_nval.add_argument('--n_val', type=int, + help='Size of validation set.') + parser_nval.add_argument('--r_val', type=float, default=0.1, + help='Relative size of validation set.') + + parser_ntest = parser.add_mutually_exclusive_group() + parser_ntest.add_argument('--n_test', type=int, + help='Size of test set.') + parser_ntest.add_argument('--r_test', type=float, default=0.2, + help='Relative size of test set.') + + # Generation + parser.add_argument("--num_sample_points_generation", type=int, default=50000, + help='Number of point samples per object provided to the NDF network during generation.\ + Influences generation speed (larger batches result in faster generation) but also GPU \ + memory usage (higher values need more memory). Tip: choose largest possible value on GPU.') + parser.add_argument("--threshold", type=float, default=0.2, + help='threshold follow conv-onet') + + # Training + parser.add_argument("--num_sample_points_training", type=int, default=50000, + help='Number of point samples per object provided to the NDF network during training.\ + Influences training speed (larger batches result in shorter epochs) but also GPU \ + memory usage (higher values need more memory). Needs to be balanced with batch_size.') + parser.add_argument("--batch_size", type=int, default=4, + help='Number of objects provided to the NDF network in one batch during training.\ + Influences training speed (larger batches result in shorter epochs) but also GPU \ + memory usage (higher values need more memory). Needs to be balanced with \ + num_sample_points_training') + parser.add_argument("--num_epochs", type=int, default=1500, + help='Stopping citron for duration of training. 
Model converges much earlier: model convergence\ + can be checked via tensorboard and is logged within the experiment folder.') + parser.add_argument("--lr", type=float, default=1e-6, + help='Learning rate used during training.') + parser.add_argument("--optimizer", type=str, default='Adam', + help='Optimizer used during training.') + + ## Rendering arguments + parser.add_argument("--pc_samples", type=int, help='input pointcloud size') + parser.add_argument("--index", type=int, help='index to be rendered') + + ### + parser.add_argument("--size", type=int, help="the size of image", default=512) + parser.add_argument("--max_depth", type=float, help="the max depth of projected rays", default=2) + parser.add_argument("--alpha", type=float, help="the value by which the stepping distance should be multiplied", + default=0.6) + parser.add_argument("--step_back", type=float, default=0.005, help="the value by which we step back after stopping criteria met") + parser.add_argument("--epsilon", type=float, default=0.0026, help="epsilon ball - stopping criteria") + parser.add_argument("--screen_bound", type=float, default=0.4) + parser.add_argument("--screen_depth", type=float, default=-1) + + parser.add_argument('--cam_position', nargs='+', type=float, help='3D position of camera', default=[0, 0, -1]) + parser.add_argument('--light_position', nargs='+', type=float, help='3D position of light source', + default=[-1, -1, -1]) + parser.add_argument("--cam_orientation", nargs='+', type=float, + help="Camera Orientation in xyz euler angles (degrees)", default=[180.0, 0.0, -180.0]) + + parser.add_argument("--folder", type=str, default='./save', + help="location where images are to be saved") + parser.add_argument("--shade", type=str2bool, default=True, help="whether to save shade image") + parser.add_argument("--depth", type=str2bool, default=True, help="whether to save depth image") + parser.add_argument("--normal", type=str2bool, default=True, help="whether to save normal image") + + parser.add_argument("--debug_mode", type=str2bool, default=True, + help="to visualize everything in debug mode or not") + + + return parser + + +def get_config(): + parser = config_parser() + cfg = parser.parse_args() + + cfg.sample_ratio = np.array(cfg.sample_ratio) + cfg.sample_std_dev = np.array(cfg.sample_std_dev) + + assert np.sum(cfg.sample_ratio) == 1 + assert np.any(cfg.sample_ratio < 0) == False + assert len(cfg.sample_ratio) == len(cfg.sample_std_dev) + + if cfg.exp_name is None: + cfg.exp_name = 'data-{}dist-{}sigmas-{}res-{}'.format( + os.path.basename(cfg.data_dir), + ''.join(str(e) + '_' for e in cfg.sample_ratio), + ''.join(str(e) + '_' for e in cfg.sample_std_dev), + cfg.input_res) + + return cfg diff --git a/contrib/HSDF-Net/configs/example1.txt b/contrib/HSDF-Net/configs/example1.txt new file mode 100644 index 00000000..e19d40cb --- /dev/null +++ b/contrib/HSDF-Net/configs/example1.txt @@ -0,0 +1,10 @@ +exp_name = example1 +data_dir = datasets/clothing/data/0/ +split_file = datasets/clothing/data/split_clothing_overfit.npz +input_data_glob = /*/model.obj +sample_std_dev = [0.08, 0.02, 0.003] +sample_ratio = [0.01, 0.49, 0.5] +lr = 1e-6 +num_epochs = 200000000 + +num_points = 1000 \ No newline at end of file diff --git a/contrib/HSDF-Net/configs/example2.txt b/contrib/HSDF-Net/configs/example2.txt new file mode 100644 index 00000000..72928331 --- /dev/null +++ b/contrib/HSDF-Net/configs/example2.txt @@ -0,0 +1,18 @@ +exp_name = example2 +data_dir = datasets/mixamo_data/data/0/ +split_file = 
datasets/mixamo_data/data/split_mixamo_overfit.npz +input_data_glob = /*/model.obj +sample_std_dev = [0.08, 0.02, 0.003] +sample_ratio = [0.01, 0.49, 0.5] +lr = 1e-6 +num_epochs = 200000000 + +index = 0 +pc_samples = 10000 + +cam_position = [0, 2, 0] +cam_orientation = [-90.0, 180.0, 0.0] +debug_mode = True +screen_depth = -1 +max_depth = 3 +step_back = 0.0 \ No newline at end of file diff --git a/contrib/HSDF-Net/dataprocessing/boundary_sampling.py b/contrib/HSDF-Net/dataprocessing/boundary_sampling.py new file mode 100644 index 00000000..6b073a3b --- /dev/null +++ b/contrib/HSDF-Net/dataprocessing/boundary_sampling.py @@ -0,0 +1,64 @@ +import trimesh +import igl +import numpy as np +import glob +import multiprocessing as mp +from multiprocessing import Pool +import os +import traceback +from functools import partial +import random +import gc +import configs.config_loader as cfg_loader +from mesh_to_sdf import mesh_to_sdf + +# number of distance field samples generated per object +sample_num = 1000000 + +def boundary_sampling(path, sigma): + try: + + out_path = os.path.dirname(path) + file_name = os.path.splitext(os.path.basename(path))[0] + input_file = os.path.join(out_path,file_name + '_scaled.off') + out_file = out_path + '/boundary_{}_samples.npz'.format( sigma) + + if os.path.exists(out_file): + print('Exists: {}'.format(out_file)) + return + + print('processing {}'.format(input_file)) + + mesh = trimesh.load(input_file) + points = mesh.sample(sample_num) + + if sigma == 0: + boundary_points = points + else: + boundary_points = points + sigma * np.random.randn(sample_num, 3) + + grid_coords = boundary_points.copy() + grid_coords[:, 0], grid_coords[:, 2] = boundary_points[:, 2], boundary_points[:, 0] + + grid_coords = 2 * grid_coords + + #print('before mesh_to_sdf') + + if sigma == 0: + df = np.zeros(boundary_points.shape[0]) + else: + #df = np.abs(igl.signed_distance(boundary_points, mesh.vertices, mesh.faces)[0]) + df = igl.signed_distance(boundary_points, mesh.vertices, mesh.faces, True)[0] + # df = mesh_to_sdf(mesh, boundary_points, sign_method='normal', surface_point_method='sample') + + #print('after mesh_to_sdf') + + np.savez(out_file, points=boundary_points, df = df, grid_coords= grid_coords) + print('Finished: {}'.format(path)) + + except: + print('Error with {}: {}'.format(path, traceback.format_exc())) + + + del mesh, df, boundary_points, grid_coords, points + gc.collect() diff --git a/contrib/HSDF-Net/dataprocessing/convert_to_scaled_off.py b/contrib/HSDF-Net/dataprocessing/convert_to_scaled_off.py new file mode 100644 index 00000000..0735f370 --- /dev/null +++ b/contrib/HSDF-Net/dataprocessing/convert_to_scaled_off.py @@ -0,0 +1,71 @@ +import os +import glob +import multiprocessing as mp +from multiprocessing import Pool +import trimesh +import random +import sys +import traceback +import logging +import configs.config_loader as cfg_loader + + +logger = logging.getLogger() +logger.setLevel(logging.ERROR) + +class HiddenPrints: + def __enter__(self): + self._original_stdout = sys.stdout + sys.stdout = open(os.devnull, 'w') + + def __exit__(self, exc_type, exc_val, exc_tb): + sys.stdout.close() + sys.stdout = self._original_stdout + + +def as_mesh(scene_or_mesh): + """ + Convert a possible scene to a mesh. + + If conversion occurs, the returned mesh has only vertex and face data. 
+ Suggested by https://github.com/mikedh/trimesh/issues/507 + """ + if isinstance(scene_or_mesh, trimesh.Scene): + if len(scene_or_mesh.geometry) == 0: + mesh = None # empty scene + else: + # we lose texture information here + mesh = trimesh.util.concatenate( + tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces) + for g in scene_or_mesh.geometry.values())) + else: + assert(isinstance(scene_or_mesh, trimesh.Trimesh)) + mesh = scene_or_mesh + return mesh + + +def to_off(path): + + file_path = os.path.dirname(path) + file_name = os.path.splitext(os.path.basename(path))[0] + output_file = os.path.join(file_path,file_name + '_scaled.off') + + if os.path.exists(output_file): + print('Exists: {}'.format(output_file)) + return + + try: + with HiddenPrints(): + input = trimesh.load(path) + mesh = as_mesh(input) + total_size = (mesh.bounds[1] - mesh.bounds[0]).max() + centers = (mesh.bounds[1] + mesh.bounds[0]) / 2 + + mesh.apply_translation(-centers) + mesh.apply_scale(1 / total_size) + mesh.export(output_file) + + print('Finished: {}'.format(path)) + except: + print('Error with {}: {}'.format(path, traceback.format_exc())) + diff --git a/contrib/HSDF-Net/dataprocessing/create_split.py b/contrib/HSDF-Net/dataprocessing/create_split.py new file mode 100644 index 00000000..54d8347d --- /dev/null +++ b/contrib/HSDF-Net/dataprocessing/create_split.py @@ -0,0 +1,64 @@ +import os +import random +from glob import glob +import numpy as np +import configs.config_loader as cfg_loader +from collections import defaultdict + +cfg = cfg_loader.get_config() + +train_all = [] +test_all = [] +val_all = [] + +# parse all input files +print('Finding raw files for preprocessing.') +paths = glob( cfg.data_dir + cfg.input_data_glob) +paths = [os.path.dirname(p) for p in paths] + +# sort according to class folders (optional) +if cfg.class_folders is None: + res = {'single_class': paths} +else: + class_paths = glob( cfg.data_dir + cfg.class_folders) + res = defaultdict(list) + for path in paths: + for class_path in class_paths: + if path.startswith(class_path): + res[class_path].append(path) + + +for class_path in res.keys(): + + all_samples = res[class_path] + + random.shuffle(all_samples) + + # Number of examples + n_total = len(all_samples) + + if cfg.n_val is not None: + n_val = cfg.n_val + else: + n_val = int(cfg.r_val * n_total) + + if cfg.n_test is not None: + n_test = cfg.n_test + else: + n_test = int(cfg.r_test * n_total) + + if n_total < n_val + n_test: + print('Error: too few training samples.') + exit() + + n_train = n_total - n_val - n_test + + assert(n_train >= 0) + + # Select elements + train_all.extend( all_samples[:n_train]) + val_all.extend( all_samples[n_train:n_train+n_val]) + test_all.extend( all_samples[n_train+n_val:]) + + +np.savez(cfg.data_dir + f'/../split_{cfg.exp_name}.npz', train = train_all, test = test_all, val = val_all) \ No newline at end of file diff --git a/contrib/HSDF-Net/dataprocessing/preprocess.py b/contrib/HSDF-Net/dataprocessing/preprocess.py new file mode 100644 index 00000000..045312ee --- /dev/null +++ b/contrib/HSDF-Net/dataprocessing/preprocess.py @@ -0,0 +1,48 @@ +from dataprocessing.convert_to_scaled_off import to_off +from dataprocessing.boundary_sampling import boundary_sampling +import dataprocessing.voxelized_pointcloud_sampling as voxelized_pointcloud_sampling +from glob import glob +import configs.config_loader as cfg_loader +import multiprocessing as mp +from multiprocessing import Pool +import numpy as np +from functools import partial + +cfg = 
cfg_loader.get_config() + + +print('Finding raw files for preprocessing.') +paths = glob( cfg.data_dir + cfg.input_data_glob) +paths = sorted(paths) + +chunks = np.array_split(paths,cfg.num_chunks) +paths = chunks[cfg.current_chunk] + + +if cfg.num_cpus == -1: + num_cpus = mp.cpu_count() + print('cpu count: {}'.format(num_cpus)) +else: + num_cpus = cfg.num_cpus + +def multiprocess(func): + p = Pool(num_cpus) + p.map(func, paths) + p.close() + p.join() + +print('Start scaling.') +multiprocess(to_off) + +print('Start distance field sampling.') +for sigma in cfg.sample_std_dev: + print(f'Start distance field sampling with sigma: {sigma}.') + #multiprocess(partial(boundary_sampling, sigma = sigma)) + # this process is multi-processed for each path: IGL parallelizes the distance field computation of multiple points. + for path in paths: + boundary_sampling(path, sigma) + +print('Start voxelized pointcloud sampling.') +voxelized_pointcloud_sampling.init(cfg) +multiprocess(voxelized_pointcloud_sampling.voxelized_pointcloud_sampling) + diff --git a/contrib/HSDF-Net/dataprocessing/voxelized_pointcloud_sampling.py b/contrib/HSDF-Net/dataprocessing/voxelized_pointcloud_sampling.py new file mode 100644 index 00000000..3ae598bd --- /dev/null +++ b/contrib/HSDF-Net/dataprocessing/voxelized_pointcloud_sampling.py @@ -0,0 +1,54 @@ +from scipy.spatial import cKDTree as KDTree +import numpy as np +import trimesh +import os +import traceback + +kdtree, grid_points, cfg = None, None, None +def voxelized_pointcloud_sampling(path): + try: + + out_path = os.path.dirname(path) + file_name = os.path.splitext(os.path.basename(path))[0] + input_file = os.path.join(out_path,file_name + '_scaled.off') + out_file = out_path + '/voxelized_point_cloud_{}res_{}points.npz'.format(cfg.input_res, cfg.num_points) + + + if os.path.exists(out_file): + print(f'Exists: {out_file}') + return + + + + mesh = trimesh.load(input_file) + point_cloud = mesh.sample(cfg.num_points) + + occupancies = np.zeros(len(grid_points), dtype=np.int8) + + _, idx = kdtree.query(point_cloud) + occupancies[idx] = 1 + + compressed_occupancies = np.packbits(occupancies) + + np.savez(out_file, point_cloud=point_cloud, compressed_occupancies = compressed_occupancies, bb_min = cfg.bb_min, bb_max = cfg.bb_max, res = cfg.input_res) + print('Finished: {}'.format(path)) + + except Exception as err: + print('Error with {}: {}'.format(path, traceback.format_exc())) + +def init(cfg_param): + global kdtree, grid_points, cfg + cfg = cfg_param + grid_points = create_grid_points_from_bounds(cfg.bb_min, cfg.bb_max, cfg.input_res) + kdtree = KDTree(grid_points) + +def create_grid_points_from_bounds(minimun, maximum, res): + x = np.linspace(minimun, maximum, res) + X, Y, Z = np.meshgrid(x, x, x, indexing='ij') + X = X.reshape((np.prod(X.shape),)) + Y = Y.reshape((np.prod(Y.shape),)) + Z = Z.reshape((np.prod(Z.shape),)) + + points_list = np.column_stack((X, Y, Z)) + del X, Y, Z, x + return points_list \ No newline at end of file diff --git a/contrib/HSDF-Net/environment.yml b/contrib/HSDF-Net/environment.yml new file mode 100644 index 00000000..1aecc1e6 --- /dev/null +++ b/contrib/HSDF-Net/environment.yml @@ -0,0 +1,171 @@ +name: HSDF_env +channels: + - conda-forge + - defaults +dependencies: + - _libgcc_mutex=0.1=main + - _openmp_mutex=4.5=1_gnu + - ca-certificates=2021.10.8=ha878542_0 + - certifi=2021.10.8=py38h578d9bd_1 + - igl=2.2.1=py38h52fb889_1 + - ld_impl_linux-64=2.35.1=h7274673_9 + - libblas=3.9.0=11_linux64_openblas + - libcblas=3.9.0=11_linux64_openblas + - 
libffi=3.3=he6710b0_2 + - libgcc-ng=9.3.0=h5101ec6_17 + - libgfortran-ng=11.2.0=h69a702a_14 + - libgfortran5=11.2.0=h5c6108e_14 + - libgomp=9.3.0=h5101ec6_17 + - liblapack=3.9.0=11_linux64_openblas + - libopenblas=0.3.17=pthreads_h8fe5266_1 + - libstdcxx-ng=9.3.0=hd4cf53a_17 + - ncurses=6.3=h7f8727e_2 + - openssl=1.1.1n=h7f8727e_0 + - python=3.8.12=h12debd9_0 + - python_abi=3.8=2_cp38 + - readline=8.1.2=h7f8727e_1 + - sqlite=3.37.2=hc218d9a_0 + - tk=8.6.11=h1ccaba5_0 + - wheel=0.37.1=pyhd3eb1b0_0 + - xz=5.2.5=h7b6447c_0 + - zlib=1.2.11=h7f8727e_4 + - pip: + - absl-py==1.0.0 + - addict==2.4.0 + - anyio==3.6.1 + - argon2-cffi==21.3.0 + - argon2-cffi-bindings==21.2.0 + - asttokens==2.0.5 + - astunparse==1.6.3 + - attrs==22.1.0 + - babel==2.10.3 + - backcall==0.2.0 + - beautifulsoup4==4.11.1 + - bleach==5.0.1 + - cachetools==5.0.0 + - cffi==1.15.1 + - charset-normalizer==2.0.12 + - colorama==0.4.4 + - configargparse==1.5.3 + - cycler==0.11.0 + - cython==0.29.30 + - debugpy==1.6.2 + - decorator==5.1.1 + - defusedxml==0.7.1 + - deprecation==2.1.0 + - entrypoints==0.4 + - executing==0.8.3 + - fastjsonschema==2.16.1 + - fonttools==4.34.4 + - freetype-py==2.2.0 + - google-auth==2.6.0 + - google-auth-oauthlib==0.4.6 + - grpcio==1.44.0 + - h5py==3.7.0 + - icecream==2.1.0 + - idna==3.3 + - imageio==2.16.1 + - importlib-metadata==4.11.2 + - importlib-resources==5.9.0 + - ipykernel==6.15.1 + - ipython==8.4.0 + - ipython-genutils==0.2.0 + - ipywidgets==7.7.1 + - jedi==0.18.1 + - jinja2==3.1.2 + - jittor==1.3.5.21 + - joblib==1.1.0 + - json5==0.9.9 + - jsonschema==4.9.0 + - kiwisolver==1.4.4 + - markdown==3.3.6 + - markupsafe==2.1.1 + - matplotlib==3.5.2 + - matplotlib-inline==0.1.3 + - mesh-to-sdf==0.0.14 + - mistune==0.8.4 + - mpi4py==3.1.3 + - nbclassic==0.4.3 + - nbclient==0.6.6 + - nbconvert==6.5.0 + - nbformat==5.4.0 + - nest-asyncio==1.5.5 + - networkx==2.7 + - notebook==6.4.12 + - notebook-shim==0.1.0 + - numexpr==2.8.1 + - numpy==1.22.2 + - oauthlib==3.2.0 + - open3d==0.15.2 + - opencv-python==4.5.2.52 + - packaging==21.3 + - pandas==1.4.3 + - pandocfilters==1.5.0 + - parso==0.8.3 + - pexpect==4.8.0 + - pickleshare==0.7.5 + - pillow==9.0.1 + - pip==22.0.3 + - pkgutil-resolve-name==1.3.10 + - prometheus-client==0.14.1 + - prompt-toolkit==3.0.30 + - protobuf==3.19.4 + - psutil==5.9.1 + - ptyprocess==0.7.0 + - pure-eval==0.2.2 + - pyasn1==0.4.8 + - pyasn1-modules==0.2.8 + - pycparser==2.21 + - pyglet==1.5.23 + - pygments==2.11.2 + - pyhocon==0.3.57 + - pymcubes==0.1.2 + - pymeshlab==2022.2.post2 + - pyopengl==3.1.0 + - pyparsing==3.0.7 + - pyquaternion==0.9.9 + - pyrender==0.1.45 + - pyrsistent==0.18.1 + - python-dateutil==2.8.2 + - pytz==2022.1 + - pywavelets==1.2.0 + - pyyaml==6.0 + - pyzmq==23.2.0 + - requests==2.27.1 + - requests-oauthlib==1.3.1 + - rsa==4.8 + - rtree==1.0.1 + - scikit-image==0.19.2 + - scikit-learn==1.0.2 + - scipy==1.7.0 + - send2trash==1.8.0 + - setuptools==63.3.0 + - six==1.16.0 + - sklearn==0.0 + - sniffio==1.2.0 + - soupsieve==2.3.2.post1 + - stack-data==0.3.0 + - tables==3.7.0 + - tensorboard==2.8.0 + - tensorboard-data-server==0.6.1 + - tensorboard-plugin-wit==1.8.1 + - tensorboardx==2.5.1 + - terminado==0.15.0 + - threadpoolctl==3.1.0 + - tifffile==2022.2.9 + - tinycss2==1.1.1 + - tomlkit==0.11.1 + - torchaudio==0.8.0 + - torchvision==0.9.0+cu111 + - tornado==6.2 + - tqdm==4.50.2 + - traitlets==5.3.0 + - trimesh==3.9.8 + - typing-extensions==4.1.1 + - urllib3==1.26.8 + - wcwidth==0.2.5 + - webencodings==0.5.1 + - websocket-client==1.3.3 + - werkzeug==2.0.3 + - 
widgetsnbextension==3.6.1 + - zipp==3.7.0 diff --git a/contrib/HSDF-Net/generate.py b/contrib/HSDF-Net/generate.py new file mode 100644 index 00000000..2a0d92a2 --- /dev/null +++ b/contrib/HSDF-Net/generate.py @@ -0,0 +1,105 @@ +import models.local_model as model +import models.data.voxelized_data_shapenet as voxelized_data +from models.generation import Generator +import jittor +import configs.config_loader as cfg_loader +import os +import trimesh +import numpy as np +from tqdm import tqdm +from utils import voxel2obj + +jittor.flags.use_cuda=1 + +cfg = cfg_loader.get_config() + +#device = torch.device("cuda") +net = model.HSDF() + +dataset = voxelized_data.VoxelizedDataset('test', + res=cfg.input_res, + pointcloud_samples=cfg.num_points, + data_path=cfg.data_dir, + split_file=cfg.split_file, + batch_size=1, + num_sample_points=cfg.num_sample_points_generation, + num_workers=8, + sample_distribution=cfg.sample_ratio, + sample_sigmas=cfg.sample_std_dev) + +gen = Generator(net, cfg.exp_name, cls_threshold=cfg.threshold) + +out_path = 'experiments/{}/evaluation_0.02_128/'.format(cfg.exp_name) + + +def gen_iterator(out_path, dataset, gen_p): + global gen + gen = gen_p + + if not os.path.exists(out_path): + os.makedirs(out_path) + print(out_path) + + # can be run on multiple machines: dataset is shuffled and already generated objects are skipped. + #loader = dataset.get_loader(shuffle=True) + + for i, data in tqdm(enumerate(dataset)): + + path = os.path.normpath(data['path'][0]) + + ''' + # selected testing cases for car + if 'bceb15ddfb9fe56aa13d6c605d0084d3' not in path \ + and '2b4664cf53176418faeea7738551d104' not in path \ + and 'a7c0f3bcc2347710a312b3f0b49ff828' not in path \ + and '2e8c4fd40a1be2fa5f38ed4497f2c53c' not in path \ + and '32924c86ee4a2c69aa4eefa8f42b566e' not in path \ + and '7e412497b8ab74963f2c3a55558a78f' not in path \ + and '7d33cb52d556c1fe618e9d35559b7aa' not in path \ + and '5ce5d2b8c3a7b846d13b7e043606607d' not in path \ + and '1f43243b81df21277925d1ea63246010' not in path \ + and '1ffe99aba88ca413ca71c17c1eef7213' not in path: + continue + ''' + + export_path = out_path + '/generation/{}/{}/'.format(path.split(os.sep)[-2], path.split(os.sep)[-1]) + + if os.path.exists(export_path): + print('Path exists - skip! 
{}'.format(export_path)) + continue + else: + os.makedirs(export_path) + + for num_steps in [7]: + + #debug + verts, faces, verts_nomask, faces_nomask, duration, voxel, verts_udf, faces_udf, voxel_gradnorm = gen.generate_mesh(data, voxel_resolution=128, chunk_num=16) + #verts, faces, verts_nomask, faces_nomask, duration, voxel, verts_udf, faces_udf, voxel_gradnorm = gen.generate_mesh(data, voxel_resolution=512, chunk_num=4096) + #np.savez(export_path + 'dense_point_cloud_{}'.format(num_steps), point_cloud=verts, duration=duration) + #np.savez(export_path + 'dense_point_cloud_{}_nomask'.format(num_steps), point_cloud=verts_nomask, duration=duration) + print('num_steps', num_steps, 'duration', duration) + #trimesh.Trimesh(vertices=verts, faces=faces).export( + # export_path + 'dense_point_cloud_{}.off'.format(num_steps)) + trimesh.Trimesh(vertices=verts_nomask, faces=faces_nomask).export( + export_path + 'dense_point_cloud_{}_nomask.off'.format(num_steps)) + trimesh.Trimesh(vertices=verts_udf, faces=faces_udf).export( + export_path + 'dense_point_cloud_{}_udf.off'.format(num_steps)) + + #trimesh.Trimesh(vertices=pos_pts, faces=[]).export( + # export_path + 'dense_point_cloud_{}_pos.off'.format(num_steps)) + #trimesh.Trimesh(vertices=neg_pts, faces=[]).export( + # export_path + 'dense_point_cloud_{}_neg.off'.format(num_steps)) + #trimesh.Trimesh(vertices=zero_pts, faces=[]).export( + # export_path + 'dense_point_cloud_{}_zero.off'.format(num_steps)) + + voxel2obj(export_path + 'voxel_gradnorm.obj', voxel_gradnorm) + + + pc, duration = gen.generate_point_cloud(data, num_steps) + print('pc duration', duration) + trimesh.Trimesh(vertices=pc, faces=[]).export( + export_path + 'dense_point_cloud_{}_pc.off'.format(num_steps)) + + + +gen_iterator(out_path, dataset, gen) diff --git a/contrib/HSDF-Net/models/__init__.py b/contrib/HSDF-Net/models/__init__.py new file mode 100644 index 00000000..8d1c8b69 --- /dev/null +++ b/contrib/HSDF-Net/models/__init__.py @@ -0,0 +1 @@ + diff --git a/contrib/HSDF-Net/models/generation.py b/contrib/HSDF-Net/models/generation.py new file mode 100644 index 00000000..7f5b2131 --- /dev/null +++ b/contrib/HSDF-Net/models/generation.py @@ -0,0 +1,543 @@ +import jittor +import os +import numpy as np +from glob import glob +import jittor +import jittor.nn as nn +import time +from skimage.measure import marching_cubes +from sklearn.preprocessing import normalize + +class Generator(object): + def __init__(self, model, exp_name, threshold = 0.05, checkpoint = None, cls_threshold=0.2): + #self.model = model.to(device) + self.model = model + self.model.eval() + #self.device = device + self.checkpoint_path = os.path.dirname(__file__) + '/../experiments/{}/checkpoints/'.format( exp_name) + self.load_checkpoint(checkpoint) + self.threshold = threshold + + # compute binary cls threshold of Bernoulli logits + self.cls_logits_threshold = np.log(cls_threshold) - np.log(1. - cls_threshold) + + print("cls_logits_threshold: {}".format(self.cls_logits_threshold)) + + + + def generate_mesh(self, data, voxel_resolution=128, EPS=0, chunk_num=128, num_steps=5): + + start = time.time() + inputs = data['inputs'] + + # add noises + sigma = 0.02 + inputs += sigma * jittor.randn(inputs.shape) + + for param in self.model.parameters(): + param.requires_grad = False + + bound = 1. 
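+ # Build a dense grid of cube-corner query points spanning [-bound, bound]^3 with
+ # voxel_resolution samples per axis; cell centers are derived below by average-pooling
+ # the corner grid, and both sets are evaluated in chunks to limit GPU memory usage.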
+ points = np.meshgrid( + np.linspace(-bound, bound, voxel_resolution), + np.linspace(-bound, bound, voxel_resolution), + np.linspace(-bound, bound, voxel_resolution) + ) + points = np.stack(points) + points = np.swapaxes(points, 1, 2) + points = points.reshape(3, -1).transpose().reshape( + [voxel_resolution]*3 + [3]).astype(np.float32) + #print(points.shape) + + #points = torch.from_numpy( + # get_raster_points( + # voxel_resolution=voxel_resolution)).to(self.device) + #points = points.reshape([voxel_resolution]*3 + [3]) + points_gpu = jittor.array(points) + + avgpool3d = nn.AvgPool3d(kernel_size=2, stride=1) + centers = avgpool3d(jittor.array(np.expand_dims(points_gpu.permute(3,0,1,2), axis=0))).permute(0,2,3,4,1) + centers = centers.reshape([1,-1,3]) + points_gpu = points_gpu.reshape([1,-1,3]) + centers = centers.detach() + points_gpu = points_gpu.detach() + + # split into chunks + center_chunks = jittor.chunk(centers, chunks=chunk_num, dim=1) + points_gpu_chunks = jittor.chunk(points_gpu, chunks=chunk_num, dim=1) + + # encode + encoding = self.model.encoder(inputs) + + gradient_cpu = np.zeros((1,0,3)) + centers_sdf_cpu = np.zeros((1,0)) + centers_cpu = np.zeros((1,0,3)) + sign_cpu = np.zeros((1,0)) + + points_udf_cpu = np.zeros((1,0)) + + print('-------begin computing mask--------') + + for i in range(chunk_num): + point = points_gpu_chunks[i] + center = center_chunks[i] + + center = center.detach() + center.requires_grad = True + + with jittor.no_grad(): + udf, p_r_init = self.model.decoder(point, *encoding) + sign = (p_r_init>0.).float()*2-1 + + center_debug = self.model.decoder(center, *encoding)[0] + center_debug.sync() + #centers_df_pred = jittor.clamp(center_debug, min_v=None, max_v=self.threshold) + #np.savetxt("center_log.txt", center_debug.numpy()) + centers_df_pred = center_debug.minimum(self.threshold) + # decode + #centers_df_pred = jittor.clamp( + # self.model.decoder(center, *encoding)[0], + # max_v=self.threshold) + + #centers_df_pred.sum().backward() + center_grad = jittor.grad(centers_df_pred.sum(), center, retain_graph=False) + + #gradient_cpu = np.concatenate([gradient_cpu, center.grad.detach().numpy()], axis=1) + gradient_cpu = np.concatenate([gradient_cpu, center_grad.detach().numpy()], axis=1) + centers_cpu = np.concatenate([centers_cpu, center.detach().numpy()], axis=1) + centers_sdf_cpu = np.concatenate([centers_sdf_cpu, centers_df_pred.detach().numpy()], axis=1) + sign_cpu = np.concatenate([sign_cpu, sign.detach().numpy()], axis=1) + points_udf_cpu = np.concatenate([points_udf_cpu, udf.detach().numpy()], axis=1) + + + gradient = gradient_cpu.reshape([voxel_resolution-1]*3 + [3]) + centers_df_pred = centers_sdf_cpu.reshape([voxel_resolution-1]*3) + centers = centers_cpu.reshape([voxel_resolution-1]*3 + [3]) + sign_cpu = sign_cpu.reshape([voxel_resolution]*3) + points_udf_cpu = points_udf_cpu.reshape([voxel_resolution]*3) + + gradient_norm = np.linalg.norm(gradient, axis=-1) + #print('gradient norm max: {}'.format(np.max(gradient_norm))) + #print('gradient norm min: {}'.format(np.min(gradient_norm))) + #print('gradient > 1 cell: {}'.format(gradient_norm[gradient_norm>1.].shape)) + voxel_gradnorm = (gradient_norm>1.).astype(np.int) + + # generate mask + mask = np.zeros([voxel_resolution]*3) + voxel = np.zeros([voxel_resolution-1]*3) + + #max_sum = -1 + + for i in range(voxel_resolution-1): + for j in range(voxel_resolution-1): + for k in range(voxel_resolution-1): + + if np.abs(centers_df_pred[i,j,k])>=self.threshold: + continue + + # TODO: gradient normalize + 
closest_surfpt = centers[i,j,k] - normalize(gradient[i,j,k].reshape((-1,1)), axis=0).ravel()*centers_df_pred[i,j,k] + + # check if closest surfpt is inside this cube + if (points[i,j,k,0]-EPS < closest_surfpt[0] < points[i+1,j,k,0]+EPS and + points[i,j,k,1]-EPS < closest_surfpt[1] < points[i,j+1,k,1]+EPS and + points[i,j,k,2]-EPS < closest_surfpt[2] < points[i,j,k+1,2]+EPS): + + ''' + grid_sign_sum = 0 + + for ii in range(2): + for jj in range(2): + for kk in range(2): + grid_sign_sum += sign_by_value[i+ii,j+jj,k+kk] + ''' + + # get voxel to visualize + voxel[i,j,k] = 1 + #mask[i+1,j+1,k+1] = 1 + + # 8 corners of a cube + for ii in range(2): + for jj in range(2): + for kk in range(2): + + mask[i+ii,j+jj,k+kk] = 1 + ''' + # generate mask + mask_gradnorm = np.zeros([voxel_resolution]*3) + #voxel = np.zeros([voxel_resolution-1]*3) + + #max_sum = -1 + + for i in range(voxel_resolution-1): + for j in range(voxel_resolution-1): + for k in range(voxel_resolution-1): + + if voxel_gradnorm[i,j,k] == 0: + + # 8 corners of a cube + for ii in range(2): + for jj in range(2): + for kk in range(2): + + mask_gradnorm[i+ii,j+jj,k+kk] = 1 + ''' + + + #mask_cpu = np.array_split(mask.reshape([1,-1,1]), chunk_num, axis=1) + + print('-------begin computing sign and distance--------') + + #print('points: {}'.format(points)) + + #point_chunks = torch.chunk(points_gpu, chunks=chunk_num, dim=1) + + points_sdf_cpu = sign_cpu.copy() * 10 + points_sdf_cpu_filtered = np.zeros((1,0)) + #points_cpu = np.zeros((1,0,3)) + + sign_by_value_cpu = sign_cpu.copy() * 10 + sign_by_value_cpu_filtered = np.zeros((1,0)) + + points_filtered = jittor.array(points[mask==1]).reshape(1,-1,3) + + #print('points_filtered shape: {}'.format(points_filtered.shape)) + + point_chunks = jittor.chunk(points_filtered, chunks=chunk_num, dim=1) + + for i in range(len(point_chunks)): + + #mask_chunk = mask_cpu[i] + #if mask_chunk.sum() == 0: + # continue + + point = point_chunks[i] + point = point.detach() + point.requires_grad = True + + #print('point shape: {}'.format(point.shape)) + + # compute sign by mere value + #with torch.no_grad(): + udf, sign_by_value = self.model.decoder(point, *encoding) + + + ''' + for j in range(num_steps): + # generate grid gradient of UDF and OCC + #if j>0: + # dis_pred_laststep = dis_pred + + dis_pred = self.model.decoder(point, *encoding)[0] + + #print('sum: {}'.format((dis_pred0: + # dis_mask = dis_pred>=dis_pred_laststep + # dis_pred[dis_mask] = torch.zeros_like(dis_pred)[dis_mask] + + # debug + #dis_pred = torch.zeros_like(dis_pred) + + point = point - F.normalize(grid_dis_grad, dim=-1) * dis_pred.unsqueeze(-1) + point = point.detach() + point.requires_grad = True + + print('near ratio: {}'.format((dis_pred0).float()*2-1 + + #grid_point_shift = grid_point_init + grid_logits_grad * dis_pred_init.unsqueeze(-1) + #with torch.no_grad(): + # grid_dis_shift = self.model.decoder(grid_point_shift, *encoding)[0] + #sign = (grid_dis_shift>dis_pred_init).float()*2-1 + udf.sync() + sign.sync() + points_df_pred = udf * sign + + # DEBUG + #points_df_pred = (cls_pred.argmax(dim=1)*2-1).float() + #points_df_pred = p_r_init.logits + + #points_cpu = np.concatenate([points_cpu, point.detach().numpy()], axis=1) + points_sdf_cpu_filtered = np.concatenate([points_sdf_cpu_filtered, points_df_pred.detach().numpy()], axis=1) + #points_udf_cpu_filtered = np.concatenate([points_udf_cpu_filtered, dis_pred_init.detach().numpy()], axis=1) + + sign_by_value_cpu_filtered = np.concatenate([sign_by_value_cpu_filtered, 
sign_by_value.detach().numpy()], axis=1) + + ''' + # plot vector field + point_cpu = point[0].detach().numpy() + grid_logits_grad_cpu = grid_logits_grad[0].detach().numpy() + grid_dis_grad_cpu = grid_dis_grad[0].detach().numpy() + + fig = plt.figure() + ax = fig.gca(projection='3d') + + ax.quiver(point_cpu[:,0],point_cpu[:,1],point_cpu[:,2],grid_logits_grad_cpu[:,0],grid_logits_grad_cpu[:,1],grid_logits_grad_cpu[:,2],length=0.05) + + plt.savefig('chunk_{}_logits.png'.format(i)) + + plt.close() + ''' + + # CPU compute mask (could be parallelized) + #points_df_pred = points_sdf_cpu.reshape([voxel_resolution]*3) + #points = points_cpu.reshape([voxel_resolution]*3 + [3]) + + #points_udf_pred = points_udf_cpu.reshape([voxel_resolution]*3) + + #sign_by_value = sign_by_value_cpu.reshape([voxel_resolution]*3)#.astype(np.bool) + + points_sdf_cpu[mask==1] = points_sdf_cpu_filtered.ravel() + #points_udf_cpu[mask==1] = points_udf_cpu_filtered.ravel() + sign_by_value_cpu[mask==1] = sign_by_value_cpu_filtered.ravel() + + + #pos_pts = points[np.logical_and(points_df_pred>0, points_df_pred<0.1)] + #neg_pts = points[np.logical_and(points_df_pred<0, points_df_pred>-0.1)] + #zero_pts = points[points_df_pred==0] + + print('-------begin marching cube-------') + verts = [] + faces = [] + verts_nomask = [] + faces_nomask = [] + duration = time.time() - start + + #verts, faces, norms, vals = marching_cubes( + # points_sdf_cpu, + # #np.abs(points_sdf_cpu) * ((sign_by_value_cpu>self.cls_logits_threshold)*2-1), + # 0, + # mask=mask.astype(bool)) + + + + #verts_nomask, faces_nomask, _, _ = marching_cubes( + # points_df_pred, + # 0) + + verts_nomask, faces_nomask, _, _ = marching_cubes( + sign_by_value_cpu, + 0., + mask=mask.astype(bool) + ) + + duration = time.time() - start + + + verts_udf, faces_udf, _, _ = marching_cubes( + points_udf_cpu, + 6e-3 + ) + + ''' + verts_udf, faces_udf, _, _ = marching_cubes( + points_udf_cpu, + 0., + mask=mask_gradnorm.astype(bool) + ) + ''' + + #print(verts_nomask.shape) # verts_nomask shape (verts, 3) + centers = np.array((1,1,1)) + spacing = 2./(voxel_resolution-1) + scale = (points.reshape([-1, 3]).max(0) - points.reshape([-1, 3]).min(0))[0] + pts_center = points.reshape([-1, 3]).mean(0) + + + verts_nomask_normalized = (verts_nomask*spacing - centers)*0.5 + + verts_nomask = verts_nomask_normalized*scale + pts_center + + + return verts, faces, verts_nomask, faces_nomask, duration, voxel, verts_udf, faces_udf, voxel_gradnorm#, pos_pts, neg_pts, zero_pts + + + + def generate_point_cloud(self, data, num_steps = 5, num_points = 900000, filter_val = 0.009): + + jittor.gc() + start = time.time() + inputs = data['inputs'] + + + for param in self.model.parameters(): + param.requires_grad = False + + sample_num = 100000 + samples_cpu = np.zeros((0, 3)) + samples = jittor.rand(1, sample_num, 3).float() * 3 - 1.5 + samples.requires_grad = True + + encoding = self.model.encoder(inputs) + + i = 0 + while len(samples_cpu) < num_points: + print('iteration', i) + + for j in range(num_steps): + print('refinement', j) + df_pred = jittor.clamp(self.model.decoder(samples, *encoding)[0], max_v=self.threshold) + + sample_grad = jittor.grad(df_pred.sum(), samples, retain_graph=False) + sample_grad.sync() + #df_pred.sum().backward() + + gradient = sample_grad.detach() + samples = samples.detach() + df_pred = df_pred.detach() + inputs = inputs.detach() + samples = samples - jittor.normalize(gradient, dim=2) * df_pred.reshape(-1, 1) # better use Tensor.copy method? 
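+ # The update above moves each sample toward the surface along the negative UDF
+ # gradient, stepping by the clamped predicted distance; detach and re-enable
+ # requires_grad so the next refinement iteration differentiates the moved points.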
+ samples = samples.detach() + samples.requires_grad = True + + + print('finished refinement') + + if not i == 0: + samples_cpu = np.vstack((samples_cpu, samples[df_pred < filter_val].detach().numpy())) + + samples = samples.detach() + df_pred = df_pred.detach() + df_pred.sync() + samples.sync() + samples_tmp = samples[df_pred < 0.03] + samples_tmp.sync() + samples = samples_tmp.unsqueeze(0) + indices = jittor.randint(samples.shape[1], shape=(1, sample_num)) + samples = samples[[[0, ] * sample_num], indices] + samples += (self.threshold / 3) * jittor.randn(samples.shape) # 3 sigma rule + samples = samples.detach() + samples.requires_grad = True + + i += 1 + print(samples_cpu.shape) + + duration = time.time() - start + + return samples_cpu, duration + + + + def load_checkpoint(self, checkpoint): + checkpoints = glob(self.checkpoint_path + '/*') + #print(checkpoints) + if checkpoint is None: + if len(checkpoints) == 0: + print('No checkpoints found at {}'.format(self.checkpoint_path)) + return 0, 0 + + checkpoints = [os.path.splitext(os.path.basename(path))[0].split('_')[-1] for path in checkpoints] + checkpoints = np.array(checkpoints, dtype=float) + checkpoints = np.sort(checkpoints) + + for name in glob(self.checkpoint_path + '/*'): + if str(checkpoints[-1]) in name: + path = self.checkpoint_path + os.path.basename(name) + + #path = self.checkpoint_path + 'checkpoint_{}h:{}m:{}s_{}.tar'.format( + # *[*convertSecs(checkpoints[-1]), checkpoints[-1]]) + else: + path = self.checkpoint_path + '{}.tar'.format(checkpoint) + print('Loaded checkpoint from: {}'.format(path)) + ''' + #torch to jittor + + import torch + check_torch = torch.load(path) + if 'module' in list(check_torch['model_state_dict'].keys())[0]: + self.model.load_state_dict({k[7:]:v for k,v in check_torch['model_state_dict'].items()}) + else: + self.model.load_state_dict(check_torch['model_state_dict']) + epoch = check_torch['epoch'] + training_time = check_torch['training_time'] + del torch + ''' + checkpoint = jittor.load(path) + if 'module' in list(checkpoint['model_state_dict'].keys())[0]: + self.model.load_state_dict({k[7:]:v for k,v in checkpoint['model_state_dict'].items()}) + else: + self.model.load_state_dict(checkpoint['model_state_dict']) + epoch = checkpoint['epoch'] + training_time = checkpoint['training_time'] + + return epoch, training_time + + +def convertMillis(millis): + seconds = int((millis / 1000) % 60) + minutes = int((millis / (1000 * 60)) % 60) + hours = int((millis / (1000 * 60 * 60))) + return hours, minutes, seconds + +def convertSecs(sec): + seconds = int(sec % 60) + minutes = int((sec / 60) % 60) + hours = int((sec / (60 * 60))) + return hours, minutes, seconds diff --git a/contrib/HSDF-Net/models/local_model.py b/contrib/HSDF-Net/models/local_model.py new file mode 100644 index 00000000..baf48712 --- /dev/null +++ b/contrib/HSDF-Net/models/local_model.py @@ -0,0 +1,153 @@ +import jittor +import jittor.nn as nn + +class HSDF(nn.Module): + + def __init__(self, hidden_dim=256): + super(HSDF, self).__init__() + self.conv_in = nn.Conv3d(1, 16, 3, padding=1) # out: 256 ->m.p. 128 + self.conv_0 = nn.Conv3d(16, 32, 3, padding=1) # out: 128 + self.conv_0_1 = nn.Conv3d(32, 32, 3, padding=1) # out: 128 ->m.p. 
64 + self.conv_1 = nn.Conv3d(32, 64, 3, padding=1) # out: 64 + self.conv_1_1 = nn.Conv3d(64, 64, 3, padding=1) # out: 64 -> mp 32 + self.conv_2 = nn.Conv3d(64, 128, 3, padding=1) # out: 32 + self.conv_2_1 = nn.Conv3d(128, 128, 3, padding=1) # out: 32 -> mp 16 + self.conv_3 = nn.Conv3d(128, 128, 3, padding=1) # out: 16 + self.conv_3_1 = nn.Conv3d(128, 128, 3, padding=1) # out: 16 -> mp 8 + self.conv_4 = nn.Conv3d(128, 128, 3, padding=1) # out: 8 + self.conv_4_1 = nn.Conv3d(128, 128, 3, padding=1) # out: 8 + + feature_size = (1 + 16 + 32 + 64 + 128 + 128 + 128) * 7 + 3 + self.fc_0 = nn.Conv1d(feature_size, hidden_dim * 2, 1) + self.fc_1 = nn.Conv1d(hidden_dim *2, hidden_dim, 1) + self.fc_2 = nn.Conv1d(hidden_dim , hidden_dim, 1) + self.fc_out = nn.Conv1d(hidden_dim, 1, 1) + + # classification head + self.fc_0_cls = nn.Conv1d(feature_size, hidden_dim * 2, 1) + self.fc_1_cls = nn.Conv1d(hidden_dim *2, hidden_dim, 1) + self.fc_2_cls = nn.Conv1d(hidden_dim , hidden_dim, 1) + self.fc_out_cls = nn.Conv1d(hidden_dim, 1, 1) + + self.actvn = nn.ReLU() + + self.maxpool = nn.MaxPool3d(2) + + self.conv_in_bn = nn.BatchNorm3d(16) + self.conv0_1_bn = nn.BatchNorm3d(32) + self.conv1_1_bn = nn.BatchNorm3d(64) + self.conv2_1_bn = nn.BatchNorm3d(128) + self.conv3_1_bn = nn.BatchNorm3d(128) + self.conv4_1_bn = nn.BatchNorm3d(128) + + # add remez_net + #self.m = 7 + #self.n = 7 + #self.max_mn = self.m if self.m >= self.n else self.n + #self.rat = rational_net(self.max_mn, self.max_mn, feature_size=feature_size) + + + displacment = 0.0722 + displacments = [] + displacments.append([0, 0, 0]) + for x in range(3): + for y in [-1, 1]: + input = [0, 0, 0] + input[x] = y * displacment + displacments.append(input) + + self.displacments = jittor.array(displacments) + + def encoder(self,x): + x = x.unsqueeze(1) + f_0 = x + + net = self.actvn(self.conv_in(x)) + net = self.conv_in_bn(net) + f_1 = net + net = self.maxpool(net) # out 128 + + net = self.actvn(self.conv_0(net)) + net = self.actvn(self.conv_0_1(net)) + net = self.conv0_1_bn(net) + f_2 = net + net = self.maxpool(net) # out 64 + + net = self.actvn(self.conv_1(net)) + net = self.actvn(self.conv_1_1(net)) + net = self.conv1_1_bn(net) + f_3 = net + net = self.maxpool(net) + + net = self.actvn(self.conv_2(net)) + net = self.actvn(self.conv_2_1(net)) + net = self.conv2_1_bn(net) + f_4 = net + net = self.maxpool(net) + + net = self.actvn(self.conv_3(net)) + net = self.actvn(self.conv_3_1(net)) + net = self.conv3_1_bn(net) + f_5 = net + net = self.maxpool(net) + + net = self.actvn(self.conv_4(net)) + net = self.actvn(self.conv_4_1(net)) + net = self.conv4_1_bn(net) + f_6 = net + + return f_0, f_1, f_2, f_3, f_4, f_5, f_6 + + def decoder(self, p, f_0, f_1, f_2, f_3, f_4, f_5, f_6): + + p_features = p.transpose(1, -1) + p = p.unsqueeze(1).unsqueeze(1) + p = jittor.concat([p + d for d in self.displacments], dim=2) + + + # feature extraction + feature_0 = nn.grid_sample(f_0, p, padding_mode='border', align_corners=True) + feature_1 = nn.grid_sample(f_1, p, padding_mode='border', align_corners=True) + feature_2 = nn.grid_sample(f_2, p, padding_mode='border', align_corners=True) + feature_3 = nn.grid_sample(f_3, p, padding_mode='border', align_corners=True) + feature_4 = nn.grid_sample(f_4, p, padding_mode='border', align_corners=True) + feature_5 = nn.grid_sample(f_5, p, padding_mode='border', align_corners=True) + feature_6 = nn.grid_sample(f_6, p, padding_mode='border', align_corners=True) + + + # here every channel corresponds to one feature. 
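+ # The seven grid_sample results carry multi-scale features (the raw input grid f_0
+ # plus the six encoder feature maps) sampled at the displaced query locations; they
+ # are concatenated channel-wise and flattened to one feature vector per point before
+ # the raw point coordinates are appended.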
+ + features = jittor.concat((feature_0, feature_1, feature_2, feature_3, feature_4, feature_5, feature_6), + dim=1) # (B, features, 1,7,sample_num) + shape = features.shape + features = jittor.reshape(features, + (shape[0], shape[1] * shape[3], shape[4])) # (B, featues_per_sample, samples_num) + features = jittor.concat((features, p_features), dim=1) # (B, featue_size, samples_num) + + p_r = None + + net = self.actvn(self.fc_0(features)) + net = self.actvn(self.fc_1(net)) + net = self.actvn(self.fc_2(net)) + net = self.actvn(self.fc_out(net)) + #net = self.fc_out(net) + out = net.squeeze(1) + + # classification head + net_cls = self.actvn(self.fc_0_cls(features)) + net_cls = self.actvn(self.fc_1_cls(net_cls)) + net_cls = self.actvn(self.fc_2_cls(net_cls)) + + # classification task with no actvn + out_cls = self.fc_out_cls(net_cls).squeeze(1) # (B, 1, samples_num) -> (B, samples_num) + + # return occupancy probabilities for the sampled points + #print(out_cls) + #p_r = dist.Bernoulli(logits=out_cls) + + + return out, out_cls + + def execute(self, p, x): + out, p_r = self.decoder(p, *self.encoder(x)) + return out, p_r \ No newline at end of file diff --git a/contrib/HSDF-Net/models/training.py b/contrib/HSDF-Net/models/training.py new file mode 100644 index 00000000..7fdeaeb8 --- /dev/null +++ b/contrib/HSDF-Net/models/training.py @@ -0,0 +1,307 @@ +from __future__ import division +import jittor +import jittor.optim as optim +import jittor.nn as nn + +from trainers.losses.filtering_losses import loss_lap, loss_lap_dsdf +from trainers.losses.eikonal_loss import loss_eikonal, loss_eikonal_dsdf + +from glob import glob +import numpy as np +import time +import os + +from tensorboardX import SummaryWriter + +class Trainer(object): + + def __init__( + self, + model, + #device, + train_dataset, + val_dataset, + exp_name, + optimizer='Adam', + lr = 1e-4, + threshold = 0.1, + cls_threshold=0.2): + + self.model = model + + if optimizer == 'Adam': + self.optimizer = optim.Adam(self.model.parameters(), lr= lr) + if optimizer == 'RMSprop': + self.optimizer = optim.RMSprop(self.model.parameters(), momentum=0.9) + + + self.train_dataset = train_dataset + self.val_dataset = val_dataset + self.exp_path = os.path.dirname(__file__) + '/../experiments/{}/'.format( exp_name) + self.checkpoint_path = self.exp_path + 'checkpoints/'.format( exp_name) + if jittor.rank == 0: + if not os.path.exists(self.checkpoint_path): + print(self.checkpoint_path) + os.makedirs(self.checkpoint_path) + self.writer = SummaryWriter(self.exp_path + 'summary'.format(exp_name)) + self.val_min = None + self.max_dist = threshold + + # compute binary cls threshold of Bernoulli logits + self.cls_logits_threshold = np.log(cls_threshold) - np.log(1. - cls_threshold) + + + def train_step(self,batch): + self.model.train() + self.optimizer.zero_grad() + loss, acc = self.compute_loss(batch) + #loss.backward() + #self.optimizer.step(loss) + print("loss {}".format(loss)) + self.optimizer.backward(loss) + self.optimizer.step() + + # map-reduce loss and acc + reduced_loss, reduced_acc = loss, acc + if jittor.in_mpi: + reduced_loss = loss.mpi_all_reduce()# / jittor.world_size + reduced_acc = acc.mpi_all_reduce()# / jittor.world_size + + return reduced_loss.item(), reduced_acc.item() + + def compute_loss(self,batch): + #device = self.device + + p = batch.get('grid_coords')#.to(device) + + # clamp p to -1,1 + #p = torch.clamp(p, max=1., min=-1.) 
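+ # The batch provides the query coordinates ('grid_coords'), the ground-truth signed
+ # distances ('df') and the voxelized point-cloud input ('inputs').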
+ + # for computing curvature + #p.requires_grad = True + + df_gt = batch.get('df')#.to(device) #(Batch,num_points) + inputs = batch.get('inputs')#.to(device) + + #print('input max: {}'.format(torch.max(inputs))) + #print('input min: {}'.format(torch.min(inputs))) + + #print('p max: {}'.format(torch.max(p))) + #print('p min: {}'.format(torch.min(p))) + + + df_pred, p_r = self.model(p,inputs) #(Batch,num_points) + + #print('df_pred max: {}'.format(torch.max(df_pred))) + #print('df_pred min: {}'.format(torch.min(df_pred))) + + # have to split abs val traning and sign traning, cauz they're conflicting at open regions + # regression loss + loss_r = nn.L1Loss()( + jittor.clamp(df_pred, max_v=self.max_dist, min_v=0.), + jittor.clamp(jittor.abs(df_gt), max_v=self.max_dist))# out = (B,num_points) by componentwise comparing vecots of size num_samples: + + #loss_r = torch.nn.L1Loss(reduction='none')( + # df_pred, + # torch.abs(df_gt))# out = (B,num_points) by componentwise comparing vecots of size num_samples: + + loss_c = nn.L1Loss()( + jittor.clamp(p_r, max_v=self.max_dist, min_v=-self.max_dist), + jittor.clamp(df_gt, max_v=self.max_dist, min_v=-self.max_dist))# out = (B,num_points) by componentwise comparing vecots of size num_samples: + + + # classification loss + #sign_gt = torch.sign(df_gt) + + #cls_gt = (sign_gt+1)/2 + + #loss_c = torch.nn.CrossEntropyLoss()( + # cls_pred.permute(0,2,1).reshape(-1,2), + # cls_gt.view(-1)) + + # soft label + #cls_gt = cls_gt - sign_gt * 0.5 * torch.exp(-1/torch.abs(df_gt)) + + #loss_c = F.binary_cross_entropy_with_logits( + # p_r.logits, cls_gt, reduction='none'#, weight = 1/torch.exp(df_pred.detach()) + #) + + # introduce curvature loss for sharpening + ''' + loss_lap_scaling = 1. * loss_lap_dsdf( + df_pred, + df_pred, + x = p, + npoints = 5000, + beta = 0., + masking_thr = 50 + ) + + loss_unit_grad_norm = loss_eikonal_dsdf( + y = df_pred, + x = p, + weights = 50. 
+ ) + ''' + + #print('loss_lap_scaling: {}'.format(loss_lap_scaling.sum(-1).mean())) + #print('loss_unit_grad_norm: {}'.format(loss_unit_grad_norm.sum(-1).mean())) + + + # compute accuracy for multi-class classification + + acc = ((p_r>0.).long()==(df_gt>0.).long()).sum().double() / (df_gt.shape[0] * df_gt.shape[1]) + #acc = torch.zeros((1)) + + #print('loss_r: {}'.format(loss_r.sum(-1).mean())) + #print('loss_c: {}'.format(loss_c.sum(-1).mean())) + + loss = loss_r.sum(-1).mean() + loss_c.sum(-1).mean()# + loss_unit_grad_norm.sum(-1).mean() + loss_lap_scaling.sum(-1).mean() # loss_i summed over all #num_samples samples -> out = (B,1) and mean over batch -> out = (1) + + return loss, acc + + def train_model(self, epochs): + loss = 0 + train_data_loader = self.train_dataset + start, training_time = self.load_checkpoint() + iteration_start_time = time.time() + + for epoch in range(start, epochs): + sum_loss = 0 + sum_acc = 0 + if jittor.rank == 0: + print('Start epoch {}'.format(epoch)) + + # shuffle ditributed sampler + #train_data_loader.sampler.set_epoch(epoch) + + print("loader length: {}".format(len(train_data_loader))) + + for idx, batch in enumerate(train_data_loader): + #print('idx: {}'.format(idx)) + + if jittor.rank == 0: + #save model + iteration_duration = time.time() - iteration_start_time + if iteration_duration > 60 * 60: # eve model every X min and at start + #print('{} eval'.format(self.local_rank)) + + training_time += iteration_duration + iteration_start_time = time.time() + + self.save_checkpoint(epoch, training_time) + val_loss, val_acc = self.compute_val_loss() + + if self.val_min is None: + self.val_min = val_loss + + if val_loss < self.val_min: + self.val_min = val_loss + for path in glob(self.exp_path + 'val_min=*'): + os.remove(path) + np.save(self.exp_path + 'val_min={}'.format(epoch), [epoch, val_loss]) + + self.writer.add_scalar('val loss batch avg', val_loss, epoch) + self.writer.add_scalar('val acc batch avg', val_acc, epoch) + + + #optimize model + loss, acc = self.train_step(batch) + if jittor.rank == 0: + print("Current loss: {} acc: {}".format(loss / self.train_dataset.num_sample_points, acc)) + sum_loss += loss + sum_acc += acc + + + + if jittor.rank == 0: + self.writer.add_scalar('training loss last batch', loss, epoch) + self.writer.add_scalar('training loss batch avg', sum_loss / len(train_data_loader), epoch) + self.writer.add_scalar('training acc batch avg', sum_acc / len(train_data_loader), epoch) + + + + + def save_checkpoint(self, epoch, training_time): + path = self.checkpoint_path + 'checkpoint_{}h_{}m_{}s_{}.tar'.format(*[*convertSecs(training_time),training_time]) + if not os.path.exists(path) and jittor.rank == 0: + jittor.save({ #'state': torch.cuda.get_rng_state_all(), + 'training_time': training_time ,'epoch':epoch, + 'model_state_dict': self.model.state_dict(), + 'optimizer_state_dict': self.optimizer.state_dict()}, path) + + + + def load_checkpoint(self): + checkpoints = glob(self.checkpoint_path+'/*') + if len(checkpoints) == 0: + print('No checkpoints found at {}'.format(self.checkpoint_path)) + return 0,0 + + checkpoints = [os.path.splitext(os.path.basename(path))[0].split('_')[-1] for path in checkpoints] + checkpoints = np.array(checkpoints, dtype=float) + checkpoints = np.sort(checkpoints) + + for name in glob(self.checkpoint_path + '/*'): + if str(checkpoints[-1]) in name: + path = self.checkpoint_path + os.path.basename(name) + #path = self.checkpoint_path + 
'checkpoint_{}h:{}m:{}s_{}.tar'.format(*[*convertSecs(checkpoints[-1]),checkpoints[-1]]) + + print('Loaded checkpoint from: {}'.format(path)) + checkpoint = jittor.load(path) + + # load partially + #print(checkpoint['optimizer_state_dict']['param_groups']) + #exit() + #print(self.optimizer.state_dict()['param_groups']) + #exit() + + if 'module' in list(checkpoint['model_state_dict'].keys())[0]: + self.model.load_state_dict(checkpoint['model_state_dict']) + else: + print('load from without apex') + self.model.load_state_dict({'module.'+k:v for k,v in checkpoint['model_state_dict'].items()}) + + epoch = 0 + training_time = 0 + + try: + self.optimizer.load_state_dict(checkpoint['optimizer_state_dict']) + #amp.load_state_dict(checkpoint['amp_state_dict']) + epoch = checkpoint['epoch'] + training_time = checkpoint['training_time'] + except: + print('find pretrained weights, epoch reduce to 0') + # torch.cuda.set_rng_state_all(checkpoint['state']) # batch order is restored. unfortunately doesn't work like that. + return epoch, training_time + + def compute_val_loss(self): + self.model.eval() + + sum_val_loss = 0 + sum_val_acc = 0 + num_batches = 15 + for _ in range(num_batches): + try: + val_batch = self.val_data_iterator.next() + except: + self.val_data_iterator = self.val_dataset.__iter__() + val_batch = self.val_data_iterator.__next__() + + #val_loss, val_acc = self.compute_loss( val_batch) + sum_val_loss += self.compute_loss( val_batch)[0].data.item() + sum_val_acc += self.compute_loss( val_batch)[1].data.item() + + return sum_val_loss / num_batches, sum_val_acc / num_batches + +def convertMillis(millis): + seconds = int((millis / 1000) % 60) + minutes = int((millis / (1000 * 60)) % 60) + hours = int((millis / (1000 * 60 * 60))) + return hours, minutes, seconds + +def convertSecs(sec): + seconds = int(sec % 60) + minutes = int((sec / 60) % 60) + hours = int((sec / (60 * 60))) + return hours, minutes, seconds diff --git a/contrib/HSDF-Net/ndf_postprocess.mlx b/contrib/HSDF-Net/ndf_postprocess.mlx new file mode 100644 index 00000000..a76c4e8a --- /dev/null +++ b/contrib/HSDF-Net/ndf_postprocess.mlx @@ -0,0 +1,10 @@ + + + + + + + + + + diff --git a/contrib/HSDF-Net/renderer.py b/contrib/HSDF-Net/renderer.py new file mode 100644 index 00000000..932f89b9 --- /dev/null +++ b/contrib/HSDF-Net/renderer.py @@ -0,0 +1,386 @@ +import math +import numpy as np +import trimesh +import cv2 +import os + +import configs.config_loader as cfg_loader + +import NDF_combine as NDF + +def ray_trace_mesh(trimesh_mesh, xyz_world, rays, batch_size=20004): + intersector = trimesh.ray.ray_triangle.RayMeshIntersector(trimesh_mesh) + all_loc_intersect = [] + all_ray_index = [] + all_face_index = [] + + for x in range(0, rays.shape[0], batch_size): + loc_intersect, ray_index, face_index = \ + intersector.intersects_location(xyz_world[x:x+batch_size], rays[x:x+batch_size]) + + if len(ray_index) != 0: + ray_index = batch_size * int(x / batch_size) + ray_index + all_loc_intersect.append(loc_intersect) + all_ray_index.append(ray_index) + all_face_index.append(face_index) + + if rays.shape[0] % batch_size: + x = batch_size * int(rays.shape[0] / batch_size) + loc_intersect, ray_index, face_index = \ + intersector.intersects_location(xyz_world[x:], rays[x:]) + if len(ray_index) != 0: + ray_index = x + ray_index + all_loc_intersect.append(loc_intersect) + all_ray_index.append(ray_index) + all_face_index.append(face_index) + + dists = np.ones(rays.shape[0]) * 100 + normals = np.ones(rays.shape) * 0.1 + + try: + all_loc_intersect = 
np.concatenate(all_loc_intersect, 0) + all_face_index = np.concatenate(all_face_index, 0) + all_ray_index = np.concatenate(all_ray_index, 0) + + for ct, xx in enumerate(all_ray_index): + dist = np.sqrt(np.sum((xyz_world[xx] - all_loc_intersect[ct])**2, 0)) + + if dist < dists[xx]: + dists[xx] = dist + yy = all_face_index[ct] + normals_ = trimesh_mesh.face_normals[yy] + #reverse = np.sum(normals_ * rays[xx]) > 0 + normals__ = -normals_ + #if reverse: + # normals__ = -normals_ + normals[xx] = normals__ + except: + dists[dists==100] = -1 + return normals, dists, xyz_world + + + return normals, dists, all_loc_intersect + + +def str2bool(inp): + return inp.lower() in 'true' + +class Renderer(): + def __init__(self): + self.get_args() + self.create_plane_points_from_bounds() + self.define_screen_points() + self.define_unit_rays() + + def get_args(self): + """ + :return: + """ + self.args = cfg_loader.get_config() + + # print(self.args.cam_position) + # print(self.args.cam_orientation) + os.makedirs(self.args.folder, exist_ok=True) + + def create_plane_points_from_bounds(self): + """ + Creates a plane of points which acts as the screen for rendering + """ + # create an xy plane + x = np.linspace(-self.args.screen_bound, self.args.screen_bound, self.args.size) + y = np.linspace(-self.args.screen_bound, self.args.screen_bound, self.args.size) + X, Y = np.meshgrid(x, y, indexing='ij') + X = X.reshape((np.prod(X.shape),)) + Y = Y.reshape((np.prod(Y.shape),)) + + # append the third dimension coordinate to the xy plane + points_list = np.column_stack((X, Y)) + points_list = np.insert(points_list, 2, self.args.screen_depth, axis=1) + self.points_list = points_list + + def to_rotation_matrix(self): + """ + Creates rotation matrix from the input euler angles + """ + euler_angles = np.array(self.args.cam_orientation) + R_x = np.array([[1, 0, 0], + [0, math.cos(math.radians(euler_angles[0])), -math.sin(math.radians(euler_angles[0]))], + [0, math.sin(math.radians(euler_angles[0])), math.cos(math.radians(euler_angles[0]))] + ]) + + R_y = np.array([[math.cos(math.radians(euler_angles[1])), 0, math.sin(math.radians(euler_angles[1]))], + [0, 1, 0], + [-math.sin(math.radians(euler_angles[1])), 0, math.cos(math.radians(euler_angles[1]))] + ]) + + R_z = np.array([[math.cos(math.radians(euler_angles[2])), -math.sin(math.radians(euler_angles[2])), 0], + [math.sin(math.radians(euler_angles[2])), math.cos(math.radians(euler_angles[2])), 0], + [0, 0, 1] + ]) + + R = np.dot(R_z, np.dot(R_y, R_x)) + + self.rot_matrix = R + + def to_transf_matrix(self): + """ + Creates a transformation matrix from rotation matrix and translation vector + """ + self.to_rotation_matrix() + + temp_trans = np.array([0, 0, 0]) + temp_trans = np.reshape(temp_trans, (1, 3)) + rot = np.concatenate((self.rot_matrix, temp_trans), axis=0) + rot = np.concatenate((rot, np.reshape(np.array([0, 0, 0, 1]), (4, 1))), axis=1) + + inp_trans = np.reshape(self.args.cam_position, (3,)) + inp_trans = np.concatenate((inp_trans, [1]), axis=0) + + rot[:, 3] = inp_trans + + self.trans_mat = rot + + def append_one(self, arr): + """ + :param arr: + :return: + """ + append = np.ones(arr.shape[0]) + append = np.reshape(append, (append.shape[0], 1)) + new_arr = np.concatenate((arr, append), axis=1) + return new_arr + + def define_screen_points(self): + """ + Transforms the screen points and camera position using the camera translation and orientation information provided by the user + """ + self.create_plane_points_from_bounds() + self.to_transf_matrix() + + cam_loc = 
np.array([0, 0, 0]) + screen_and_cam = np.vstack((cam_loc, self.points_list)) + screen_and_cam_hom = self.append_one(screen_and_cam) + + # 4 X SIZE^2 + screen_and_cam_hom_T = np.transpose(screen_and_cam_hom, (1, 0)) + screen_and_cam_hom_T_transformed = np.matmul(self.trans_mat, screen_and_cam_hom_T) + + # SIZE^2 X 4 + screen_and_cam_hom_transformed = np.transpose(screen_and_cam_hom_T_transformed, (1, 0)) + + # SIZE^2 X 3 + self.screen_and_cam_transformed = screen_and_cam_hom_transformed[:, :3] + + if self.args.debug_mode: + trimesh.Trimesh(vertices=self.screen_and_cam_transformed, faces=[]).export('setup_camera_rot.off') + + def define_unit_rays(self): + """ + Defines rays from camera to the screen along which + """ + # Separate screen points and camera point + points = self.screen_and_cam_transformed[1:, :] + self.cam_trans = np.reshape(self.screen_and_cam_transformed[0, :], (1, 3)) + + # Define ray paths from camera + ray_vector = (points - self.cam_trans) + + # Normalize ray vectors + norm_ray = np.linalg.norm(ray_vector, ord=2, axis=1) + norm_ray = np.reshape(norm_ray, (self.args.size * self.args.size, 1)) + + self.unit_rays = ray_vector / norm_ray + + def get_lgth_rays(self): + """ + :return: + """ + src_batch = np.repeat([self.args.light_position], self.args.size * self.args.size, axis=0) + rays = src_batch - self.final_points + norm_ray = np.linalg.norm(rays, ord=2, axis=1) + norm_ray = np.reshape(norm_ray, (self.args.size * self.args.size, 1)) + + self.ray_to_src = rays / norm_ray + + def run(self): + """ + Runs the ray marching algorithm + """ + print(self.args) + path = NDF.loadNDF( + mode = 'test', index = self.args.index, + pointcloud_samples = self.args.pc_samples, + exp_name = self.args.exp_name, data_dir = self.args.data_dir, + split_file = self.args.split_file, sample_distribution = self.args.sample_ratio, + sample_sigmas = self.args.sample_std_dev, res = self.args.input_res + ) + + depth = np.zeros((self.args.size * self.args.size, 1)) + depth_gt = np.zeros((self.args.size * self.args.size, 1)) + + cam_batch = np.repeat(self.cam_trans, self.args.size * self.args.size, axis=0) + points = cam_batch.copy() + points_gt = cam_batch.copy() + + iter = 1 + + ray = self.unit_rays.copy() + ray_gt = self.unit_rays.copy() + + indices_cont_all = list(range(self.args.size * self.args.size)) + indices_cont_all_gt = list(range(self.args.size * self.args.size)) + + trimesh_mesh = trimesh.load(os.path.join(path, 'model_scaled.off')) + # compute gt surface points + + #dists_gt = NDF.predictGtNDF(points, trimesh_mesh) + #points_gt = points + ray * np.expand_dims(dists_gt, axis=-1) + + ''' + while len(indices_cont_all) > 0: + + print('Iter:', iter) + dists_points_gt = NDF.predictGtNDF(points_gt, trimesh_mesh) + dists_points_gt = np.reshape(dists_points_gt, (self.args.size * self.args.size, 1)) + + indices_stop_gt = np.where(dists_points_gt < self.args.epsilon)[0] + indices_stop2_gt = np.where(depth_gt > self.args.max_depth)[0] + indices_stop_all_gt = list(set(indices_stop_gt).union(set(indices_stop2_gt))) + # print(len(indices_stop_all)) + + ray_gt[indices_stop_all_gt] = 0 + setA = set(range(self.args.size * self.args.size)) + setB = set(indices_stop_all_gt) + indices_cont_all_gt = list(setA.difference(setB)) + + + # print(len(indices_cont_all)) + depth[indices_cont_all_gt] = depth[indices_cont_all_gt] + self.args.alpha * dists_points_gt[indices_cont_all_gt] + points_gt = points_gt + (ray_gt * (self.args.alpha * dists_points_gt)) + iter = iter + 1 + + iter = 1 + ''' + + #print(points.shape) + 
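+ # Ray-marching loop: query the network for the distance at the current ray points,
+ # advance each still-active ray by alpha * distance, and retire rays whose distance
+ # falls below epsilon (surface hit) or whose accumulated depth exceeds max_depth.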
+ + while len(indices_cont_all) > 0: + + print('Iter:', iter) + dists_points = NDF.predictRotNDF(points) + dists_points = np.reshape(dists_points, (self.args.size * self.args.size, 1)) + + indices_stop = np.where(dists_points < self.args.epsilon)[0] + indices_stop2 = np.where(depth > self.args.max_depth)[0] + indices_stop_all = list(set(indices_stop).union(set(indices_stop2))) + # print(len(indices_stop_all)) + + ray[indices_stop_all] = 0 + setA = set(range(self.args.size * self.args.size)) + setB = set(indices_stop_all) + indices_cont_all = list(setA.difference(setB)) + + + # print(len(indices_cont_all)) + depth[indices_cont_all] = depth[indices_cont_all] + self.args.alpha * dists_points[indices_cont_all] + points = points + (ray * (self.args.alpha * dists_points)) + iter = iter + 1 + + #print(np.max(dists_points)) + #print(points.shape) + #print(np.max(depth)) + #print(np.min(depth)) + #print(len(depth>self.args.max_depth)) + + points = points - (self.unit_rays * self.args.step_back) + + self.final_points = points.copy() + + ## NORMALS + self.depth_np = depth.copy() + self.depth_np[self.depth_np > self.args.max_depth] = self.args.max_depth + + dists, gradients = NDF.predictRotGradientNDF(points) + + #dist_gt, gradients_gt = NDF.predictGtGradientNDF(points_gt, trimesh_mesh) + vertices = trimesh_mesh.vertices.copy() + trimesh_mesh.vertices[:,1], trimesh_mesh.vertices[:,2] = vertices[:,2], vertices[:,1] + gradients_gt, depth_gt, all_loc_intersect = ray_trace_mesh(trimesh_mesh, points_gt, ray_gt) + + self.depth_gt = depth_gt.copy() + self.depth_gt[self.depth_gt > self.args.max_depth] = self.args.max_depth + + self.final_gradients = gradients.copy() + self.normals = np.reshape(gradients, (self.args.size * self.args.size, 3)) + + self.final_gradients_gt = gradients_gt.copy() + self.normals_gt = np.reshape(gradients_gt, (self.args.size * self.args.size, 3)) + + + def save(self, image, name, size, normalize): + """ + :param image: Input image as np array + :param name: Name of file to be stored + :param size: Size of the image + :param normalize: whether to normalize all values to 0-1 + Saves individual images + """ + if normalize: + image = (image + 1)/2 + image = np.reshape(image, (self.args.size, self.args.size, size)) + + image = cv2.transpose(image) + image = cv2.flip(image, 0) + image = image[90:610, :] + + cv2.imwrite(os.path.join(self.args.folder, name), np.uint8(255 * image)) + + def save_images(self): + """ + Saves Images after completion of the rendering algorithm + """ + shade = np.sum(np.multiply(-self.unit_rays, self.normals), axis=1) + shade = np.reshape(shade, (shade.shape[0], 1)) + + shade[self.depth_np == self.args.max_depth] = 1 + self.save(shade, 'shade.jpg', 1, True) + + # SHADE WITH LIGhT SOURCE + if self.args.shade: + self.get_lgth_rays() + shd_lgth = np.sum(np.multiply(self.ray_to_src, self.normals), axis=1) + shd_lgth = np.reshape(shd_lgth, (shd_lgth.shape[0], 1)) + shd_lgth[self.depth_np == self.args.max_depth ] = 1 + self.save(shd_lgth, 'shade_src.jpg', 1, True) + + if self.args.normal: + RGB_normals = self.final_gradients.copy() + inds = (self.depth_np == self.args.max_depth) + for j in range(3): + new_arr = np.reshape(RGB_normals[:, j], (self.args.size * self.args.size, 1)) + new_arr[inds] = 1 + + black_pixels_mask = np.all(RGB_normals == [0, 0, 0], axis=-1) + RGB_normals[black_pixels_mask] = np.array([1, 1, 1]) + self.save(RGB_normals, 'normals.jpg', 3, True) + + # save gt mesh normal map + RGB_normals_gt = self.final_gradients_gt.copy() + inds = (self.depth_gt == 
self.args.max_depth) + for j in range(3): + new_arr = np.reshape(RGB_normals_gt[:, j], (self.args.size * self.args.size, 1)) + new_arr[inds] = 1 + + black_pixels_mask_gt = np.all(RGB_normals_gt == [0, 0, 0], axis=-1) + RGB_normals_gt[black_pixels_mask_gt] = np.array([1, 1, 1]) + self.save(RGB_normals_gt, 'normals_gt.jpg', 3, True) + + if self.args.depth: + depth_normalized = np.copy(self.depth_np / self.args.max_depth) + self.save(depth_normalized, 'depth_final.jpg', 1, False) + +if __name__ == "__main__": + renderer = Renderer() + renderer.run() + renderer.save_images() \ No newline at end of file diff --git a/contrib/HSDF-Net/slurm_scripts/out/.gitkeep b/contrib/HSDF-Net/slurm_scripts/out/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/contrib/HSDF-Net/slurm_scripts/run_preprocessing.sh b/contrib/HSDF-Net/slurm_scripts/run_preprocessing.sh new file mode 100644 index 00000000..fc99ffc5 --- /dev/null +++ b/contrib/HSDF-Net/slurm_scripts/run_preprocessing.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +#SBATCH -p cpu20 +#SBATCH -o ./slurm_scripts/out/%j.out +#SBATCH -e ./slurm_scripts/out/%j.err +#SBATCH -t 1:00:00 +#SBATCH -a 0-199%30 +#SBATCH -c 32 + + +python dataprocessing/preprocess.py --num_cpus 32 --num_chunks 200 --current_chunk $SLURM_ARRAY_TASK_ID diff --git a/contrib/HSDF-Net/teaser.jpg b/contrib/HSDF-Net/teaser.jpg new file mode 100644 index 00000000..5f5e0a28 Binary files /dev/null and b/contrib/HSDF-Net/teaser.jpg differ diff --git a/contrib/HSDF-Net/train.py b/contrib/HSDF-Net/train.py new file mode 100644 index 00000000..06b09364 --- /dev/null +++ b/contrib/HSDF-Net/train.py @@ -0,0 +1,57 @@ +import models.local_model as model +import models.data.voxelized_data_shapenet as voxelized_data +from models import training +import jittor +import configs.config_loader as cfg_loader +import os + +jittor.flags.use_cuda=1 + +cfg = cfg_loader.get_config() + +# # configure apex +# torch.cuda.set_device(cfg.local_rank) +# torch.distributed.init_process_group( +# 'nccl', +# init_method='env://' +# ) + +net = model.HSDF() + +print("local rank: {}".format(cfg.local_rank)) + +train_dataset = voxelized_data.VoxelizedDataset('train', + res=cfg.input_res, + pointcloud_samples=cfg.num_points, + data_path=cfg.data_dir, + split_file=cfg.split_file, + batch_size=cfg.batch_size, + num_sample_points=cfg.num_sample_points_training, + num_workers=2, + sample_distribution=cfg.sample_ratio, + sample_sigmas=cfg.sample_std_dev) +val_dataset = voxelized_data.VoxelizedDataset('val', + res=cfg.input_res, + pointcloud_samples=cfg.num_points, + data_path=cfg.data_dir, + split_file=cfg.split_file, + batch_size=cfg.batch_size, + num_sample_points=cfg.num_sample_points_training, + num_workers=2, + sample_distribution=cfg.sample_ratio, + sample_sigmas=cfg.sample_std_dev) + +# # debug for NaN +# torch.autograd.set_detect_anomaly(True) + +trainer = training.Trainer(net, + #torch.device('cuda:{}'.format(cfg.local_rank)), + train_dataset, + val_dataset, + cfg.exp_name, + optimizer=cfg.optimizer, + lr=cfg.lr, + # local_rank=cfg.local_rank, + cls_threshold=cfg.threshold) + +trainer.train_model(cfg.num_epochs) diff --git a/contrib/HSDF-Net/trainers/base_trainer.py b/contrib/HSDF-Net/trainers/base_trainer.py new file mode 100644 index 00000000..e58ce607 --- /dev/null +++ b/contrib/HSDF-Net/trainers/base_trainer.py @@ -0,0 +1,37 @@ + +class BaseTrainer(): + + def __init__(self, cfg, args): + pass + + def update(self, data, *args, **kwargs): + raise NotImplementedError("Trainer [update] not implemented.") + + def 
epoch_end(self, epoch, writer=None, **kwargs): + # Signal now that the epoch ends.... + pass + + def multi_gpu_wrapper(self, wrapper): + raise NotImplementedError("Trainer [multi_gpu_wrapper] not implemented.") + + def log_train(self, train_info, train_data, + writer=None, step=None, epoch=None, visualize=False, + **kwargs): + raise NotImplementedError("Trainer [log_train] not implemented.") + + def validate(self, test_loader, epoch, *args, **kwargs): + raise NotImplementedError("Trainer [validate] not implemented.") + + def log_val(self, val_info, writer=None, step=None, epoch=None, **kwargs): + if writer is not None: + for k, v in val_info.items(): + if step is not None: + writer.add_scalar(k, v, step) + else: + writer.add_scalar(k, v, epoch) + + def save(self, epoch=None, step=None, appendix=None, **kwargs): + raise NotImplementedError("Trainer [save] not implemented.") + + def resume(self, path, strict=True, **kwargs): + raise NotImplementedError("Trainer [resume] not implemented.") diff --git a/contrib/HSDF-Net/trainers/implicit_deform.py b/contrib/HSDF-Net/trainers/implicit_deform.py new file mode 100644 index 00000000..c9fec817 --- /dev/null +++ b/contrib/HSDF-Net/trainers/implicit_deform.py @@ -0,0 +1,317 @@ +import os +#import torch +import jittor +import importlib +import os.path as osp +from argparse import Namespace +#import torch.nn.functional as F +import jittor.nn as F +from trainers.base_trainer import BaseTrainer +from trainers.utils.utils import set_random_seed +from trainers.utils.igp_utils import sample_points +from trainers.losses.eikonal_loss import loss_eikonal +from models.igp_wrapper import distillation, deformation +from trainers.losses.implicit_thin_shell_losses import \ + stretch_loss, bending_loss + + +def deform_step( + net, opt, original, handles_ts, targets_ts, dim=3, + # Clip gradient + grad_clip=None, + # Sample points + sample_cfg=None, x=None, weights=1, + # Loss handle + loss_h_weight=1., use_l1_loss=False, loss_h_thr=None, + # Loss G + loss_g_weight=1e-2, n_g_pts=5000, + # Loss bending + loss_hess_weight=0., n_hess_pts=5000, hess_use_surf_points=True, + hess_invert_sample=True, hess_detach_weight=True, hess_use_rejection=False, + # Loss stretch + loss_stretch_weight=0., n_s_pts=5000, stretch_use_surf_points=True, + stretch_invert_sample=True, stretch_loss_type='area_length', + stretch_use_weight=False, stretch_detach_weight=True, + stretch_use_rejection=False, +): + opt.zero_grad() + + # Compute handle losses + # x + handles_ts = handles_ts.clone().detach().float().cuda() + # y + targets_ts = targets_ts.clone().detach().float().cuda() + constr = ( + net(targets_ts, return_delta=True)[0] + targets_ts - handles_ts + ).view(-1, dim).norm(dim=-1, keepdim=False) + if loss_h_thr is not None: + loss_h_thr = float(loss_h_thr) + constr = F.relu(constr - loss_h_thr) + if use_l1_loss: + loss_h = F.l1_loss( + constr, jittor.zeros_like(constr)) * loss_h_weight + else: + loss_h = F.mse_loss( + constr, jittor.zeros_like(constr)) * loss_h_weight + + if sample_cfg is not None and x is None: + x, weights = sample_points( + npoints=getattr(sample_cfg, "num_points", 5000), + dim=dim, inp_nf=original, out_nf=net, deform=net.deform, + sample_surf_points=getattr(sample_cfg, "use_surf_points", True), + invert_sampling=getattr(sample_cfg, "invert_sample", True), + detach_weight=getattr(sample_cfg, "detach_weight", True), + use_rejection=getattr(sample_cfg, "use_rejection", False) + ) + + if loss_g_weight > 0.: + loss_g = loss_eikonal(net, npoints=n_g_pts, dim=dim, x=x) * 
loss_g_weight + else: + loss_g = jittor.zeros(1).cuda().float() + + if loss_hess_weight > 0.: + loss_hess = bending_loss( + inp_nf=original, out_nf=net, deform=net.deform, + dim=dim, npoints=n_hess_pts, + use_surf_points=hess_use_surf_points, + invert_sampling=hess_invert_sample, + x=x, weights=weights, + detach_weight=hess_detach_weight, + use_rejection=hess_use_rejection, + ) + loss_hess *= loss_hess_weight + else: + loss_hess = jittor.zeros(1).cuda().float() + + if loss_stretch_weight > 0.: + loss_stretch = stretch_loss( + inp_nf=original, out_nf=net, deform=net.deform, + npoints=n_s_pts, dim=dim, + use_surf_points=stretch_use_surf_points, + invert_sampling=stretch_invert_sample, + loss_type=stretch_loss_type, + x=x, weights=weights, + detach_weight=stretch_detach_weight, + use_rejection=stretch_use_rejection, + ) + loss_stretch *= loss_stretch_weight + else: + loss_stretch = jittor.zeros(1).cuda().float() + + loss = loss_h + loss_g + loss_hess + loss_stretch + opt.backward(loss) + if grad_clip is not None: + opt.clip_grad_norm(net.deform.parameters(), grad_clip) + + opt.step() + + return { + 'loss': loss.detach().cpu().item(), + 'loss_h': loss_h.detach().cpu().item(), + # Repairing + 'loss_g': loss_g.detach().cpu().item(), + # Shell energy + 'loss_hess': loss_hess.detach().cpu().item(), + 'loss_stretch': loss_stretch.detach().cpu().item() + } + + +class Trainer(BaseTrainer): + + def __init__(self, cfg, args, original_decoder=None): + super().__init__(cfg, args) + self.cfg = cfg + self.args = args + set_random_seed(getattr(self.cfg.trainer, "seed", 666)) + + # The networks + # TODO: add recursive loading of trainers. + if original_decoder is None: + sn_lib = importlib.import_module(cfg.models.decoder.type) + self.original_net = sn_lib.Net(cfg, cfg.models.decoder) + self.original_net.cuda() + self.original_net.load_state_dict( + jittor.load(cfg.models.decoder.path)['net']) + print("Original Decoder:") + print(self.original_net) + else: + self.original_net = original_decoder + + # Get the wrapper for the operation + self.wrapper_type = getattr( + cfg.trainer, "wrapper_type", "distillation") + if self.wrapper_type in ['distillation']: + self.net, self.opt, self.sch = distillation( + cfg, self.original_net, + reload=getattr(self.cfg.trainer, "reload_decoder", True)) + elif self.wrapper_type in ['deformation']: + self.net, self.opt, self.sch = deformation( + cfg, self.original_net) + else: + raise ValueError("wrapper_type:", self.wrapper_type) + + # Prepare save directory + os.makedirs(osp.join(cfg.save_dir, "images"), exist_ok=True) + os.makedirs(osp.join(cfg.save_dir, "checkpoints"), exist_ok=True) + os.makedirs(osp.join(cfg.save_dir, "val"), exist_ok=True) + os.makedirs(osp.join(cfg.save_dir, "vis"), exist_ok=True) + + # Set-up counter + self.num_update_step = 0 + self.boundary_points = None + + # Set up basic parameters + self.dim = getattr(cfg.trainer, "dim", 3) + self.grad_clip = getattr(cfg.trainer, "grad_clip", None) + self.loss_h_weight = getattr(cfg.trainer, "loss_h_weight", 100) + self.loss_h_thr = getattr(cfg.trainer, "loss_h_thr", 1e-3) + + if hasattr(cfg.trainer, "loss_g"): + self.loss_g_cfg = cfg.trainer.loss_g + else: + self.loss_g_cfg = Namespace(**{}) + + if hasattr(cfg.trainer, "loss_bend"): + self.loss_bend_cfg = cfg.trainer.loss_bend + else: + self.loss_bend_cfg = Namespace(**{}) + + if hasattr(cfg.trainer, "loss_stretch"): + self.loss_stretch_cfg = cfg.trainer.loss_stretch + else: + self.loss_stretch_cfg = Namespace() + + if hasattr(cfg.trainer, "sample_cfg"): + 
self.sample_cfg = cfg.trainer.sample_cfg + else: + self.sample_cfg = None + + self.show_network_hist = getattr( + cfg.trainer, "show_network_hist", False) + + def update(self, data, *args, **kwargs): + self.num_update_step += 1 + handles_ts = data['handles'].cuda().float() + targets_ts = data['targets'].cuda().float() + if 'x' in data and 'weights' in data: + x_ts = data['x'].cuda().float() + w_ts = data['weights'].cuda().float() + else: + x_ts = None + w_ts = 1. + + loss_g_weight = float(getattr(self.loss_g_cfg, "weight", 1e-3)) + loss_hess_weight = float(getattr(self.loss_bend_cfg, "weight", 0.)) + loss_stretch_weight = float( + getattr(self.loss_stretch_cfg, "weight", 0)) + step_res = deform_step( + self.net, self.opt, self.original_net, + handles_ts, targets_ts, dim=self.dim, + x=x_ts, weights=w_ts, + sample_cfg=self.sample_cfg, + # Loss handle + loss_h_weight=self.loss_h_weight, + loss_h_thr=self.loss_h_thr, + # Loss G + loss_g_weight=loss_g_weight, + n_g_pts=getattr(self.loss_g_cfg, "num_points", 5000), + + # Loss Hessian + loss_hess_weight=loss_hess_weight, + n_hess_pts=getattr(self.loss_bend_cfg, "num_points", 5000), + hess_use_surf_points=getattr( + self.loss_bend_cfg, "use_surf_points", True), + hess_invert_sample=getattr( + self.loss_bend_cfg, "invert_sample", True), + hess_detach_weight=getattr( + self.loss_bend_cfg, "detach_weight", True), + hess_use_rejection=getattr( + self.loss_bend_cfg, "use_rejection", True), + + # Loss stretch + loss_stretch_weight=loss_stretch_weight, + n_s_pts=getattr(self.loss_stretch_cfg, "num_points", 5000), + stretch_use_surf_points=getattr( + self.loss_stretch_cfg, "use_surf_points", True), + stretch_invert_sample=getattr( + self.loss_stretch_cfg, "invert_sample", True), + stretch_loss_type=getattr( + self.loss_stretch_cfg, "loss_type", "l2"), + stretch_use_weight=getattr( + self.loss_stretch_cfg, "use_weight", True), + stretch_detach_weight=getattr( + self.loss_stretch_cfg, "detach_weight", True), + stretch_use_rejection=getattr( + self.loss_stretch_cfg, "use_rejection", True), + + # Gradient clipping + grad_clip=self.grad_clip, + ) + step_res = { + ('scalar/loss/%s' % k): v for k, v in step_res.items() + } + step_res['loss'] = step_res['scalar/loss/loss'] + step_res.update({ + "scalar/weight/loss_h_weight": self.loss_h_weight, + 'scalar/weight/loss_hess_weight': loss_hess_weight, + 'scalar/weight/loss_stretch_weight': loss_stretch_weight, + }) + return step_res + + def log_train(self, train_info, train_data, writer=None, + step=None, epoch=None, visualize=False, **kwargs): + if writer is None: + return + writer_step = step if step is not None else epoch + + # Log training information to tensorboard + train_info = {k: (v.cpu() if not isinstance(v, float) else v) + for k, v in train_info.items()} + for k, v in train_info.items(): + ktype = k.split("/")[0] + kstr = "/".join(k.split("/")[1:]) + if ktype == 'scalar': + writer.add_scalar(kstr, v, writer_step) + + if self.show_network_hist: + for name, p in self.net.named_parameters(): + writer.add_histogram("dec/%s" % name, p, writer_step) + for name, p in self.original_net.named_parameters(): + writer.add_histogram("orig_dec/%s" % name, p, writer_step) + + def validate(self, test_loader, epoch, *args, **kwargs): + # TODO: compute mesh and compute the manifold harmonics to + # see if the high frequencies signals are dimed/suppressed + return {} + + def save(self, epoch=None, step=None, appendix=None, **kwargs): + d = { + 'dec': self.original_net.state_dict(), + 'net_opt_dec': self.opt.state_dict(), + 
'next_dec': self.net.state_dict(), + 'epoch': epoch, + 'step': step + } + if appendix is not None: + d.update(appendix) + save_name = "epoch_%s_iters_%s.pt" % (epoch, step) + jittor.save(d, osp.join(self.cfg.save_dir, "checkpoints", save_name)) + jittor.save(d, osp.join(self.cfg.save_dir, "latest.pt")) + + def resume(self, path, strict=True, **kwargs): + ckpt = jittor.load(path) + self.original_net.load_state_dict(ckpt['dec'], strict=strict) + self.net.load_state_dict(ckpt['next_dec'], strict=strict) + self.opt.load_state_dict(ckpt['net_opt_dec']) + start_epoch = ckpt['epoch'] + return start_epoch + + def multi_gpu_wrapper(self, wrapper): + self.net = wrapper(self.net) + + def epoch_end(self, epoch, writer=None, **kwargs): + if self.sch is not None: + self.sch.step(epoch=epoch) + if writer is not None: + writer.add_scalar( + 'lr/opt_dec_lr_sch', self.sch.get_lr()[0], epoch) diff --git a/contrib/HSDF-Net/trainers/implicit_deform_2D.py b/contrib/HSDF-Net/trainers/implicit_deform_2D.py new file mode 100644 index 00000000..a04ac07d --- /dev/null +++ b/contrib/HSDF-Net/trainers/implicit_deform_2D.py @@ -0,0 +1,184 @@ +#import torch +import jittor +import numpy as np +from trainers.utils.vis_utils import imf2img, make_2d_grid +from trainers.utils.igp_losses import get_surf_pcl +from trainers.implicit_deform import Trainer as BaseTrainer +from argparse import Namespace +import matplotlib.pyplot as plt + + +try: + from evaluation.evaluation_metrics import EMD_CD + eval_reconstruciton = True +except: # noqa + # Skip evaluation + eval_reconstruciton = False + + +class Trainer(BaseTrainer): + + def __init__(self, cfg, args, original_decoder=None): + super().__init__(cfg, args, original_decoder=original_decoder) + self.dim = 2 + self.vis_cfg = getattr(self.cfg.trainer, "vis", Namespace()) + + def visualize( + self, train_data, train_info, + writer=None, step=None, epoch=None, **kwargs): + figsize = getattr(self.vis_cfg, "figsize", 5) + res = getattr(self.vis_cfg, "res", 100) + handle_np = ( + (train_data['handles'] + 1.) 
* float(res) * 0.5 + ).detach().cpu().numpy().reshape(-1, 2) + new_handle_np = ( + (train_data['targets'] + 1) * float(res) * 0.5 + ).detach().cpu().numpy().reshape(-1, 2) + orig_img = imf2img( + lambda x: self.original_net(x, None), res=res + ).reshape(res, res) + img = imf2img( + lambda x: self.net(x, None), res=res).reshape(res, res) + + qres = getattr(self.vis_cfg, "qres", 15) + orig_loc = make_2d_grid(qres).view(qres * qres, 2).detach().numpy() + dfm= self.net( + jittor.Var(orig_loc).cuda().view(1, -1, 2).float(), + None, return_delta=True + )[0].detach().cpu().numpy().reshape(qres * qres, 2) + + fig_s = plt.figure(figsize=(figsize, figsize)) + plt.tight_layout() + axs = fig_s.add_subplot(111) + axs.contour(orig_img, levels=[0], linestyles='dotted') + axs.contour(img, levels=[0]) + axs.scatter(handle_np[:, 0], handle_np[:, 1], c='b', marker='*') + axs.scatter(new_handle_np[:, 0], new_handle_np[:, 1], c='r', marker='o') + axs.set_title("Shape") + writer.add_figure( + "shape", fig_s, global_step=(step if step is not None else epoch)) + + fig_f = plt.figure(figsize=(figsize, figsize)) + plt.tight_layout() + axf = fig_f.add_subplot(111) + axf.contour(orig_img, levels=[0], linestyles='dotted') + axf.contour(img) + axf.scatter(handle_np[:, 0], handle_np[:, 1], c='b', marker='*') + axf.scatter(new_handle_np[:, 0], new_handle_np[:, 1], c='r', marker='o') + axf.set_title("Field") + writer.add_figure( + "field", fig_f, global_step=(step if step is not None else epoch)) + + fig_d = plt.figure(figsize=(figsize, figsize)) + plt.tight_layout() + axd = fig_d.add_subplot(111) + axd.set_title("Deform Direction") + dfm_norm = np.linalg.norm(dfm, axis=-1).reshape(-1, 1) + dfm_dirc = (dfm / dfm_norm).reshape(-1, 2) + loc_in_res = 0.5 * (orig_loc + 1.) * res + axd.contour(orig_img, levels=[0], linestyles='dotted', colors='r') + axd.contour(img, levels=[0], colors='r') + axd.quiver( + loc_in_res[:, 0], loc_in_res[:, 1], dfm_dirc[:, 0], dfm_dirc[:, 1]) + axd.scatter(handle_np[:, 0], handle_np[:, 1], c='b', marker='*') + axd.scatter(new_handle_np[:, 0], new_handle_np[:, 1], c='r', marker='o') + writer.add_figure( + "deform_direction", fig_d, + global_step=(step if step is not None else epoch)) + + fig_l = plt.figure(figsize=(figsize, figsize)) + plt.tight_layout() + axl = fig_l.add_subplot(111) + dfm = imf2img( + lambda x: self.net(x, None, return_delta=True)[0], res=res) + dfm_norm_img = np.linalg.norm( + dfm.reshape(res, res, 2), axis=-1).reshape(res, res) + axl.contourf(dfm_norm_img) + axl.set_title("Deform Length") + axl.contour(orig_img, levels=[0], linestyles='dotted', colors='r') + axl.contour(img, levels=[0], colors='r') + axl.scatter(handle_np[:, 0], handle_np[:, 1], c='b', marker='*') + axl.scatter(new_handle_np[:, 0], new_handle_np[:, 1], c='r', marker='o') + writer.add_figure( + "deform_length", fig_l, + global_step=(step if step is not None else epoch)) + + # Sample points just to make sure this is also exampled + # 1. Sample directly with net + # 2. Sample directly with original + # 3. 
Inverse the originally sampled + n_pts_smp = getattr(self.vis_cfg, "n_pts_smp", 1000) + print("Sampling forward!") + x_forward = get_surf_pcl( + lambda x: self.net(x, None), npoints=n_pts_smp, dim=2 + ).view(-1, 2).detach().cpu().numpy() + fig_xfwd = plt.figure(figsize=(figsize, figsize)) + plt.tight_layout() + ax_xfwd = fig_xfwd.add_subplot(111) + ax_xfwd.set_title("Npoints:%d" % int(x_forward.shape[0])) + if x_forward.shape[0] > 0: + ax_xfwd.scatter(x_forward[:, 0], x_forward[:, 1], s=5, marker='o') + ax_xfwd.set_xlim(-1, 1) + ax_xfwd.set_ylim(-1, 1) + writer.add_figure( + "x_forward", fig_xfwd, + global_step=(step if step is not None else epoch)) + + print("Sampling!") + x_orig = get_surf_pcl( + lambda x: self.original_net(x, None), + npoints=n_pts_smp, dim=2).view(-1, 2) + if hasattr(self.net, "deform") and x_orig.size(0) > 0 and \ + hasattr(self.net.deform, "invert"): + with jittor.no_grad(): + x_invert = self.net.deform.invert( + x_orig.view(1, -1, 2), iters=30) + x_invert = x_invert.detach().cpu().numpy().reshape(-1, 2) + + fig_xinv = plt.figure(figsize=(figsize, figsize)) + plt.tight_layout() + ax_xinv = fig_xinv.add_subplot(111) + ax_xinv.set_title("Npoints:%d" % int(x_invert.shape[0])) + ax_xinv.scatter(x_invert[:, 0], x_invert[:, 1], s=5, marker='o') + ax_xinv.set_xlim(-1, 1) + ax_xinv.set_ylim(-1, 1) + writer.add_figure( + "x_invert", fig_xinv, + global_step=(step if step is not None else epoch)) + + x_orig = x_orig.detach().cpu().numpy().reshape(-1, 2) + fig_xorg = plt.figure(figsize=(figsize, figsize)) + plt.tight_layout() + ax_xorig = fig_xorg.add_subplot(111) + ax_xorig.set_title("Npoints:%d" % int(x_orig.shape[0])) + if x_orig.shape[0] > 0: + ax_xorig.scatter(x_orig[:, 0], x_orig[:, 1], s=5, marker='o') + ax_xorig.set_xlim(-1, 1) + ax_xorig.set_ylim(-1, 1) + writer.add_figure( + "x_orig", fig_xorg, + global_step=(step if step is not None else epoch)) + + + def validate(self, test_loader, epoch, *args, **kwargs): + # TODO: compute mesh and compute the manifold harmonics to + # see if the high frequencies signals are dimed/suppressed + val_res = getattr(self.cfg.trainer, "val_res", 128) + _, orig_stats = imf2img( + lambda x: self.original_net(x, None), res=val_res, + return_stats=True, verbose=True + ) + _, net_stats = imf2img( + lambda x: self.net(x, None), res=val_res, + return_stats=True, verbose=True + ) + + return { + 'val/org_area': orig_stats['area'], + 'val/new_area': net_stats['area'], + 'val/area_change_ratio': net_stats['area'] / (orig_stats['area'] + 1e-5), + 'val/org_length': orig_stats['len'], + 'val/new_length': net_stats['len'], + 'val/length_change_ratio': net_stats['len'] / (orig_stats['len'] + 1e-5), + } + diff --git a/contrib/HSDF-Net/trainers/implicit_deform_3D.py b/contrib/HSDF-Net/trainers/implicit_deform_3D.py new file mode 100644 index 00000000..824f3506 --- /dev/null +++ b/contrib/HSDF-Net/trainers/implicit_deform_3D.py @@ -0,0 +1,151 @@ +import os +#import torch +import jittor +import trimesh +import numpy as np +from argparse import Namespace +from trainers.utils.vis_utils import imf2mesh +from evaluation.evaluation_metrics import CD, EMD +from trainers.implicit_deform import Trainer as BaseTrainer +from trainers.utils.igp_utils import compute_invert_weight + + +class Trainer(BaseTrainer): + + def __init__(self, cfg, args, original_decoder=None): + super().__init__(cfg, args, original_decoder=original_decoder) + self.dim = 3 + self.vis_cfg = getattr(self.cfg.trainer, "vis", Namespace()) + + # same resolution as the one from + self.res = 
int(getattr(self.cfg.trainer, "mc_res", 256)) + self.thr = float(getattr(self.cfg.trainer, "mc_thr", 0.)) + self.original_mesh, self.original_mesh_stats = imf2mesh( + lambda x: self.original_net(x), + res=self.res, threshold=self.thr, + normalize=True, norm_type='res', return_stats=True + ) + + if hasattr(self.cfg.trainer, "mesh_presample"): + self.presample_cfg = self.cfg.trainer.mesh_presample + self.presmp_npoints = getattr( + self.presample_cfg, "num_points", 10000) + else: + self.presmp_npoints = None + + def update(self, data, *args, **kwargs): + if self.presmp_npoints is not None: + uniform_pcl_orig = self.original_mesh.sample(self.presmp_npoints) + with jittor.no_grad(): + x_invert_uniform = self.net.deform.invert( + jittor.Var(uniform_pcl_orig).float().cuda().view(-1, 3), + iters=30 + ).view(1, -1, 3).cuda().float() + + weights = compute_invert_weight( + x_invert_uniform, + inp_nf=self.original_net, + out_nf=self.net, + deform=self.net.deform, + surface=True, + ).cuda().float().view(1, -1) + + if getattr(self.presample_cfg, "detach_weight", False): + weights = weights.detach() + + data.update({ + 'x': x_invert_uniform, + 'weights': weights + }) + return super().update(data, *args, **kwargs) + + def visualize( + self, train_data, train_info, + writer=None, step=None, epoch=None, **kwargs): + res = int(getattr(self.cfg.trainer, "vis_mc_res", 64)) + thr = float(getattr(self.cfg.trainer, "vis_mc_thr", 0.)) + with jittor.no_grad(): + print("Visualize: %s %s" % (step, epoch)) + mesh = imf2mesh( + lambda x: self.net(x, None), res=res, threshold=thr, + normalize=True, norm_type='res' + ) + if mesh is not None: + save_name = "mesh_%diters.obj" \ + % (step if step is not None else epoch) + path = os.path.join(self.cfg.save_dir, "vis", save_name) + mesh.export(path) + + def validate(self, test_loader, epoch, *args, **kwargs): + print("Validating : %d" % epoch) + + cd_gtr = 0 + emd_gtr = 0 + cd_out = 0 + emd_out = 0 + cd_ratio = 0. + emd_ratio = 0 + area_ratio = 0. + vol_ratio = 0. + + with jittor.no_grad(): + new_mesh, new_mesh_stats = imf2mesh( + lambda x: self.net(x), + res=self.res, threshold=self.thr, + normalize=True, norm_type='res', return_stats=True + ) + if new_mesh is not None: + save_name = "mesh_%diters.obj" % epoch + path = os.path.join(self.cfg.save_dir, "val", save_name) + new_mesh.export(path) + + area_ratio = new_mesh_stats['area'] / (self.original_mesh_stats['area'] + 1e-5) + vol_ratio = new_mesh_stats['vol'] / (self.original_mesh_stats['vol'] + 1e-5) + + for test_data in test_loader: + break + if 'gtr_verts' in test_data and 'gtr_faces' in test_data: + npoints = getattr(self.cfg.trainer, "val_npoints", 2048) + gtr_verts = test_data['gtr_verts'].detach().view(-1, 3).cpu().numpy() + gtr_faces = test_data['gtr_faces'].detach().view(-1, 3).cpu().numpy() + gtr_mesh = trimesh.Trimesh(vertices=gtr_verts, faces=gtr_faces) + + gtr_pcl0 = gtr_mesh.sample(npoints)[np.newaxis, ...] + gtr_pcl1 = gtr_mesh.sample(npoints)[np.newaxis, ...] + out_pcl = new_mesh.sample(npoints)[np.newaxis, ...] 
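+ # Chamfer/EMD baseline: distances between two independent samplings of the ground-truth mesh
+ # (cd_gtr, emd_gtr) estimate sampling noise; the deformed mesh is then reported relative to that
+ # baseline via cd_ratio = cd_out / cd_gtr and emd_ratio = emd_out / emd_gtr.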
+ print(gtr_pcl0.shape, gtr_pcl1.shape, out_pcl.shape) + + cd_gtr, dists_gtr = CD( + jittor.Var(gtr_pcl0), jittor.Var(gtr_pcl1)) + cd_out, dists_out = CD( + jittor.Var(gtr_pcl0), jittor.Var(out_pcl)) + cd_ratio = cd_out / (cd_gtr + 1e-8) + + emd_gtr, _ = EMD( + jittor.Var(gtr_pcl0), jittor.Var(gtr_pcl1), + dist=dists_gtr + ) + emd_out, _ = EMD( + jittor.Var(gtr_pcl0), jittor.Var(out_pcl), + dist=dists_out + ) + emd_ratio = emd_out / (emd_gtr + 1e-8) + + res = { + 'val/org_mesh_area': self.original_mesh_stats['area'], + 'val/org_mesh_vol': self.original_mesh_stats['vol'], + 'val/new_mesh_area': new_mesh_stats['area'], + 'val/new_mesh_vol': new_mesh_stats['vol'], + 'val/area_change_ratio': area_ratio, + 'val/vol_change_ratio': vol_ratio, + 'val/cd_gtr': cd_gtr, + 'val/emd_gtr': emd_gtr, + 'val/cd_out': cd_out, + 'val/emd_out': emd_out, + 'val/cd_ratio': cd_ratio, + 'val/emd_ratio': emd_ratio + } + print(res) + return res + + diff --git a/contrib/HSDF-Net/trainers/losses/eikonal_loss.py b/contrib/HSDF-Net/trainers/losses/eikonal_loss.py new file mode 100644 index 00000000..1b69d6f7 --- /dev/null +++ b/contrib/HSDF-Net/trainers/losses/eikonal_loss.py @@ -0,0 +1,73 @@ +#import torch +#import torch.nn.functional as F +import jittor +import jittor.nn as F +from trainers.utils.diff_ops import gradient +from trainers.utils.igp_utils import sample_points + + +def loss_eikonal( + net, gtr=None, deform=None, + npoints=1000, use_surf_points=False, invert_sampling=True, + x=None, dim=3, reduction='mean', weights=None +): + if x is None: + x, weights = sample_points( + npoints, dim=dim, sample_surf_points=use_surf_points, + inp_nf=gtr, out_nf=net, deform=deform, + invert_sampling=invert_sampling, + ) + bs, npoints = x.size(0), x.size(1) + else: + assert weights is not None + if len(x.size()) == 2: + bs, npoints = 1, x.size(0) + else: + bs, npoints = x.size(0), x.size(1) + x = x.view(bs, npoints, dim) + + x.requires_grad = True + y = net(x.view(1, -1, dim)) + grad_norm = gradient(y, x).view(-1, dim).norm(dim=-1) + loss_all = jittor.nn.mse_loss( + grad_norm, jittor.ones_like(grad_norm), reduction='none') + loss_all = loss_all * weights + + if reduction == 'none': + loss = loss_all + elif reduction == 'mean': + loss = loss_all.mean() + elif reduction == 'sum': + loss = loss_all.sum() + else: + raise NotImplementedError + return loss + +def loss_eikonal_dsdf( + y, x, deform=None, + npoints=1000, use_surf_points=False, invert_sampling=True, + dim=3, reduction='mean', weights=None +): + + if len(x.size()) == 2: + bs, npoints = 1, x.size(0) + else: + bs, npoints = x.size(0), x.size(1) + #x = x.view(bs, npoints, dim) + + #x.requires_grad = True + #y = net(x.view(1, -1, dim)) + grad_norm = gradient(y, x).view(-1, dim).norm(dim=-1) + loss_all = jittor.nn.mse_loss( + grad_norm, jittor.ones_like(grad_norm), reduction='none') + loss_all = loss_all * weights + + if reduction == 'none': + loss = loss_all + elif reduction == 'mean': + loss = loss_all.mean() + elif reduction == 'sum': + loss = loss_all.sum() + else: + raise NotImplementedError + return loss diff --git a/contrib/HSDF-Net/trainers/losses/filtering_losses.py b/contrib/HSDF-Net/trainers/losses/filtering_losses.py new file mode 100644 index 00000000..69174d59 --- /dev/null +++ b/contrib/HSDF-Net/trainers/losses/filtering_losses.py @@ -0,0 +1,178 @@ +#import torch +#import torch.nn.functional as F +from trainers.utils.diff_ops import laplace +from trainers.utils.igp_utils import get_surf_pcl, sample_points +import jittor +import jittor.nn as F + +def 
loss_boundary(gtr, net, npoints=1000, dim=3, x=None, use_surf_points=False): + """ + This function tries to enforce that the field [gtr] and [net] are similar. + Basically computing |gtr(x) - net(x)| for some [x]. + [x] will be sampled from surface of [gtr] if [use_surf_points] is True + Otherwise, [x] is sampled from [-1, 1]^3 + + :param gtr: + :param net: + :param npoints: + :param dim: + :param x: + :param use_surf_points: + :return: + """ + if x is None: + x, _ = sample_points( + npoints, dim=dim, sample_surf_points=use_surf_points, + invert_sampling=False, out_nf=gtr, deform=None + ) + x = x.detach().cuda().float() + bs = 1 + x = x.view(bs, npoints, dim) + else: + if len(x.size()) == 2: + bs, npoints = 1, x.size(0) + else: + bs, npoints = x.size(0), x.size(1) + x = x.view(bs, npoints, dim) + + if use_surf_points: + net_y = net(x) + loss_all = F.mse_loss(net_y, jittor.zeros_like(net_y), reduction='none') + else: + net_y = net(x) + gtr_y = gtr(x) + loss_all = F.mse_loss(net_y, gtr_y, reduction='none') + loss_all = loss_all.view(bs, npoints) + loss = loss_all.mean() + return loss, x + + +def loss_lap( + gtr, net, deform=None, + x=None, npoints=1000, dim=3, + beta=1., masking_thr=10, return_mask=False, use_weights=False, weights=1 +): + """ + Matching the Laplacian between [gtr] and [net] on sampled points. + + :param gtr: + :param net: + :param deform: + :param x: + :param npoints: + :param dim: + :param use_surf_points: + :param invert_sampling: + :param beta: + :param masking_thr: + :param return_mask: + :param use_weights: + :param weights: + :return: + """ + if x is None: + x, weights = sample_points( + npoints, dim=dim, sample_surf_points=False, + out_nf=gtr, inp_nf=None, deform=None, invert_sampling=False, + ) + bs, npoints = x.size(0), x.size(1) + else: + if len(x.size()) == 2: + bs, npoints = 1, x.size(0) + else: + bs, npoints = x.size(0), x.size(1) + x = x.view(bs, npoints, dim) + + if deform is None: + gtr_x = x + else: + gtr_x = deform(x, None) + gtr_x = gtr_x.view(bs, npoints, dim).contiguous() + if gtr_x.is_leaf: + gtr_x.requires_grad = True + else: + gtr_x.retain_grad() + gtr_y = gtr(gtr_x) + lap_gtr = laplace(gtr_y, gtr_x, normalize=True).view(bs, npoints) + + if x.is_leaf: + x.requires_grad = True + else: + x.retain_grad() + net_y = net(x) + lap_net = laplace(net_y, x, normalize=True).view(*lap_gtr.shape) + + diff = lap_gtr * beta - lap_net + if masking_thr is not None: + mask = ((jittor.abs(lap_gtr) < masking_thr) & + (jittor.abs(lap_net) < masking_thr)) + else: + mask = jittor.ones_like(lap_gtr) > 0 + loss = F.mse_loss(diff, jittor.zeros_like(diff), reduction='none') + if use_weights: + loss = loss * weights + loss = loss[mask].mean() + if return_mask: + return loss, mask + else: + return loss + +# modify lap loss for DSDF +def loss_lap_dsdf( + gtr_y, net_y, + x=None, npoints=1000, dim=3, + beta=1., masking_thr=10, return_mask=False, use_weights=False, weights=1 +): + """ + Matching the Laplacian between [gtr] and [net] on sampled points. 
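+ The objective is the masked MSE of (beta * lap_gtr - lap_net), where lap_gtr is the normalized
+ Laplacian of [gtr] evaluated at deform(x) (or at x when [deform] is None) and lap_net is the
+ normalized Laplacian of [net] at x; points where either Laplacian exceeds [masking_thr] are masked out.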
+ + :param gtr: + :param net: + :param deform: + :param x: + :param npoints: + :param dim: + :param use_surf_points: + :param invert_sampling: + :param beta: + :param masking_thr: + :param return_mask: + :param use_weights: + :param weights: + :return: + """ + + if len(x.size()) == 2: + bs, npoints = 1, x.size(0) + else: + bs, npoints = x.size(0), x.size(1) + #x = x.view(bs, npoints, dim) + + #print('x shape: {}'.format(x.shape)) + + ''' + if x.is_leaf: + x.requires_grad = True + else: + x.retain_grad() + ''' + + #print('x grad: {}'.format(x.requires_grad)) + + lap_gtr = laplace(gtr_y, x, normalize=True).view(bs, npoints) + lap_net = laplace(net_y, x, normalize=True).view(*lap_gtr.shape) + + diff = lap_gtr * beta - lap_net + if masking_thr is not None: + mask = ((jittor.abs(lap_gtr) < masking_thr) & + (jittor.abs(lap_net) < masking_thr)) + else: + mask = jittor.ones_like(lap_gtr) > 0 + loss = F.mse_loss(diff, jittor.zeros_like(diff), reduction='none') + if use_weights: + loss = loss * weights + loss = loss[mask].mean() + if return_mask: + return loss, mask + else: + return loss diff --git a/contrib/HSDF-Net/trainers/losses/implicit_thin_shell_losses.py b/contrib/HSDF-Net/trainers/losses/implicit_thin_shell_losses.py new file mode 100644 index 00000000..26875bc5 --- /dev/null +++ b/contrib/HSDF-Net/trainers/losses/implicit_thin_shell_losses.py @@ -0,0 +1,134 @@ +#import torch +#import torch.nn.functional as F +import jittor +import jittor.nn as F +from trainers.utils.diff_ops import hessian, jacobian +from trainers.utils.igp_utils import sample_points, tangential_projection_matrix + + +def bending_loss( + inp_nf, out_nf, + # Presampled points + x=None, weights=None, + # Sampling + npoints=1000, dim=3, use_surf_points=False, deform=None, + invert_sampling=False, detach_weight=True, use_rejection=False, + # Loss related + loss_type='l2', reduction='mean', +): + if x is None: + x, weights = sample_points( + npoints, dim=dim, sample_surf_points=use_surf_points, + inp_nf=inp_nf, out_nf=out_nf, deform=deform, + invert_sampling=invert_sampling, + detach_weight=detach_weight, use_rejection=use_rejection, + ) + bs, npoints = x.size(0), x.size(1) + else: + assert weights is not None + if len(x.size()) == 2: + bs, npoints = 1, x.size(0) + else: + bs, npoints = x.size(0), x.size(1) + x = x.view(bs, npoints, dim) + + # Compute Hessian from the output space + if x.is_leaf: + x.requires_grad = True + else: + x.retain_grad() + y_out = out_nf(x) + h_out, h_out_status = hessian(y_out, x) + h_out = h_out.view(bs * npoints, dim, dim) + + # Compute the projection matrix from the output space + _, P = tangential_projection_matrix(y_out, x) + P = P.view(bs * npoints, dim, dim) + + # Compute points from the input space + x_inp = deform(x).view(bs, npoints, dim) + J, J_status = jacobian(x_inp, x) + J = J.view(bs * npoints, dim, dim) + + # Compute Hessian from the input space + x_inp.retain_grad() + y_inp = inp_nf(x_inp) + h_inp, h_inp_status = hessian(y_inp, x_inp) + h_inp = h_inp.view(bs * npoints, dim, dim) + + # Compute the projected hessians and their differences after adjustment + h_inp_J = jittor.bmm(J.transpose(1, 2).contiguous(), jittor.bmm(h_inp, J)) + diff = jittor.bmm( + P.transpose(1, 2).contiguous(), jittor.bmm(h_out - h_inp_J, P)) + + # Compute the Forbinius norm (weighted) + F_norm = diff.view(bs * npoints, -1).norm(dim=-1, keepdim=False) + F_norm = F_norm.view(bs, npoints) + F_norm = F_norm * weights + + if loss_type == 'l2': + loss = F.mse_loss( + F_norm, jittor.zeros_like(F_norm), 
reduction=reduction) + elif loss_type == 'l1': + loss = F.l1_loss( + F_norm, jittor.zeros_like(F_norm), reduction=reduction) + else: + raise ValueError + return loss + + +def stretch_loss( + inp_nf, out_nf, deform, + x=None, npoints=1000, dim=3, use_surf_points=False, invert_sampling=False, + loss_type='l2', reduction='mean', weights=1, + detach_weight=True, use_rejection=False, +): + if x is None: + x, weights = sample_points( + npoints, dim=dim, sample_surf_points=use_surf_points, + inp_nf=inp_nf, out_nf=out_nf, deform=deform, + invert_sampling=invert_sampling, + detach_weight=detach_weight, use_rejection=use_rejection, + ) + bs, npoints = x.size(0), x.size(1) + else: + assert weights is not None + if len(x.size()) == 2: + bs, npoints = 1, x.size(0) + else: + bs, npoints = x.size(0), x.size(1) + x = x.view(bs, npoints, dim) + + # Compute Projection on the output space + if x.is_leaf: + x.requires_grad = True + x.retain_grad() + y_out = out_nf(x) + _, P = tangential_projection_matrix(y_out, x) + P = P.view(bs * npoints, dim, dim) + + # Compute the deformation Jacobian + x_inp = deform(x).view(bs, npoints, dim) + J, J_status = jacobian(x_inp, x) + J = J.view(bs * npoints, dim, dim) + + # Compute the matrix of interests + I = jittor.eye(dim).view(1, dim, dim).to(J) + diff = I - jittor.bmm(J.transpose(1, 2), J) + diff = jittor.bmm(P.transpose(1, 2), jittor.bmm(diff, P)) + + # Compute the Forbinius norm (weighted) + F_norm = diff.view(bs * npoints, -1).norm(dim=-1, keepdim=False) + F_norm = F_norm.view(bs, npoints) + F_norm = F_norm * weights + + if loss_type == 'l2': + loss = F.mse_loss( + F_norm, jittor.zeros_like(F_norm), reduction=reduction) + elif loss_type == 'l1': + loss = F.l1_loss( + F_norm, jittor.zeros_like(F_norm), reduction=reduction) + else: + raise ValueError + return loss + diff --git a/contrib/HSDF-Net/trainers/nf_sdf_trainer_3D.py b/contrib/HSDF-Net/trainers/nf_sdf_trainer_3D.py new file mode 100644 index 00000000..b3eaea82 --- /dev/null +++ b/contrib/HSDF-Net/trainers/nf_sdf_trainer_3D.py @@ -0,0 +1,167 @@ +import os +#import torch +import jittor +import importlib +import os.path as osp +#import torch.nn.functional as F +from trainers.utils.diff_ops import gradient +from trainers.utils.vis_utils import imf2mesh +from trainers.base_trainer import BaseTrainer +from trainers.utils.utils import get_opt, set_random_seed + + +class Trainer(BaseTrainer): + + def __init__(self, cfg, args): + super().__init__(cfg, args) + self.cfg = cfg + self.args = args + set_random_seed(getattr(self.cfg.trainer, "seed", 666)) + + # The networks + lib = importlib.import_module(cfg.models.decoder.type) + self.net = lib.Net(cfg, cfg.models.decoder) + self.net.cuda() + print("Net:") + print(self.net) + + # The optimizer + self.opt, self.sch = get_opt( + self.net.parameters(), self.cfg.trainer.opt) + + # Prepare save directory + os.makedirs(osp.join(cfg.save_dir, "val"), exist_ok=True) + os.makedirs(osp.join(cfg.save_dir, "images"), exist_ok=True) + os.makedirs(osp.join(cfg.save_dir, "checkpoints"), exist_ok=True) + + def update(self, data, *args, **kwargs): + if 'no_update' in kwargs: + no_update = kwargs['no_update'] + else: + no_update = False + if not no_update: + self.net.train() + self.opt.zero_grad() + + xyz, dist = data['xyz'].cuda(), data['dist'].cuda() + bs = xyz.size(0) + out = self.net(xyz) + ndf_loss_weight = float(getattr( + self.cfg.trainer, "ndf_loss_weight", 1.)) + if ndf_loss_weight > 0: + loss_y_ndf = ((jittor.abs(out) - dist) ** 2).view(bs, -1).mean() + loss_y_ndf *= 
ndf_loss_weight + else: + loss_y_ndf = jittor.zeros(1).cuda().float() + + sdf_loss_weight = float(getattr( + self.cfg.trainer, "sdf_loss_weight", 0.)) + if 'sign' in data and sdf_loss_weight > 0: + sign = data['sign'].cuda().float() + loss_y_sdf = ((out - dist * sign) ** 2).view(bs, -1).mean() + loss_y_sdf *= sdf_loss_weight + else: + loss_y_sdf = 0. * jittor.zeros(1).to(loss_y_ndf) + + occ_loss_weight = float(getattr( + self.cfg.trainer, "occ_loss_weight", 0.)) + if 'sign' in data and occ_loss_weight > 0: + target = (data['sign'].cuda().float() >= 0).float() + loss_occ = F.binary_cross_entropy( + jittor.sigmoid(out), target + ) + loss_occ *= occ_loss_weight + else: + loss_occ = 0. * jittor.zeros(1).cuda().float() + + grad_norm_weight = float(getattr( + self.cfg.trainer, "grad_norm_weight", 0.)) + grad_norm_num_points = int(getattr( + self.cfg.trainer, "grad_norm_num_points", 0)) + if grad_norm_weight > 0. and grad_norm_num_points > 0: + xyz = jittor.rand( + bs, grad_norm_num_points, xyz.size(-1)).to(xyz) * 2 - 1 + xyz = xyz.cuda() + xyz.requires_grad = True + grad_norm = gradient(self.net(xyz), xyz).view( + bs, -1, xyz.size(-1)).norm(dim=-1) + loss_unit_grad_norm = F.mse_loss( + grad_norm, jittor.ones_like(grad_norm)) * grad_norm_weight + else: + loss_unit_grad_norm = 0. * jittor.zeros(1).to(loss_y_ndf) + loss = loss_unit_grad_norm + loss_y_ndf + loss_y_sdf + loss_occ + + if not no_update: + loss.backward() + self.opt.step() + + return { + 'loss': loss.detach().cpu().item(), + 'scalar/loss': loss.detach().cpu().item(), + 'scalar/loss_y_ndf': loss_y_ndf.detach().cpu().item(), + 'scalar/loss_y_sdf': loss_y_sdf.detach().cpu().item(), + 'scalar/loss_occ': loss_occ.detach().cpu().item(), + 'scalar/loss_grad_norm': loss_unit_grad_norm.detach().cpu().item(), + } + + def log_train(self, train_info, train_data, writer=None, + step=None, epoch=None, visualize=False, **kwargs): + if writer is None: + return + + # Log training information to tensorboard + writer_step = step if step is not None else epoch + assert writer_step is not None + for k, v in train_info.items(): + t, kn = k.split("/")[0], "/".join(k.split("/")[1:]) + if t not in ['scalar']: + continue + if t == 'scalar': + writer.add_scalar('train/' + kn, v, writer_step) + + if visualize: + with jittor.no_grad(): + print("Visualize: %s" % step) + res = int(getattr(self.cfg.trainer, "vis_mc_res", 256)) + thr = float(getattr(self.cfg.trainer, "vis_mc_thr", 0.)) + + mesh = imf2mesh( + lambda x: self.net(x), res=res, threshold=thr) + if mesh is not None: + save_name = "mesh_%diters.obj" \ + % (step if step is not None else epoch) + mesh.export(osp.join(self.cfg.save_dir, "val", save_name)) + mesh.export(osp.join(self.cfg.save_dir, "latest_mesh.obj")) + + def validate(self, test_loader, epoch, *args, **kwargs): + return {} + + def save(self, epoch=None, step=None, appendix=None, **kwargs): + d = { + 'opt': self.opt.state_dict(), + 'net': self.net.state_dict(), + 'epoch': epoch, + 'step': step + } + if appendix is not None: + d.update(appendix) + save_name = "epoch_%s_iters_%s.pt" % (epoch, step) + jittor.save(d, osp.join(self.cfg.save_dir, "checkpoints", save_name)) + jittor.save(d, osp.join(self.cfg.save_dir, "latest.pt")) + + def resume(self, path, strict=True, **kwargs): + ckpt = jittor.load(path) + self.net.load_state_dict(ckpt['net'], strict=strict) + self.opt.load_state_dict(ckpt['opt']) + start_epoch = ckpt['epoch'] + return start_epoch + + def multi_gpu_wrapper(self, wrapper): + self.net = wrapper(self.net) + + def epoch_end(self, epoch, 
writer=None, **kwargs): + if self.sch is not None: + self.sch.step(epoch=epoch) + if writer is not None: + writer.add_scalar( + 'train/opt_lr', self.sch.get_lr()[0], epoch) diff --git a/contrib/HSDF-Net/trainers/smooth_sharpen.py b/contrib/HSDF-Net/trainers/smooth_sharpen.py new file mode 100644 index 00000000..d3a658a9 --- /dev/null +++ b/contrib/HSDF-Net/trainers/smooth_sharpen.py @@ -0,0 +1,208 @@ +import os +#import torch +import jittor +import importlib +import os.path as osp +from trainers.base_trainer import BaseTrainer +from models.igp_wrapper import distillation, deformation, correction +from trainers.utils.utils import set_random_seed +from trainers.losses.eikonal_loss import loss_eikonal +from trainers.losses.filtering_losses import loss_boundary, loss_lap + + +class Trainer(BaseTrainer): + + def __init__(self, cfg, args, original_decoder=None): + super().__init__(cfg, args) + self.cfg = cfg + self.args = args + self.dim = 3 + + set_random_seed(getattr(self.cfg.trainer, "seed", 666)) + + # The networks + if original_decoder is None: + if not hasattr(cfg.models, "net"): + cfg.models.net = cfg.models.decoder + sn_lib = importlib.import_module(cfg.models.net.type) + self.original_decoder = sn_lib.Net(cfg, cfg.models.net) + self.original_decoder.cuda() + self.original_decoder.load_state_dict( + jittor.load(cfg.models.net.path)['net']) + print("Original Decoder:") + print(self.original_decoder) + else: + self.original_decoder = original_decoder + + # Get the wrapper for the operation + self.wrapper_type = getattr( + cfg.trainer, "wrapper_type", "distillation") + if self.wrapper_type in ['distillation']: + self.decoder, self.opt_dec, self.scheduler_dec = distillation( + cfg, self.original_decoder, + reload=getattr(self.cfg.trainer, "reload_decoder", True)) + elif self.wrapper_type in ['correction']: + self.decoder, self.opt_dec, self.scheduler_dec = correction( + cfg, self.original_decoder) + elif self.wrapper_type in ['deformation']: + self.decoder, self.opt_dec, self.scheduler_dec = deformation( + cfg, self.original_decoder) + else: + raise ValueError("wrapper_type:", self.wrapper_type) + + # Prepare save directory + os.makedirs(osp.join(cfg.save_dir, "images"), exist_ok=True) + os.makedirs(osp.join(cfg.save_dir, "checkpoints"), exist_ok=True) + os.makedirs(osp.join(cfg.save_dir, "val"), exist_ok=True) + + # Set-up counter + self.num_update_step = 0 + self.boundary_points = None + + # The [beta] that controlls how smooth/sharp the output shape should be + # If beta > 1, then the output shape will increase in curvature + # so it will be sharper + # If beta < 1, then the output shape will decrease in curvature + # so it will be smoother. + # beta should be > 0. + self.beta = getattr(self.cfg.trainer, "beta", 1.) + + # whether plot histogram for network weights + self.show_network_hist = getattr( + self.cfg.trainer, "show_network_hist", False) + + def update(self, _, *args, **kwargs): + self.num_update_step += 1 + if 'no_update' in kwargs: + no_update = kwargs['no_update'] + else: + no_update = False + if not no_update: + self.decoder.train() + self.opt_dec.zero_grad() + + boundary_loss_weight = float(getattr( + self.cfg.trainer, "boundary_weight", 1.)) + boundary_loss_num_points = int(getattr( + self.cfg.trainer, "boundary_num_points", 0)) + boundary_loss_points_update_step = int(getattr( + self.cfg.trainer, "boundary_loss_points_update_step", 1)) + boundary_loss_use_surf_points = int(getattr( + self.cfg.trainer, "boundary_loss_use_surf_points", True)) + if boundary_loss_weight > 0. 
and boundary_loss_num_points > 0: + if self.num_update_step % boundary_loss_points_update_step == 0: + self.boundary_points = None + loss_y_boundary, self.boundary_points = loss_boundary( + (lambda x: self.original_decoder(x)), + (lambda x: self.decoder(x)), + npoints=boundary_loss_num_points, + x=self.boundary_points, + dim=self.dim, + use_surf_points=boundary_loss_use_surf_points) + loss_y_boundary = loss_y_boundary * boundary_loss_weight + else: + loss_y_boundary = jittor.zeros(1).float().cuda() + + grad_norm_weight = float(getattr( + self.cfg.trainer, "grad_norm_weight", 1e-2)) + grad_norm_num_points = int(getattr( + self.cfg.trainer, "grad_norm_num_points", 5000)) + if grad_norm_weight > 0. and grad_norm_num_points > 0: + loss_unit_grad_norm = loss_eikonal( + lambda x: self.decoder(x), + npoints= grad_norm_num_points, + use_surf_points=False, invert_sampling=False + ) + loss_unit_grad_norm *= grad_norm_weight + else: + loss_unit_grad_norm = jittor.zeros(1).float().cuda() + + lap_loss_weight = float(getattr( + self.cfg.trainer, "lap_loss_weight", 1e-4)) + lap_loss_threshold = int(getattr( + self.cfg.trainer, "lap_loss_threshold", 50)) + lap_loss_num_points = int(getattr( + self.cfg.trainer, "lap_loss_num_points", 5000)) + if lap_loss_weight > 0. and lap_loss_num_points > 0: + loss_lap_scaling = loss_lap( + (lambda x: self.original_decoder(x)), + (lambda x: self.decoder(x)), + npoints=lap_loss_num_points, + beta=self.beta, + masking_thr=lap_loss_threshold, + ) + loss_lap_scaling = loss_lap_scaling * lap_loss_weight + else: + loss_lap_scaling = jittor.zeros(1).float().cuda() + + loss = loss_unit_grad_norm + loss_y_boundary + loss_lap_scaling + if not no_update: + loss.backward() + self.opt_dec.step() + + return { + 'loss': loss.detach().cpu().item(), + 'scalar/loss/loss': loss.detach().cpu().item(), + 'scalar/loss/loss_boundary': loss_y_boundary.detach().cpu().item(), + 'scalar/loss/loss_eikonal': loss_unit_grad_norm.detach().cpu().item(), + 'scalar/loss/loss_lap_scaling': loss_lap_scaling.detach().cpu().item(), + 'scalar/weight/loss_boundary': boundary_loss_weight, + 'scalar/weight/loss_eikonal': grad_norm_weight, + 'scalar/weight/loss_lap': lap_loss_weight, + } + + def log_train(self, train_info, train_data, writer=None, + step=None, epoch=None, visualize=False, **kwargs): + if writer is None: + return + writer_step = step if step is not None else epoch + + # Log training information to tensorboard + train_info = {k: (v.cpu() if not isinstance(v, float) else v) + for k, v in train_info.items()} + for k, v in train_info.items(): + ktype = k.split("/")[0] + kstr = "/".join(k.split("/")[1:]) + if ktype == 'scalar': + writer.add_scalar(kstr, v, writer_step) + + if self.show_network_hist: + for name, p in self.decoder.named_parameters(): + writer.add_histogram("hist/%s" % name, p, writer_step) + + if visualize: + # NOTE: trainer sub class should implement this function + self.visualize(train_info, train_data, writer=writer, step=step, + epoch=epoch, visualize=visualize, **kwargs) + + def validate(self, test_loader, epoch, *args, **kwargs): + return {} + + def save(self, epoch=None, step=None, appendix=None, **kwargs): + d = { + 'orig_dec': self.original_decoder.state_dict(), + 'opt_dec': self.opt_dec.state_dict(), + 'dec': self.decoder.state_dict(), + 'epoch': epoch, + 'step': step + } + if appendix is not None: + d.update(appendix) + save_name = "epoch_%s_iters_%s.pt" % (epoch, step) + path = osp.join(self.cfg.save_dir, "checkpoints", save_name) + jittor.save(d, path) + + def resume(self, 
path, strict=True, **kwargs): + ckpt = jittor.load(path) + self.original_decoder.load_state_dict(ckpt['orig_dec'], strict=strict) + self.decoder.load_state_dict(ckpt['dec'], strict=strict) + self.opt_dec.load_state_dict(ckpt['opt_dec']) + start_epoch = ckpt['epoch'] + return start_epoch + + def epoch_end(self, epoch, writer=None, **kwargs): + if self.scheduler_dec is not None: + self.scheduler_dec.step(epoch=epoch) + if writer is not None: + writer.add_scalar( + 'train/opt_lr', self.scheduler_dec.get_lr()[0], epoch) diff --git a/contrib/HSDF-Net/trainers/smooth_sharpen_3D.py b/contrib/HSDF-Net/trainers/smooth_sharpen_3D.py new file mode 100644 index 00000000..c6e3773c --- /dev/null +++ b/contrib/HSDF-Net/trainers/smooth_sharpen_3D.py @@ -0,0 +1,21 @@ +import os +from trainers.utils.vis_utils import imf2mesh +from trainers.smooth_sharpen import Trainer as BaseTrainer + + +class Trainer(BaseTrainer): + + def __init__(self, cfg, args, original_decoder=None): + super().__init__(cfg, args, original_decoder=original_decoder) + self.dim = 3 + + def visualize(self, train_info, train_data, writer=None, + step=None, epoch=None, visualize=False, **kwargs): + print("Visualize: %s" % step) + res = int(getattr(self.cfg.trainer, "vis_mc_res", 256)) + thr = float(getattr(self.cfg.trainer, "vis_mc_thr", 0.)) + mesh = imf2mesh(lambda x: self.decoder(x), res=res, threshold=thr) + if mesh is not None: + save_name = "mesh_%diters.obj" % self.num_update_step + mesh.export(os.path.join(self.cfg.save_dir, "val", save_name)) + mesh.export(os.path.join(self.cfg.save_dir, "latest_mesh.obj")) diff --git a/contrib/HSDF-Net/trainers/utils/diff_ops.py b/contrib/HSDF-Net/trainers/utils/diff_ops.py new file mode 100644 index 00000000..ae633779 --- /dev/null +++ b/contrib/HSDF-Net/trainers/utils/diff_ops.py @@ -0,0 +1,87 @@ +# Based on https://github.com/vsitzmann/siren/blob/master/diff_operators.py +#import torch +#from torch.autograd import grad +import jittor + + +def hessian(y, x): + """ + Hessian of y wrt x + y: shape (meta_batch_size, num_observations, channels) + x: shape (meta_batch_size, num_observations, dim) + return: + shape (meta_batch_size, num_observations, dim, channels) + """ + meta_batch_size, num_observations = y.shape[:2] + grad_y = jittor.ones_like(y[..., 0]).to(y.device) + h = jittor.zeros(meta_batch_size, num_observations, + y.shape[-1], x.shape[-1], x.shape[-1]).to(y.device) + for i in range(y.shape[-1]): + # calculate dydx over batches for each feature value of y + dydx = grad(y[..., i], x, grad_y, create_graph=True)[0] + + # calculate hessian on y for each x value + for j in range(x.shape[-1]): + h[..., i, j, :] = grad(dydx[..., j], x, grad_y, + create_graph=True)[0][..., :] + + status = 0 + if jittor.any(jittor.isnan(h)): + status = -1 + return h, status + + +def laplace(y, x, normalize=False, eps=0., return_grad=False): + grad = gradient(y, x) + if normalize: + grad = grad / (grad.norm(dim=-1, keepdim=True) + eps) + div = divergence(grad, x) + + if return_grad: + return div, grad + return div + + +def divergence(y, x): + div = 0. 
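+ # Divergence as the trace of the Jacobian: accumulate d y_i / d x_i over every output channel i.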
+ for i in range(y.shape[-1]): + div += grad( + y[..., i], x, jittor.ones_like(y[..., i]), + create_graph=True)[0][..., i:i+1] + return div + + +def gradient(y, x, grad_outputs=None): + if grad_outputs is None: + grad_outputs = jittor.ones_like(y) + grad = jittor.grad( + y, [x], retain_graph=True)[0] + + #print('grad: {}'.format(grad)) + return grad + + +def jacobian(y, x): + """ + Jacobian of y wrt x + y: shape (meta_batch_size, num_observations, channels) + x: shape (meta_batch_size, num_observations, dim) + ret: shape (meta_batch_size, num_observations, channels, dim) + """ + meta_batch_size, num_observations = y.shape[:2] + # (meta_batch_size*num_points, 2, 2) + jac = jittor.zeros( + meta_batch_size, num_observations, + y.shape[-1], x.shape[-1]).to(y.device) + for i in range(y.shape[-1]): + # calculate dydx over batches for each feature value of y + y_flat = y[...,i].view(-1, 1) + jac[:, :, i, :] = grad( + y_flat, x, jittor.ones_like(y_flat), create_graph=True)[0] + + status = 0 + if jittor.any(jittor.isnan(jac)): + status = -1 + + return jac, status + diff --git a/contrib/HSDF-Net/trainers/utils/igp_utils.py b/contrib/HSDF-Net/trainers/utils/igp_utils.py new file mode 100644 index 00000000..80e21d20 --- /dev/null +++ b/contrib/HSDF-Net/trainers/utils/igp_utils.py @@ -0,0 +1,257 @@ +#import torch +import jittor +from trainers.utils.diff_ops import gradient, jacobian + + +def outter(v1, v2): + """ + Batched outter product of two vectors: [v1] [v2]^T + :param v1: (bs, dim) + :param v2: (bs, dim) + :return: (bs, dim, dim) + """ + bs = v1.size(0) + d = v1.size(1) + v1 = v1.view(bs, d, 1) + v2 = v2.view(bs, 1, d) + return jittor.bmm(v1, v2) + + +def _addr_(mat, vec1, vec2, alpha=1., beta=1.): + """ + Return + alpha * outter(vec1, vec2) + beta * [mat] + :param mat: (bs, npoints, dim, dim) + :param vec1: (bs, npoints, dim) + :param vec2: (bs, npoints, dim) + :param alpha: float + :param beta: float + :return: + """ + bs, npoints, dim =vec1.size(0), vec1.size(1), vec1.size(2) + assert len(mat.size()) == 4 + outter_n = outter(vec1.view(bs * npoints, dim), vec2.view(bs * npoints, dim)) + outter_n = outter_n.view(bs, npoints, dim, dim) + out = alpha * outter_n + beta * mat.view(bs, npoints, dim, dim) + return out + + +def get_surf_pcl(net, npoints=1000, dim=3, use_rejection=True, **kwargs): + if use_rejection: + return get_surf_pcl_rejection(net, npoints, dim, **kwargs) + else: + return get_surf_pcl_langevin_dynamic(net, npoints, dim, **kwargs) + + +def get_surf_pcl_rejection( + net, npoints, dim, batch_size=100000, thr=0.05, return_rej_x=False): + """ + Sampling points with rejection sampling. We first sample uniformly from + [-1, 1]^3, then reject all points with |distance| > [thr]. Once gathered + enough rejected points, we will take a gradient step toward the surface: + y = x - F(x)n(x) + :param net: Neural field + :param npoints: Number of points to sample + :param dim: Dimension of the points + :param batch_size: Batch size per iteration + :param thr: Rejection threshold + :param return_rej_x: Whether returned points right after rejection. + :return: + [x] Sampled points + [rej_x]? Obtained points after rejection. 
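+ The '?' marks an optional return: [rej_x] is only returned when [return_rej_x] is True.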
+ """ + out = [] + cnt = 0 + with jittor.no_grad(): + while cnt < npoints: + x = jittor.rand(1, batch_size, dim).cuda().float() * 2 - 1 + y = jittor.abs(net(x)) + m = (y < thr).view(1, batch_size) + m_cnt = m.sum().detach().cpu().item() + if m_cnt < 1: + continue + x_eq = x[m].view(m_cnt, dim) + out.append(x_eq) + cnt += m_cnt + rej_x = x = jittor.cat(out, dim=0)[:npoints, :] + + if x.is_leaf: + x.requires_grad = True + else: + x.retain_grad() + y = net(x) + g = gradient(y, x).view(npoints, dim).detach() + g = g / g.norm(dim=-1, keepdim=True) + x = x - g * y + if return_rej_x: + return x, rej_x + return x + + +def get_surf_pcl_langevin_dynamic( + net, npoints, dim, steps=5, eps=1e-4, + noise_sigma=0.01, filtered=True, sigma_decay=1., + max_repeat=10, bound=(1 - 1e-4)): + out_cnt = 0 + out = None + already_repeated = 0 + while out_cnt < npoints and already_repeated < max_repeat: + already_repeated += 1 + x = jittor.rand(npoints, dim).cuda().float() * 2 - 1 + for i in range(steps): + sigma_i = noise_sigma * sigma_decay ** i + x = x.detach() + jittor.randn_like(x).to(x) * sigma_i + x.requires_grad = True + y = net(x) + if jittor.allclose(y, jittor.zeros_like(y)): + break + + g = gradient(y, x).view(npoints, dim).detach() + g = g / (g.norm(dim=-1, keepdim=True) + eps) + x = jittor.clamp(x - g * y, min_v=-bound, max_v=bound) + + if filtered: + with jittor.no_grad(): + y = net(x) + mask = (jittor.abs(y) < eps).view(-1, 1) + x = x.view(-1, dim).masked_select(mask).view(-1, dim) + out_cnt += x.shape[0] + if out is None: + out = x + else: + out = jittor.cat([x, out], dim=0) + else: + out = x + out_cnt = npoints + out = out[:npoints, :] + return out + + +def tangential_projection_matrix(y, x, norm=True, eps=1e-6): + """ + Compute the tangential projection matrix: + P = I - n(x)n(x)^T + where n(x) is the outward surface normal of x + :param x: (bs, npts, dim) input points + :param y: (bs, npts, 1) neural_field(x) + :param norm: Whether normalize the surface normal vector + :param eps: Numerical eps + :return: + [normals] (bs, npts, dim) The surface normal + [normals_proj] (bs, npts, dim, dim) The projector matrices + """ + bs, npoints, dim = x.size(0), x.size(1), x.size(2) + grad = gradient(y, x) + if norm: + normals = ( + grad / (grad.norm(dim=-1, keepdim=True) + eps) + ).view(bs, npoints, dim) + else: + normals = grad.view(bs, npoints, dim) + normals_proj = _addr_( + jittor.eye(dim).view(1, 1, dim, dim).expand(bs, npoints, -1, -1).to(y), + normals, normals, alpha=-1 + ) + return normals, normals_proj + + +def compute_invert_weight( + x, deform, inp_nf, out_nf, surface=False, normalize=True): + """ + Computing the weight in Section 5.3.3. + + :param x: (bs, npts, dim) Points from the output space + :param deform: Network that maps output space to input space + :param inp_nf: Neural fields of the input space + :param out_nf: Neural fields of the output space + :param surface: Whether the inverse is for surface integral. 
+ :param normalize: + :return: + """ + bs, npoints, dim = x.size(0), x.size(1), x.size(2) + x = x.clone().detach() + x.requires_grad = True + y = deform(x).view(bs, npoints, dim) + J, status = jacobian(y, x) + assert status == 0 + + if surface: + # Find the change of area along the tangential plane + yn, yn_proj = tangential_projection_matrix(inp_nf(y), y) + xn, xn_proj = tangential_projection_matrix(out_nf(x), x) + + J = jittor.bmm( + J.view(-1, dim, dim), + xn_proj.view(-1, dim, dim) + ) + J = _addr_(J.view(bs, npoints, dim, dim), + yn.view(bs, npoints, dim), + xn.view(bs, npoints, dim)) + + weight = jittor.abs(jittor.linalg.det(J.view(bs * npoints, dim, dim))) + if int(dim) == 3: + weight = weight ** 2 + weight = 1. / weight.view(bs, npoints) + + if normalize: + weight = weight / weight.sum(dim=-1, keepdim=True) * npoints + + return weight + + +def sample_points( + npoints, dim=3, sample_surf_points=False, + inp_nf=None, out_nf=None, deform=None, + invert_sampling=False, + detach_weight=True, use_rejection=False): + """ + Sample points from the neural fields: inp_nf, out_nf, and deform. + + :param npoints: Number of points to sample. + :param dim: Dimension of the points. + :param sample_surf_points: + :param inp_nf: Input neural fields. F: (bs, npts, dim) -> (bs, npts, 1) + :param out_nf: Output neural fields. G: (bs, npts, dim) -> (bs, npts, 1) + :param deform: Neural fields that deofrm from output space to input space. + (bs, npts, dim) -> (bs, npts, dim) + :param invert_sampling: Whether sample from [inp_nf] then invert the points + through the [deform] to become samples of [out_nf] + :param detach_weight: Whether detach the weights. + :param use_rejection: Whether use rejection to sample. + :return: + [x] (1, npoints, dim) Sampled points on the surface of [out_nf](x) = 0. + [weights] the weights for inverting the surface intergral. 
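+
+    Example (illustrative sketch only; `out_nf` below stands for any neural
+    field callable with the signature described above, not a specific class):
+
+        x, w = sample_points(4096, dim=3, sample_surf_points=True,
+                             out_nf=out_nf, use_rejection=True)
+        # x: (1, 4096, 3) points near the surface out_nf(x) = 0
+        # w: (1, 4096) uniform weights (all ones) in this branch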
+ """ + if sample_surf_points: + if invert_sampling: + assert deform is not None + assert inp_nf is not None + y = get_surf_pcl( + inp_nf, npoints=npoints, dim=dim, use_rejection=use_rejection) + x = deform.invert(y, iters=30).detach().cuda().float() + + weight = compute_invert_weight( + x.view(1, -1, dim), + deform=deform, inp_nf=inp_nf, out_nf=out_nf, surface=True) + else: + assert out_nf is not None + x = get_surf_pcl( + out_nf, npoints=npoints, dim=dim, use_rejection=use_rejection + ).detach().cuda().float() + weight = jittor.ones(1, npoints).cuda().float() + else: + x = jittor.rand(1, npoints, dim).cuda().float() * 2 - 1 + weight = jittor.ones(1, npoints).cuda().float() + if invert_sampling: + assert deform is not None + y = x + x = deform.invert(y, iters=30).detach().cuda().float() + weight = compute_invert_weight( + x.view(1, -1, dim), + deform=deform, inp_nf=inp_nf, out_nf=out_nf, surface=False) + + x = x.view(1, npoints, dim) + weight = weight.view(1, npoints) + if detach_weight: + weight = weight.detach() + return x, weight diff --git a/contrib/HSDF-Net/trainers/utils/o3d_deformation.py b/contrib/HSDF-Net/trainers/utils/o3d_deformation.py new file mode 100644 index 00000000..6d31e2e6 --- /dev/null +++ b/contrib/HSDF-Net/trainers/utils/o3d_deformation.py @@ -0,0 +1,83 @@ +import trimesh +import numpy as np +import open3d as o3d +from trainers.utils.vis_utils import imf2mesh + + +def trimesh_to_o3dmesh(mesh): + o3d_mesh = o3d.geometry.TriangleMesh( + vertices=o3d.utility.Vector3dVector(np.array(mesh.vertices)), + triangles=o3d.utility.Vector3iVector(np.array(mesh.faces)) + ) + o3d_mesh.compute_vertex_normals() + return o3d_mesh + + +def o3dmesh_to_trimesh(mesh): + mesh = trimesh.Trimesh( + vertices=np.asarray(mesh.vertices).reshape(-1, 3).astype(np.float), + faces=np.asarray(mesh.triangles).reshape(-1, 3).astype(np.int) + ) + return mesh + + +def deform_mesh_o3d(imf, handles, targets, normalize=True, res=256, + imf_mesh=None, steps=50, smoothed_alpha=0.01, verbose=True): + """ + Use Open3D to do deformation + Args: + [imf] + [handles] (n, 3) Source points. + [targets] (n, 3) Target points. + [normalize] Whether normalize the mesh to unit sphere. Default (True). + [res] Resolution for MC. Default (256). 
+ Returns: + """ + if imf_mesh is None: + mesh = imf2mesh(imf, res=res, threshold=0.00) + + if normalize: + verts = (mesh.vertices * 2 - res) / float(res) + mesh = trimesh.Trimesh(vertices=verts, faces=mesh.faces) + else: + mesh = imf_mesh + + vertices = np.asarray(mesh.vertices).reshape(-1, 3) + vert_ids = [] + vert_pos = [] + for i in range(handles.reshape(-1, 3).shape[0]): + dist = np.linalg.norm( + vertices - handles[i, :].reshape(1, 3), axis=-1 + ).flatten() + handle_idx = np.argmin(dist) + vert_ids.append(handle_idx) + vert_pos.append( + vertices[handle_idx].reshape(3) + targets[i].reshape(3) - + handles[i].reshape(3)) + + constraint_ids = o3d.utility.IntVector(vert_ids) + constraint_pos = o3d.utility.Vector3dVector(vert_pos) + o3d_vert0 = o3d.utility.Vector3dVector(mesh.vertices) + o3d_face0 = o3d.utility.Vector3iVector(mesh.faces) + o3d_mesh0 = o3d.geometry.TriangleMesh( + vertices=o3d_vert0, triangles=o3d_face0) + o3d_mesh0.compute_vertex_normals() + + with o3d.utility.VerbosityContextManager( + o3d.utility.VerbosityLevel.Debug) as cm: + if smoothed_alpha > 0: + print("Smoothing alphas:", smoothed_alpha, "Use Smoothed Energy") + mesh_deformed = o3d_mesh0.deform_as_rigid_as_possible( + constraint_ids, constraint_pos, max_iter=steps, + smoothed_alpha=smoothed_alpha, + energy=o3d.geometry.DeformAsRigidAsPossibleEnergy.Smoothed) + else: + print("Smoothing alphas:", smoothed_alpha, "Use Spokes Energy") + mesh_deformed = o3d_mesh0.deform_as_rigid_as_possible( + constraint_ids, constraint_pos, max_iter=steps, + smoothed_alpha=0, + energy=o3d.geometry.DeformAsRigidAsPossibleEnergy.Spokes) + + return o3dmesh_to_trimesh(mesh_deformed) + + diff --git a/contrib/HSDF-Net/trainers/utils/utils.py b/contrib/HSDF-Net/trainers/utils/utils.py new file mode 100644 index 00000000..2fdd9143 --- /dev/null +++ b/contrib/HSDF-Net/trainers/utils/utils.py @@ -0,0 +1,72 @@ +#import torch +import jittor +import random +import numpy as np +from jittor import optim + + +def get_opt(params, cfgopt, overwrite_lr=None): + if overwrite_lr is not None: + lr = float(overwrite_lr) + else: + lr = float(cfgopt.lr) + if cfgopt.type == 'adam': + optimizer = optim.Adam(params, lr=lr, + betas=(cfgopt.beta1, cfgopt.beta2), + weight_decay=float(cfgopt.weight_decay)) + elif cfgopt.type == 'sgd': + optimizer = jittor.optim.SGD(params, lr=lr, momentum=cfgopt.momentum) + else: + assert 0, "Optimizer type should be either 'adam' or 'sgd'" + + scheduler = None + scheduler_type = getattr(cfgopt, "scheduler", None) + if scheduler_type is not None: + if scheduler_type == 'exponential': + decay = float(getattr(cfgopt, "step_decay", 0.1)) + scheduler = optim.lr_scheduler.ExponentialLR(optimizer, decay) + elif scheduler_type == 'step': + step_size = int(getattr(cfgopt, "step_epoch", 500)) + decay = float(getattr(cfgopt, "step_decay", 0.1)) + scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=decay) + elif scheduler_type == 'linear': + step_size = int(getattr(cfgopt, "step_epoch", 2000)) + final_ratio = float(getattr(cfgopt, "final_ratio", 0.01)) + start_ratio = float(getattr(cfgopt, "start_ratio", 0.5)) + duration_ratio = float(getattr(cfgopt, "duration_ratio", 0.45)) + + def lambda_rule(ep): + lr_l = 1.0 - min(1, max(0, ep - start_ratio * step_size) / float(duration_ratio * step_size)) * (1 - final_ratio) + return lr_l + + scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule) + + elif scheduler_type == 'cosine_anneal_nocycle': + final_lr_ratio = float(getattr(cfgopt, "final_lr_ratio", 0.01)) 
+ eta_min = float(lr) * final_lr_ratio + eta_max = float(lr) + + total_epoch = int(getattr(cfgopt, "step_epoch", 2000)) + start_ratio = float(getattr(cfgopt, "start_ratio", 0.2)) + T_max = total_epoch * (1 - start_ratio) + + def lambda_rule(ep): + curr_ep = max(0., ep - start_ratio * total_epoch) + lr_l = eta_min + 0.5 * (eta_max - eta_min) * (1 + np.cos(np.pi * curr_ep / T_max)) + lr_l = lr_l / eta_max + return lr_l + + scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule) + + else: + assert 0, "args.schedulers should be either 'exponential' or 'linear' or 'step'" + return optimizer, scheduler + + +def set_random_seed(seed): + """set random seed""" + random.seed(seed) + np.random.seed(seed) + jittor.manual_seed(seed) + jittor.cuda.manual_seed(seed) + jittor.cuda.manual_seed_all(seed) diff --git a/contrib/HSDF-Net/trainers/utils/vis_utils.py b/contrib/HSDF-Net/trainers/utils/vis_utils.py new file mode 100644 index 00000000..2fd1d4d3 --- /dev/null +++ b/contrib/HSDF-Net/trainers/utils/vis_utils.py @@ -0,0 +1,137 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tqdm +#import torch +import jittor +import trimesh +import skimage +import numpy as np +import skimage.measure + + +def imf2mesh(imf, res=256, threshold=0.0, batch_size = 10000, verbose=True, + use_double=False, normalize=False, norm_type='res', + return_stats=False, bound=1.): + xs, ys, zs = np.meshgrid(np.arange(res), np.arange(res), np.arange(res)) + grid = np.concatenate([ + ys[..., np.newaxis], + xs[..., np.newaxis], + zs[..., np.newaxis] + ], axis=-1).astype(np.float) + grid = (grid / float(res - 1) - 0.5) * 2 * bound + grid = grid.reshape(-1, 3) + # Grid will be [-1, 1] * bound + + dists_lst = [] + pbar = range(0, grid.shape[0], batch_size) + if verbose: + pbar = tqdm.tqdm(pbar) + for i in pbar: + sidx, eidx = i, i + batch_size + eidx = min(grid.shape[0], eidx) + with jittor.no_grad(): + xyz = jittor.Var( + grid[sidx:eidx, :]).float().cuda().view(1, -1, 3) + if use_double: + xyz = xyz.double() + distances = imf(xyz) + distances = distances.cpu().numpy() + dists_lst.append(distances.reshape(-1)) + + dists = np.concatenate( + [x.reshape(-1, 1) for x in dists_lst], axis=0).reshape(-1) + field = dists.reshape(res, res, res) + try: + vert, face, _, _ = skimage.measure.marching_cubes( + field, level=threshold) + print(vert.max(), vert.min()) + # Vertices will be [0, res - 1] + + if normalize: + if norm_type == 'norm': + center = vert.mean(axis=0).view(1, -1) + vert_c = vert - center + length = np.linalg.norm(vert_c, axis=-1).max() + vert = vert_c / length + elif norm_type == 'res': + vert = (vert / float(res - 1) - 0.5) * 2 * bound + else: + raise ValueError + new_mesh = trimesh.Trimesh(vertices=vert, faces=face) + except ValueError as e: + print(field.max(), field.min()) + print(e) + new_mesh = None + except RuntimeError as e: + print(field.max(), field.min()) + print(e) + new_mesh = None + + if return_stats: + if new_mesh is not None: + area = new_mesh.area + vol = (field < threshold).astype(np.float).mean() * (2 * bound) ** 3 + else: + area = 0 + vol = 0 + return new_mesh, { + 'vol': vol, + 'area': area + } + + return new_mesh + + +def make_2d_grid(r, add_noise=False): + xs, ys = jittor.meshgrid(jittor.arange(r), jittor.arange(r)) + xy = jittor.cat([ys.reshape(-1, 1), xs.reshape(-1, 1)], dim=-1).float() + if add_noise: + xy += jittor.rand_like(xy) + else: + xy += 0.5 + xy = (xy / float(r) - 0.5) * 2 + return xy + + +def imf2img(imf, 
res=256, add_noise=False, batch_size=10000, threshold=0., + verbose=False, grid=None, return_stats=False, bound=1): + if grid is None: + grid = make_2d_grid(res, add_noise=add_noise).view(-1, 2) + dists_lst = [] + pbar = range(0, grid.shape[0], batch_size) + if verbose: + pbar = tqdm.tqdm(pbar) + for i in pbar: + sidx, eidx = i, i + batch_size + eidx = min(grid.shape[0], eidx) + with jittor.no_grad(): + xyz = grid[sidx:eidx, :].cuda().view(1, -1, 2) + n = xyz.size(1) + distances = imf(xyz) + distances = distances.cpu().numpy() + dists_lst.append(distances.reshape(n, -1)) + dists = np.concatenate( + [x for x in dists_lst], axis=0) + img = dists.reshape(res, res, -1) + if return_stats: + area = (img < threshold).astype(np.float).mean() * 2 ** 2 + contours = skimage.measure.find_contours( + img.reshape(res, res), level=threshold) + total_length = 0 + for vert in contours: + n_v_c = vert.shape[0] + n_v_c_idx = np.array( + (np.arange(n_v_c).astype(np.int) + 1) % n_v_c).astype(np.int) + v_next = vert[n_v_c_idx, :] + v_next = v_next.reshape(n_v_c, 2) + diff = (vert - v_next) / float(res) + dist = np.linalg.norm(diff, axis=-1).sum() + total_length += dist + return img, { + 'area' : area, + 'len': total_length, + 'contours': contours + } + return img diff --git a/contrib/HSDF-Net/utils.py b/contrib/HSDF-Net/utils.py new file mode 100644 index 00000000..b6af758a --- /dev/null +++ b/contrib/HSDF-Net/utils.py @@ -0,0 +1,314 @@ +from posixpath import dirname +import numpy as np +import sys +import os +from subprocess import call +import shutil +#from torch.import typename +import multiprocessing as mp +from multiprocessing import Pool +from functools import partial +#import torch +import jittor + +#from torch.nn.parallel.data_parallel import DataParallel +import configs.config_loader as cfg_loader +from glob import glob + +#import torch.distributed as dist +import pymeshlab as ml +import trimesh +from scipy.spatial import cKDTree as KDTree +import time + +#from mesh_to_sdf import sample_sdf_near_surface, mesh_to_voxels, mesh_to_sdf +#from mesh_to_sdf.utils import get_raster_points +from numpy.core.einsumfunc import einsum_path +from numpy.lib.twodim_base import mask_indices + +import trimesh +import pyrender +import numpy as np +from trimesh import points +#import igl +from skimage.measure import marching_cubes +#import torch +import os +#import igl + +# this is mostly from https://github.com/chrischoy/3D-R2N2/blob/master/lib/voxel.py +# though I sped up the voxel2mesh function considerably, now only surface voxels are saved +# this is only really important for very large models + +MGN_TYPE = [ + 'Pants', + 'ShortPants', + 'LongCoat', + 'ShirtNoCoat', + 'TShirtNoCoat'] + + +def voxel2mesh(voxels, threshold=.3): + cube_verts = [[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], + [1, 1, 1]] # 8 points + + cube_faces = [[0, 1, 2], [1, 3, 2], [2, 3, 6], [3, 7, 6], [0, 2, 6], [0, 6, 4], [0, 5, 1], + [0, 4, 5], [6, 7, 5], [6, 5, 4], [1, 7, 3], [1, 5, 7]] # 12 face + + cube_verts = np.array(cube_verts) + cube_faces = np.array(cube_faces) + 1 + + l, m, n = voxels.shape + + scale = 0.01 + cube_dist_scale = 1.1 + verts = [] + faces = [] + curr_vert = 0 + + positions = np.where(voxels > threshold) # recieves position of all voxels + offpositions = np.where(voxels < threshold) # recieves position of all voxels + voxels[positions] = 1 # sets all voxels values to 1 + voxels[offpositions] = 0 + for i,j,k in zip(*positions): + if np.sum(voxels[i-1:i+2,j-1:j+2,k-1:k+2])< 27 : #identifies if 
current voxels has an exposed face + verts.extend(scale * (cube_verts + cube_dist_scale * np.array([[i, j, k]]))) + faces.extend(cube_faces + curr_vert) + curr_vert += len(cube_verts) + return np.array(verts), np.array(faces) + + +def write_obj(filename, verts, faces): + """ write the verts and faces on file.""" + with open(filename, 'w') as f: + # write vertices + f.write('g\n# %d vertex\n' % len(verts)) + for vert in verts: + f.write('v %f %f %f\n' % tuple(vert)) + + # write faces + f.write('# %d faces\n' % len(faces)) + for face in faces: + f.write('f %d %d %d\n' % tuple(face)) + + +def voxel2obj(filename, pred, threshold=.3): + verts, faces = voxel2mesh(pred, threshold ) + write_obj(filename, verts, faces) + + +# arange shapenet_improved dataset into folders +def preprocess_shapenet(data_path, obj_name='model.obj'): + file_list = os.listdir(data_path) + for obj_path in file_list: + dir_name = os.path.join(data_path, os.path.splitext(obj_path)[0]) + os.mkdir(dir_name) + shutil.move(os.path.join(data_path, obj_path), dir_name) + os.rename(os.path.join(dir_name, obj_path), os.path.join(dir_name, obj_name)) + + print('{} moved!'.format(obj_path)) + +def preprocess_mgn(data_path, obj_name='model.obj'): + file_list = os.listdir(data_path) + for obj_dir in file_list: + for type_name in MGN_TYPE: + type_path = os.path.join(data_path, obj_dir, type_name + '.obj') + if os.path.exists(type_path): + dir_name = os.path.join(data_path, obj_dir + '_' + type_name) + os.mkdir(dir_name) + shutil.move(type_path, dir_name) + os.rename(os.path.join(dir_name, type_name + '.obj'), os.path.join(dir_name, obj_name)) + + print('{} moved!'.format(dir_name)) + + shutil.rmtree(os.path.join(data_path, obj_dir)) + +def preprocess_mixamo(data_path, obj_name='model.obj'): + dir_list = os.listdir(data_path) + for dir_name in dir_list: + os.rename(os.path.join(data_path, dir_name, dir_name+'.obj'), os.path.join(data_path, dir_name, obj_name)) + +# fix npz files +def fix_npz(path): + dir_list = os.listdir(path) + bad_file_list = [] + for dir_name in dir_list: + file_list = os.listdir(os.path.join(path, dir_name)) + for file_name in file_list: + if '.npz' in file_name: + try: + np.load(os.path.join(path,dir_name,file_name), allow_pickle=True) + except: + print('bad file: {}'.format(file_name)) + bad_file_list.append(file_name) + + return bad_file_list + +def fix_npz_mp(file_path): + try: + np.load(file_path, allow_pickle=True) + except: + print('bad file: {}'.format(file_path)) + os.remove(file_path) +''' +def reduce_tensor(tensor): + rt = tensor.clone() + dist.all_reduce(rt, op=dist.ReduceOp.SUM) + rt /= dist.get_world_size() + return rt +''' +def optimizer_to(optim, device): + for param in optim.state.values(): + # Not sure there are any global tensors in the state dict + if isinstance(param, jittor.Var): + param.data = param.data.to(device) + if param._grad is not None: + param._grad.data = param._grad.data.to(device) + elif isinstance(param, dict): + for subparam in param.values(): + if isinstance(subparam, jittor.Var): + subparam.data = subparam.data.to(device) + if subparam._grad is not None: + subparam._grad.data = subparam._grad.data.to(device) + +def add_tail(name_list, tail='_old'): + ret_list = [] + for name in name_list: + ret_list.append(os.path.join(os.path.dirname(name)+tail, os.path.basename(name))) + + return ret_list + +def modify_npz(npz_path): + split = np.load(npz_path) + np.savez(npz_path, train=add_tail(split['train']), test=add_tail(split['test']), val=add_tail(split['val'])) + + +def 
pc2mesh(data_dir): + name_list = os.listdir(data_dir) + + for name in name_list: + + target_path = os.path.join(data_dir, name, 'dense_point_cloud_7_bpa.obj') + if os.path.exists(target_path): + print('{} exsits, skip!'.format(name)) + continue + + print('processing {}'.format(name)) + start = time.time() + path = os.path.join(data_dir, name, 'dense_point_cloud_7_pc.off') + + ms = ml.MeshSet() + ms.load_new_mesh(path) + ms.load_filter_script('ndf_postprocess.mlx') + ms.apply_filter_script() + + ms.save_current_mesh(os.path.join(data_dir, name, 'dense_point_cloud_7_bpa.obj')) + + duration = time.time() - start + + print('duration {}'.format(duration)) + +def preprocess_watertight(data_dir, src_dir): + name_list = os.listdir(data_dir) + + for name in name_list: + + target_path = os.path.join(src_dir, name, 'model_wt.obj') + if os.path.exists(target_path): + print('{} exsits, skip!'.format(name)) + continue + + print('processing {}'.format(name)) + start = time.time() + path = os.path.join(src_dir, name, 'model_scaled.off') + + ''' + ms = ml.MeshSet() + ms.load_new_mesh(path) + ms.load_filter_script('ndf_postprocess.mlx') + ms.apply_filter_script() + + ms.save_current_mesh(os.path.join(data_dir, name, 'dense_point_cloud_7_bpa.obj')) + ''' + + voxel_resolution = 256 + + mesh = trimesh.load(path) + + points = get_raster_points(voxel_resolution=voxel_resolution) + + sdf = igl.signed_distance(points, mesh.vertices, mesh.faces)[0] + + sdf = sdf.reshape([voxel_resolution]*3) + + verts, faces, norms, vals = marching_cubes(sdf, 0) + + trimesh.Trimesh(vertices=verts, faces=faces).export(target_path) + + duration = time.time() - start + + print('duration {}'.format(duration)) + +def create_grid_points_from_bounds(minimun, maximum, res): + x = np.linspace(minimun, maximum, res) + X, Y, Z = np.meshgrid(x, x, x, indexing='ij') + X = X.reshape((np.prod(X.shape),)) + Y = Y.reshape((np.prod(Y.shape),)) + Z = Z.reshape((np.prod(Z.shape),)) + + points_list = np.column_stack((X, Y, Z)) + del X, Y, Z, x + return points_list + +if __name__=='__main__': + #modify_npz('datasets/shapenet_improved/data/split_shapenet_cars_chen_old.npz') + #preprocess_shapenet('datasets/shapenet_improved/data/03001627') + #preprocess_mgn('datasets/MGN/data/0') + #preprocess_mixamo('datasets/mixamo_data/data/0') + + preprocess_watertight('experiments/shapenet_lamp_chen_apex_148_3000/evaluation/generation/03636649', 'datasets/shapenet_improved/data/03636649') + #pc2mesh('experiments/shapenet_chairs_chen_apex_148_3000/evaluation/generation/03001627') + + ''' + grid_points = create_grid_points_from_bounds(-0.5, 0.5, 64) + kdtree = KDTree(grid_points) + + mesh = trimesh.load('datasets/shapenet_improved/data/03001627/113016635d554d5171fb733891076ecf/model_scaled.off') + point_cloud = mesh.sample(3000) + + occupancies = np.zeros(len(grid_points), dtype=np.int8) + + _, idx = kdtree.query(point_cloud) + occupancies[idx] = 1 + + #npz_file = np.load('datasets/shapenet_improved/data/03001627/113016635d554d5171fb733891076ecf/voxelized_point_cloud_256res_3000points.npz') + #occupancies = np.unpackbits(npz_file['compressed_occupancies']) + voxel2obj('chair.obj', np.reshape(occupancies, (64,)*3)) + ''' + + ''' + cfg = cfg_loader.get_config() + + paths = glob( cfg.data_dir + '/*/*.npz') + print(len(paths)) + + paths = sorted(paths) + + chunks = np.array_split(paths,cfg.num_chunks) + paths = chunks[cfg.current_chunk] + + + if cfg.num_cpus == -1: + num_cpus = mp.cpu_count() + print('cpu count: {}'.format(num_cpus)) + else: + num_cpus = cfg.num_cpus + + 
def multiprocess(func): + p = Pool(num_cpus) + p.map(func, paths) + p.close() + p.join() + + multiprocess(fix_npz_mp) + ''' diff --git a/contrib/NeRF-Editing/LICENSE b/contrib/NeRF-Editing/LICENSE new file mode 100644 index 00000000..f288702d --- /dev/null +++ b/contrib/NeRF-Editing/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. 
+States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/contrib/NeRF-Editing/OpenVolumeMesh/CMakeLists.txt b/contrib/NeRF-Editing/OpenVolumeMesh/CMakeLists.txt new file mode 100644 index 00000000..02a36a3b --- /dev/null +++ b/contrib/NeRF-Editing/OpenVolumeMesh/CMakeLists.txt @@ -0,0 +1,35 @@ +cmake_minimum_required(VERSION 3.12 FATAL_ERROR) +cmake_policy(VERSION 3.12.0) +project(OpenVolumeMeshExamples) + +if (NOT TARGET OpenVolumeMesh::OpenVolumeMesh) + find_package(OpenVolumeMesh REQUIRED) +endif() + +# Add target for first example +add_executable(simple_mesh simple_mesh/simple_mesh.cpp) + +target_link_libraries(simple_mesh OpenVolumeMesh::OpenVolumeMesh) + +set(OVM_CXX_STANDARD 11 CACHE STRING "C++ standard version to use") +set_property(CACHE OVM_CXX_STANDARD PROPERTY STRINGS 11 14 17 20) + +set_target_properties (simple_mesh PROPERTIES + CXX_STANDARD ${OVM_CXX_STANDARD} + CXX_STANDARD_REQUIRED YES + CXX_EXTENSIONS NO +) + +#[[= +if(WIN32) + # copy exe file to "Build" directory + # Visual studio will create this file in a subdirectory so we can't use + # RUNTIME_OUTPUT_DIRECTORY directly here + add_custom_command (TARGET simple_mesh POST_BUILD + COMMAND ${CMAKE_COMMAND} -E + copy_if_different + ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/simple_mesh.exe + ${CMAKE_BINARY_DIR}/Examples/simple_mesh.exe) +else() +endif() +=]] diff --git a/contrib/NeRF-Editing/OpenVolumeMesh/simple_mesh/simple_mesh.cc b/contrib/NeRF-Editing/OpenVolumeMesh/simple_mesh/simple_mesh.cc new file mode 100644 index 00000000..46e02db2 --- /dev/null +++ b/contrib/NeRF-Editing/OpenVolumeMesh/simple_mesh/simple_mesh.cc @@ -0,0 +1,149 @@ +// C++ includes +#include +#include +#include +#include +#include + +// Include vector classes +#include + +// Include polyhedral mesh kernel +#include + +#include + +// Make some typedefs to facilitate your life +typedef OpenVolumeMesh::Geometry::Vec3f Vec3f; +typedef OpenVolumeMesh::GeometryKernel PolyhedralMeshV3f; + +// verts [NV,3]; simplicies [NC,4] +void readVerts(std::string file_path, std::vector>& verts, std::vector>& cells) { + std::ifstream myfile (file_path); + if (myfile.is_open()) { + int Num; + myfile >> Num; // read verts + for (int idx = 0; idx < Num; idx++) { + float v1, v2, v3; myfile >> v1; myfile>> v2; myfile >> v3; + std::vector cur = {v1, v2, v3}; + verts.push_back(cur); + } + myfile >> Num; // read cells + for (int idx = 0; idx < Num; idx++) { + int id1, id2, id3, id4; + myfile >> id1; myfile >> id2; myfile >> id3; myfile >> id4; + std::vector cur = {id1, id2, id3, id4}; + cells.push_back(cur); + } + myfile.close(); + } + + else std::cout << "Unable to open file \n"; +} + +int getHalfFaceID(std::vector& vertsPerCell) { + // determine the face[v1/v2/v3] ID wrt. 
v4 + Vec3f v01 = vertsPerCell[1] - vertsPerCell[0]; + Vec3f v02 = vertsPerCell[2] - vertsPerCell[0]; + Vec3f v03 = vertsPerCell[3] - vertsPerCell[0]; + + // for (Vec3f v : vertsPerCell) std::cout << v << ", "; + // std::cout << std::endl; + + if (v01.cross(v02).dot(v03) > 0) return 0; + else return 1; +} + +int main(int _argc, char** _argv) { + + + std::string input_file = _argv[1]; + std::string save_name = _argv[2]; + std::vector> verts; + std::vector> cells; + readVerts(input_file, verts, cells); + + // Create mesh object + PolyhedralMeshV3f myMesh; + + // add verts + std::vector vert_handles; + for (std::vector cur : verts) { + OpenVolumeMesh::VertexHandle vh = myMesh.add_vertex(Vec3f(cur[0], cur[1], cur[2])); + vert_handles.push_back(vh); + } + + // add faces + // to avoid duplicate faces, use hash to check + std::unordered_map fh_hash; + std::vector face_handles; + for (std::vector cur_cell : cells) { + std::vector cur_vh; + OpenVolumeMesh::FaceHandle fh; + std::vector halffaces; + std::vector vertsPerCell; + std::string fh_str; + + cur_vh.push_back(vert_handles[cur_cell[0]]); // 0 1 2 + cur_vh.push_back(vert_handles[cur_cell[1]]); + cur_vh.push_back(vert_handles[cur_cell[2]]); + fh_str = std::to_string(cur_cell[0]) + ", " + std::to_string(cur_cell[1]) + ", " + std::to_string(cur_cell[2]); + if (fh_hash.count(fh_str)) fh = fh_hash[fh_str]; + else {fh = myMesh.add_face(cur_vh); fh_hash[fh_str] = fh;} + for (OpenVolumeMesh::FaceVertexIter fv_it = myMesh.fv_iter(fh); fv_it.valid(); fv_it++) { + vertsPerCell.push_back(myMesh.vertex(*fv_it)); + } + vertsPerCell.push_back(myMesh.vertex(vert_handles[cur_cell[3]])); + halffaces.push_back(myMesh.halfface_handle(fh, getHalfFaceID(vertsPerCell))); + + cur_vh.clear(); vertsPerCell.clear(); + cur_vh.push_back(vert_handles[cur_cell[0]]); // 0 1 3 + cur_vh.push_back(vert_handles[cur_cell[1]]); + cur_vh.push_back(vert_handles[cur_cell[3]]); + fh_str = std::to_string(cur_cell[0]) + ", " + std::to_string(cur_cell[1]) + ", " + std::to_string(cur_cell[3]); + if (fh_hash.count(fh_str)) fh = fh_hash[fh_str]; + else {fh = myMesh.add_face(cur_vh); fh_hash[fh_str] = fh;} + for (OpenVolumeMesh::FaceVertexIter fv_it = myMesh.fv_iter(fh); fv_it.valid(); fv_it++) { + vertsPerCell.push_back(myMesh.vertex(*fv_it)); + } + vertsPerCell.push_back(myMesh.vertex(vert_handles[cur_cell[2]])); + halffaces.push_back(myMesh.halfface_handle(fh, getHalfFaceID(vertsPerCell))); + + cur_vh.clear(); vertsPerCell.clear(); + cur_vh.push_back(vert_handles[cur_cell[0]]); // 0 2 3 + cur_vh.push_back(vert_handles[cur_cell[2]]); + cur_vh.push_back(vert_handles[cur_cell[3]]); + fh_str = std::to_string(cur_cell[0]) + ", " + std::to_string(cur_cell[2]) + ", " + std::to_string(cur_cell[3]); + if (fh_hash.count(fh_str)) fh = fh_hash[fh_str]; + else {fh = myMesh.add_face(cur_vh); fh_hash[fh_str] = fh;} + for (OpenVolumeMesh::FaceVertexIter fv_it = myMesh.fv_iter(fh); fv_it.valid(); fv_it++) { + vertsPerCell.push_back(myMesh.vertex(*fv_it)); + } + vertsPerCell.push_back(myMesh.vertex(vert_handles[cur_cell[1]])); + halffaces.push_back(myMesh.halfface_handle(fh, getHalfFaceID(vertsPerCell))); + + cur_vh.clear(); vertsPerCell.clear(); + cur_vh.push_back(vert_handles[cur_cell[1]]); // 1 2 3 + cur_vh.push_back(vert_handles[cur_cell[2]]); + cur_vh.push_back(vert_handles[cur_cell[3]]); + fh_str = std::to_string(cur_cell[1]) + ", " + std::to_string(cur_cell[2]) + ", " + std::to_string(cur_cell[3]); + if (fh_hash.count(fh_str)) fh = fh_hash[fh_str]; + else {fh = myMesh.add_face(cur_vh); fh_hash[fh_str] 
= fh;} + for (OpenVolumeMesh::FaceVertexIter fv_it = myMesh.fv_iter(fh); fv_it.valid(); fv_it++) { + vertsPerCell.push_back(myMesh.vertex(*fv_it)); + } + vertsPerCell.push_back(myMesh.vertex(vert_handles[cur_cell[0]])); + halffaces.push_back(myMesh.halfface_handle(fh, getHalfFaceID(vertsPerCell))); + + myMesh.add_cell(halffaces); + } + + // Create file manager object + OpenVolumeMesh::IO::FileManager fileManager; + // Store mesh to file "myMesh.ovm" in the current directory + // std::string save_name = "myMesh_debug.ovm"; + fileManager.writeFile(save_name.c_str(), myMesh); + std::cout << "save mesh to " << save_name << std::endl; + + return 0; +} diff --git a/contrib/NeRF-Editing/OpenVolumeMesh/simple_mesh/test.cpp b/contrib/NeRF-Editing/OpenVolumeMesh/simple_mesh/test.cpp new file mode 100644 index 00000000..d9ea57c9 --- /dev/null +++ b/contrib/NeRF-Editing/OpenVolumeMesh/simple_mesh/test.cpp @@ -0,0 +1,90 @@ +// C++ includes +#include +#include + +// Include vector classes +#include + +// Include polyhedral mesh kernel +#include + +// Include the file manager header +#include + +#include // sqrt + +// Make some typedefs to facilitate your life +typedef OpenVolumeMesh::Geometry::Vec3f Vec3f; +typedef OpenVolumeMesh::GeometryKernel PolyhedralMeshV3f; + +float calDihedralAngle(std::vector dihedral) { + // v1, v2 constructs the common edge. v3 and v4 expand the dihedral angle + OpenVolumeMesh::Geometry::Vec3f v1 = dihedral[0]; + OpenVolumeMesh::Geometry::Vec3f v2 = dihedral[1]; + OpenVolumeMesh::Geometry::Vec3f v3 = dihedral[2]; + OpenVolumeMesh::Geometry::Vec3f v4 = dihedral[3]; + + // assume the order: v3 -> v1 -> v2 -> v4 + OpenVolumeMesh::Geometry::Vec3f d1 = v1 - v3; + OpenVolumeMesh::Geometry::Vec3f d2 = v2 - v1; + OpenVolumeMesh::Geometry::Vec3f d3 = v4 - v2; + + OpenVolumeMesh::Geometry::Vec3f n1 = d1.cross(d2); + OpenVolumeMesh::Geometry::Vec3f n2 = d2.cross(d3); + + float cos_angle = n1.dot(n2) / n1.norm() / n2.norm(); + + return cos_angle / sqrt(1 - cos_angle*cos_angle); +} + +int main(int argc, char const *argv[]) +{ + std::string ovm_path = "/mnt/2/sunyangtian/NeRF_Ali/volumeARAP_batch/test_file2/mesh_cage_.ovm"; + OpenVolumeMesh::GeometricPolyhedralMeshV3f myMesh; + OpenVolumeMesh::IO::FileManager fileManager; + fileManager.readFile(ovm_path, myMesh); + + for (int i = 0; i < myMesh.n_vertices(); i++) { + OpenVolumeMesh::VertexHandle vh(i); + for (OpenVolumeMesh::VertexVertexIter vvit=myMesh.vv_iter(vh); vvit.valid(); vvit++) { + OpenVolumeMesh::Geometry::Vec3f pos = myMesh.vertex(*vvit); + std::cout << "vertex index: " << (*vvit).idx() << std::endl; + } + std::cout << "---------------------------------" << std::endl; + for (OpenVolumeMesh::VertexOHalfEdgeIter voheit=myMesh.voh_iter(vh); voheit.valid(); voheit++) { + OpenVolumeMesh::VertexHandle vh2 = myMesh.to_vertex_handle(*voheit); + std::cout << "****** the edge between ****** " << vh.idx() << " and " << vh2.idx() << std::endl; + OpenVolumeMesh::EdgeHandle eh = myMesh.edge_handle(*voheit); + for (OpenVolumeMesh::EdgeCellIter ecit=myMesh.ec_iter(eh); ecit.valid(); ecit++) { + std::cout << "iterate cells ..." << std::endl; + std::vector dihedralAngle; + // vector size is 4. 
The first and second is the common edge + dihedralAngle.push_back(myMesh.vertex(vh)); + dihedralAngle.push_back(myMesh.vertex(vh2)); + std::cout << "specific verts are: " << vh.idx() << ", " << vh2.idx() << std::endl; + for (OpenVolumeMesh::CellVertexIter cvit=myMesh.cv_iter(*ecit); cvit.valid(); cvit++) { + if (*cvit != vh && *cvit != vh2) { + dihedralAngle.push_back(myMesh.vertex(*cvit)); + std::cout << "verts add " << (*cvit).idx() << std::endl; + } + } + std::cout << "the cot of dihedralAngle is: " << calDihedralAngle(dihedralAngle) << std::endl; + std::cout << "---------------------------------" << std::endl; + } + + } + if (i > 1) + break; + } + + std::cout << "TEST dihetral angle" << std::endl; + OpenVolumeMesh::Geometry::Vec3f v1(0,0,0); + OpenVolumeMesh::Geometry::Vec3f v2(1,0,0); + OpenVolumeMesh::Geometry::Vec3f v3(0,0,1); + OpenVolumeMesh::Geometry::Vec3f v4(0,1,-1); + + std::vector dihedral = {v1,v2,v3,v4}; + std::cout << "the cot of dihedralAngle is: " << calDihedralAngle(dihedral) << std::endl; + + return 0; +} diff --git a/contrib/NeRF-Editing/README.md b/contrib/NeRF-Editing/README.md new file mode 100644 index 00000000..dd56c4d7 --- /dev/null +++ b/contrib/NeRF-Editing/README.md @@ -0,0 +1,161 @@ +# NeRF-Editing: Geometry Editing of Neural Radiance Fields + +![Teaser image](./img/teaser.gif) + +## Abstract + +Implicit neural rendering, especially Neural Radiance Field (NeRF), has shown great potential in novel view synthesis of a scene. However, current NeRF-based methods cannot enable users to perform user-controlled shape deformation in the scene. While existing works have proposed some approaches to modify the radiance field according to the user's constraints, the modification is limited to color editing or object translation and rotation. In this paper, we propose a method that allows users to perform controllable shape deformation on the implicit representation of the scene, and synthesizes the novel view images of the edited scene without re-training the network. Specifically, we establish a correspondence between the extracted explicit mesh representation and the implicit neural representation of the target scene. Users can first utilize well-developed mesh-based deformation methods to deform the mesh representation of the scene. Our method then utilizes user edits from the mesh representation to bend the camera rays by introducing a tetrahedra mesh as a proxy, obtaining the rendering results of the edited scene. Extensive experiments demonstrate that our framework can achieve ideal editing results not only on synthetic data, but also on real scenes captured by users. + +## Environment +* Install [jittor](https://github.com/Jittor/jittor) +
+<details>
+  <summary> Other dependencies (click to expand) </summary>
+
+  - opencv_python==4.5.2.52
+  - imageio==2.17.0
+  - trimesh==3.9.8
+  - numpy==1.19.2
+  - pyhocon==0.3.57
+  - icecream==2.1.0
+  - tqdm==4.50.2
+  - scipy==1.7.0
+  - PyMCubes==0.1.2
+  - natsort==8.1.0
+  - tensorboardX==2.5
+
+</details>
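+
+For reference, a minimal setup sketch for the dependencies above (assuming a plain pip environment; the first command follows jittor's own pip install instructions, and the pinned versions are simply the ones listed in the table above):
+
+```
+python -m pip install jittor
+# optional: verify the jittor installation
+python -m jittor.test.test_example
+python -m pip install opencv_python==4.5.2.52 imageio==2.17.0 trimesh==3.9.8 \
+    numpy==1.19.2 pyhocon==0.3.57 icecream==2.1.0 tqdm==4.50.2 scipy==1.7.0 \
+    PyMCubes==0.1.2 natsort==8.1.0 tensorboardX==2.5
+```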
+
+We also use pyrender to get the depth map:
+```
+pip install pyrender
+```
+
+* Download [OpenVolumeMesh](https://www.graphics.rwth-aachen.de/software/openvolumemesh/download/) to the `OpenVolumeMesh` folder
+* Download [Eigen](https://eigen.tuxfamily.org/index.php?title=Main_Page) to the `volumeARAP_batch/Eigen` folder
+
+## Data preparation
+
+Suppose the image data is in the `$data_dir/images` folder. We first estimate the camera poses with [colmap](https://github.com/colmap/colmap), then process the camera poses with the command
+```
+python process_colmap.py $data_dir $data_dir
+```
+Finally, the data folder looks like
+```
+$data_dir
+├── colmap_output.txt (colmap output)
+├── database.db (colmap output)
+├── images ($data_dir/images)
+├── intrinsics.txt
+├── pose
+├── rgb
+└── sparse (colmap output)
+```
+
+We provide a ready-to-use dataset `hbychair` collected by ourselves in [google drive](https://drive.google.com/drive/folders/1OdHHNxMk9t9cDTZHlMHLSb11tf9LYHtG?usp=sharing), along with the pre-trained model and deformation results. You can put the data into the `data` folder.
+
+Alternatively, you can use the [nerf-synthetic dataset](https://drive.google.com/drive/folders/128yBriW1IG_3NJ5Rp7APSTZsJqdJdfc1) directly; see `./confs/wmask_lego.conf` as an example.
+
+## Pipeline
+
+##### Training
+We adopt the training strategy of [NeuS](https://github.com/Totoro97/NeuS).
+
+```
+python exp_runner.py --mode train --conf ./confs/womask_hbychair.conf --case hbychair_neus
+```
+
+##### Extract mesh
+```
+python exp_runner.py --mode validate_mesh --conf ./confs/womask_hbychair.conf --case hbychair_neus --is_continue # use latest checkpoint
+```
+*We have provided a simplified mesh `mesh_nofloor_simp.obj`*
+
+##### Render images before editing
+```
+python exp_runner.py --mode circle --conf ./confs/womask_hbychair_render.conf --case hbychair_neus --is_continue --obj_path ./logs/hbychair_wo_mask/mesh_nofloor_simp.obj
+```
+
+*Note: `obj_path` is optional; providing it yields better rendering results.*
+
+##### Construct cage mesh
+```
+python exp_runner.py --mode validate_mesh --conf ./confs/womask_hbychair.conf --case hbychair_neus --is_continue --do_dilation
+```
+*We have provided a cage mesh `mesh_cage_nofloor.obj`*
+
+##### Construct a tetrahedral mesh using [TetWild](https://github.com/Yixin-Hu/TetWild)
+```
+./TetWild ../../src/logs/hbychair_wo_mask/mesh_cage_nofloor.obj
+```
+*Note that we modify the tetrahedra storage format of the TetWild output. Therefore, please compile the `tetwild` in this repository following the instructions [here](https://github.com/Yixin-Hu/TetWild).*
+
+##### Convert the output to `ovm` format
+```
+./simple_mesh ../../src/logs/hbychair_wo_mask/mesh_cage_nofloor_.txt ../../src/logs/hbychair_wo_mask/mesh_cage_nofloor_.ovm
+```
+*`simple_mesh` can be built using the `CMakeLists.txt` in the `OpenVolumeMesh` folder.*
+
+##### Editing
+Deform the extracted mesh *with any mesh editing tool*, and put the (sequence of) deformed meshes in the `$deformed_dir` folder.
+
+*We have provided a deformed mesh `deformed_mesh.obj` and a folder named `mesh_seq`*
+
+##### Propagate editing
+Generate the controlpoint.txt that guides the deformation:
+```
+python barycentric_control_pts_jittor.py
+```
+Note: specify `mesh_path` (extracted mesh), `tet_path` (tetrahedral mesh) and `deformed_dir` (deformed mesh sequence) first.
+
+The format of controlpoint.txt is listed below.
+
+```
+10 (Number of sequence)
+N (Num of control points)
+x1 y1 z1
+x2 y2 z2
+...
+N (Num of control points) +x1 y1 z1 +x2 y2 z2 +... +. +. +. +N (Num of barycentric coordinate) +id1 id2 id3 id4 (vert index of this tet) +u1 v1 w1 z1 +id1' id2' id3' id4' +u2 v2 w2 z2 +... +``` +Compile the `volumeARAP_batch` project to obtain `volumeARAP`, and deform the tetrehedra mesh. +``` +./volumeARAP ../../src/logs/hbychair_wo_mask/mesh_cage_nofloor_.ovm ../../src/logs/hbychair_wo_mask/mesh_seq/2_barycentric_control.txt ../../src/logs/hbychair_wo_mask/mesh_seq_ovm 0 +``` +##### Rendering after editing +``` +python exp_runner.py --mode circle --conf ./confs/womask_hbychair_render.conf --case hbychair_neus --is_continue --use_deform --reconstructed_mesh_file ./logs/hbychair_wo_mask/mesh_cage_nofloor_.txt --deformed_mesh_file ./logs/hbychair_wo_mask/mesh_seq_ovm/arap_result_0000_.ovm --obj_path ./logs/hbychair_wo_mask/deformed_mesh.obj +``` + +* fix camera (generate sequential editing results in a fixed camera) +``` +python exp_runner.py --mode circle --conf ./confs/womask_hbychair_render.conf --case hbychair_neus --is_continue --use_deform --reconstructed_mesh_file ./logs/hbychair_wo_mask/mesh_cage_nofloor_.txt --deformed_mesh_file ./logs/hbychair_wo_mask/mesh_seq_ovm/arap_result_0000_.ovm --obj_path ./logs/hbychair_wo_mask/deformed_mesh.obj --fix_camera +``` + +## Acknowledgement +This code borrows heavily from [NeuS](https://github.com/Totoro97/NeuS). + +## Citation + +If you found this code useful please cite our work as: + +``` +@inproceedings{yuan2022nerf, + title={NeRF-editing: geometry editing of neural radiance fields}, + author={Yuan, Yu-Jie and Sun, Yang-Tian and Lai, Yu-Kun and Ma, Yuewen and Jia, Rongfei and Gao, Lin}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={18353--18364}, + year={2022} +} +``` diff --git a/contrib/NeRF-Editing/TetWild/CMakeLists.txt b/contrib/NeRF-Editing/TetWild/CMakeLists.txt new file mode 100644 index 00000000..c0b90cb5 --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/CMakeLists.txt @@ -0,0 +1,177 @@ +cmake_minimum_required(VERSION 3.3) +list(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/cmake) + +################################################################################ + + +################################################################################ +# Options +################################################################################ +# tetwild +option(TETWILD_WITH_HUNTER "Use Hunter to download and configure Boost" OFF) +option(TETWILD_WITH_ISPC "Use ISPC" OFF) +# libigl library +option(LIBIGL_USE_STATIC_LIBRARY "Use libigl as static library" OFF) +option(LIBIGL_WITH_ANTTWEAKBAR "Use AntTweakBar" OFF) +option(LIBIGL_WITH_CGAL "Use CGAL" ON) +option(LIBIGL_WITH_COMISO "Use CoMiso" OFF) +option(LIBIGL_WITH_CORK "Use Cork" OFF) +option(LIBIGL_WITH_LIM "Use LIM" OFF) +option(LIBIGL_WITH_MATLAB "Use Matlab" OFF) +option(LIBIGL_WITH_MOSEK "Use MOSEK" OFF) +option(LIBIGL_WITH_PNG "Use PNG" OFF) +option(LIBIGL_WITH_PYTHON "Use Python" OFF) +option(LIBIGL_WITH_TETGEN "Use Tetgen" OFF) +option(LIBIGL_WITH_TRIANGLE "Use Triangle" OFF) +option(LIBIGL_WITH_XML "Use XML" OFF) +# gui +option(LIBIGL_WITH_EMBREE "Use Embree" OFF) +option(LIBIGL_WITH_NANOGUI "Use Nanogui menu" OFF) +option(LIBIGL_WITH_OPENGL "Use OpenGL" OFF) +option(LIBIGL_WITH_OPENGL_GLFW "Use GLFW" OFF) +option(LIBIGL_WITH_VIEWER "Use OpenGL viewer" OFF) +#geogram +option(GEOGRAM_WITH_TRIANGLE "Use Triangle" OFF) + +if(TETWILD_WITH_HUNTER) + # Needs to be set before the main project... 
argh =/ + include(HunterGate) + HunterGate( + URL "https://github.com/ruslo/hunter/archive/v0.23.25.tar.gz" + SHA1 "cb75cce9a3a8d552e70e7118f3203eb4ac05c201" + ) +endif() + +################################################################################ +# Project name +################################################################################ + +project(TetWild) + +################################################################################ +# Settings +################################################################################ +if(NOT CMAKE_BUILD_TYPE) + message(STATUS "No build type selected, default to Release") + set(CMAKE_BUILD_TYPE "Release") +endif() + +set(TETWILD_EXTERNAL "${CMAKE_CURRENT_SOURCE_DIR}/extern") + +# Color output +include(UseColors) + +# Use folder in Visual Studio +set_property(GLOBAL PROPERTY USE_FOLDERS ON) + +# Extra warnings +include(Warnings) + +# Export compile flags (used for autocompletion of the C++ code) +set(CMAKE_EXPORT_COMPILE_COMMANDS 1) + +# Generate position independent code +set(CMAKE_POSITION_INDEPENDENT_CODE ON) + +################################################################################ +# 3rd party libraries +################################################################################ +include(TetWildDependencies) + +################################################################################ +# TetWild +################################################################################ +# Build static library for executable +add_library(libTetWild + include/tetwild/Args.h + include/tetwild/Exception.h + include/tetwild/Logger.h + include/tetwild/tetwild.h + src/tetwild/BSPSubdivision.cpp + src/tetwild/BSPSubdivision.h + src/tetwild/CGALTypes.h + src/tetwild/Common.cpp + src/tetwild/Common.h + src/tetwild/DelaunayTetrahedralization.cpp + src/tetwild/DelaunayTetrahedralization.h + src/tetwild/DistanceQuery.cpp + src/tetwild/DistanceQuery.h + src/tetwild/EdgeCollapser.cpp + src/tetwild/EdgeCollapser.h + src/tetwild/EdgeRemover.cpp + src/tetwild/EdgeRemover.h + src/tetwild/EdgeSplitter.cpp + src/tetwild/EdgeSplitter.h + src/tetwild/ForwardDecls.h + src/tetwild/InoutFiltering.cpp + src/tetwild/InoutFiltering.h + src/tetwild/LocalOperations.cpp + src/tetwild/LocalOperations.h + src/tetwild/Logger.cpp + src/tetwild/MeshConformer.cpp + src/tetwild/MeshConformer.h + src/tetwild/MeshRefinement.cpp + src/tetwild/MeshRefinement.h + src/tetwild/Preprocess.cpp + src/tetwild/Preprocess.h + src/tetwild/SimpleTetrahedralization.cpp + src/tetwild/SimpleTetrahedralization.h + src/tetwild/State.cpp + src/tetwild/State.h + src/tetwild/TetmeshElements.cpp + src/tetwild/TetmeshElements.h + src/tetwild/tetwild.cpp + src/tetwild/VertexSmoother.cpp + src/tetwild/VertexSmoother.h + src/tetwild/geogram/mesh_AABB.cpp + src/tetwild/geogram/mesh_AABB.h +) +target_include_directories(libTetWild + PRIVATE + src + PUBLIC + include +) +target_link_libraries(libTetWild + PUBLIC + geogram + igl::core + pymesh::pymesh + spdlog::spdlog + PRIVATE + igl::cgal + warnings::all +) +set_target_properties(libTetWild PROPERTIES OUTPUT_NAME "tetwild") + +# ispc +if(TETWILD_WITH_ISPC) + message(STATUS "Compiling energy with ISPC") + add_subdirectory(src/ispc) + ispc_add_energy(libTetWild) +endif() + +# Building executable +add_executable(TetWild src/main.cpp) +target_link_libraries(TetWild + libTetWild + CLI11::CLI11 + igl::cgal + warnings::all +) +target_include_directories(TetWild PRIVATE src) +igl_copy_cgal_dll(TetWild) + +# Install +install(TARGETS TetWild 
RUNTIME DESTINATION bin) + +################################################################################ +# Folders for Visual Studio/XCode IDEs +################################################################################ + +# geogram +set_target_properties(geogram PROPERTIES FOLDER extern/geogram) +set_target_properties(geogram_third_party PROPERTIES FOLDER extern/geogram) +set_target_properties(uninstall PROPERTIES FOLDER extern/geogram) +# pymesh +set_target_properties(pymesh_tiny PROPERTIES FOLDER extern/pymesh) diff --git a/contrib/NeRF-Editing/TetWild/Dockerfile b/contrib/NeRF-Editing/TetWild/Dockerfile new file mode 100644 index 00000000..11892a4b --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/Dockerfile @@ -0,0 +1,23 @@ +# Use an official Python runtime as a parent image +FROM ubuntu +ENV DEBIAN_FRONTEND=noninteractive + +# Install any needed packages specified in requirements.txt +RUN apt-get update && apt-get install -y git cmake g++ libcgal-dev + +# Set the working directory to /app +WORKDIR /app + +# Download and compile TetWild +RUN git clone https://github.com/Yixin-Hu/TetWild --recursive +WORKDIR /app/TetWild/build +RUN cmake .. && make + +WORKDIR /data + +ENTRYPOINT ["/app/TetWild/build/TetWild"] + +## Create TetWild image with: +# docker build -t tetwild . +## Run TetWild with: +# docker run --rm -v "$(pwd)":/data tetwild [TetWild arguments] diff --git a/contrib/NeRF-Editing/TetWild/LICENSE.GPL b/contrib/NeRF-Editing/TetWild/LICENSE.GPL new file mode 100644 index 00000000..20d40b6b --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/LICENSE.GPL @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. 
+ + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. 
If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. 
+ + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. \ No newline at end of file diff --git a/contrib/NeRF-Editing/TetWild/LICENSE.MPL2 b/contrib/NeRF-Editing/TetWild/LICENSE.MPL2 new file mode 100644 index 00000000..f4bbcd20 --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/LICENSE.MPL2 @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. 
"Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. 
+Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. 
Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. 
Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. 
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+  This Source Code Form is subject to the terms of the Mozilla Public
+  License, v. 2.0. If a copy of the MPL was not distributed with this
+  file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+  This Source Code Form is "Incompatible With Secondary Licenses", as
+  defined by the Mozilla Public License, v. 2.0.
\ No newline at end of file
diff --git a/contrib/NeRF-Editing/TetWild/README.md b/contrib/NeRF-Editing/TetWild/README.md
new file mode 100644
index 00000000..78e808d6
--- /dev/null
+++ b/contrib/NeRF-Editing/TetWild/README.md
@@ -0,0 +1,238 @@
+# TetWild - Tetrahedral Meshing in the Wild
+![](docs/teaser.png)
+Yixin Hu, Qingnan Zhou, Xifeng Gao, Alec Jacobson, Denis Zorin, Daniele Panozzo.
+ACM Transactions on Graphics (SIGGRAPH 2018).
+
+```
+@article{Hu:2018:TMW:3197517.3201353,
+ author = {Hu, Yixin and Zhou, Qingnan and Gao, Xifeng and Jacobson, Alec and Zorin, Denis and Panozzo, Daniele},
+ title = {Tetrahedral Meshing in the Wild},
+ journal = {ACM Trans. Graph.},
+ issue_date = {August 2018},
+ volume = {37},
+ number = {4},
+ month = jul,
+ year = {2018},
+ issn = {0730-0301},
+ pages = {60:1--60:14},
+ articleno = {60},
+ numpages = {14},
+ url = {http://doi.acm.org/10.1145/3197517.3201353},
+ doi = {10.1145/3197517.3201353},
+ acmid = {3201353},
+ publisher = {ACM},
+ address = {New York, NY, USA},
+ keywords = {mesh generation, robust geometry processing, tetrahedral meshing},
+}
+```
+
+## News
+
+🎉🎉🎉 Our 2D version of TetWild, **TriWild**, is now public! TriWild can handle both linear and curved constraints, so we are now able to mesh curves robustly. Check it out 👉 **[TriWild](https://github.com/wildmeshing/TriWild)**.
+
+## Important Tips
+
+💡💡💡 **If you are interested in the algorithm details, please refer to our [paper](https://cs.nyu.edu/~yixinhu/tetwild.pdf) first. We provide plenty of examples and statistics in the paper. You can also refer to my SIGGRAPH talk [slides](https://slides.games-cn.org/pdf/GAMES201858%E8%83%A1%E8%AF%91%E5%BF%83.pdf).**
+
+💡💡💡 **Check our [license](https://github.com/Yixin-Hu/TetWild#license) first.**
+
+💡💡💡 **Our algorithm should be, and is, robust both in theory and in practice. If you do find TetWild crashing (e.g. on your laptop), please re-run it (e.g. on a cluster) with more resources. The most complex model I tested requires >100GB of memory.**
+
+💡💡💡 **The orientation of input faces is as important as the position of their vertices. Our algorithm is faithful to the input face positions and orientations. The winding number algorithm we use requires a reasonably consistent orientation of the input triangle mesh. If the input has a large region of faces that are all inverted (i.e. with flipped normals), the output tetmesh could have some parts missing or include some elements "outside" the surface around this region. This is expected, because the inverted input faces define the "inside" as outside. We provide three 2D examples below for easier understanding. The left of each sub-figure is the input boundary with normals and the right is the output triangle mesh.**
+
+![](docs/Slide1.jpg)
+
+
+## Dataset
+Here are pre-generated tetmeshes and the extracted surface meshes for research-purpose usage. **Please kindly cite our paper when using our pre-generated data.**
+
+- Input: [Thingi10k](https://ten-thousand-models.appspot.com/)
+
+- Output:
+[10k tetmeshes](https://drive.google.com/file/d/17AZwaQaj_nxdCIUpiGFCQ7_khNQxfG4Y/view?usp=sharing),
+[10k surface meshes](https://drive.google.com/open?id=1E_C1uVoG1ZGF3pfDpHFKIS8Qqd2VXLZQ)
+
+- Figures in the paper: [Input/output & scripts](https://drive.google.com/file/d/1P4wFOGOEebNp4pT-s4sFhl9GTSQcG0n_/view?usp=sharing)
+
+## Installation
+
+#### via Docker
+
+Install [Docker](https://docs.docker.com/) and run Docker. Pull the TetWild Docker image and run the binary:
+
+```bash
+docker pull yixinhu/tetwild
+docker run --rm -v "$(pwd)":/data yixinhu/tetwild [TetWild arguments]
+```
+
+Important note: If TetWild terminates unexpectedly, it is most likely due to Docker's memory limit. Please check the memory limit using `docker stats`.
+
+#### via CMake
+
+Our code was originally developed on MacOS and has been tested on Linux and Windows. We provide the commands for installing TetWild on MacOS:
+
+- Clone the repository onto your local machine:
+
+```bash
+git clone https://github.com/Yixin-Hu/TetWild
+```
+
+- Compile the code using cmake (default in Release mode):
+
+```bash
+cd TetWild
+mkdir build
+cd build
+cmake ..
+make
+```
+
+You may need to install `gmp` and `mpfr` or `CGAL` before compiling the code. You can install them via [homebrew](https://brew.sh/).
+
+```
+brew install gmp
+brew install mpfr
+```
+or
+
+```
+brew install cgal
+```
+
+- Check the installation:
+
+```bash
+./TetWild --help
+```
+This command should show a list of TetWild parameters.
+
+💡 If you do not have Boost installed (Boost-thread is needed for the logger spdlog), you can enable the cmake option `-DTETWILD_WITH_HUNTER=ON`. This will let CMake use [Hunter](https://github.com/ruslo/hunter) to download and configure Boost automatically.
+
+💡 If you find `Could not find Matlab` or `Could not find Mosek` in the output of cmake, it does not matter since they are not used.
+
+💡 We provide an option to use [ISPC](https://ispc.github.io/index.html) for computing the energy in parallel. It reduces the time for computing the energy to 50% of the original, but it could result in more optimization iterations and more overall running time. According to our experiment on 1000 models, it reduces the overall running time by 4% on average. If you want to use ISPC, please [install it first](https://ispc.github.io/ispc.html#installing-ispc) and then turn on the flag `GTET_ISPC` in `CMakeLists.txt`.
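+
+💡 Once the build succeeds (or the Docker image is pulled), a minimal end-to-end run looks like the sketch below. The file names are placeholders, and the flags are the ones documented under *Command Line Switches* further down.
+
+```bash
+# Placeholder file names: any surface mesh in .off/.obj/.stl/.ply format works as input.
+./TetWild bunny.off bunny_tet.msh --epsilon 1e-3 --ideal-edge-length 0.05
+
+# Or rely on the defaults; the output name is then derived from the input file and --postfix.
+./TetWild bunny.off
+```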
+
+## Usage
+
+### Input/output Format
+
+The inputs of our software are triangle surface meshes in `.off/.obj/.stl/.ply` format.
+
+We support `.mesh/.msh` format output. The default output format is `.msh` with the minimum dihedral angle recorded as an element scalar field, which can be visualized by the software [Gmsh](http://gmsh.info/). You can use `PyMesh::MshLoader` and `PyMesh::MshSaver` in `pymesh/` for reading and writing `.msh` meshes.
+
+💡 TetWild also outputs the surface of the tetmesh in `.obj` format if the `is_quiet` flag is not turned on.
+
+### Features
+Our software is quite easy to use. Basically, users only need to provide a surface triangle mesh as input and our mesher will output a tetrahedral mesh using default settings. If you want to customize your own tetmeshes, we also provide some options.
+
+- Envelope of size *epsilon*
+
+Using a smaller envelope preserves features better but also takes more time. The default value of *epsilon* is *b/1000*, where *b* is the length of the diagonal of the bounding box.
+
+- Ideal edge length
+
+Using a smaller ideal edge length gives a denser mesh but also takes more time. The default ideal edge length is *b/20*.
+
+- Filtering energy
+
+Our mesher stops optimizing the mesh when the maximum energy is smaller than the filtering energy. Thus, a larger filtering energy means less optimization and earlier stopping. If you do not care about quality, a larger filtering energy will give you the result earlier. The energy used here is conformal AMIPS, whose range is from 3 to +inf. The default filtering energy is 10.
+
+💡 We suggest not setting the filtering energy smaller than 8 for complex inputs.
+
+- Maximum number of optimization passes
+
+Our mesher stops optimizing the mesh when the maximum number of passes is reached. The default number is 80.
+
+- Targeted number of vertices
+
+Users can specify a targeted number of vertices, and the mesher will try its best to match that number within 5% error. When the targeted number of vertices is unrealistically small, the output tetmesh may not match it.
+
+💡 If you want a tetmesh with low resolution, please use a larger envelope and a larger ideal edge length.
+
+- Sizing field
+
+Users can provide a background tetmesh in .msh format with a vertex scalar field `values` stored. The scalar field `values` is used for controlling the edge length. The scalars inside an element of the background mesh are linearly interpolated.
+
+💡 [Here](https://drive.google.com/open?id=1-5AyoQ-CdZnX8IAqZoqgW1tiNBTNvFjJ) is an example including an input surface mesh, a background mesh, and output tetmeshes with/without sizing control.
+
+- Smoothing open regions
+
+Our method can fill gaps and holes, but the tetmesh faces on those parts could be bumpy. We provide an option to apply Laplacian smoothing on those faces to get a smoother surface.
+
+- Without winding number
+
+Please use `--save-mid-result 2`.
+
+### Command Line Switches
+Our software can be used via the command line or via a C++ function wrapper. Here is an overview of all command line switches:
+
+```
+RobustTetMeshing
+Usage: ./TetWild [OPTIONS] input [output]
+
+Positionals:
+  input TEXT REQUIRED         Input surface mesh INPUT in .off/.obj/.stl/.ply format. (string, required)
+  output TEXT                 Output tetmesh OUTPUT in .msh format. (string, optional, default: input_file+postfix+'.msh')
+
+Options:
+  -h,--help                   Print this help message and exit
+  --input TEXT REQUIRED       Input surface mesh INPUT in .off/.obj/.stl/.ply format. (string, required)
+  --output TEXT               Output tetmesh OUTPUT in .msh or .mesh format. (string, optional, default: input_file+postfix+'.msh')
+  --postfix TEXT              Postfix P for output files. (string, optional, default: '_')
+  -l,--ideal-edge-length FLOAT
+                              ideal_edge_length = diag_of_bbox * L. (double, optional, default: 0.05)
+  -e,--epsilon FLOAT          epsilon = diag_of_bbox * EPS. (double, optional, default: 1e-3)
+  --stage INT                 Run pipeline in stage STAGE. (integer, optional, default: 1)
+  --filter-energy FLOAT       Stop mesh improvement when the maximum energy is smaller than ENERGY. (double, optional, default: 10)
+  --max-pass INT              Do PASS mesh improvement passes in maximum. (integer, optional, default: 80)
+  --is-laplacian              Do Laplacian smoothing for the surface of output on the holes of input (optional)
+  --targeted-num-v INT        Output tetmesh that contains TV vertices. (integer, optional, tolerance: 5%)
+  --bg-mesh TEXT              Background tetmesh BGMESH in .msh format for applying sizing field. (string, optional)
+  --save-mid-result           0: save result before optimization, 1: save mid-results during optimization, 2: save result without winding number.
+  -q,--is-quiet               Mute console output. (optional)
+  --log TEXT                  Log info to given file.
+  --level INT                 Log level (0 = most verbose, 6 = off).
+```
+
+
+### Function Wrapper
+
+💡 We use [libigl](https://github.com/libigl/libigl) to read the input triangle mesh. If you encounter any issues loading your mesh with libigl, please open a ticket there. Alternatively, you could load the mesh yourself and use our function wrapper to pass the raw data directly to TetWild.
+
+We provide a wrapper for TetWild in `tetwild.h`, allowing users to do the tetrahedralization without reading/writing data from/to files. One can use it in the following way:
+
+1. Include the wrapper header `tetwild.h`.
+2. Set parameters through a struct variable `tetwild::Args args`. The following table provides the correspondence between parameters and command line switches.
+
+   | Switch              | Parameter                   |
+   |:--------------------|:----------------------------|
+   | --input             | N/A                         |
+   | --postfix           | `args.postfix`              |
+   | --output            | N/A                         |
+   | --ideal-edge-length | `args.initial_edge_len_rel` |
+   | --epsilon           | `args.eps_rel`              |
+   | --stage             | `args.stage`                |
+   | --filter-energy     | `args.filter_energy_thres`  |
+   | --max-pass          | `args.max_num_passes`       |
+   | --is-quiet          | `args.is_quiet`             |
+   | --targeted-num-v    | `args.target_num_vertices`  |
+   | --bg-mesh           | `args.background_mesh`      |
+   | --is-laplacian      | `args.smooth_open_boundary` |
+
+3. Call the function `tetwild::tetrahedralization(v_in, f_in, v_out, t_out, a_out, args)`. The input/output arguments are described in the function docstring and use libigl-style matrices for representing a mesh.
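+
+For concreteness, here is a minimal sketch of steps 1-3. The Eigen matrix types, the exact include path, and the role of the last output argument are assumptions based on the libigl-style interface described above; check `tetwild.h` for the authoritative signature.
+
+```cpp
+// Minimal sketch: tetrahedralize a surface mesh through the C++ wrapper.
+// Types and paths marked below are assumptions, not the official documentation.
+#include <tetwild/tetwild.h>   // assumed include path for the wrapper header
+#include <igl/readOBJ.h>
+#include <Eigen/Core>
+
+int main()
+{
+    Eigen::MatrixXd VI, VO;  // input surface vertices / output tet vertices
+    Eigen::MatrixXi FI, TO;  // input triangles / output tetrahedra
+    Eigen::VectorXd AO;      // per-tet attribute filled by the wrapper
+
+    igl::readOBJ("input.obj", VI, FI);  // placeholder input path
+
+    tetwild::Args args;
+    args.eps_rel              = 1e-3;  // same as --epsilon
+    args.initial_edge_len_rel = 0.05;  // same as --ideal-edge-length
+    args.filter_energy_thres  = 10;    // same as --filter-energy
+
+    tetwild::tetrahedralization(VI, FI, VO, TO, AO, args);
+    return 0;
+}
+```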
+
+## License
+TetWild is MPL2 licensed. But it contains CGAL code under the GPL license.
+
+TetWild is free for both commercial and non-commercial use. However, you have to cite our work in your paper or put a reference to TetWild in your software. Whenever you fix bugs or make improvements to TetWild, you should contribute back.
+
+## Acknowledgements
+
+We used several useful libraries in our implementation, testing, and rendering, listed as follows. We would like to especially thank their authors for their great work and for publishing the code.
+
+- [PyMesh](https://github.com/qnzhou/PyMesh)
+- [PyRenderer](https://github.com/qnzhou/PyRenderer)
+- [CLI11](https://github.com/CLIUtils/CLI11)
diff --git a/contrib/NeRF-Editing/TetWild/cmake/DownloadProject.CMakeLists.cmake.in b/contrib/NeRF-Editing/TetWild/cmake/DownloadProject.CMakeLists.cmake.in
new file mode 100644
index 00000000..89be4fdd
--- /dev/null
+++ b/contrib/NeRF-Editing/TetWild/cmake/DownloadProject.CMakeLists.cmake.in
@@ -0,0 +1,17 @@
+# Distributed under the OSI-approved MIT License. See accompanying
+# file LICENSE or https://github.com/Crascit/DownloadProject for details.
+
+cmake_minimum_required(VERSION 2.8.2)
+
+project(${DL_ARGS_PROJ}-download NONE)
+
+include(ExternalProject)
+ExternalProject_Add(${DL_ARGS_PROJ}-download
+                    ${DL_ARGS_UNPARSED_ARGUMENTS}
+                    SOURCE_DIR          "${DL_ARGS_SOURCE_DIR}"
+                    BINARY_DIR          "${DL_ARGS_BINARY_DIR}"
+                    CONFIGURE_COMMAND   ""
+                    BUILD_COMMAND       ""
+                    INSTALL_COMMAND     ""
+                    TEST_COMMAND        ""
+)
diff --git a/contrib/NeRF-Editing/TetWild/cmake/DownloadProject.cmake b/contrib/NeRF-Editing/TetWild/cmake/DownloadProject.cmake
new file mode 100644
index 00000000..e300f426
--- /dev/null
+++ b/contrib/NeRF-Editing/TetWild/cmake/DownloadProject.cmake
@@ -0,0 +1,182 @@
+# Distributed under the OSI-approved MIT License. See accompanying
+# file LICENSE or https://github.com/Crascit/DownloadProject for details.
+#
+# MODULE: DownloadProject
+#
+# PROVIDES:
+#     download_project( PROJ projectName
+#                      [PREFIX prefixDir]
+#                      [DOWNLOAD_DIR downloadDir]
+#                      [SOURCE_DIR srcDir]
+#                      [BINARY_DIR binDir]
+#                      [QUIET]
+#                      ...
+#     )
+#
+# Provides the ability to download and unpack a tarball, zip file, git repository,
+# etc. at configure time (i.e. when the cmake command is run). How the downloaded
+# and unpacked contents are used is up to the caller, but the motivating case is
+# to download source code which can then be included directly in the build with
+# add_subdirectory() after the call to download_project(). Source and build
+# directories are set up with this in mind.
+#
+# The PROJ argument is required. The projectName value will be used to construct
+# the following variables upon exit (obviously replace projectName with its actual
+# value):
+#
+#     projectName_SOURCE_DIR
+#     projectName_BINARY_DIR
+#
+# The SOURCE_DIR and BINARY_DIR arguments are optional and would not typically
+# need to be provided. They can be specified if you want the downloaded source
+# and build directories to be located in a specific place. The contents of
+# projectName_SOURCE_DIR and projectName_BINARY_DIR will be populated with the
+# locations used whether you provide SOURCE_DIR/BINARY_DIR or not.
+#
+# The DOWNLOAD_DIR argument does not normally need to be set. It controls the
+# location of the temporary CMake build used to perform the download.
+#
+# The PREFIX argument can be provided to change the base location of the default
+# values of DOWNLOAD_DIR, SOURCE_DIR and BINARY_DIR. If all of those three arguments
+# are provided, then PREFIX will have no effect. The default value for PREFIX is
+# CMAKE_BINARY_DIR.
+#
+# The QUIET option can be given if you do not want to show the output associated
+# with downloading the specified project.
+#
+# In addition to the above, any other options are passed through unmodified to
+# ExternalProject_Add() to perform the actual download, patch and update steps.
+# The following ExternalProject_Add() options are explicitly prohibited (they +# are reserved for use by the download_project() command): +# +# CONFIGURE_COMMAND +# BUILD_COMMAND +# INSTALL_COMMAND +# TEST_COMMAND +# +# Only those ExternalProject_Add() arguments which relate to downloading, patching +# and updating of the project sources are intended to be used. Also note that at +# least one set of download-related arguments are required. +# +# If using CMake 3.2 or later, the UPDATE_DISCONNECTED option can be used to +# prevent a check at the remote end for changes every time CMake is run +# after the first successful download. See the documentation of the ExternalProject +# module for more information. It is likely you will want to use this option if it +# is available to you. Note, however, that the ExternalProject implementation contains +# bugs which result in incorrect handling of the UPDATE_DISCONNECTED option when +# using the URL download method or when specifying a SOURCE_DIR with no download +# method. Fixes for these have been created, the last of which is scheduled for +# inclusion in CMake 3.8.0. Details can be found here: +# +# https://gitlab.kitware.com/cmake/cmake/commit/bdca68388bd57f8302d3c1d83d691034b7ffa70c +# https://gitlab.kitware.com/cmake/cmake/issues/16428 +# +# If you experience build errors related to the update step, consider avoiding +# the use of UPDATE_DISCONNECTED. +# +# EXAMPLE USAGE: +# +# include(DownloadProject) +# download_project(PROJ googletest +# GIT_REPOSITORY https://github.com/google/googletest.git +# GIT_TAG master +# UPDATE_DISCONNECTED 1 +# QUIET +# ) +# +# add_subdirectory(${googletest_SOURCE_DIR} ${googletest_BINARY_DIR}) +# +#======================================================================================== + + +set(_DownloadProjectDir "${CMAKE_CURRENT_LIST_DIR}") + +include(CMakeParseArguments) + +function(download_project) + + set(options QUIET) + set(oneValueArgs + PROJ + PREFIX + DOWNLOAD_DIR + SOURCE_DIR + BINARY_DIR + # Prevent the following from being passed through + CONFIGURE_COMMAND + BUILD_COMMAND + INSTALL_COMMAND + TEST_COMMAND + ) + set(multiValueArgs "") + + cmake_parse_arguments(DL_ARGS "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) + + # Hide output if requested + if (DL_ARGS_QUIET) + set(OUTPUT_QUIET "OUTPUT_QUIET") + else() + unset(OUTPUT_QUIET) + message(STATUS "Downloading/updating ${DL_ARGS_PROJ}") + endif() + + # Set up where we will put our temporary CMakeLists.txt file and also + # the base point below which the default source and binary dirs will be. + # The prefix must always be an absolute path. 
+ if (NOT DL_ARGS_PREFIX) + set(DL_ARGS_PREFIX "${CMAKE_BINARY_DIR}") + else() + get_filename_component(DL_ARGS_PREFIX "${DL_ARGS_PREFIX}" ABSOLUTE + BASE_DIR "${CMAKE_CURRENT_BINARY_DIR}") + endif() + if (NOT DL_ARGS_DOWNLOAD_DIR) + set(DL_ARGS_DOWNLOAD_DIR "${DL_ARGS_PREFIX}/${DL_ARGS_PROJ}-download") + endif() + + # Ensure the caller can know where to find the source and build directories + if (NOT DL_ARGS_SOURCE_DIR) + set(DL_ARGS_SOURCE_DIR "${DL_ARGS_PREFIX}/${DL_ARGS_PROJ}-src") + endif() + if (NOT DL_ARGS_BINARY_DIR) + set(DL_ARGS_BINARY_DIR "${DL_ARGS_PREFIX}/${DL_ARGS_PROJ}-build") + endif() + set(${DL_ARGS_PROJ}_SOURCE_DIR "${DL_ARGS_SOURCE_DIR}" PARENT_SCOPE) + set(${DL_ARGS_PROJ}_BINARY_DIR "${DL_ARGS_BINARY_DIR}" PARENT_SCOPE) + + # The way that CLion manages multiple configurations, it causes a copy of + # the CMakeCache.txt to be copied across due to it not expecting there to + # be a project within a project. This causes the hard-coded paths in the + # cache to be copied and builds to fail. To mitigate this, we simply + # remove the cache if it exists before we configure the new project. It + # is safe to do so because it will be re-generated. Since this is only + # executed at the configure step, it should not cause additional builds or + # downloads. + file(REMOVE "${DL_ARGS_DOWNLOAD_DIR}/CMakeCache.txt") + + # Create and build a separate CMake project to carry out the download. + # If we've already previously done these steps, they will not cause + # anything to be updated, so extra rebuilds of the project won't occur. + # Make sure to pass through CMAKE_MAKE_PROGRAM in case the main project + # has this set to something not findable on the PATH. + configure_file("${_DownloadProjectDir}/DownloadProject.CMakeLists.cmake.in" + "${DL_ARGS_DOWNLOAD_DIR}/CMakeLists.txt") + execute_process(COMMAND ${CMAKE_COMMAND} -G "${CMAKE_GENERATOR}" + -D "CMAKE_MAKE_PROGRAM:FILE=${CMAKE_MAKE_PROGRAM}" + . + RESULT_VARIABLE result + ${OUTPUT_QUIET} + WORKING_DIRECTORY "${DL_ARGS_DOWNLOAD_DIR}" + ) + if(result) + message(FATAL_ERROR "CMake step for ${DL_ARGS_PROJ} failed: ${result}") + endif() + execute_process(COMMAND ${CMAKE_COMMAND} --build . 
+ RESULT_VARIABLE result + ${OUTPUT_QUIET} + WORKING_DIRECTORY "${DL_ARGS_DOWNLOAD_DIR}" + ) + if(result) + message(FATAL_ERROR "Build step for ${DL_ARGS_PROJ} failed: ${result}") + endif() + +endfunction() diff --git a/contrib/NeRF-Editing/TetWild/cmake/FindLIBIGL.cmake b/contrib/NeRF-Editing/TetWild/cmake/FindLIBIGL.cmake new file mode 100644 index 00000000..b0619fd4 --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/cmake/FindLIBIGL.cmake @@ -0,0 +1,33 @@ +# - Try to find the LIBIGL library +# Once done this will define +# +# LIBIGL_FOUND - system has LIBIGL +# LIBIGL_INCLUDE_DIR - **the** LIBIGL include directory +if(LIBIGL_FOUND OR TARGET igl::core) + return() +endif() + +find_path(LIBIGL_INCLUDE_DIR igl/readOBJ.h + HINTS + # ENV LIBIGL + # ENV LIBIGLROOT + # ENV LIBIGL_ROOT + # ENV LIBIGL_DIR + PATHS + ${TETWILD_EXTERNAL}/libigl + # /usr + # /usr/local + # /usr/local/igl/libigl + PATH_SUFFIXES include + NO_CMAKE_SYSTEM_PATH +) + +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(LIBIGL + "\nlibigl not found --- You can download it using:\n\tgit clone --recursive https://github.com/libigl/libigl.git ${CMAKE_SOURCE_DIR}/../libigl" + LIBIGL_INCLUDE_DIR) +mark_as_advanced(LIBIGL_INCLUDE_DIR) + +#list(APPEND CMAKE_MODULE_PATH "${LIBIGL_INCLUDE_DIR}/../shared/cmake") +list(APPEND CMAKE_MODULE_PATH "${LIBIGL_INCLUDE_DIR}/../cmake") +include(libigl) diff --git a/contrib/NeRF-Editing/TetWild/cmake/HunterGate.cmake b/contrib/NeRF-Editing/TetWild/cmake/HunterGate.cmake new file mode 100644 index 00000000..887557a5 --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/cmake/HunterGate.cmake @@ -0,0 +1,540 @@ +# Copyright (c) 2013-2018, Ruslan Baratov +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# This is a gate file to Hunter package manager. 
+# Include this file using `include` command and add package you need, example: +# +# cmake_minimum_required(VERSION 3.2) +# +# include("cmake/HunterGate.cmake") +# HunterGate( +# URL "https://github.com/path/to/hunter/archive.tar.gz" +# SHA1 "798501e983f14b28b10cda16afa4de69eee1da1d" +# ) +# +# project(MyProject) +# +# hunter_add_package(Foo) +# hunter_add_package(Boo COMPONENTS Bar Baz) +# +# Projects: +# * https://github.com/hunter-packages/gate/ +# * https://github.com/ruslo/hunter + +option(HUNTER_ENABLED "Enable Hunter package manager support" ON) + +if(HUNTER_ENABLED) + if(CMAKE_VERSION VERSION_LESS "3.2") + message( + FATAL_ERROR + "At least CMake version 3.2 required for Hunter dependency management." + " Update CMake or set HUNTER_ENABLED to OFF." + ) + endif() +endif() + +include(CMakeParseArguments) # cmake_parse_arguments + +option(HUNTER_STATUS_PRINT "Print working status" ON) +option(HUNTER_STATUS_DEBUG "Print a lot info" OFF) +option(HUNTER_TLS_VERIFY "Enable/disable TLS certificate checking on downloads" ON) + +set(HUNTER_WIKI "https://github.com/ruslo/hunter/wiki") + +function(hunter_gate_status_print) + if(HUNTER_STATUS_PRINT OR HUNTER_STATUS_DEBUG) + foreach(print_message ${ARGV}) + message(STATUS "[hunter] ${print_message}") + endforeach() + endif() +endfunction() + +function(hunter_gate_status_debug) + if(HUNTER_STATUS_DEBUG) + foreach(print_message ${ARGV}) + string(TIMESTAMP timestamp) + message(STATUS "[hunter *** DEBUG *** ${timestamp}] ${print_message}") + endforeach() + endif() +endfunction() + +function(hunter_gate_wiki wiki_page) + message("------------------------------ WIKI -------------------------------") + message(" ${HUNTER_WIKI}/${wiki_page}") + message("-------------------------------------------------------------------") + message("") + message(FATAL_ERROR "") +endfunction() + +function(hunter_gate_internal_error) + message("") + foreach(print_message ${ARGV}) + message("[hunter ** INTERNAL **] ${print_message}") + endforeach() + message("[hunter ** INTERNAL **] [Directory:${CMAKE_CURRENT_LIST_DIR}]") + message("") + hunter_gate_wiki("error.internal") +endfunction() + +function(hunter_gate_fatal_error) + cmake_parse_arguments(hunter "" "WIKI" "" "${ARGV}") + string(COMPARE EQUAL "${hunter_WIKI}" "" have_no_wiki) + if(have_no_wiki) + hunter_gate_internal_error("Expected wiki") + endif() + message("") + foreach(x ${hunter_UNPARSED_ARGUMENTS}) + message("[hunter ** FATAL ERROR **] ${x}") + endforeach() + message("[hunter ** FATAL ERROR **] [Directory:${CMAKE_CURRENT_LIST_DIR}]") + message("") + hunter_gate_wiki("${hunter_WIKI}") +endfunction() + +function(hunter_gate_user_error) + hunter_gate_fatal_error(${ARGV} WIKI "error.incorrect.input.data") +endfunction() + +function(hunter_gate_self root version sha1 result) + string(COMPARE EQUAL "${root}" "" is_bad) + if(is_bad) + hunter_gate_internal_error("root is empty") + endif() + + string(COMPARE EQUAL "${version}" "" is_bad) + if(is_bad) + hunter_gate_internal_error("version is empty") + endif() + + string(COMPARE EQUAL "${sha1}" "" is_bad) + if(is_bad) + hunter_gate_internal_error("sha1 is empty") + endif() + + string(SUBSTRING "${sha1}" 0 7 archive_id) + + if(EXISTS "${root}/cmake/Hunter") + set(hunter_self "${root}") + else() + set( + hunter_self + "${root}/_Base/Download/Hunter/${version}/${archive_id}/Unpacked" + ) + endif() + + set("${result}" "${hunter_self}" PARENT_SCOPE) +endfunction() + +# Set HUNTER_GATE_ROOT cmake variable to suitable value. 
+function(hunter_gate_detect_root) + # Check CMake variable + string(COMPARE NOTEQUAL "${HUNTER_ROOT}" "" not_empty) + if(not_empty) + set(HUNTER_GATE_ROOT "${HUNTER_ROOT}" PARENT_SCOPE) + hunter_gate_status_debug("HUNTER_ROOT detected by cmake variable") + return() + endif() + + # Check environment variable + string(COMPARE NOTEQUAL "$ENV{HUNTER_ROOT}" "" not_empty) + if(not_empty) + set(HUNTER_GATE_ROOT "$ENV{HUNTER_ROOT}" PARENT_SCOPE) + hunter_gate_status_debug("HUNTER_ROOT detected by environment variable") + return() + endif() + + # Check HOME environment variable + string(COMPARE NOTEQUAL "$ENV{HOME}" "" result) + if(result) + set(HUNTER_GATE_ROOT "$ENV{HOME}/.hunter" PARENT_SCOPE) + hunter_gate_status_debug("HUNTER_ROOT set using HOME environment variable") + return() + endif() + + # Check SYSTEMDRIVE and USERPROFILE environment variable (windows only) + if(WIN32) + string(COMPARE NOTEQUAL "$ENV{SYSTEMDRIVE}" "" result) + if(result) + set(HUNTER_GATE_ROOT "$ENV{SYSTEMDRIVE}/.hunter" PARENT_SCOPE) + hunter_gate_status_debug( + "HUNTER_ROOT set using SYSTEMDRIVE environment variable" + ) + return() + endif() + + string(COMPARE NOTEQUAL "$ENV{USERPROFILE}" "" result) + if(result) + set(HUNTER_GATE_ROOT "$ENV{USERPROFILE}/.hunter" PARENT_SCOPE) + hunter_gate_status_debug( + "HUNTER_ROOT set using USERPROFILE environment variable" + ) + return() + endif() + endif() + + hunter_gate_fatal_error( + "Can't detect HUNTER_ROOT" + WIKI "error.detect.hunter.root" + ) +endfunction() + +function(hunter_gate_download dir) + string( + COMPARE + NOTEQUAL + "$ENV{HUNTER_DISABLE_AUTOINSTALL}" + "" + disable_autoinstall + ) + if(disable_autoinstall AND NOT HUNTER_RUN_INSTALL) + hunter_gate_fatal_error( + "Hunter not found in '${dir}'" + "Set HUNTER_RUN_INSTALL=ON to auto-install it from '${HUNTER_GATE_URL}'" + "Settings:" + " HUNTER_ROOT: ${HUNTER_GATE_ROOT}" + " HUNTER_SHA1: ${HUNTER_GATE_SHA1}" + WIKI "error.run.install" + ) + endif() + string(COMPARE EQUAL "${dir}" "" is_bad) + if(is_bad) + hunter_gate_internal_error("Empty 'dir' argument") + endif() + + string(COMPARE EQUAL "${HUNTER_GATE_SHA1}" "" is_bad) + if(is_bad) + hunter_gate_internal_error("HUNTER_GATE_SHA1 empty") + endif() + + string(COMPARE EQUAL "${HUNTER_GATE_URL}" "" is_bad) + if(is_bad) + hunter_gate_internal_error("HUNTER_GATE_URL empty") + endif() + + set(done_location "${dir}/DONE") + set(sha1_location "${dir}/SHA1") + + set(build_dir "${dir}/Build") + set(cmakelists "${dir}/CMakeLists.txt") + + hunter_gate_status_debug("Locking directory: ${dir}") + file(LOCK "${dir}" DIRECTORY GUARD FUNCTION) + hunter_gate_status_debug("Lock done") + + if(EXISTS "${done_location}") + # while waiting for lock other instance can do all the job + hunter_gate_status_debug("File '${done_location}' found, skip install") + return() + endif() + + file(REMOVE_RECURSE "${build_dir}") + file(REMOVE_RECURSE "${cmakelists}") + + file(MAKE_DIRECTORY "${build_dir}") # check directory permissions + + # Disabling languages speeds up a little bit, reduces noise in the output + # and avoids path too long windows error + file( + WRITE + "${cmakelists}" + "cmake_minimum_required(VERSION 3.2)\n" + "project(HunterDownload LANGUAGES NONE)\n" + "include(ExternalProject)\n" + "ExternalProject_Add(\n" + " Hunter\n" + " URL\n" + " \"${HUNTER_GATE_URL}\"\n" + " URL_HASH\n" + " SHA1=${HUNTER_GATE_SHA1}\n" + " DOWNLOAD_DIR\n" + " \"${dir}\"\n" + " TLS_VERIFY\n" + " ${HUNTER_TLS_VERIFY}\n" + " SOURCE_DIR\n" + " \"${dir}/Unpacked\"\n" + " CONFIGURE_COMMAND\n" + " \"\"\n" + " 
BUILD_COMMAND\n" + " \"\"\n" + " INSTALL_COMMAND\n" + " \"\"\n" + ")\n" + ) + + if(HUNTER_STATUS_DEBUG) + set(logging_params "") + else() + set(logging_params OUTPUT_QUIET) + endif() + + hunter_gate_status_debug("Run generate") + + # Need to add toolchain file too. + # Otherwise on Visual Studio + MDD this will fail with error: + # "Could not find an appropriate version of the Windows 10 SDK installed on this machine" + if(EXISTS "${CMAKE_TOOLCHAIN_FILE}") + get_filename_component(absolute_CMAKE_TOOLCHAIN_FILE "${CMAKE_TOOLCHAIN_FILE}" ABSOLUTE) + set(toolchain_arg "-DCMAKE_TOOLCHAIN_FILE=${absolute_CMAKE_TOOLCHAIN_FILE}") + else() + # 'toolchain_arg' can't be empty + set(toolchain_arg "-DCMAKE_TOOLCHAIN_FILE=") + endif() + + string(COMPARE EQUAL "${CMAKE_MAKE_PROGRAM}" "" no_make) + if(no_make) + set(make_arg "") + else() + # Test case: remove Ninja from PATH but set it via CMAKE_MAKE_PROGRAM + set(make_arg "-DCMAKE_MAKE_PROGRAM=${CMAKE_MAKE_PROGRAM}") + endif() + + execute_process( + COMMAND + "${CMAKE_COMMAND}" + "-H${dir}" + "-B${build_dir}" + "-G${CMAKE_GENERATOR}" + "${toolchain_arg}" + ${make_arg} + WORKING_DIRECTORY "${dir}" + RESULT_VARIABLE download_result + ${logging_params} + ) + + if(NOT download_result EQUAL 0) + hunter_gate_internal_error( + "Configure project failed." + "To reproduce the error run: ${CMAKE_COMMAND} -H${dir} -B${build_dir} -G${CMAKE_GENERATOR} ${toolchain_arg} ${make_arg}" + "In directory ${dir}" + ) + endif() + + hunter_gate_status_print( + "Initializing Hunter workspace (${HUNTER_GATE_SHA1})" + " ${HUNTER_GATE_URL}" + " -> ${dir}" + ) + execute_process( + COMMAND "${CMAKE_COMMAND}" --build "${build_dir}" + WORKING_DIRECTORY "${dir}" + RESULT_VARIABLE download_result + ${logging_params} + ) + + if(NOT download_result EQUAL 0) + hunter_gate_internal_error("Build project failed") + endif() + + file(REMOVE_RECURSE "${build_dir}") + file(REMOVE_RECURSE "${cmakelists}") + + file(WRITE "${sha1_location}" "${HUNTER_GATE_SHA1}") + file(WRITE "${done_location}" "DONE") + + hunter_gate_status_debug("Finished") +endfunction() + +# Must be a macro so master file 'cmake/Hunter' can +# apply all variables easily just by 'include' command +# (otherwise PARENT_SCOPE magic needed) +macro(HunterGate) + if(HUNTER_GATE_DONE) + # variable HUNTER_GATE_DONE set explicitly for external project + # (see `hunter_download`) + set_property(GLOBAL PROPERTY HUNTER_GATE_DONE YES) + endif() + + # First HunterGate command will init Hunter, others will be ignored + get_property(_hunter_gate_done GLOBAL PROPERTY HUNTER_GATE_DONE SET) + + if(NOT HUNTER_ENABLED) + # Empty function to avoid error "unknown function" + function(hunter_add_package) + endfunction() + + set( + _hunter_gate_disabled_mode_dir + "${CMAKE_CURRENT_LIST_DIR}/cmake/Hunter/disabled-mode" + ) + if(EXISTS "${_hunter_gate_disabled_mode_dir}") + hunter_gate_status_debug( + "Adding \"disabled-mode\" modules: ${_hunter_gate_disabled_mode_dir}" + ) + list(APPEND CMAKE_PREFIX_PATH "${_hunter_gate_disabled_mode_dir}") + endif() + elseif(_hunter_gate_done) + hunter_gate_status_debug("Secondary HunterGate (use old settings)") + hunter_gate_self( + "${HUNTER_CACHED_ROOT}" + "${HUNTER_VERSION}" + "${HUNTER_SHA1}" + _hunter_self + ) + include("${_hunter_self}/cmake/Hunter") + else() + set(HUNTER_GATE_LOCATION "${CMAKE_CURRENT_SOURCE_DIR}") + + string(COMPARE NOTEQUAL "${PROJECT_NAME}" "" _have_project_name) + if(_have_project_name) + hunter_gate_fatal_error( + "Please set HunterGate *before* 'project' command. 
" + "Detected project: ${PROJECT_NAME}" + WIKI "error.huntergate.before.project" + ) + endif() + + cmake_parse_arguments( + HUNTER_GATE "LOCAL" "URL;SHA1;GLOBAL;FILEPATH" "" ${ARGV} + ) + + string(COMPARE EQUAL "${HUNTER_GATE_SHA1}" "" _empty_sha1) + string(COMPARE EQUAL "${HUNTER_GATE_URL}" "" _empty_url) + string( + COMPARE + NOTEQUAL + "${HUNTER_GATE_UNPARSED_ARGUMENTS}" + "" + _have_unparsed + ) + string(COMPARE NOTEQUAL "${HUNTER_GATE_GLOBAL}" "" _have_global) + string(COMPARE NOTEQUAL "${HUNTER_GATE_FILEPATH}" "" _have_filepath) + + if(_have_unparsed) + hunter_gate_user_error( + "HunterGate unparsed arguments: ${HUNTER_GATE_UNPARSED_ARGUMENTS}" + ) + endif() + if(_empty_sha1) + hunter_gate_user_error("SHA1 suboption of HunterGate is mandatory") + endif() + if(_empty_url) + hunter_gate_user_error("URL suboption of HunterGate is mandatory") + endif() + if(_have_global) + if(HUNTER_GATE_LOCAL) + hunter_gate_user_error("Unexpected LOCAL (already has GLOBAL)") + endif() + if(_have_filepath) + hunter_gate_user_error("Unexpected FILEPATH (already has GLOBAL)") + endif() + endif() + if(HUNTER_GATE_LOCAL) + if(_have_global) + hunter_gate_user_error("Unexpected GLOBAL (already has LOCAL)") + endif() + if(_have_filepath) + hunter_gate_user_error("Unexpected FILEPATH (already has LOCAL)") + endif() + endif() + if(_have_filepath) + if(_have_global) + hunter_gate_user_error("Unexpected GLOBAL (already has FILEPATH)") + endif() + if(HUNTER_GATE_LOCAL) + hunter_gate_user_error("Unexpected LOCAL (already has FILEPATH)") + endif() + endif() + + hunter_gate_detect_root() # set HUNTER_GATE_ROOT + + # Beautify path, fix probable problems with windows path slashes + get_filename_component( + HUNTER_GATE_ROOT "${HUNTER_GATE_ROOT}" ABSOLUTE + ) + hunter_gate_status_debug("HUNTER_ROOT: ${HUNTER_GATE_ROOT}") + if(NOT HUNTER_ALLOW_SPACES_IN_PATH) + string(FIND "${HUNTER_GATE_ROOT}" " " _contain_spaces) + if(NOT _contain_spaces EQUAL -1) + hunter_gate_fatal_error( + "HUNTER_ROOT (${HUNTER_GATE_ROOT}) contains spaces." + "Set HUNTER_ALLOW_SPACES_IN_PATH=ON to skip this error" + "(Use at your own risk!)" + WIKI "error.spaces.in.hunter.root" + ) + endif() + endif() + + string( + REGEX + MATCH + "[0-9]+\\.[0-9]+\\.[0-9]+[-_a-z0-9]*" + HUNTER_GATE_VERSION + "${HUNTER_GATE_URL}" + ) + string(COMPARE EQUAL "${HUNTER_GATE_VERSION}" "" _is_empty) + if(_is_empty) + set(HUNTER_GATE_VERSION "unknown") + endif() + + hunter_gate_self( + "${HUNTER_GATE_ROOT}" + "${HUNTER_GATE_VERSION}" + "${HUNTER_GATE_SHA1}" + _hunter_self + ) + + set(_master_location "${_hunter_self}/cmake/Hunter") + if(EXISTS "${HUNTER_GATE_ROOT}/cmake/Hunter") + # Hunter downloaded manually (e.g. by 'git clone') + set(_unused "xxxxxxxxxx") + set(HUNTER_GATE_SHA1 "${_unused}") + set(HUNTER_GATE_VERSION "${_unused}") + else() + get_filename_component(_archive_id_location "${_hunter_self}/.." 
ABSOLUTE) + set(_done_location "${_archive_id_location}/DONE") + set(_sha1_location "${_archive_id_location}/SHA1") + + # Check Hunter already downloaded by HunterGate + if(NOT EXISTS "${_done_location}") + hunter_gate_download("${_archive_id_location}") + endif() + + if(NOT EXISTS "${_done_location}") + hunter_gate_internal_error("hunter_gate_download failed") + endif() + + if(NOT EXISTS "${_sha1_location}") + hunter_gate_internal_error("${_sha1_location} not found") + endif() + file(READ "${_sha1_location}" _sha1_value) + string(COMPARE EQUAL "${_sha1_value}" "${HUNTER_GATE_SHA1}" _is_equal) + if(NOT _is_equal) + hunter_gate_internal_error( + "Short SHA1 collision:" + " ${_sha1_value} (from ${_sha1_location})" + " ${HUNTER_GATE_SHA1} (HunterGate)" + ) + endif() + if(NOT EXISTS "${_master_location}") + hunter_gate_user_error( + "Master file not found:" + " ${_master_location}" + "try to update Hunter/HunterGate" + ) + endif() + endif() + include("${_master_location}") + set_property(GLOBAL PROPERTY HUNTER_GATE_DONE YES) + endif() +endmacro() diff --git a/contrib/NeRF-Editing/TetWild/cmake/TetWildDependencies.cmake b/contrib/NeRF-Editing/TetWild/cmake/TetWildDependencies.cmake new file mode 100644 index 00000000..795be6ad --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/cmake/TetWildDependencies.cmake @@ -0,0 +1,52 @@ +################################################################################ +# CMake download helpers +################################################################################ + +# download external dependencies +include(TetWildDownloadExternal) + +################################################################################ +# Required dependencies +################################################################################ + +# geogram +if(NOT TARGET geogram) + tetwild_download_geogram() + include(geogram) +endif() + +# Boost +if(TETWILD_WITH_HUNTER) + hunter_add_package(Boost COMPONENTS thread system) +endif() + +# fmt +if(NOT TARGET fmt::fmt) + tetwild_download_fmt() + add_subdirectory(${TETWILD_EXTERNAL}/fmt) +endif() + +# spdlog +if(NOT TARGET spdlog::spdlog) + tetwild_download_spdlog() + add_library(spdlog INTERFACE) + add_library(spdlog::spdlog ALIAS spdlog) + target_include_directories(spdlog INTERFACE ${TETWILD_EXTERNAL}/spdlog/include) + target_compile_definitions(spdlog INTERFACE -DSPDLOG_FMT_EXTERNAL) + target_link_libraries(spdlog INTERFACE fmt::fmt) +endif() + +# libigl +if(NOT TARGET igl::core) + tetwild_download_libigl() + find_package(LIBIGL REQUIRED) +endif() + +# pymesh loaders +add_subdirectory(${TETWILD_EXTERNAL}/pymesh) + +# CL11 +if(NOT TARGET CLI11::CLI11) + tetwild_download_cli11() + add_subdirectory(${TETWILD_EXTERNAL}/cli11) +endif() diff --git a/contrib/NeRF-Editing/TetWild/cmake/TetWildDownloadExternal.cmake b/contrib/NeRF-Editing/TetWild/cmake/TetWildDownloadExternal.cmake new file mode 100644 index 00000000..538e5e65 --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/cmake/TetWildDownloadExternal.cmake @@ -0,0 +1,54 @@ +################################################################################ +include(DownloadProject) + +# Shortcut function +function(tetwild_download_project name) + download_project( + PROJ ${name} + SOURCE_DIR ${TETWILD_EXTERNAL}/${name} + DOWNLOAD_DIR ${TETWILD_EXTERNAL}/.cache/${name} + ${ARGN} + ) +endfunction() + +################################################################################ + +## libigl +function(tetwild_download_libigl) + tetwild_download_project(libigl + GIT_REPOSITORY 
https://github.com/libigl/libigl.git + GIT_TAG v2.2.0 + ) +endfunction() + +## geogram +function(tetwild_download_geogram) + tetwild_download_project(geogram + GIT_REPOSITORY https://github.com/alicevision/geogram.git + GIT_TAG v1.6.7 + ) +endfunction() + +## fmt +function(tetwild_download_fmt) + tetwild_download_project(fmt + GIT_REPOSITORY https://github.com/fmtlib/fmt.git + GIT_TAG 5.2.0 + ) +endfunction() + +## spdlog +function(tetwild_download_spdlog) + tetwild_download_project(spdlog + GIT_REPOSITORY https://github.com/gabime/spdlog.git + GIT_TAG v1.1.0 + ) +endfunction() + +## CLI11 +function(tetwild_download_cli11) + tetwild_download_project(cli11 + URL https://github.com/CLIUtils/CLI11/archive/v1.6.1.tar.gz + URL_MD5 48ef97262adb0b47a2f0a7edbda6e2aa + ) +endfunction() diff --git a/contrib/NeRF-Editing/TetWild/cmake/UseColors.cmake b/contrib/NeRF-Editing/TetWild/cmake/UseColors.cmake new file mode 100644 index 00000000..78017566 --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/cmake/UseColors.cmake @@ -0,0 +1,47 @@ +################################################################################ +# When using Clang, there is nothing to do: colors are enabled by default +# When using GCC >= 4.9, colored diagnostics can be enabled natively +# When using an older version, one can use gccfilter (a perl script) +# +# I do not recommend using gccfilter as of now (May 2014), because it seems to +# be bugged. But if you still want to try, here is how to install it on Ubuntu: +# +# +# 1) Download the perl script and add it to you $PATH +# mkdir -p ~/.local/bin +# wget -P ~/.local/bin http://www.mixtion.org/gccfilter/gccfilter +# chmod +x ~/local/bin/gccfilter +# echo 'PATH="$HOME/.local/bin:$PATH"' >> ~/.bashrc +# +# 2) Install the dependencies +# * Term::ANSIColor +# sudo cpan +# cpan> install Term::ANSIColor +# * The module "Getopt::Long" is included in "perl-base" +# * For Getopt::ArgvFile and Regexp::Common ... +# sudo apt-get install libgetopt-argvfile-perl libregexp-common-perl +# +################################################################################ + +if(CMAKE_COMPILER_IS_GNUCXX) + # If GCC >= 4.9, just activate the right option + # We enable colorized diagnostics always instead of using "auto" so that + # they're still colored when using Ninja. + if(NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.9) + message(STATUS "GCC >= 4.9 detected, enabling colored diagnostics") + add_definitions(-fdiagnostics-color=always) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fdiagnostics-color=always") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fdiagnostics-color=always") + return() + endif() + # If GCC < 4.9, maybe we can use gccfilter + find_program(GCC_FILTER gccfilter) + if(GCC_FILTER) + option(COLOR_GCC "Use GCCFilter to color compiler output messages" OFF) + set(COLOR_GCC_OPTIONS "-c -r -w" CACHE STRING "Arguments that are passed to gccfilter when output coloring is switchend on. 
Defaults to -c -r -w.") + if(COLOR_GCC) + set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE "${GCC_FILTER} ${COLOR_GCC_OPTIONS}") + message(STATUS "Using gccfilter for colored diagnostics") + endif() + endif() +endif() diff --git a/contrib/NeRF-Editing/TetWild/cmake/Warnings.cmake b/contrib/NeRF-Editing/TetWild/cmake/Warnings.cmake new file mode 100644 index 00000000..067fc404 --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/cmake/Warnings.cmake @@ -0,0 +1,160 @@ +################################################################################ +cmake_minimum_required(VERSION 3.1) +################################################################################ +# See comments and discussions here: +# http://stackoverflow.com/questions/5088460/flags-to-enable-thorough-and-verbose-g-warnings +################################################################################ + +if(TARGET warnings::all) + return() +endif() + +set(MY_FLAGS + -Wall + -Wextra + -pedantic + + # -Wconversion + #-Wunsafe-loop-optimizations # broken with C++11 loops + -Wunused + + -Wno-long-long + -Wpointer-arith + -Wformat=2 + -Wuninitialized + -Wcast-qual + -Wmissing-noreturn + -Wmissing-format-attribute + -Wredundant-decls + + -Werror=implicit + -Werror=nonnull + -Werror=init-self + -Werror=main + -Werror=missing-braces + -Werror=sequence-point + -Werror=return-type + -Werror=trigraphs + -Werror=array-bounds + -Werror=write-strings + -Werror=address + -Werror=int-to-pointer-cast + -Werror=pointer-to-int-cast + + -Wunused-variable + -Wunused-but-set-variable + -Wunused-parameter + + #-Weffc++ + -Wno-old-style-cast + #-Wno-sign-conversion + #-Wsign-conversion + + -Wshadow + + -Wstrict-null-sentinel + -Woverloaded-virtual + -Wsign-promo + -Wstack-protector + -Wstrict-aliasing + -Wstrict-aliasing=2 + -Wswitch-default + -Wswitch-enum + -Wswitch-unreachable + + -Wcast-align + -Wdisabled-optimization + #-Winline # produces warning on default implicit destructor + -Winvalid-pch + #-Wmissing-include-dirs + -Wpacked + -Wno-padded + -Wstrict-overflow + -Wstrict-overflow=2 + + -Wctor-dtor-privacy + -Wlogical-op + -Wnoexcept + -Woverloaded-virtual + # -Wundef + + -Wnon-virtual-dtor + -Wdelete-non-virtual-dtor + -Werror=non-virtual-dtor + -Werror=delete-non-virtual-dtor + + -Wno-sign-compare + + ########### + # GCC 6.1 # + ########### + + -Wnull-dereference + -fdelete-null-pointer-checks + -Wduplicated-cond + -Wmisleading-indentation + + #-Weverything + + ########################### + # Enabled by -Weverything # + ########################### + + #-Wdocumentation + #-Wdocumentation-unknown-command + #-Wfloat-equal + #-Wcovered-switch-default + + #-Wglobal-constructors + #-Wexit-time-destructors + #-Wmissing-variable-declarations + #-Wextra-semi + #-Wweak-vtables + #-Wno-source-uses-openmp + #-Wdeprecated + #-Wnewline-eof + #-Wmissing-prototypes + + #-Wno-c++98-compat + #-Wno-c++98-compat-pedantic + + ########################### + # Need to check if those are still valid today + ########################### + + #-Wimplicit-atomic-properties + #-Wmissing-declarations + #-Wmissing-prototypes + #-Wstrict-selector-match + #-Wundeclared-selector + #-Wunreachable-code + + # Not a warning, but enable link-time-optimization + # TODO: Check out modern CMake version of setting this flag + # https://cmake.org/cmake/help/latest/module/CheckIPOSupported.html + #-flto + + # Gives meaningful stack traces + -fno-omit-frame-pointer + -fno-optimize-sibling-calls +) + +# Flags above don't make sense for MSVC +if(MSVC) + set(MY_FLAGS) +endif() + 
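+# Each flag listed above is probed with check_cxx_compiler_flag() below, and only
+# the flags the active compiler accepts are attached to the warnings::all
+# INTERFACE target. A consuming target opts in by linking it, e.g. (the target
+# name here is only illustrative):
+#
+#   target_link_libraries(my_target PRIVATE warnings::all)
+#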
+include(CheckCXXCompilerFlag) + +add_library(warnings_all INTERFACE) +add_library(warnings::all ALIAS warnings_all) + +foreach(FLAG IN ITEMS ${MY_FLAGS}) + string(REPLACE "=" "-" FLAG_VAR "${FLAG}") + if(NOT DEFINED IS_SUPPORTED_${FLAG_VAR}) + check_cxx_compiler_flag("${FLAG}" IS_SUPPORTED_${FLAG_VAR}) + endif() + if(IS_SUPPORTED_${FLAG_VAR}) + target_compile_options(warnings_all INTERFACE ${FLAG}) + endif() +endforeach() diff --git a/contrib/NeRF-Editing/TetWild/cmake/geogram.cmake b/contrib/NeRF-Editing/TetWild/cmake/geogram.cmake new file mode 100644 index 00000000..ee18db5d --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/cmake/geogram.cmake @@ -0,0 +1,84 @@ +################################################################################ +# Find Geogram and build it as part of the current build +################################################################################ + +if(TARGET geogram) + return() +endif() + +################################################################################ + +if(TETWILD_EXTERNAL) + set(GEOGRAM_SEARCH_PATHS ${TETWILD_EXTERNAL}) +else() + set(GEOGRAM_SEARCH_PATHS + ${GEOGRAM_INSTALL_PREFIX} + "$ENV{GEOGRAM_INSTALL_PREFIX}" + "/usr/local/" + "$ENV{PROGRAMFILES}/Geogram" + "$ENV{PROGRAMW6432}/Geogram" + "$ENV{HOME}/.local/") +endif() + +find_path(GEOGRAM_SOURCE_INCLUDE_DIR + geogram/basic/common.h + PATHS ${GEOGRAM_SEARCH_PATHS} + PATH_SUFFIXES geogram/src/lib + ) + +set(GEOGRAM_ROOT ${GEOGRAM_SOURCE_INCLUDE_DIR}/../..) + +################################################################################ + +if(${CMAKE_SYSTEM_NAME} MATCHES "Windows") + set(VORPALINE_ARCH_64 TRUE CACHE BOOL "" FORCE) + set(VORPALINE_PLATFORM Win-vs-generic CACHE STRING "" FORCE) +elseif(${CMAKE_SYSTEM_NAME} MATCHES "Linux") + set(VORPALINE_PLATFORM Linux64-gcc-dynamic CACHE STRING "" FORCE) +elseif(${CMAKE_SYSTEM_NAME} MATCHES "Darwin") + set(VORPALINE_PLATFORM Darwin-clang-dynamic CACHE STRING "" FORCE) +endif() + +option(GEOGRAM_WITH_GRAPHICS "Viewers and geogram_gfx library" OFF) +option(GEOGRAM_WITH_LEGACY_NUMERICS "Legacy numerical libraries" OFF) +option(GEOGRAM_WITH_HLBFGS "Non-linear solver (Yang Liu's HLBFGS)" OFF) +option(GEOGRAM_WITH_TETGEN "Tetrahedral mesher (Hang Si's TetGen)" OFF) +option(GEOGRAM_WITH_TRIANGLE "Triangle mesher (Jonathan Shewchuk's triangle)" ON) +option(GEOGRAM_WITH_EXPLORAGRAM "Experimental code (hexahedral meshing vpipeline and optimal transport)" OFF) +option(GEOGRAM_WITH_LUA "Built-in LUA interpreter" OFF) +option(GEOGRAM_LIB_ONLY "Libraries only (no example programs/no viewer)" ON) +option(GEOGRAM_WITH_FPG "Predicate generator (Sylvain Pion's FPG)" OFF) +option(GEOGRAM_USE_SYSTEM_GLFW3 "Use the version of GLFW3 installed in the system if found" OFF) + +################################################################################ + +add_subdirectory(${GEOGRAM_ROOT} geogram) +target_include_directories(geogram SYSTEM PUBLIC ${GEOGRAM_SOURCE_INCLUDE_DIR}) + +################################################################################ + +if(${CMAKE_SYSTEM_NAME} MATCHES "Windows") + # remove warning for multiply defined symbols (caused by multiple + # instanciations of STL templates) + target_compile_options(geogram INTERFACE /wd4251) + + # remove all unused stuff from windows.h + target_compile_definitions(geogram INTERFACE -DWIN32_LEAN_AND_MEAN) + target_compile_definitions(geogram INTERFACE -DVC_EXTRALEAN) + + # do not define a min() and a max() macro, breaks + # std::min() and std::max() !! 
+ target_compile_definitions(geogram INTERFACE -DNOMINMAX) + + # we want M_PI etc... + target_compile_definitions(geogram INTERFACE -D_USE_MATH_DEFINES) + + if(NOT VORPALINE_BUILD_DYNAMIC) + # If we use static library, we link with the static C++ runtime. + foreach(config ${CMAKE_CONFIGURATION_TYPES}) + string(TOUPPER ${config} config) + string(REPLACE /MD /MT CMAKE_C_FLAGS_${config} "${CMAKE_C_FLAGS_${config}}") + string(REPLACE /MD /MT CMAKE_CXX_FLAGS_${config} "${CMAKE_CXX_FLAGS_${config}}") + endforeach() + endif() +endif() diff --git a/contrib/NeRF-Editing/TetWild/docs/Slide1.jpg b/contrib/NeRF-Editing/TetWild/docs/Slide1.jpg new file mode 100644 index 00000000..9ecb9fec Binary files /dev/null and b/contrib/NeRF-Editing/TetWild/docs/Slide1.jpg differ diff --git a/contrib/NeRF-Editing/TetWild/docs/teaser.png b/contrib/NeRF-Editing/TetWild/docs/teaser.png new file mode 100644 index 00000000..4fec9c4e Binary files /dev/null and b/contrib/NeRF-Editing/TetWild/docs/teaser.png differ diff --git a/contrib/NeRF-Editing/TetWild/include/tetwild/Args.h b/contrib/NeRF-Editing/TetWild/include/tetwild/Args.h new file mode 100644 index 00000000..8da5e015 --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/include/tetwild/Args.h @@ -0,0 +1,93 @@ +// This file is part of TetWild, a software for generating tetrahedral meshes. +// +// Copyright (C) 2018 Yixin Hu +// +// This Source Code Form is subject to the terms of the Mozilla Public License +// v. 2.0. If a copy of the MPL was not distributed with this file, You can +// obtain one at http://mozilla.org/MPL/2.0/. +// +// Created by Yixin Hu on 5/31/18. +// + +#pragma once + +#include + +namespace tetwild { + +// Global arguments controlling the behavior of TetWild +struct Args { + // Initial target edge-length at every vertex (in % of the bbox diagonal) + double initial_edge_len_rel = 1/20.0; + + // Initial absolute target edge-length at every vertex. Only used if -a is specified. + double initial_edge_len_abs = 0.0; + + // convenience function to get the correct absolute edge length depending + // on what was set by the CLI reader + double getAbsoluteEdgeLength(const double bbox_diag) const + { + return initial_edge_len_abs != 0.0 ? initial_edge_len_abs + : initial_edge_len_rel*bbox_diag; + } + + // convenience function to get the correct relative edge length depending + // on what was set by the CLI reader + double getRelativeEdgeLength(const double bbox_diag) const + { + return initial_edge_len_abs != 0.0 ? initial_edge_len_abs/bbox_diag + : initial_edge_len_rel; + } + + // Target epsilon (in % of the bbox diagonal) + double eps_rel = 1e-3; + + ////////////////////// + // Advanced options // + ////////////////////// + + // Explicitly specify a sampling distance for triangles (in % of the bbox diagonal) + int sampling_dist_rel = -1; + + // Run the algorithm in stage (as explain in p.8 of the paper) + // If the first stage didn't succeed, call again with `stage = 2`, etc. + int stage = 1; + + // Multiplier for resizing the target-edge length around bad-quality vertices + // See MeshRefinement::updateScalarField() for more details + double adaptive_scalar = 0.6; + + // Energy threshold + // If the max tet energy is below this threshold, the mesh optimization process is stopped. + // Also used to determine where to resize the scalar field (if a tet incident to a vertex has larger energy than this threshold, then resize around this vertex). 
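+    // (For scale: the conformal AMIPS energy used by TetWild attains its minimum
+    // of 3 on a regular tetrahedron, so the default of 10 is a fairly permissive
+    // quality bound.)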
+ double filter_energy_thres = 10; + + // Threshold on the energy delta (avg and max) below which to rescale the target edge length scalar field + double delta_energy_thres = 0.1; + + // Maximum number of mesh optimization iterations + int max_num_passes = 80; + + // Sample points at voxel centers for initial Delaunay triangulation + bool not_use_voxel_stuffing = false; + + // Use Laplacian smoothing on the faces/vertices covering an open boundary after the mesh optimization step (post-processing) + bool smooth_open_boundary = false; + + // Target number of vertices (minimum), within 5% of tolerance + int target_num_vertices = -1; + + // Background mesh for the edge length sizing field + std::string background_mesh = ""; + + // [debug] logging + bool write_csv_file = true; + std::string working_dir = ""; + std::string postfix = "_"; + std::string csv_file = ""; + int save_mid_result = -1; // save intermediate result + + bool is_quiet = false; +}; + +} // namespace tetwild diff --git a/contrib/NeRF-Editing/TetWild/include/tetwild/Exception.h b/contrib/NeRF-Editing/TetWild/include/tetwild/Exception.h new file mode 100644 index 00000000..cf4fe18f --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/include/tetwild/Exception.h @@ -0,0 +1,26 @@ +// This file is part of TetWild, a software for generating tetrahedral meshes. +// +// Copyright (C) 2018 Jeremie Dumas +// +// This Source Code Form is subject to the terms of the Mozilla Public License +// v. 2.0. If a copy of the MPL was not distributed with this file, You can +// obtain one at http://mozilla.org/MPL/2.0/. +// +// Created by Jeremie Dumas on 09/04/18. +// + +#pragma once + +#include +#include + +namespace tetwild { + +class TetWildError : public std::runtime_error { +public: + explicit TetWildError(const std::string& what_arg) + : std::runtime_error(what_arg) + { } +}; + +} // namespace tetwild diff --git a/contrib/NeRF-Editing/TetWild/include/tetwild/Logger.h b/contrib/NeRF-Editing/TetWild/include/tetwild/Logger.h new file mode 100644 index 00000000..f74002a5 --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/include/tetwild/Logger.h @@ -0,0 +1,44 @@ +// This file is part of TetWild, a software for generating tetrahedral meshes. +// +// Copyright (C) 2018 Jeremie Dumas +// +// This Source Code Form is subject to the terms of the Mozilla Public License +// v. 2.0. If a copy of the MPL was not distributed with this file, You can +// obtain one at http://mozilla.org/MPL/2.0/. +// +// Created by Jeremie Dumas on 09/04/18. +// + +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace tetwild { + +struct Logger { + static std::shared_ptr logger_; + + // By default, write to stdout, but don't write to any file + static void init(bool use_cout = true, const std::string &filename = "", bool truncate = true); +}; + +// Retrieve current logger, or create one if not available +inline spdlog::async_logger & logger() { + if (!Logger::logger_) { + Logger::init(); + } + return *Logger::logger_; +} + +template +[[noreturn]] void log_and_throw(T x) { + logger().error(x); + throw TetWildError(x); +} + +} // namespace tetwild diff --git a/contrib/NeRF-Editing/TetWild/include/tetwild/tetwild.h b/contrib/NeRF-Editing/TetWild/include/tetwild/tetwild.h new file mode 100644 index 00000000..f89d8396 --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/include/tetwild/tetwild.h @@ -0,0 +1,44 @@ +// This file is part of TetWild, a software for generating tetrahedral meshes. 
+// +// Copyright (C) 2018 Yixin Hu +// +// This Source Code Form is subject to the terms of the Mozilla Public License +// v. 2.0. If a copy of the MPL was not distributed with this file, You can +// obtain one at http://mozilla.org/MPL/2.0/. +// +// Created by Yixin Hu on 5/31/18. +// + +#pragma once + +#include +#include + +namespace tetwild { + +/// +/// Robust tetrahedralization of an input triangle soup, with an envelope constraint. +/// +/// @param[in] VI { #VI x 3 input mesh vertices } +/// @param[in] FI { #FI x 3 input mesh triangles } +/// @param[out] VO { #VO x 3 output mesh vertices } +/// @param[out] TO { #TO x 4 output mesh tetrahedra } +/// @param[out] AO { #TO x 1 array of min dihedral angle over each tet } +/// @param[in] args { Extra arguments controlling the behavior of TetWild } +/// +void tetrahedralization(const Eigen::MatrixXd &VI, const Eigen::MatrixXi &FI, + Eigen::MatrixXd &VO, Eigen::MatrixXi &TO, Eigen::VectorXd &AO, const Args &args = Args()); + +/// +/// Extract the boundary facets of a triangle mesh, removing unreferenced vertices +/// +/// @param[in] VI { #VI x 3 input mesh vertices } +/// @param[in] TI { #TI x 4 input mesh tetrahedra } +/// @param[out] VS { #VS x 3 output mesh vertices } +/// @param[out] FS { #FS x 3 output mesh triangles } +/// +void extractSurfaceMesh(const Eigen::MatrixXd &VI, const Eigen::MatrixXi &TI, + Eigen::MatrixXd &VS, Eigen::MatrixXi &FS); + +} // namespace tetwild + diff --git a/contrib/NeRF-Editing/TetWild/src/ispc/CMakeLists.txt b/contrib/NeRF-Editing/TetWild/src/ispc/CMakeLists.txt new file mode 100644 index 00000000..85ae71f4 --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/src/ispc/CMakeLists.txt @@ -0,0 +1,27 @@ +set(tetwild_ispc__internal_dir ${CMAKE_CURRENT_LIST_DIR} CACHE INTERNAL "") + +function(ispc_add_energy target_name) + # Compilation flags for ISPC + if(CMAKE_BUILD_TYPE MATCHES Release) + set(TETWILD_ISPC_FLAGS "") + else() + set(TETWILD_ISPC_FLAGS "-g") + endif() + + add_custom_command( + COMMAND + ispc --pic --target=host ${TETWILD_ISPC_FLAGS} + ${tetwild_ispc__internal_dir}/energy.ispc + -h ${tetwild_ispc__internal_dir}/energy.h + -o ${CMAKE_CURRENT_BINARY_DIR}/energy_ispc.o + DEPENDS + ${tetwild_ispc__internal_dir}/energy.ispc + OUTPUT + ${tetwild_ispc__internal_dir}/energy.h + ${CMAKE_CURRENT_BINARY_DIR}/energy_ispc.o + ) + + target_sources(${target_name} PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/energy_ispc.o) + target_compile_definitions(${target_name} PUBLIC -DTETWILD_WITH_ISPC) + target_include_directories(${target_name} PUBLIC ${tetwild_ispc__internal_dir}/..) +endfunction() diff --git a/contrib/NeRF-Editing/TetWild/src/ispc/energy.ispc b/contrib/NeRF-Editing/TetWild/src/ispc/energy.ispc new file mode 100644 index 00000000..0107bd3d --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/src/ispc/energy.ispc @@ -0,0 +1,65 @@ +// Input: +// Each V is a vector of size count. +// count is the number of tetrahedra to process +// Output: +// E is a vector of size count, it must be preallocated + +export void energy_ispc( + uniform double V1_x[], + uniform double V1_y[], + uniform double V1_z[], + uniform double V2_x[], + uniform double V2_y[], + uniform double V2_z[], + uniform double V3_x[], + uniform double V3_y[], + uniform double V3_z[], + uniform double V4_x[], + uniform double V4_y[], + uniform double V4_z[], + uniform double E[], + uniform int count) + { + foreach (index = 0 ... 
count) + { + // Load the appropriate tetrahedron vertices and compute the energy + double helper_0[12]; + helper_0[0] = V1_x[index]; + helper_0[1] = V1_y[index]; + helper_0[2] = V1_z[index]; + helper_0[3] = V2_x[index]; + helper_0[4] = V2_y[index]; + helper_0[5] = V2_z[index]; + helper_0[6] = V3_x[index]; + helper_0[7] = V3_y[index]; + helper_0[8] = V3_z[index]; + helper_0[9] = V4_x[index]; + helper_0[10] = V4_y[index]; + helper_0[11] = V4_z[index]; + double helper_1 = helper_0[2]; + double helper_2 = helper_0[11]; + double helper_3 = helper_0[0]; + double helper_4 = helper_0[3]; + double helper_5 = helper_0[9]; + double helper_6 = 0.577350269189626*helper_3 - 1.15470053837925*helper_4 + 0.577350269189626*helper_5; + double helper_7 = helper_0[1]; + double helper_8 = helper_0[4]; + double helper_9 = helper_0[7]; + double helper_10 = helper_0[10]; + double helper_11 = 0.408248290463863*helper_10 + 0.408248290463863*helper_7 + 0.408248290463863*helper_8 - 1.22474487139159*helper_9; + double helper_12 = 0.577350269189626*helper_10 + 0.577350269189626*helper_7 - 1.15470053837925*helper_8; + double helper_13 = helper_0[6]; + double helper_14 = -1.22474487139159*helper_13 + 0.408248290463863*helper_3 + 0.408248290463863*helper_4 + 0.408248290463863*helper_5; + double helper_15 = helper_0[5]; + double helper_16 = helper_0[8]; + double helper_17 = 0.408248290463863*helper_1 + 0.408248290463863*helper_15 - 1.22474487139159*helper_16 + 0.408248290463863*helper_2; + double helper_18 = 0.577350269189626*helper_1 - 1.15470053837925*helper_15 + 0.577350269189626*helper_2; + double helper_19 = 0.5*helper_13 + 0.5*helper_4; + double helper_20 = 0.5*helper_8 + 0.5*helper_9; + double helper_21 = 0.5*helper_15 + 0.5*helper_16; + double result_0 = -(helper_1*(-1.5*helper_1 + 0.5*helper_2 + helper_21) + helper_10*(-1.5*helper_10 + helper_20 + 0.5*helper_7) + helper_13*(-1.5*helper_13 + 0.5*helper_3 + 0.5*helper_4 + 0.5*helper_5) + helper_15*(0.5*helper_1 - 1.5*helper_15 + 0.5*helper_16 + 0.5*helper_2) + helper_16*(0.5*helper_1 + 0.5*helper_15 - 1.5*helper_16 + 0.5*helper_2) + helper_2*(0.5*helper_1 - 1.5*helper_2 + helper_21) + helper_3*(helper_19 - 1.5*helper_3 + 0.5*helper_5) + helper_4*(0.5*helper_13 + 0.5*helper_3 - 1.5*helper_4 + 0.5*helper_5) + helper_5*(helper_19 + 0.5*helper_3 - 1.5*helper_5) + helper_7*(0.5*helper_10 + helper_20 - 1.5*helper_7) + helper_8*(0.5*helper_10 + 0.5*helper_7 - 1.5*helper_8 + 0.5*helper_9) + helper_9*(0.5*helper_10 + 0.5*helper_7 + 0.5*helper_8 - 1.5*helper_9))*pow(pow((helper_1 - helper_2)*(helper_11*helper_6 - helper_12*helper_14) - (-helper_10 + helper_7)*(-helper_14*helper_18 + helper_17*helper_6) + (helper_3 - helper_5)*(-helper_11*helper_18 + helper_12*helper_17), (double)2), (double)-0.333333333333333); + + // Write the result back + E[index] = result_0; + } +} diff --git a/contrib/NeRF-Editing/TetWild/src/main.cpp b/contrib/NeRF-Editing/TetWild/src/main.cpp new file mode 100644 index 00000000..30983324 --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/src/main.cpp @@ -0,0 +1,195 @@ +// This file is part of TetWild, a software for generating tetrahedral meshes. +// +// Copyright (C) 2018 Yixin Hu +// +// This Source Code Form is subject to the terms of the Mozilla Public License +// v. 2.0. If a copy of the MPL was not distributed with this file, You can +// obtain one at http://mozilla.org/MPL/2.0/. 
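+//
+// Illustrative sketch (not part of the upstream sources): the library API declared
+// in tetwild/tetwild.h can also be driven directly, along the lines of
+//
+//   Eigen::MatrixXd VI, VO;  Eigen::MatrixXi FI, TO;  Eigen::VectorXd AO;
+//   igl::read_triangle_mesh("input.obj", VI, FI);   // hypothetical input file
+//   tetwild::Args args;                             // defaults as in Args.h
+//   tetwild::tetrahedralization(VI, FI, VO, TO, AO, args);
+//
+// The rest of this file wires that same call to a CLI11-based command-line front end.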
+ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +using namespace tetwild; + +namespace tetwild { + void extractFinalTetmesh(MeshRefinement& MR, Eigen::MatrixXd &V_out, Eigen::MatrixXi &T_out, Eigen::VectorXd &A_out, const Args &args, const State &state); +} // namespace tetwild + +void saveFinalTetmesh(const std::string &output_volume, const std::string &output_surface, + const Eigen::MatrixXd &V, const Eigen::MatrixXi &T, const Eigen::VectorXd &A) +{ + logger().debug("Writing mesh to {}...", output_volume); + std::string output_format = output_volume.substr(output_volume.size() - 4, 4); + if (output_format == "mesh") { + std::ofstream f(output_volume); + f.precision(std::numeric_limits::digits10 + 1); + f << "MeshVersionFormatted 1" << std::endl; + f << "Dimension 3" << std::endl; + + f << "Vertices" << std::endl << V.rows() << std::endl; + for (int i = 0; i < V.rows(); i++) + f << V(i, 0) << " " << V(i, 1) << " " << V(i, 2) << " " << 0 << std::endl; + f << "Triangles" << std::endl << 0 <& ops, + Eigen::MatrixXd &VO, Eigen::MatrixXi &TO, Eigen::VectorXd &AO, + const Args &args = Args()) +{ + State state(args, VI); + GEO::Mesh sf, b; + MeshRefinement MR(sf, b, args, state); + MR.deserialization(VI, FI, slz_file); + +// MR.is_dealing_unrounded = true; + MR.refine(state.ENERGY_AMIPS, ops, false, true); + + extractFinalTetmesh(MR, VO, TO, AO, args, state); //do winding number and output the tetmesh +} + +#include +#include +int main(int argc, char *argv[]) { + int log_level = 1; // debug + std::string log_filename; + std::string input_surface; + std::string output_volume; + std::string output_surface; + std::string slz_file; + Args args; + + CLI::App app{"RobustTetMeshing"}; + app.add_option("input,--input", input_surface, "Input surface mesh INPUT in .off/.obj/.stl/.ply format. (string, required)")->required(); + app.add_option("output,--output", output_volume, "Output tetmesh OUTPUT in .msh or .mesh format. (string, optional, default: input_file+postfix+'.msh')"); + app.add_option("--postfix", args.postfix, "Postfix P for output files. (string, optional, default: '_')"); + auto absolute = app.add_option("-a,--ideal-absolute-edge-length", args.initial_edge_len_abs, "Absolute edge length (not scaled by bbox). -a and -l cannot both be given as arguments."); + auto relative = app.add_option("-l,--ideal-edge-length", args.initial_edge_len_rel, "ideal_edge_length = diag_of_bbox * L. (double, optional, default: 0.05)"); + relative->excludes(absolute); + app.add_option("-e,--epsilon", args.eps_rel, "epsilon = diag_of_bbox * EPS. (double, optional, default: 1e-3)"); + app.add_option("--stage", args.stage, "Run pipeline in stage STAGE. (integer, optional, default: 1)"); + app.add_option("--filter-energy", args.filter_energy_thres, "Stop mesh improvement when the maximum energy is smaller than ENERGY. (double, optional, default: 10)"); + app.add_option("--max-pass", args.max_num_passes, "Do PASS mesh improvement passes in maximum. (integer, optional, default: 80)"); + app.add_option("--targeted-num-v", args.target_num_vertices, "Output tetmesh that contains TV vertices. (integer, optional, tolerance: 5%)"); + app.add_option("--bg-mesh", args.background_mesh, "Background tetmesh BGMESH in .msh format for applying sizing field. 
(string, optional)"); + app.add_option("--log", log_filename, "Log info to given file."); + app.add_option("--level", log_level, "Log level (0 = most verbose, 6 = off)."); + app.add_option("--save-mid-result", args.save_mid_result, "Get result without winding number: --save-mid-result 2"); + + app.add_flag("--no-voxel", args.not_use_voxel_stuffing, "Use voxel stuffing before BSP subdivision."); + app.add_flag("--is-laplacian", args.smooth_open_boundary, "Do Laplacian smoothing for the surface of output on the holes of input (optional)"); + app.add_flag("-q,--is-quiet", args.is_quiet, "Mute console output. (optional)"); + + try { + app.parse(argc, argv); + } catch (const CLI::ParseError &e) { + return app.exit(e); + } + + Logger::init(!args.is_quiet, log_filename); + log_level = std::max(0, std::min(6, log_level)); + spdlog::set_level(static_cast(log_level)); + spdlog::flush_every(std::chrono::seconds(3)); + + //initialization + GEO::initialize(); + if(slz_file != "") { + args.working_dir = input_surface.substr(0, slz_file.size() - 4); + } else { + if(output_volume.empty()) + args.working_dir = input_surface.substr(0, input_surface.size() - 4); + else + args.working_dir = output_volume; + } + + if(args.csv_file.empty()) { + args.csv_file = args.working_dir + args.postfix + ".csv"; + } + + if(output_volume.empty()) { + output_volume = args.working_dir + args.postfix + ".msh"; + } + output_surface = args.working_dir + args.postfix+"_sf.obj"; + + if(args.is_quiet) { + args.write_csv_file = false; + } + + //do tetrahedralization + Eigen::MatrixXd VI, VO; + Eigen::MatrixXi FI, TO; + Eigen::VectorXd AO; +// igl::read_triangle_mesh(input_surface, VI, FI); + GEO::Mesh input; + GEO::mesh_load(input_surface, input); + VI.resize(input.vertices.nb(), 3); + for(int i=0;i +// +// This Source Code Form is subject to the terms of the Mozilla Public License +// v. 2.0. If a copy of the MPL was not distributed with this file, You can +// obtain one at http://mozilla.org/MPL/2.0/. +// +// Created by yihu on 8/22/17. +// + +#ifndef NEW_GTET_BSPELEMENTS_H +#define NEW_GTET_BSPELEMENTS_H +#include +#include + +namespace tetwild { + +class BSPEdge{ +public: + std::vector vertices; + std::unordered_set conn_faces; + + BSPEdge(){} + BSPEdge(int v1, int v2){ + vertices={v1, v2}; + } +}; + +class BSPFace{ +public: + std::vector vertices; + std::vector edges; + std::unordered_set conn_nodes; + std::unordered_set div_faces; + + int matched_f_id=-1; +}; + +class BSPtreeNode{ +public: + bool is_leaf=false; + std::vector faces; + std::unordered_set div_faces; +}; + +} // namespace tetwild + +#endif //NEW_GTET_BSPELEMENTS_H diff --git a/contrib/NeRF-Editing/TetWild/src/tetwild/BSPSubdivision.cpp b/contrib/NeRF-Editing/TetWild/src/tetwild/BSPSubdivision.cpp new file mode 100644 index 00000000..8f24ddbf --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/src/tetwild/BSPSubdivision.cpp @@ -0,0 +1,384 @@ +// This file is part of TetWild, a software for generating tetrahedral meshes. +// +// Copyright (C) 2018 Yixin Hu +// +// This Source Code Form is subject to the terms of the Mozilla Public License +// v. 2.0. If a copy of the MPL was not distributed with this file, You can +// obtain one at http://mozilla.org/MPL/2.0/. +// +// Created by Yixin Hu on 4/3/17. 
+// + +#include +#include +#include + +namespace tetwild { + +void BSPSubdivision::init() { + for (int old_n_id = 0; old_n_id < MC.bsp_nodes.size(); old_n_id++) { + if (MC.bsp_nodes[old_n_id].div_faces.size() == 0) { + MC.bsp_nodes[old_n_id].is_leaf = true; + continue; + } + processing_n_ids.push(old_n_id); + } + float nf=0; + for (int old_f_id = 0; old_f_id < MC.bsp_faces.size(); old_f_id++) { + if (MC.bsp_faces[old_f_id].div_faces.size() == 0) { + continue; + } + nf+=1; + } + logger().debug("# nodes need subdivision = {}/{}/{}", nf, processing_n_ids.size(), MC.bsp_nodes.size()); +} + +void BSPSubdivision::subdivideBSPNodes() { + std::vector &nodes = MC.bsp_nodes; + std::vector &faces = MC.bsp_faces; + std::vector &edges = MC.bsp_edges; + std::vector &vertices = MC.bsp_vertices; + const std::vector &div_vertices = MC.m_vertices; + const std::vector> &div_faces = MC.m_faces; + + while (!processing_n_ids.empty()) { + int old_n_id = processing_n_ids.front(); + processing_n_ids.pop(); + + ///re-assign divfaces + int cnt_pos=0, cnt_neg=0; + Plane_3 pln; + std::unordered_set v_ids; + std::unordered_map v_sides; + std::vector rm_df_ids; + int on_df_id; + bool is_divide=false; + for(auto it=nodes[old_n_id].div_faces.begin(); it!=nodes[old_n_id].div_faces.end();it++) { + pln = Plane_3(div_vertices[div_faces[*it][0]], + div_vertices[div_faces[*it][1]], + div_vertices[div_faces[*it][2]]); + ///map sides for vertices + v_ids.clear(); + for (int i = 0; i < nodes[old_n_id].faces.size(); i++) { + for (int j = 0; j < faces[nodes[old_n_id].faces[i]].vertices.size(); j++) { + v_ids.insert(faces[nodes[old_n_id].faces[i]].vertices[j]); + } + } + v_sides.clear(); + calVertexSides(pln, v_ids, vertices, v_sides); + cnt_pos=0; + cnt_neg=0; + for (auto it = v_sides.begin(); it != v_sides.end(); it++) { + if (it->second == V_POS) + cnt_pos++; + if (it->second == V_NEG) + cnt_neg++; + } + if (cnt_pos == 0 || cnt_neg == 0) { //fixed//but how could it happen?? 
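+                    // Every vertex of this node lies on one side of (or on) the candidate
+                    // plane, so this dividing face cannot split the node: provisionally mark
+                    // the node as a leaf, discard the face, and keep scanning div_faces.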
+ nodes[old_n_id].is_leaf = true; + rm_df_ids.push_back(*it); + } + else{ + is_divide=true; + on_df_id=*it; + break; + } + } + if(!is_divide) + continue; + ///from here, the node would definitely be subdivided + BSPtreeNode pos_node, neg_node; + BSPFace on_face; + nodes.push_back(neg_node); + int new_n_id = nodes.size() - 1; + std::vector new_n_ids = {old_n_id, new_n_id}; + + for (auto it = nodes[old_n_id].div_faces.begin(); it != nodes[old_n_id].div_faces.end(); it++) { + if(std::find(rm_df_ids.begin(), rm_df_ids.end(), *it)!=rm_df_ids.end()) + continue; + if (*it == on_df_id) { + on_face.div_faces.insert(*it); + continue; + } + + int side = divfaceSide(pln, div_faces[*it], div_vertices); + if (side == DIVFACE_POS) + pos_node.div_faces.insert(*it); + else if (side == DIVFACE_NEG) + neg_node.div_faces.insert(*it); + else if (side == DIVFACE_ON) + on_face.div_faces.insert(*it); + else if (side == DIVFACE_CROSS) { + pos_node.div_faces.insert(*it); + neg_node.div_faces.insert(*it); + } + } + + ///split nodes + for (int i = 0; i < nodes[old_n_id].faces.size(); i++) { + int old_f_id = nodes[old_n_id].faces[i]; + + ///check if need splitting + int cnt_pos = 0, cnt_neg = 0, cnt_on = 0; + for (int j = 0; j < faces[old_f_id].vertices.size(); j++) { + if (v_sides[faces[old_f_id].vertices[j]] == V_POS) + cnt_pos++; + else if (v_sides[faces[old_f_id].vertices[j]] == V_NEG) + cnt_neg++; + else + cnt_on++; + } + if (cnt_pos + cnt_on == faces[old_f_id].vertices.size()) { + pos_node.faces.push_back(old_f_id); + if (cnt_on == 0) { + continue; + } + } + if (cnt_neg + cnt_on == faces[old_f_id].vertices.size()) { + neg_node.faces.push_back(old_f_id); + if (cnt_on == 0) { + continue; + } + } + + ///splitting... + BSPFace pos_face, neg_face; + BSPEdge on_edge; + int new_f_id = faces.size(); + std::vector new_f_ids = {old_f_id, new_f_id}; + + bool is_connected=false; + for (int j = 0; j < faces[old_f_id].edges.size(); j++) { + int old_e_id = faces[old_f_id].edges[j]; + + ///check if need splitting + std::vector pos_vs, neg_vs, on_vs; + for (int j = 0; j < edges[old_e_id].vertices.size(); j++) { + if (v_sides[edges[old_e_id].vertices[j]] == V_POS) + pos_vs.push_back(edges[old_e_id].vertices[j]); + else if (v_sides[edges[old_e_id].vertices[j]] == V_NEG) + neg_vs.push_back(edges[old_e_id].vertices[j]); + else + on_vs.push_back(edges[old_e_id].vertices[j]); + } + if (on_vs.size() == 2) { + if (std::find(on_face.edges.begin(), on_face.edges.end(), old_e_id) == on_face.edges.end()) + on_face.edges.push_back(old_e_id); + is_connected=true; + } + if(is_connected) + continue; + if (pos_vs.size() + on_vs.size() == 2) { + pos_face.edges.push_back(old_e_id); + if (on_vs.size() > 0 && + std::find(on_edge.vertices.begin(), on_edge.vertices.end(), on_vs[0]) == on_edge.vertices.end()) + on_edge.vertices.push_back(on_vs[0]); + continue; + } + if (neg_vs.size() + on_vs.size() == 2) { + neg_face.edges.push_back(old_e_id); + if (on_vs.size() > 0 && + std::find(on_edge.vertices.begin(), on_edge.vertices.end(), on_vs[0]) == on_edge.vertices.end()) + on_edge.vertices.push_back(on_vs[0]); + continue; + } + + ///splitting... 
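+                        // This edge straddles the cutting plane: split it at the segment/plane
+                        // intersection. The old edge slot is reused for the positive half, a new
+                        // slot is appended for the negative half, and the intersection point is
+                        // added as a new vertex shared by both halves and by the on-plane edge.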
+ BSPEdge pos_edge, neg_edge; + edges.push_back(neg_edge); + int new_e_id = edges.size() - 1; + std::vector new_e_ids = {old_e_id, new_e_id}; + + pos_edge.vertices.push_back(pos_vs[0]); + neg_edge.vertices.push_back(neg_vs[0]); + + int new_v_id = 0; + Segment_3 seg(vertices[edges[old_e_id].vertices[0]], vertices[edges[old_e_id].vertices[1]]); + auto result = intersection(seg, pln); + if (result) { + const Point_3 *p = boost::get(&*result); + vertices.push_back(*p); + + new_v_id = vertices.size() - 1; + on_edge.vertices.push_back(new_v_id); + pos_edge.vertices.push_back(new_v_id); + neg_edge.vertices.push_back(new_v_id); + + v_sides[new_v_id] = V_ON;//fixed + } else { + log_and_throw("error cal p!"); + } + + ///add edges + pos_edge.conn_faces = edges[old_e_id].conn_faces; + neg_edge.conn_faces = edges[old_e_id].conn_faces; + for (auto it = edges[old_e_id].conn_faces.begin(); it != edges[old_e_id].conn_faces.end(); it++) { + if (*it == old_f_id) + continue; + faces[*it].edges.push_back(new_e_id); + faces[*it].vertices.push_back(new_v_id); + } + edges[new_e_ids[0]] = pos_edge;//if get here, it means that old_edge has been cut into 2 + edges[new_e_ids[1]] = neg_edge; + + ///add edges for faces + pos_face.edges.push_back(new_e_ids[0]); + neg_face.edges.push_back(new_e_ids[1]); + }//split one face end + + if (pos_face.edges.size() == 0 || neg_face.edges.size() == 0)//connected pos/neg + continue; + + ///from now, the face would definitely be subdivided + faces.push_back(neg_face);//have to do push_back here!!! Otherwise would producing empty faces!! + + ///clean conn_faces for neg_face's edges//fixed + for(int j=0;j tmp_df_ids = faces[old_f_id].div_faces; + for(auto it=tmp_df_ids.begin(); it!=tmp_df_ids.end();it++) { + int side = divfaceSide(pln, div_faces[*it], div_vertices); + if (side == DIVFACE_POS) + pos_face.div_faces.insert(*it); + else if (side == DIVFACE_NEG) + neg_face.div_faces.insert(*it); + else if(side==DIVFACE_CROSS){ + pos_face.div_faces.insert(*it); + neg_face.div_faces.insert(*it); + } + } + pos_face.matched_f_id=faces[old_f_id].matched_f_id; + neg_face.matched_f_id=faces[old_f_id].matched_f_id; + + faces[new_f_ids[0]] = pos_face; + faces[new_f_ids[1]] = neg_face; + + for(int j=0;j& v_ids, const std::vector& vs, + std::unordered_map& v_sides){ + for(auto it=v_ids.begin();it!=v_ids.end();it++){ + CGAL::Oriented_side side=pln.oriented_side(vs[*it]); + switch (side) { + case CGAL::ON_ORIENTED_BOUNDARY: + v_sides[*it]=V_ON; + break; + case CGAL::ON_POSITIVE_SIDE: + v_sides[*it]=V_POS; + break; + case CGAL::ON_NEGATIVE_SIDE: + v_sides[*it]=V_NEG; + } + } +} + +int BSPSubdivision::divfaceSide(const Plane_3& pln, const std::array& p_ids, + const std::vector& ps) { + int cnt_pos = 0, cnt_on = 0, cnt_neg = 0; + for (int i = 0; i < p_ids.size(); i++) { + switch (pln.oriented_side(ps[p_ids[i]])) { + case CGAL::ON_ORIENTED_BOUNDARY: + cnt_on++; + break; + case CGAL::ON_POSITIVE_SIDE: + cnt_pos++; + break; + case CGAL::ON_NEGATIVE_SIDE: + cnt_neg++; + } + } + + if (cnt_pos > 0 && cnt_neg > 0) + return DIVFACE_CROSS; + if (cnt_on == p_ids.size()) + return DIVFACE_ON; + if (cnt_neg == 0) + return DIVFACE_POS; + if (cnt_pos == 0) + return DIVFACE_NEG; + + return -1; +} + +void BSPSubdivision::getVertices(BSPFace& face){ + std::unordered_set vs; + for(int i=0;i +// +// This Source Code Form is subject to the terms of the Mozilla Public License +// v. 2.0. If a copy of the MPL was not distributed with this file, You can +// obtain one at http://mozilla.org/MPL/2.0/. 
+// +// Created by Yixin Hu on 4/3/17. +// + +#ifndef NEW_GTET_BSPSUBDIVISION_H +#define NEW_GTET_BSPSUBDIVISION_H + +#include +#include +#include +#include +#include +#include + +namespace tetwild { + +class BSPSubdivision { +public: + MeshConformer& MC; + BSPSubdivision(MeshConformer& mc): MC(mc){} + void init(); + + std::queue processing_n_ids; + void subdivideBSPNodes(); + + const int V_POS=0; + const int V_NEG=1; + const int V_ON=2; + void calVertexSides(const Plane_3& pln, const std::unordered_set& v_ids, const std::vector& vs, + std::unordered_map& v_sides); + + const int DIVFACE_POS=0; + const int DIVFACE_NEG=1; + const int DIVFACE_ON=2; + const int DIVFACE_CROSS=3; + int divfaceSide(const Plane_3& pln, const std::array& p_ids, const std::vector& ps); + void getVertices(BSPFace& face); + +}; + +} // namespace tetwild + +#endif //NEW_GTET_BSPSUBDIVISION_H diff --git a/contrib/NeRF-Editing/TetWild/src/tetwild/CGALTypes.h b/contrib/NeRF-Editing/TetWild/src/tetwild/CGALTypes.h new file mode 100644 index 00000000..a96359dc --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/src/tetwild/CGALTypes.h @@ -0,0 +1,54 @@ +// This file is part of TetWild, a software for generating tetrahedral meshes. +// +// Copyright (C) 2018 Jeremie Dumas +// +// This Source Code Form is subject to the terms of the Mozilla Public License +// v. 2.0. If a copy of the MPL was not distributed with this file, You can +// obtain one at http://mozilla.org/MPL/2.0/. +// +// Created by Jeremie Dumas on 09/04/18. +// + +#pragma once + +#include +#include +#include +#include + +namespace tetwild { + +typedef CGAL::Exact_predicates_exact_constructions_kernel K; +typedef K::Point_2 Point_2; +typedef K::Segment_2 Segment_2; +typedef K::Line_2 Line_2; +typedef K::Iso_rectangle_2 Iso_rectangle_2; +typedef K::Triangle_2 Triangle_2; +typedef K::Intersect_2 Intersect_2; +//typedef CGAL::Polygon_2 Polygon_2; + +typedef K::Point_3 Point_3; +typedef K::Vector_3 Vector_3; +typedef K::Segment_3 Segment_3; +typedef K::Line_3 Line_3; +typedef K::Plane_3 Plane_3; +typedef K::Triangle_3 Triangle_3; +typedef K::Intersect_3 Intersect_3; +typedef K::Tetrahedron_3 Tetrahedron_3; +typedef K::Direction_3 Direction_3; + +typedef CGAL::Exact_predicates_inexact_constructions_kernel Kf; +typedef Kf::Point_3 Point_3f; +typedef Kf::Vector_3 Vector_3f; +typedef Kf::Plane_3 Plane_3f; +typedef Kf::Triangle_3 Triangle_3f; +typedef Kf::Segment_3 Segment_3f; +typedef Kf::Line_3 Line_3f; + +typedef CGAL::Epeck::FT CGAL_FT; +//#include +//typedef CGAL::Simple_cartesian::FT CGAL_FT; + +typedef K::Iso_cuboid_3 Bbox_3; + +} // namespace tetwild diff --git a/contrib/NeRF-Editing/TetWild/src/tetwild/Common.cpp b/contrib/NeRF-Editing/TetWild/src/tetwild/Common.cpp new file mode 100644 index 00000000..22fc8980 --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/src/tetwild/Common.cpp @@ -0,0 +1,257 @@ +// This file is part of TetWild, a software for generating tetrahedral meshes. +// +// Copyright (C) 2018 Jeremie Dumas +// +// This Source Code Form is subject to the terms of the Mozilla Public License +// v. 2.0. If a copy of the MPL was not distributed with this file, You can +// obtain one at http://mozilla.org/MPL/2.0/. +// +// Created by Jeremie Dumas on 09/04/18. 
+// + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace tetwild { + +void addRecord(const MeshRecord& record, const Args &args, const State &state) { + if (!args.write_csv_file) + return; + static bool first_time = true; + std::ofstream f; + if (first_time) { + f.open(state.stat_file); + first_time = false; + } else { + f.open(state.stat_file, std::ios::app); + } + f << record.op << "," << record.timing << "," << record.n_v << "," << record.n_t << "," + << record.min_min_d_angle << "," << record.avg_min_d_angle << "," + << record.max_max_d_angle << "," << record.avg_max_d_angle << "," + << record.max_energy << "," << record.avg_energy << "\n"; + f.close(); +} + +void pausee(){ + logger().debug("Is pausing... (Enter '0' to exit and other characters to continue.)"); + char c; + std::cin>>c; + if(c=='0') + exit(0); +} + +bool isHaveCommonEle(const std::unordered_set& v1, const std::unordered_set& v2) { +#if 0 + for (auto it = v1.begin(); it != v1.end(); it++) + if(std::find(v2.begin(), v2.end(), *it)!=v2.end()) + return true; +#else + if (v2.size() < v1.size()) { + return isHaveCommonEle(v2, v1); + } + for (int x : v1) { + if (v2.count(x)) { + return true; + } + } +#endif + return false; +} + +void setIntersection(const std::unordered_set& s1, const std::unordered_set& s2, std::unordered_set& s) { +#if 0 + std::unordered_set s_tmp; + std::vector v1, v2; + v1.reserve(s1.size()); + for (auto it = s1.begin(); it != s1.end(); it++) + v1.push_back(*it); + v2.reserve(s2.size()); + for (auto it = s2.begin(); it != s2.end(); it++) + v2.push_back(*it); + std::sort(v1.begin(), v1.end()); + std::sort(v2.begin(), v2.end()); + std::set_intersection(v1.begin(), v1.end(), v2.begin(), v2.end(), std::inserter(s_tmp, s_tmp.end())); + s = s_tmp; +#else + if (s2.size() < s1.size()) { setIntersection(s2, s1, s); return; } + s.clear(); + s.reserve(std::min(s1.size(), s2.size())); + for (int x : s1) { + if (s2.count(x)) { + s.insert(x); + } + } +#endif + +// s.clear(); +// s.reserve(std::min(s1.size(), s2.size())); +// std::unordered_set s_tmp = s2; +// int size = s_tmp.size(); +// for(int ele:s1){ +// s_tmp.insert(ele); +// if(s_tmp.size()>size){ +// size = s_tmp.size(); +// } else +// s.insert(ele); +// } +} + +void setIntersection(const std::unordered_set& s1, const std::unordered_set& s2, std::vector& s) { +// s.clear(); +// s.reserve(std::min(s1.size(), s2.size())); +// std::unordered_set s_tmp = s2; +// int size = s_tmp.size(); +// for(int ele:s1){ +// s_tmp.insert(ele); +// if(s_tmp.size()>size){ +// size = s_tmp.size(); +// } else +// s.push_back(ele); +// } +#if 0 + std::vector v1, v2; + v1.reserve(s1.size()); + for(auto it=s1.begin();it!=s1.end();it++) + v1.push_back(*it); + v2.reserve(s2.size()); + for(auto it=s2.begin();it!=s2.end();it++) + v2.push_back(*it); + std::sort(v1.begin(), v1.end()); + std::sort(v2.begin(), v2.end()); + std::set_intersection(v1.begin(), v1.end(), v2.begin(), v2.end(), std::back_inserter(s)); +#else + if (s2.size() < s1.size()) { setIntersection(s2, s1, s); return; } + s.clear(); + s.reserve(std::min(s1.size(), s2.size())); + for (int x : s1) { + if (s2.count(x)) { + s.push_back(x); + } + } + std::sort(s.begin(), s.end()); +#endif +} + + +void sampleTriangle(const std::array& vs, std::vector& ps, const double sampling_dist) { + double sqrt3_2 = std::sqrt(3) / 2; + + std::array ls; + for (int i = 0; i < 3; i++) { + ls[i] = GEO::length2(vs[i] - vs[(i + 1) % 3]); + } + auto min_max = std::minmax_element(ls.begin(), ls.end()); + 
int min_i = min_max.first - ls.begin(); + int max_i = min_max.second - ls.begin(); + double N = sqrt(ls[max_i]) / sampling_dist; + if (N <= 1) { + for (int i = 0; i < 3; i++) + ps.push_back(vs[i]); + return; + } + if (N == int(N)) + N -= 1; + + GEO::vec3 v0 = vs[max_i]; + GEO::vec3 v1 = vs[(max_i + 1) % 3]; + GEO::vec3 v2 = vs[(max_i + 2) % 3]; + + GEO::vec3 n_v0v1 = GEO::normalize(v1 - v0); + for (int n = 0; n <= N; n++) { + ps.push_back(v0 + n_v0v1 * sampling_dist * n); + } + ps.push_back(v1); + + double h = GEO::distance(GEO::dot((v2 - v0), (v1 - v0)) * (v1 - v0) / ls[max_i] + v0, v2); + int M = h / (sqrt3_2 * sampling_dist); + if (M < 1) { + ps.push_back(v2); + return; + } + + GEO::vec3 n_v0v2 = GEO::normalize(v2 - v0); + GEO::vec3 n_v1v2 = GEO::normalize(v2 - v1); + double tan_v0, tan_v1, sin_v0, sin_v1; + sin_v0 = GEO::length(GEO::cross((v2 - v0), (v1 - v0))) / (GEO::distance(v0, v2) * GEO::distance(v0, v1)); + tan_v0 = GEO::length(GEO::cross((v2 - v0), (v1 - v0))) / GEO::dot((v2 - v0), (v1 - v0)); + tan_v1 = GEO::length(GEO::cross((v2 - v1), (v0 - v1))) / GEO::dot((v2 - v1), (v0 - v1)); + sin_v1 = GEO::length(GEO::cross((v2 - v1), (v0 - v1))) / (GEO::distance(v1, v2) * GEO::distance(v0, v1)); + + for (int m = 1; m <= M; m++) { + int n = sqrt3_2 / tan_v0 * m + 0.5; + int n1 = sqrt3_2 / tan_v0 * m; + if (m % 2 == 0 && n == n1) { + n += 1; + } + GEO::vec3 v0_m = v0 + m * sqrt3_2 * sampling_dist / sin_v0 * n_v0v2; + GEO::vec3 v1_m = v1 + m * sqrt3_2 * sampling_dist / sin_v1 * n_v1v2; + if (GEO::distance(v0_m, v1_m) <= sampling_dist) + break; + + double delta_d = ((n + (m % 2) / 2.0) - m * sqrt3_2 / tan_v0) * sampling_dist; + GEO::vec3 v = v0_m + delta_d * n_v0v1; + int N1 = GEO::distance(v, v1_m) / sampling_dist; +// ps.push_back(v0_m); + for (int i = 0; i <= N1; i++) { + ps.push_back(v + i * n_v0v1 * sampling_dist); + } +// ps.push_back(v1_m); + } + ps.push_back(v2); + + //sample edges + N = sqrt(ls[(max_i + 1) % 3]) / sampling_dist; + if (N > 1) { + if (N == int(N)) + N -= 1; + GEO::vec3 n_v1v2 = GEO::normalize(v2 - v1); + for (int n = 1; n <= N; n++) { + ps.push_back(v1 + n_v1v2 * sampling_dist * n); + } + } + + N = sqrt(ls[(max_i + 2) % 3]) / sampling_dist; + if (N > 1) { + if (N == int(N)) + N -= 1; + GEO::vec3 n_v2v0 = GEO::normalize(v0 - v2); + for (int n = 1; n <= N; n++) { + ps.push_back(v2 + n_v2v0 * sampling_dist * n); + } + } + + // logger().trace("triangle_samples {}", ps.size()); + +// logger().debug("ps.size = {}", ps.size()); +// logger().debug("is output samples?"); +// int anw = 0; +// cin >> anw; +// if (anw != 0) { +//// if (true) { +// Eigen::MatrixXd V_tmp(ps.size() * 3 + 3, 3); +// Eigen::MatrixXi F_tmp(ps.size() + 1, 3); +// for (int i = 0; i < 3; i++) { +// for (int j = 0; j < 3; j++) +// V_tmp(i, j) = vs[i][j]; +// F_tmp(0, i) = i; +// } +// +// for (int i = 0; i < ps.size(); i++) { +// for (int k = 0; k < 3; k++) { +// for (int j = 0; j < 3; j++) +// V_tmp((1 + i) * 3 + k, j) = ps[i][j]; +// F_tmp(1 + i, k) = (1 + i) * 3 + k; +// } +// } +// igl::writeSTL(state.working_dir + "_sample.stl", V_tmp, F_tmp); +// } +} + +} // namespace tetwild diff --git a/contrib/NeRF-Editing/TetWild/src/tetwild/Common.h b/contrib/NeRF-Editing/TetWild/src/tetwild/Common.h new file mode 100644 index 00000000..571258bc --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/src/tetwild/Common.h @@ -0,0 +1,32 @@ +// This file is part of TetWild, a software for generating tetrahedral meshes. 
+// +// Copyright (C) 2018 Jeremie Dumas +// +// This Source Code Form is subject to the terms of the Mozilla Public License +// v. 2.0. If a copy of the MPL was not distributed with this file, You can +// obtain one at http://mozilla.org/MPL/2.0/. +// +// Created by Jeremie Dumas on 09/04/18. +// + +#pragma once + +#include +#include +#include +#include + +#define TIMING_BREAKDOWN true + +namespace tetwild { + +void pausee(); + +bool isHaveCommonEle(const std::unordered_set& v1, const std::unordered_set& v2); +void setIntersection(const std::unordered_set& s1, const std::unordered_set& s2, std::unordered_set& s); +void setIntersection(const std::unordered_set& s1, const std::unordered_set& s2, std::vector& s); +void sampleTriangle(const std::array& vs, std::vector& ps, double sampling_dist); + +void addRecord(const MeshRecord& record, const Args &args, const State &state); + +} // namespace tetwild diff --git a/contrib/NeRF-Editing/TetWild/src/tetwild/DelaunayTetrahedralization.cpp b/contrib/NeRF-Editing/TetWild/src/tetwild/DelaunayTetrahedralization.cpp new file mode 100644 index 00000000..8df5ecc5 --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/src/tetwild/DelaunayTetrahedralization.cpp @@ -0,0 +1,443 @@ +// This file is part of TetWild, a software for generating tetrahedral meshes. +// +// Copyright (C) 2018 Yixin Hu +// +// This Source Code Form is subject to the terms of the Mozilla Public License +// v. 2.0. If a copy of the MPL was not distributed with this file, You can +// obtain one at http://mozilla.org/MPL/2.0/. +// +// Created by Yixin Hu on 3/29/17. +// + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#define USE_GEOGRAM false + +namespace tetwild { + +void DelaunayTetrahedralization::init(const std::vector& m_vertices, const std::vector>& m_faces, + std::vector& m_f_tags, std::vector& raw_e_tags, std::vector>& raw_conn_e4v) { + m_f_tags.reserve(m_faces.size()); + for (int i = 0; i < m_faces.size(); i++) + m_f_tags.push_back(i); + + std::vector> m_edges; + for (int i = 0; i < m_faces.size(); i++) { + for (int j = 0; j < 3; j++) { + std::array e = {{m_faces[i][j], m_faces[i][(j + 1) % 3]}}; + if (e[0] > e[1]) + e = {{e[1], e[0]}}; + m_edges.push_back(e); + } + } + std::sort(m_edges.begin(), m_edges.end()); + m_edges.erase(std::unique(m_edges.begin(), m_edges.end()), m_edges.end()); + + raw_e_tags = std::vector(m_edges.size(), -1); + raw_conn_e4v = std::vector>(m_vertices.size(), std::vector()); + for (int i = 0; i < m_edges.size(); i++) { + for (int j = 0; j < 2; j++) + raw_conn_e4v[m_edges[i][j]].push_back(i); + raw_e_tags[i] = i; + } +} + +void DelaunayTetrahedralization::getVoxelPoints(const Point_3& p_min, const Point_3& p_max, GEO::Mesh& geo_surface_mesh, + std::vector& voxel_points, const Args &args, const State &state) { +#if !USE_GEOGRAM + GEO::MeshFacetsAABB geo_face_tree(geo_surface_mesh); + + double voxel_resolution; + if(args.getRelativeEdgeLength(state.bbox_diag) < 5.0) { + voxel_resolution = state.bbox_diag / 20.0; + } else { + voxel_resolution = args.getAbsoluteEdgeLength(state.bbox_diag); + } + std::array d; + std::array N; + for (int i = 0; i < 3; i++) { + double D = CGAL::to_double(p_max[i] - p_min[i]); + N[i] = (D / voxel_resolution) + 1; + d[i] = D / N[i]; + } + std::array, 3> ds; + for (int i = 0; i < 3; i++) { + ds[i].push_back(p_min[i]); + for (int j = 0; j < N[i] - 1; j++) { + ds[i].push_back(p_min[i] + d[i] * (j + 1)); + } + 
ds[i].push_back(p_max[i]); + } + + double min_dis = voxel_resolution * voxel_resolution / 4; +// double min_dis = state.target_edge_len * state.target_edge_len;//epsilon*2 + for (int i = 0; i < ds[0].size(); i++) { + for (int j = 0; j < ds[1].size(); j++) { + for (int k = 0; k < ds[2].size(); k++) { + if ((i == 0 || i == ds[0].size() - 1) && (j == 0 || j == ds[1].size() - 1) + && (k == 0 || k == ds[2].size() - 1)) + continue; + GEO::vec3 geo_p(CGAL::to_double(ds[0][i]), CGAL::to_double(ds[1][j]), CGAL::to_double(ds[2][k])); + if (geo_face_tree.squared_distance(geo_p) < min_dis) + continue; + voxel_points.push_back(Point_d(ds[0][i], ds[1][j], ds[2][k])); + } + } + } +#else + //todo +#endif +} + +void DelaunayTetrahedralization::tetra(const std::vector& m_vertices, GEO::Mesh& geo_surface_mesh, + std::vector& bsp_vertices, std::vector& bsp_edges, + std::vector& bsp_faces, std::vector& bsp_nodes, + const Args &args, const State &state) { +#if USE_GEOGRAM + cout<<"using geogram delaunay tetrahedralization"< points; + const int m_vertices_size = m_vertices.size(); + points.reserve(m_vertices_size); + for (int i = 0; i < m_vertices_size; i++) { + points.push_back(m_vertices[i]); + } + + ///add 8 virtual vertices + Bbox_3 bbox = CGAL::bounding_box(m_vertices.begin(), m_vertices.end()); + Point_3 p_min = bbox.min(); + Point_3 p_max = bbox.max(); + + double dis = g_eps * 2;//todo: use epsilon to determine the size of bbx + if (dis < g_diag_l / 20) + dis = g_diag_l / 20; + else + dis = g_eps * 1.1; + p_min = Point_3(p_min[0] - dis, p_min[1] - dis, p_min[2] - dis); + p_max = Point_3(p_max[0] + dis, p_max[1] + dis, p_max[2] + dis); + + for (int i = 0; i < 8; i++) { + std::array p; + std::bitset a(i); + for (int j = 0; j < 3; j++) { + if (a.test(j)) + p[j] = p_max[j]; + else + p[j] = p_min[j]; + } + points.push_back(Point_3(p[0], p[1], p[2])); + } + + ///add voxel points +// std::vector voxel_points; +// if (args.is_using_voxel) +// getVoxelPoints(p_min, p_max, geo_surface_mesh, voxel_points); +// for (int i = 0; i < voxel_points.size(); i++) { +// points.push_back(std::make_pair(voxel_points[i], m_vertices_size + 8 + i)); +// } +// cout << voxel_points.size() << " voxel points are added!" 
<< endl; + //todo + + GEO::Delaunay::initialize(); + GEO::Delaunay_var T = GEO::Delaunay::create(3, "BDEL"); + std::vector V_d; + V_d.resize(points.size()*3); +// Eigen::MatrixXd V_d(points.size(), 3); + //points -> V_d + for(int i=0;iset_vertices(points.size(), V_d.data()); + + auto tet2v = T->cell_to_v(); + std::vector> cells; + cells.reserve(T->nb_cells()); + std::vector> conn_c4v; + conn_c4v.resize(T->nb_vertices()); + for (int i = 0; i < T->nb_cells(); i++) { + cells.push_back(std::array({tet2v[i * 4], tet2v[i * 4 + 1], tet2v[i * 4 + 2], tet2v[i * 4 + 3]})); + for (int j = 0; j < 4; j++) + conn_c4v[tet2v[i * 4 + j]].push_back(i); + } + + std::vector> edges; + std::vector> faces; + edges.reserve(cells.size() * 6 / 6); + faces.reserve(cells.size() * 4 / 2); + for (int i = 0; i < cells.size(); i++) { + for (int j = 0; j < 3; j++) {//edge + if(cells[i][j] < cells[i][(j + 1) % 3]) + edges.push_back(std::array({cells[i][j], cells[i][(j + 1) % 3]})); + else + edges.push_back(std::array({cells[i][(j + 1) % 3], cells[i][j]})); + + if(cells[i][j] < cells[i][3]) + edges.push_back(std::array({cells[i][j], cells[i][3]})); + else + edges.push_back(std::array({cells[i][3], cells[i][j]})); + } + + for (int j = 0; j < 4; j++) {//face + std::array f = {cells[i][j], cells[i][(j + 1) % 4], cells[i][(j + 2) % 4]}; + std::sort(f.begin(), f.end()); + faces.push_back(f); + } + } + std::sort(edges.begin(), edges.end()); + edges.erase(std::unique(edges.begin(), edges.end()), edges.end()); + std::sort(faces.begin(), faces.end()); + faces.erase(std::unique(faces.begin(), faces.end()), faces.end()); + + bsp_vertices = points; + bsp_edges = std::vector(edges.size(), BSPEdge()); + bsp_faces = std::vector(faces.size(), BSPFace()); + bsp_nodes = std::vector(cells.size(), BSPtreeNode()); + + std::vector> conn_f4v; + conn_f4v.resize(T->nb_vertices()); + for (int i = 0; i < faces.size(); i++) { + for (int j = 0; j < 3; j++) { + conn_f4v[faces[i][j]].push_back(i); + bsp_faces[i].vertices.push_back(faces[i][j]); + } + + //conn_nodes + std::vector tmp; + std::set_intersection(conn_c4v[faces[i][0]].begin(), conn_c4v[faces[i][0]].end(), + conn_c4v[faces[i][1]].begin(), conn_c4v[faces[i][1]].end(), std::back_inserter(tmp)); + std::set_intersection(tmp.begin(), tmp.end(), + conn_c4v[faces[i][2]].begin(), conn_c4v[faces[i][2]].end(), + std::inserter(bsp_faces[i].conn_nodes, bsp_faces[i].conn_nodes.begin())); + + //faces for node + for(int n_id:bsp_faces[i].conn_nodes) + bsp_nodes[n_id].faces.push_back(i); + } + + for (int i = 0; i < edges.size(); i++) { + for (int j = 0; j < 2; j++){ + bsp_edges[i].vertices.push_back(edges[i][j]); + } + + //conn_faces + std::set_intersection(conn_f4v[edges[i][0]].begin(), conn_f4v[edges[i][0]].end(), + conn_f4v[edges[i][1]].begin(), conn_f4v[edges[i][1]].end(), + std::inserter(bsp_edges[i].conn_faces, bsp_edges[i].conn_faces.begin())); + + //edges for face + for(int f_id:bsp_edges[i].conn_faces) + bsp_faces[f_id].edges.push_back(i); + } + + + //check flip // DO NOT DELETE IT! 
+// for(int i=0;i::digits10 + 1); +// f << "Vertices" << std::endl << bsp_vertices.size() << std::endl; +// for (int i = 0; i < bsp_vertices.size(); i++) +// f << CGAL::to_double(bsp_vertices[i][0])<<" "<> points; + const int m_vertices_size = m_vertices.size(); + points.reserve(m_vertices_size); + for (int i = 0; i < m_vertices_size; i++) { + points.push_back(std::make_pair(Point_d(m_vertices[i][0], m_vertices[i][1], m_vertices[i][2]), i)); + } + + ///add 8 virtual vertices + Bbox_3 bbox = CGAL::bounding_box(m_vertices.begin(), m_vertices.end()); + Point_3 p_min = bbox.min(); + Point_3 p_max = bbox.max(); + + double dis = state.eps * 2;//todo: use epsilon to determine the size of bbx + if (dis < state.bbox_diag / 20) + dis = state.bbox_diag / 20; + else + dis = state.eps * 1.1; + p_min = Point_3(p_min[0] - dis, p_min[1] - dis, p_min[2] - dis); + p_max = Point_3(p_max[0] + dis, p_max[1] + dis, p_max[2] + dis); + + for (int i = 0; i < 8; i++) { + std::array p; + std::bitset a(i); + for (int j = 0; j < 3; j++) { + if (a.test(j)) + p[j] = p_max[j]; + else + p[j] = p_min[j]; + } + points.push_back(std::make_pair(Point_d(p[0], p[1], p[2]), m_vertices_size + i)); + } + ///add voxel points + std::vector voxel_points; + if(!args.not_use_voxel_stuffing) { + getVoxelPoints(p_min, p_max, geo_surface_mesh, voxel_points, args, state); + } + for(int i=0;i> cells; + cells.reserve(T.number_of_finite_cells()); + std::vector> conn_n_ids(points.size(), std::vector()); + for (auto it = T.finite_cells_begin(); it != T.finite_cells_end(); ++it) {//it is determinate + std::array c; + for (int i = 0; i < 4; i++) { + int n = it->vertex(i)->info(); + c[i] = n; + } + std::sort(c.begin(), c.end()); + cells.push_back(c); + } + std::sort(cells.begin(), cells.end()); + for(int i=0;i> faces; + faces.reserve(T.number_of_finite_facets()); + std::vector> conn_f_ids(points.size(), std::vector()); + for (auto it = T.finite_facets_begin(); it != T.finite_facets_end(); ++it) { + std::array f; + for (int i = 0; i < 3; i++) { + int n = it->first->vertex((it->second + i + 1) % 4)->info(); + assert(!(points[n].first != it->first->vertex((it->second + i + 1) % 4)->point())); + f[i] = n; + } + std::sort(f.begin(), f.end()); + faces.push_back(f); + } + std::sort(faces.begin(), faces.end()); + for(int i=0;i> edges; + edges.reserve(T.number_of_finite_edges()); + for (auto it = T.finite_edges_begin(); it != T.finite_edges_end(); ++it) { + std::array e; + assert(!(points[it->first->vertex(it->second)->info()].first != + it->first->vertex(it->second)->point())); + assert(!(points[it->first->vertex(it->third)->info()].first != + it->first->vertex(it->third)->point())); + e[0] = it->first->vertex(it->second)->info(); + e[1] = it->first->vertex(it->third)->info(); + if(e[0]>e[1]) + e={{e[1], e[0]}}; + edges.push_back(e); + } + std::sort(edges.begin(), edges.end()); + + //////construct bsp tree + bsp_vertices.reserve(points.size());//+++ + bsp_vertices = m_vertices; + for (int i = m_vertices_size; i < points.size(); i++) { + bsp_vertices.push_back(Point_3(points[i].first[0], points[i].first[1], points[i].first[2])); + } + bsp_edges = std::vector(edges.size(), BSPEdge()); + bsp_faces = std::vector(faces.size(), BSPFace()); + bsp_nodes = std::vector(cells.size(), BSPtreeNode()); + + const int faces_size = faces.size(); + for (int i = 0; i < faces_size; i++) { + std::array &f = faces[i]; + //vertices + bsp_faces[i].vertices = {{f[0], f[1], f[2]}}; + //conn_nodes + std::vector tmp; + //no need to sort before intersection because elements have been 
sorted + std::set_intersection(conn_n_ids[f[0]].begin(), conn_n_ids[f[0]].end(), + conn_n_ids[f[1]].begin(), conn_n_ids[f[1]].end(), + std::back_inserter(tmp)); + std::set_intersection(tmp.begin(), tmp.end(), + conn_n_ids[f[2]].begin(), conn_n_ids[f[2]].end(), + std::inserter(bsp_faces[i].conn_nodes, bsp_faces[i].conn_nodes.begin())); + //faces for nodes + for (auto it = bsp_faces[i].conn_nodes.begin(); it != bsp_faces[i].conn_nodes.end(); it++) { + bsp_nodes[*it].faces.push_back(i); + } + } + + const int edges_size = edges.size(); + for (int i = 0; i < edges_size; i++) { + std::array &e = edges[i]; + //vertices + bsp_edges[i].vertices = {{e[0], e[1]}}; + //conn_faces + std::set_intersection(conn_f_ids[e[0]].begin(), conn_f_ids[e[0]].end(), + conn_f_ids[e[1]].begin(), conn_f_ids[e[1]].end(), + std::inserter(bsp_edges[i].conn_faces, bsp_edges[i].conn_faces.begin())); + //edges for faces + for (auto it = bsp_edges[i].conn_faces.begin(); it != bsp_edges[i].conn_faces.end(); it++) { + bsp_faces[*it].edges.push_back(i); + } + } +#endif +} + +void DelaunayTetrahedralization::outputTetmesh(const std::vector& m_vertices, std::vector>& cells, + const std::string& output_file){ + std::ofstream of(output_file); + + of< +// +// This Source Code Form is subject to the terms of the Mozilla Public License +// v. 2.0. If a copy of the MPL was not distributed with this file, You can +// obtain one at http://mozilla.org/MPL/2.0/. +// +// Created by Yixin Hu on 3/29/17. +// + +#ifndef GTET_DELAUNAYTETRAHEDRALIZATION_H +#define GTET_DELAUNAYTETRAHEDRALIZATION_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace tetwild { + +// More CGAL types +typedef CGAL::Triangulation_vertex_base_with_info_3 Vb; +typedef CGAL::Triangulation_data_structure_3 Tds; +typedef CGAL::Delaunay_triangulation_3 Delaunay; +typedef Delaunay::Point Point_d; + +class DelaunayTetrahedralization { +public: + Eigen::MatrixXd V_sf; + Eigen::MatrixXi F_sf; + + void init(const std::vector& m_vertices, const std::vector>& m_faces, + std::vector& m_f_tags, std::vector& raw_e_tags, std::vector>& raw_conn_e4v); + + void getVoxelPoints(const Point_3& p_min, const Point_3& p_max, GEO::Mesh& geo_surface_mesh, + std::vector& voxel_points, const Args &args, const State &state); + void tetra(const std::vector& m_vertices, GEO::Mesh& geo_surface_mesh, + std::vector& bsp_vertices, std::vector& bsp_edges, + std::vector& bsp_faces, std::vector& bsp_nodes, + const Args &args, const State &state); + void outputTetmesh(const std::vector& m_vertices, std::vector>& cells, + const std::string& output_file); +}; + +} // namespace tetwild + +#endif //GTET_DELAUNAYTETRAHEDRALIZATION_H diff --git a/contrib/NeRF-Editing/TetWild/src/tetwild/DisableWarnings.h b/contrib/NeRF-Editing/TetWild/src/tetwild/DisableWarnings.h new file mode 100644 index 00000000..3d7a94c2 --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/src/tetwild/DisableWarnings.h @@ -0,0 +1,62 @@ +// Disable compiler warnings before including third party code +#if defined(__clang__) + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wshadow" + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wsign-compare" + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wswitch-default" + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wformat-nonliteral" + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wswitch-enum" + #pragma clang diagnostic push + #pragma clang diagnostic 
ignored "-Wstrict-overflow" + // #pragma clang diagnostic push + // #pragma clang diagnostic ignored "-Wnoexcept" + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wctor-dtor-privacy" + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wnull-dereference" + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wcast-qual" + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wmissing-noreturn" + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Woverloaded-virtual" + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wsign-promo" + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wcast-align" + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wnull-pointer-arithmetic" + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wc++17-extensions" +#elif (defined(__GNUC__) || defined(__GNUG__)) && !(defined(__clang__) || defined(__INTEL_COMPILER)) + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wshadow" + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wsign-compare" + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wswitch-default" + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wformat-nonliteral" + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wswitch-enum" + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wstrict-overflow" + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wnoexcept" + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wctor-dtor-privacy" + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wnull-dereference" + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wcast-qual" + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wmissing-noreturn" + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Woverloaded-virtual" + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wsign-promo" +#endif diff --git a/contrib/NeRF-Editing/TetWild/src/tetwild/DistanceQuery.cpp b/contrib/NeRF-Editing/TetWild/src/tetwild/DistanceQuery.cpp new file mode 100644 index 00000000..e69de29b diff --git a/contrib/NeRF-Editing/TetWild/src/tetwild/DistanceQuery.h b/contrib/NeRF-Editing/TetWild/src/tetwild/DistanceQuery.h new file mode 100644 index 00000000..700da4ca --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/src/tetwild/DistanceQuery.h @@ -0,0 +1,41 @@ +// This file is part of TetWild, a software for generating tetrahedral meshes. +// +// Copyright (C) 2018 Jeremie Dumas +// +// This Source Code Form is subject to the terms of the Mozilla Public License +// v. 2.0. If a copy of the MPL was not distributed with this file, You can +// obtain one at http://mozilla.org/MPL/2.0/. +// +// Created by Jeremie Dumas on 09/04/18. +// + +#pragma once + +#include +#include +#include + +namespace tetwild { + +inline void get_point_facet_nearest_point( + const GEO::Mesh& M, + const GEO::vec3& p, + GEO::index_t f, + GEO::vec3& nearest_p, + double& squared_dist +) { + using namespace GEO; + geo_debug_assert(M.facets.nb_vertices(f) == 3); + index_t c = M.facets.corners_begin(f); + const vec3& p1 = Geom::mesh_vertex(M, M.facet_corners.vertex(c)); + ++c; + const vec3& p2 = Geom::mesh_vertex(M, M.facet_corners.vertex(c)); + ++c; + const vec3& p3 = Geom::mesh_vertex(M, M.facet_corners.vertex(c)); + double lambda1, lambda2, lambda3; // barycentric coords, not used. 
+ squared_dist = Geom::point_triangle_squared_distance( + p, p1, p2, p3, nearest_p, lambda1, lambda2, lambda3 + ); +} + +} // namespace tetwild diff --git a/contrib/NeRF-Editing/TetWild/src/tetwild/EdgeCollapser.cpp b/contrib/NeRF-Editing/TetWild/src/tetwild/EdgeCollapser.cpp new file mode 100644 index 00000000..42021d34 --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/src/tetwild/EdgeCollapser.cpp @@ -0,0 +1,802 @@ +// This file is part of TetWild, a software for generating tetrahedral meshes. +// +// Copyright (C) 2018 Yixin Hu +// +// This Source Code Form is subject to the terms of the Mozilla Public License +// v. 2.0. If a copy of the MPL was not distributed with this file, You can +// obtain one at http://mozilla.org/MPL/2.0/. +// +// Created by Yixin Hu on 4/11/17. +// + +#include +#include +#include +#include + +namespace tetwild { + +void EdgeCollapser::init() { + energy_time = 0; + + ////cal dir_edge + //find all edges + //check if collapsable 1 + //if yes, insert it into queue + const unsigned int tets_size = tets.size(); + std::vector> edges; + edges.reserve(tets_size*6); +// for (unsigned int i = 0; i < tets_size; i++) { +// if (t_is_removed[i]) +// continue; +// for (int j = 0; j < 4; j++) { +// std::array e = {{tets[i][j], tets[i][(j + 1) % 4]}}; +// if (e[0] > e[1]) e = {{e[1], e[0]}}; +// if(!isLocked_ui(e)) +// edges.push_back(e); +// } +// } + for (unsigned int i = 0; i < tets.size(); i++) { + if (t_is_removed[i]) + continue; + for (int j = 0; j < 3; j++) { + std::array e = {{tets[i][0], tets[i][j + 1]}}; + if(e[0]>e[1]) e={{e[1], e[0]}}; + if(!isLocked_ui(e)) + edges.push_back(e); + e = {{tets[i][j + 1], tets[i][(j + 1) % 3 + 1]}}; + if(e[0]>e[1]) e={{e[1], e[0]}}; + if(!isLocked_ui(e)) + edges.push_back(e); + } + } + std::sort(edges.begin(), edges.end()); + edges.erase(std::unique(edges.begin(), edges.end()), edges.end()); + + const unsigned int edges_size = edges.size(); + for (unsigned int i = 0; i < edges_size; i++) { + double weight = -1; +// if (isCollapsable_cd1(edges[i][0], edges[i][1]) && isCollapsable_cd2(edges[i][0], edges[i][1])) { + if (isCollapsable_cd1(edges[i][0], edges[i][1])) { + weight = calEdgeLength(edges[i][0], edges[i][1]); + if (isCollapsable_cd3(edges[i][0], edges[i][1], weight)) { + ElementInQueue_ec ele(edges[i], weight); + ec_queue.push(ele); + } + } +// if (isCollapsable_cd1(edges[i][1], edges[i][0]) && isCollapsable_cd2(edges[i][0], edges[i][1])) { + if (isCollapsable_cd1(edges[i][1], edges[i][0])) { + weight = weight == -1 ? 
calEdgeLength(edges[i][0], edges[i][1]) : weight; + if (isCollapsable_cd3(edges[i][0], edges[i][1], weight)) { + ElementInQueue_ec ele({{edges[i][1], edges[i][0]}}, weight); + ec_queue.push(ele); + } + } + } + + counter = 0; + suc_counter = 0; + breakdown_timing = {{0, 0, 0, 0, 0}}; + breakdown_timing0 = {{0, 0}}; +} + +void EdgeCollapser::collapse() { + tet_tss.assign(tets.size(), 0); + int cnt = 0; + logger().debug("edge queue size = {}", ec_queue.size()); + while (!ec_queue.empty()) { + std::array v_ids = ec_queue.top().v_ids; + double old_weight = ec_queue.top().weight; + ec_queue.pop(); + + if (!isEdgeValid(v_ids)) { + continue; + } + + //during operations, the length of edges in the queue may be changed + //also, we need to eliminate the old edges, that is, the edges have an wrong/old weight in the queue + double weight = calEdgeLength(v_ids); + if (weight != old_weight || !isCollapsable_cd3(v_ids[0], v_ids[1], weight)) { + continue; + } +// if(!isCollapsable_cd2(v_ids[0], v_ids[1])){ +// continue; +// } + + while (!ec_queue.empty()) { + std::array tmp_v_ids = ec_queue.top().v_ids; + if (tmp_v_ids == v_ids) + ec_queue.pop(); + else + break; + } + +#if TIMING_BREAKDOWN + igl_timer.start(); +#endif + int return_code = collapseAnEdge(v_ids[0], v_ids[1]); + if (return_code == SUCCESS) { +#if TIMING_BREAKDOWN + breakdown_timing[id_success] += igl_timer.getElapsedTime(); +#endif + suc_counter++; + if (budget > 0) { + budget--; + if(budget == 0) + return; + } + } else if (return_code == ENVELOP_SUC) { +#if TIMING_BREAKDOWN + breakdown_timing[id_env_success] += igl_timer.getElapsedTime(); +#endif + suc_counter++; + cnt++; + if (budget > 0) { + budget--; + if(budget == 0) + return; + } + } else { + if (return_code == ENVELOP) { +#if TIMING_BREAKDOWN + breakdown_timing[id_env_fail] += igl_timer.getElapsedTime(); +#endif + } else if (return_code == FLIP) { +#if TIMING_BREAKDOWN + breakdown_timing[id_flip_fail] += igl_timer.getElapsedTime(); +#endif + } else { +#if TIMING_BREAKDOWN + breakdown_timing[id_energy_fail] += igl_timer.getElapsedTime(); +#endif + } + + inf_es.push_back(v_ids); + inf_e_tss.push_back(ts); + } + + counter++; + } + logger().debug("{} {} {}", suc_counter, counter, inf_es.size()); + logger().debug("envelop accept = {}", envelop_accept_cnt); + + if (suc_counter == 0 || inf_es.size() == 0) { +// logger().debug("checking......."); +// init(); +// logger().debug("{}", ec_queue.size()); +// int cnt_flip=0, cnt_quality=0, cnt_envelop=0, cnt_suc=0; +// while (!ec_queue.empty()) { +// std::array v_ids = ec_queue.top().v_ids; +// double old_weight=ec_queue.top().weight; +// ec_queue.pop(); +// +// if (!isEdgeValid(v_ids)) { +// continue; +// } +// +// double weight = calEdgeLength(v_ids); +// if (weight != old_weight || !isCollapsable_cd3(v_ids[0], v_ids[1], weight)) { +// continue; +// } +// if(!isCollapsable_cd2(v_ids[0], v_ids[1])){ +// continue; +// } +// +// while (!ec_queue.empty()) { +// std::array tmp_v_ids = ec_queue.top().v_ids; +// if (tmp_v_ids == v_ids) +// ec_queue.pop(); +// else +// break; +// } +// +// int return_code=collapseAnEdge(v_ids[0], v_ids[1]); +// if (return_code == FLIP) +// cnt_flip++; +// else if(return_code==QUALITY) +// cnt_quality++; +// else if(return_code==ENVELOP) +// cnt_envelop++; +// else +// cnt_suc++; +// } +// logger().debug("{} {} {} {}", cnt_flip, cnt_quality, cnt_envelop, cnt_suc); + + logger().debug("{}: {}s", breakdown_name0[id_sampling], breakdown_timing0[id_sampling]); + logger().debug("{}: {}s", breakdown_name0[id_aabb], 
breakdown_timing0[id_aabb]); + logger().debug("----"); + for (int i = 0; i < breakdown_timing.size(); i++) + logger().debug("{}: {}s", breakdown_name[i], breakdown_timing[i]); + +// std::ofstream of(timing_log_file_name, std::fstream::app); +// if (of.is_open()) { +// of<> tmp_inf_es; + const unsigned int inf_es_size = inf_es.size(); + tmp_inf_es.reserve(inf_es_size/4.0+1); + for (unsigned int i = 0; i < inf_es_size; i++) { + if (!isEdgeValid(inf_es[i])) + continue; + double weight = calEdgeLength(inf_es[i][0], inf_es[i][1]); + if (!isCollapsable_cd3(inf_es[i][0], inf_es[i][1], weight)) + continue; + + bool is_recal = false; + for (auto it = tet_vertices[inf_es[i][0]].conn_tets.begin(); it != tet_vertices[inf_es[i][0]].conn_tets.end(); + it++) { + if (tet_tss[*it] > inf_e_tss[i]) { + is_recal = true; + break; + } + } + +// if (is_recal && isCollapsable_cd1(inf_es[i][0], inf_es[i][1]) && isCollapsable_cd2(inf_es[i][0], inf_es[i][1])) { + if (is_recal && isCollapsable_cd1(inf_es[i][0], inf_es[i][1])) { + if(!isLocked_ui(inf_es[i])) { + ElementInQueue_ec ele(inf_es[i], weight); + ec_queue.push(ele); + } + } else + tmp_inf_es.push_back(inf_es[i]); + } + + std::sort(tmp_inf_es.begin(), tmp_inf_es.end()); + tmp_inf_es.erase(std::unique(tmp_inf_es.begin(), tmp_inf_es.end()), tmp_inf_es.end());//it's better + inf_es = tmp_inf_es; + ts++; + inf_e_tss = std::vector(inf_es.size(), ts); + +#if TIMING_BREAKDOWN + breakdown_timing[id_postprocessing]+=igl_timer.getElapsedTime(); +#endif + + collapse(); +} + +int EdgeCollapser::collapseAnEdge(int v1_id, int v2_id) { + bool is_edge_too_short = false; + bool is_edge_degenerate = false; + double length = sqrt(CGAL::squared_distance(tet_vertices[v1_id].posf, tet_vertices[v2_id].posf)); + if(length == 0) { + is_edge_degenerate = true; + } +// else if(length < 1e-30) { +// logger().debug("{} {} {}{}{} {} {}{}{}", v1_id, tet_vertices[v1_id].is_on_surface, tet_vertices[v1_id].is_on_boundary, ", " +//, v2_id, tet_vertices[v2_id].is_on_surface, tet_vertices[v2_id].is_on_boundary, ": " +//, length); +// logger().debug("{} {}", tet_vertices[v1_id].is_rounded, tet_vertices[v2_id].is_rounded); +// is_edge_too_short = true; +// } + + //check isolated + if(tet_vertices[v1_id].is_on_surface && isIsolated(v1_id)) { + tet_vertices[v1_id].is_on_surface = false; + tet_vertices[v1_id].is_on_boundary = false; + tet_vertices[v1_id].on_fixed_vertex = -1; + tet_vertices[v1_id].on_face.clear(); + tet_vertices[v1_id].on_edge.clear(); + } + if(!isBoundaryPoint(v1_id)) + tet_vertices[v1_id].is_on_boundary = false; + + //check boundary + if(tet_vertices[v1_id].is_on_boundary && !tet_vertices[v2_id].is_on_boundary) + if(!is_edge_degenerate && isPointOutBoundaryEnvelop(tet_vertices[v2_id].posf)) { +// if(is_edge_too_short) { +// logger().debug("v2 bonndary"); +// logger().debug("v1 boundary = {}", isPointOutBoundaryEnvelop(tet_vertices[v1_id].posf)); +// } + return ENVELOP; + } + + //check envelop + if(tet_vertices[v1_id].is_on_surface && !tet_vertices[v2_id].is_on_surface){ + if(!is_edge_degenerate && isPointOutEnvelop(tet_vertices[v2_id].posf)) { +// if(is_edge_too_short) { +// logger().debug("v2 envelop"); +// logger().debug("v1 envelop = {}", isPointOutEnvelop(tet_vertices[v1_id].posf)); +// } + return ENVELOP; + } + } + + //old_t_ids + std::vector old_t_ids; + old_t_ids.reserve(tet_vertices[v1_id].conn_tets.size()); + for (auto it = tet_vertices[v1_id].conn_tets.begin(); it != tet_vertices[v1_id].conn_tets.end(); it++) + old_t_ids.push_back(*it); + std::vector 
is_removed(old_t_ids.size(), false); + + //new_tets + std::vector> new_tets; + new_tets.reserve(old_t_ids.size()); + std::unordered_set n12_v_ids; + std::vector n12_t_ids; + for (int i = 0; i < old_t_ids.size(); i++) { + auto it = std::find(tets[old_t_ids[i]].begin(), tets[old_t_ids[i]].end(), v2_id); + if (it == tets[old_t_ids[i]].end()) { + std::array t = tets[old_t_ids[i]]; + auto jt = std::find(t.begin(), t.end(), v1_id); + *jt = v2_id; + new_tets.push_back(t); + } else { + is_removed[i] = true; + for (int j = 0; j < 4; j++) + if (tets[old_t_ids[i]][j] != v1_id && tets[old_t_ids[i]][j] != v2_id) + n12_v_ids.insert(tets[old_t_ids[i]][j]); + n12_t_ids.push_back(old_t_ids[i]); + } + } + + //check is_valid + //check 1 //todo: look in details later +// for (auto it = n12_v_ids.begin(); it != n12_v_ids.end(); it++) { +// bool is_degenerate = true; +// for (auto jt = tet_vertices[*it].conn_tets.begin(); jt != tet_vertices[*it].conn_tets.end(); jt++) { +// auto kt = std::find(n12_t_ids.begin(), n12_t_ids.end(), *jt); +// if (kt == n12_t_ids.end()) { +// is_degenerate = false; +// break; +// } +// } +// if (is_degenerate) +// return FLIP; +// +// is_degenerate = true; +// for (auto jt = tet_vertices[v2_id].conn_tets.begin(); jt != tet_vertices[v2_id].conn_tets.end(); jt++) { +// auto kt = std::find(n12_t_ids.begin(), n12_t_ids.end(), *jt); +// if (kt == n12_t_ids.end()) { +// is_degenerate = false; +// break; +// } +// } +// if (is_degenerate) +// return FLIP; +// } + + //check 2 + if (isFlip(new_tets)) { +// if(is_edge_too_short) +// logger().debug("flip"); + return FLIP; + } + std::vector tet_qs; + igl::Timer tmp_timer; + tmp_timer.start(); + calTetQualities(new_tets, tet_qs); + energy_time+=tmp_timer.getElapsedTime(); + + if (energy_type != state.ENERGY_NA && is_check_quality) { + TetQuality old_tq, new_tq; + getCheckQuality(old_t_ids, old_tq); + getCheckQuality(tet_qs, new_tq); +// if (is_edge_too_short) +// logger().debug("old {} new {}", old_tq.slim_energy, new_tq.slim_energy); +// if (is_soft && old_tq.slim_energy < soft_energy) { +// old_tq.slim_energy = Args::args().filter_energy_thres; +// } + if(is_soft) + old_tq.slim_energy = soft_energy; + if (!tet_vertices[v1_id].is_rounded) //remove an unroundable vertex anyway + new_tq.slim_energy = 0; + if (!is_edge_degenerate && !new_tq.isBetterOrEqualThan(old_tq, energy_type, state)) { +// if (is_edge_too_short) +// logger().debug("quality"); + return QUALITY; + } + } + + //check 2.5 + if (tet_vertices[v1_id].is_on_boundary) { + Point_3 old_p = tet_vertices[v1_id].pos; + Point_3f old_pf = tet_vertices[v1_id].posf; + tet_vertices[v1_id].posf = tet_vertices[v2_id].posf; + tet_vertices[v1_id].pos = tet_vertices[v2_id].pos; + if (!is_edge_degenerate && isBoundarySlide(v1_id, v2_id, old_pf)) { + tet_vertices[v1_id].posf = old_pf; + tet_vertices[v1_id].pos = old_p; +// if (is_edge_too_short) +// logger().debug("boundary"); + return ENVELOP; + } + tet_vertices[v1_id].posf = old_pf; + tet_vertices[v1_id].pos = old_p; + } + + //check 3 + bool is_envelop_suc = false; + if (state.eps != state.EPSILON_NA && state.eps != state.EPSILON_INFINITE && tet_vertices[v1_id].is_on_surface) { + if (!is_edge_degenerate && !isCollapsable_epsilon(v1_id, v2_id)) { +// if (is_edge_too_short) +// logger().debug("envelop"); + return ENVELOP; + } + is_envelop_suc = true; + envelop_accept_cnt++; + if (envelop_accept_cnt % 1000 == 0) + logger().debug("1000 accepted!"); + } + + + //real update +// if(is_edge_too_short) +// logger().debug("success"); + 
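+    // All checks passed -- commit the collapse: propagate v1's boundary/surface
+    // status to v2; record, for each tet being removed, the neighbouring tets
+    // across its faces opposite v1 and v2 (update_sf_t_ids) so surface tags can
+    // be transferred; delete the tets containing both endpoints; substitute v2
+    // for v1 in the surviving tets while updating conn_tets and tet_qualities;
+    // mark v1 removed, bump the timestamps, and re-push the edges around v2.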
if(tet_vertices[v1_id].is_on_boundary) + tet_vertices[v2_id].is_on_boundary=true; + + std::vector> update_sf_t_ids(n12_t_ids.size(), std::array()); + if (tet_vertices[v1_id].is_on_surface || tet_vertices[v2_id].is_on_surface) { + for (int i = 0; i < n12_t_ids.size(); i++) { + for (int j = 0; j < 4; j++) { + if (tets[n12_t_ids[i]][j] == v1_id || tets[n12_t_ids[i]][j] == v2_id) { + std::vector ts; + getFaceConnTets(tets[n12_t_ids[i]][(j + 1) % 4], tets[n12_t_ids[i]][(j + 2) % 4], + tets[n12_t_ids[i]][(j + 3) % 4], ts); + +// if(ts.size() != 2) { +// logger().debug("ts.size() != 2 but = {}", ts.size()); +// logger().debug("v1 info:"); +// tet_vertices[v1_id].printInfo(); +// logger().debug("v2 info:"); +// tet_vertices[v2_id].printInfo(); +// +// tet_vertices[tets[n12_t_ids[i]][(j + 1) % 4]].printInfo(); +// tet_vertices[tets[n12_t_ids[i]][(j + 2) % 4]].printInfo(); +// tet_vertices[tets[n12_t_ids[i]][(j + 3) % 4]].printInfo(); +// pausee(); +// } + + if(tets[n12_t_ids[i]][j] == v1_id) + update_sf_t_ids[i][1] = ts[0] != n12_t_ids[i] ? ts[0] : ts[1]; + else + update_sf_t_ids[i][0] = ts[0] != n12_t_ids[i] ? ts[0] : ts[1]; + } + } + } + } + + std::unordered_set n1_v_ids; + int cnt = 0; + for (int i = 0; i < old_t_ids.size(); i++) { + if (is_removed[i]) { + t_is_removed[old_t_ids[i]] = true; + for (int j = 0; j < 4; j++) + if (tets[old_t_ids[i]][j] != v1_id && tets[old_t_ids[i]][j] != v2_id) { + tet_vertices[tets[old_t_ids[i]][j]].conn_tets.erase( + std::find(tet_vertices[tets[old_t_ids[i]][j]].conn_tets.begin(), + tet_vertices[tets[old_t_ids[i]][j]].conn_tets.end(), old_t_ids[i])); + } + tet_vertices[v2_id].conn_tets.erase(std::find(tet_vertices[v2_id].conn_tets.begin(), + tet_vertices[v2_id].conn_tets.end(), old_t_ids[i])); + } else { + tet_vertices[v2_id].conn_tets.insert(old_t_ids[i]); + tet_qualities[old_t_ids[i]] = tet_qs[cnt]; + for (int j = 0; j < 4; j++) { + if (tets[old_t_ids[i]][j] != v1_id) + n1_v_ids.insert(tets[old_t_ids[i]][j]);//n12_v_ids would still be inserted + } + tets[old_t_ids[i]] = new_tets[cnt]; + cnt++; + } + } + + + if (tet_vertices[v1_id].is_on_surface || tet_vertices[v2_id].is_on_surface) { + tet_vertices[v2_id].is_on_surface = true; + + bool is_check_isolated = false; + for (int i = 0; i < n12_t_ids.size(); i++) { + std::array is_sf_fs; + std::vector es; + for (int j = 0; j < 4; j++) { + if (tets[n12_t_ids[i]][j] != v1_id && tets[n12_t_ids[i]][j] != v2_id) + es.push_back(tets[n12_t_ids[i]][j]); + else if (tets[n12_t_ids[i]][j] == v1_id) + is_sf_fs[0] = is_surface_fs[n12_t_ids[i]][j]; + else + is_sf_fs[1] = is_surface_fs[n12_t_ids[i]][j]; + } + //be careful about the order!! + +// if (is_sf_fs[0] == is_sf_fs[1]) { +// if (is_sf_fs[0] != ON_SURFACE_FALSE) { +// is_sf_fs = {ON_SURFACE_FALSE, ON_SURFACE_FALSE}; +// is_check_isolated = true; +// } else +// continue; +// } else if (is_sf_fs[0] != ON_SURFACE_FALSE && is_sf_fs[1] != ON_SURFACE_FALSE) +// continue; +// else if (is_sf_fs[0] != ON_SURFACE_FALSE) +// is_sf_fs[1] = is_sf_fs[0] == ON_SURFACE_TRUE_INSIDE ? ON_SURFACE_TRUE_OUTSIDE : ON_SURFACE_TRUE_INSIDE; +// else +// is_sf_fs[0] = is_sf_fs[1] == ON_SURFACE_TRUE_INSIDE ? 
ON_SURFACE_TRUE_OUTSIDE : ON_SURFACE_TRUE_INSIDE; + + if (is_sf_fs[0] == is_sf_fs[1] && is_sf_fs[0] == state.NOT_SURFACE) + continue; + if(is_sf_fs[0] == state.NOT_SURFACE) + is_sf_fs[0] = 0; + if(is_sf_fs[1] == state.NOT_SURFACE) + is_sf_fs[1] = 0; + + int tmp0 = is_sf_fs[0]; + int tmp1 = is_sf_fs[1]; + is_sf_fs[0] += -tmp1; + is_sf_fs[1] += -tmp0; + + for (int j = 0; j < 4; j++) { + int v_id0 = tets[update_sf_t_ids[i][0]][j]; + if (v_id0 != v2_id && v_id0 != es[0] && v_id0 != es[1]) + is_surface_fs[update_sf_t_ids[i][0]][j] = is_sf_fs[0]; + + int v_id1 = tets[update_sf_t_ids[i][1]][j]; + if (v_id1 != v2_id && v_id1 != es[0] && v_id1 != es[1]) + is_surface_fs[update_sf_t_ids[i][1]][j] = is_sf_fs[1]; + } + } + } + + //update boundary points //todo: Pls figure out a more efficient way +// if(tet_vertices[v2_id].is_on_boundary && !isBoundaryPoint(v2_id)) { +// tet_vertices[v2_id].is_on_boundary = false; +//// logger().debug("a boundary vertex is removed"); +// } +// for(int v_id:n12_v_ids) { +// if (tet_vertices[v_id].is_on_boundary && !isBoundaryPoint(v_id)) { +// tet_vertices[v_id].is_on_boundary = false; +//// logger().debug("a boundary vertex is removed"); +// } +// } + + v_is_removed[v1_id] = true; + + //update time stamps + ts++; + for (int i = 0; i < old_t_ids.size(); i++) { + tet_tss[old_t_ids[i]] = ts; + } + + //add new elements +// std::vector> es; +// for (int i = 0; i < new_tets.size(); i++) { +// for (int j = 0; j < 3; j++) { +// std::array e = {new_tets[i][0], new_tets[i][j + 1]}; +// std::sort(e.begin(), e.end()); +// es.push_back(e); +// e = {new_tets[i][j + 1], new_tets[i][(j + 1) % 3 + 1]}; +// es.push_back(e); +// } +// } +// std::sort(es.begin(), es.end()); +// es.erase(std::unique(es.begin(), es.end()), es.end()); +// for (int i = 0; i < es.size(); i++) { +// addNewEdge(es[i]); +// } + +// logger().debug("{}{}jt==tri.end()", n1_v_ids.size(), "->"; + std::vector n1_v_ids_vec, n12_v_ids_vec; + n1_v_ids_vec.reserve(n1_v_ids.size()); + n12_v_ids_vec.reserve(n12_v_ids.size()); + for(auto it = n1_v_ids.begin(); it != n1_v_ids.end(); it++) + n1_v_ids_vec.push_back(*it); + for(auto it = n12_v_ids.begin(); it != n12_v_ids.end(); it++) + n12_v_ids_vec.push_back(*it); + std::sort(n1_v_ids_vec.begin(), n1_v_ids_vec.end()); + std::sort(n12_v_ids_vec.begin(), n12_v_ids_vec.end()); + n1_v_ids.clear(); + std::set_difference(n1_v_ids_vec.begin(), n1_v_ids_vec.end(),n12_v_ids_vec.begin(), n12_v_ids_vec.end(), + std::inserter(n1_v_ids, n1_v_ids.end())); + + for (auto it = n1_v_ids.begin(); it != n1_v_ids.end(); it++) { + double weight = -1; +// if (isCollapsable_cd1(v2_id, *it) && isCollapsable_cd2(v2_id, *it)) { + if (isCollapsable_cd1(v2_id, *it)) { + weight = calEdgeLength(v2_id, *it); + if (isCollapsable_cd3(v2_id, *it, weight)) { + std::array e={{v2_id, *it}}; + if(!isLocked_ui(e)) { + ElementInQueue_ec ele(e, weight); + ec_queue.push(ele); + } + } + } +// if (isCollapsable_cd1(*it, v2_id) && isCollapsable_cd2(v2_id, *it)) { + if (isCollapsable_cd1(*it, v2_id)) { + weight = weight == -1 ? 
calEdgeLength(*it, v2_id) : weight; + if (isCollapsable_cd3(*it, v2_id, weight)) { + std::array e={{*it, v2_id}}; + if(!isLocked_ui(e)) { + ElementInQueue_ec ele(e, weight); + ec_queue.push(ele); + } + } + } + } + + if(is_envelop_suc) + return ENVELOP_SUC; + return SUCCESS; +} + +//bool EdgeCollapser::isCollapsable_cd2(int v1_id, int v2_id) { +// return true; +// +// //check envelop +// if (tet_vertices[v1_id].is_on_surface && !tet_vertices[v2_id].is_on_surface) { +// if (isPointOutEnvelop(tet_vertices[v2_id].posf)) +// return false; +// } +// return true; +//} + +bool EdgeCollapser::isCollapsable_cd1(int v1_id, int v2_id) { + //check the bbox tags //if the moved vertex is on the bbox + bool is_movable = false; + if (tet_vertices[v1_id].on_fixed_vertex < -1) + return false; + if (tet_vertices[v1_id].is_on_bbox && !tet_vertices[v2_id].is_on_bbox) + return false; + else if (tet_vertices[v1_id].is_on_bbox && tet_vertices[v2_id].is_on_bbox) { + if (tet_vertices[v1_id].on_edge.size() == 0) {//inside the face + is_movable = isHaveCommonEle(tet_vertices[v1_id].on_face, tet_vertices[v2_id].on_face); + } else {//on the edge + is_movable = isHaveCommonEle(tet_vertices[v1_id].on_edge, tet_vertices[v2_id].on_edge); + } + return is_movable; + } + + //check the surface tags //if the vertex is on the surface +// if (state.eps != state.EPSILON_NA) { +// return true; +// } + return true; + + //////////////////////////////////// +// //without envelop +// if (tet_vertices[v1_id].on_fixed_vertex >= 0) +// return false; +// +// is_movable = false; +// if (tet_vertices[v1_id].on_face.size() == 0) //inside the volumn +// is_movable = true; +// else { +// if (tet_vertices[v1_id].on_edge.size() == 0) {//inside the face +// if (isHaveCommonEle(tet_vertices[v1_id].on_face, tet_vertices[v2_id].on_face)) +// is_movable = true; +// } else {//on the edge +// if (isHaveCommonEle(tet_vertices[v1_id].on_edge, tet_vertices[v2_id].on_edge)) +// is_movable = true; +// } +// } +// return is_movable; +} + +//bool EdgeCollapser::isCollapsable_cd3(double weight) { +// if (!is_limit_length) +// return true; +// +// if (weight < ideal_weight) +// return true; +// return false; +//} + +bool EdgeCollapser::isCollapsable_cd3(int v1_id, int v2_id, double weight) { + if (!is_limit_length) + return true; + + double adaptive_scale = (tet_vertices[v1_id].adaptive_scale + tet_vertices[v2_id].adaptive_scale) / 2; + if (weight < ideal_weight * adaptive_scale * adaptive_scale) + return true; +// if (tet_vertices[v1_id].is_on_surface || tet_vertices[v2_id].is_on_surface) { +// if (weight < ideal_weight * adaptive_scale * adaptive_scale) +// return true; +// } else { +// if (weight < ideal_weight) +// return true; +// } + return false; +} + +bool EdgeCollapser::isCollapsable_epsilon(int v1_id, int v2_id) { +// std::vector tris; +// for (auto it = tet_vertices[v1_id].conn_tets.begin(); it != tet_vertices[v1_id].conn_tets.end(); it++) { +// for (int j = 0; j < 4; j++) { +// if (tets[*it][j] == v2_id && is_surface_fs[*it][j] != state.NOT_SURFACE) { +// std::array tri = {tets[*it][(j + 1) % 4], tets[*it][(j + 2) % 4], tets[*it][(j + 3) % 4]}; +// auto jt = std::find(tri.begin(), tri.end(), v1_id); +// if(jt==tri.end()){ +// std::cout); +// throw TetWildError(""); +// } +// *jt = v2_id; +// Triangle_3f tr(tet_vertices[tri[0]].posf, tet_vertices[tri[1]].posf, tet_vertices[tri[2]].posf); +// tris.push_back(tr); +// } +// } +// } + + std::vector> tri_ids; + for (auto it = tet_vertices[v1_id].conn_tets.begin(); it != tet_vertices[v1_id].conn_tets.end(); 
it++) { + for (int j = 0; j < 4; j++) { + if (tets[*it][j] != v1_id && is_surface_fs[*it][j] != state.NOT_SURFACE) { + std::array tri = {{tets[*it][(j + 1) % 4], tets[*it][(j + 2) % 4], tets[*it][(j + 3) % 4]}}; + std::sort(tri.begin(), tri.end()); + tri_ids.push_back(tri); + } + } + } + std::sort(tri_ids.begin(), tri_ids.end()); + tri_ids.erase(std::unique(tri_ids.begin(), tri_ids.end()), tri_ids.end()); + + std::vector tris; + for (int i = 0; i < tri_ids.size(); i++) { + if (std::find(tri_ids[i].begin(), tri_ids[i].end(), v2_id) != tri_ids[i].end()) + continue; + auto jt = std::find(tri_ids[i].begin(), tri_ids[i].end(), v1_id); + *jt = v2_id; + Triangle_3f tri(tet_vertices[tri_ids[i][0]].posf, tet_vertices[tri_ids[i][1]].posf, tet_vertices[tri_ids[i][2]].posf); + tris.push_back(tri); + } + + ///note that tris.size() can be 0 when v1 is on the boundary of the surface!!! + for (int i = 0; i < tris.size(); i++) { + if (isFaceOutEnvelop(tris[i])) + return false; + } + + return true; +} + +bool EdgeCollapser::isEdgeValid(const std::array& e){ + if(v_is_removed[e[0]] || v_is_removed[e[1]]) + return false; + return isHaveCommonEle(tet_vertices[e[0]].conn_tets, tet_vertices[e[1]].conn_tets); + +// if(!isHaveCommonEle(tet_vertices[e[0]].conn_tets, tet_vertices[e[1]].conn_tets)) +// return false; +// return true; +} + +//void EdgeCollapser::addNewEdge(const std::array& e){ +// double weight = -1; +// if (isCollapsable_cd1(e[0], e[1])) { +// weight = calEdgeLength(e[0], e[1]); +// ElementInQueue_ec ele(e, weight); +// if (isCollapsable_cd3(weight)) +// ec_queue.push(ele); +// } +// if (isCollapsable_cd1(e[1], e[0])) { +// ElementInQueue_ec ele(e, weight == -1 ? calEdgeLength(e[0], e[1]) : weight); +// if (isCollapsable_cd3(weight)) +// ec_queue.push(ele); +// } +//} + +} // namespace tetwild diff --git a/contrib/NeRF-Editing/TetWild/src/tetwild/EdgeCollapser.h b/contrib/NeRF-Editing/TetWild/src/tetwild/EdgeCollapser.h new file mode 100644 index 00000000..9dc604e4 --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/src/tetwild/EdgeCollapser.h @@ -0,0 +1,102 @@ +// This file is part of TetWild, a software for generating tetrahedral meshes. +// +// Copyright (C) 2018 Yixin Hu +// +// This Source Code Form is subject to the terms of the Mozilla Public License +// v. 2.0. If a copy of the MPL was not distributed with this file, You can +// obtain one at http://mozilla.org/MPL/2.0/. +// +// Created by Yixin Hu on 4/11/17. 
+// + +#ifndef NEW_GTET_EDGECOLLAPSER_H +#define NEW_GTET_EDGECOLLAPSER_H + +#include +#include + +namespace tetwild { + +class ElementInQueue_ec{ +public: + std::array v_ids; + double weight; + + ElementInQueue_ec(){} + ElementInQueue_ec(const std::array& ids, double w): + v_ids(ids), weight(w){} +}; + +struct cmp_ec { + bool operator()(const ElementInQueue_ec &e1, const ElementInQueue_ec &e2) { + if (e1.weight == e2.weight) + return e1.v_ids < e2.v_ids; + return e1.weight > e2.weight; + } +}; + +class EdgeCollapser: public LocalOperations { +public: + std::priority_queue, cmp_ec> ec_queue; + double ideal_weight=0; + + bool is_limit_length=true; + bool is_check_quality=true; + + int envelop_accept_cnt=0; + EdgeCollapser(LocalOperations& lo, double ideal_w): LocalOperations(lo), ideal_weight(ideal_w){} + + void init(); + void collapse(); + + const int SUCCESS=0; + const int FLIP=1; + const int QUALITY=2; + const int ENVELOP=3; + const int ENVELOP_SUC=4; + int collapseAnEdge(int v1_id, int v2_id); + + bool is_soft = false; + double soft_energy = 6; + int budget = 0; + + int ts = 0; + std::vector> inf_es; + std::vector inf_e_tss; + std::vector tet_tss; + void postProcess(); + + bool isCollapsable_cd1(int v1_id, int v2_id); +// bool isCollapsable_cd2(int v1_id, int v2_id);//check if a vertex is outside the envelop +// bool isCollapsable_cd3(double weight); + bool isCollapsable_cd3(int v1_id, int v2_id, double weight); + bool isCollapsable_epsilon(int v1_id, int v2_id); + + bool isEdgeValid(const std::array& e); +// void addNewEdge(const std::array& e); + + int tmp=0; + int tmp0=0; + + double energy_time = 0; + + //for timing + int id_postprocessing=0; + int id_flip_fail=1; + int id_env_fail=2; + int id_success=3; + int id_env_success=4; + int id_energy_fail = 5; + std::array breakdown_timing; + std::array breakdown_name={{"Postprocessing", + "Failed (flip)", + "Failed (envelop)", + "Successful (non-surface)", + "Successful (surface)", + "Failed (energy)"}}; + igl::Timer igl_timer; +}; + +} // namespace tetwild + +#endif //NEW_GTET_EDGECOLLAPSER_H diff --git a/contrib/NeRF-Editing/TetWild/src/tetwild/EdgeRemover.cpp b/contrib/NeRF-Editing/TetWild/src/tetwild/EdgeRemover.cpp new file mode 100644 index 00000000..2ea6404c --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/src/tetwild/EdgeRemover.cpp @@ -0,0 +1,775 @@ +// This file is part of TetWild, a software for generating tetrahedral meshes. +// +// Copyright (C) 2018 Yixin Hu +// +// This Source Code Form is subject to the terms of the Mozilla Public License +// v. 2.0. If a copy of the MPL was not distributed with this file, You can +// obtain one at http://mozilla.org/MPL/2.0/. +// +// Created by Yixin Hu on 4/17/17. 
+// + +#include +#include +#include +#include + +namespace tetwild { + +void EdgeRemover::init() { + energy_time = 0; + + const unsigned int tets_size = tets.size(); + std::vector> edges; + edges.reserve(tets_size * 6); + for (unsigned int i = 0; i < tets_size; i++) { + if (t_is_removed[i]) + continue; + for (int j = 0; j < 3; j++) { + std::array e = {{tets[i][0], tets[i][j + 1]}}; + if (e[0] > e[1]) e = {{e[1], e[0]}}; + if (!isLocked_ui(e)) + edges.push_back(e); + e = {{tets[i][j + 1], tets[i][(j + 1) % 3 + 1]}}; + if (e[0] > e[1]) e = {{e[1], e[0]}}; + if (!isLocked_ui(e)) + edges.push_back(e); + } + } + std::sort(edges.begin(), edges.end()); + edges.erase(std::unique(edges.begin(), edges.end()), edges.end()); + + for (unsigned int i = 0; i < edges.size(); i++) { + std::vector t_ids; + setIntersection(tet_vertices[edges[i][0]].conn_tets, tet_vertices[edges[i][1]].conn_tets, t_ids); + addNewEdge(edges[i]); +// if (isSwappable_cd1(edges[i])) { +// double weight = calEdgeLength(edges[i]); +// if (isSwappable_cd2(weight)) { +// ElementInQueue_er ele(edges[i], weight); +// er_queue.push(ele); +// } +// } + } + + counter = 0; + suc_counter = 0; + t_empty_start = 0; + v_empty_start = 0; + + equal_buget = 100; +} + +void EdgeRemover::swap(){ + tmp_cnt3=0; + tmp_cnt4=0; + tmp_cnt5=0; + tmp_cnt6=0; + int cnt5=0; + + while(!er_queue.empty()){ + const ElementInQueue_er& ele=er_queue.top(); + + if(!isEdgeValid(ele.v_ids)){ + er_queue.pop(); + continue; + } + + std::vector t_ids; + if(!isSwappable_cd1(ele.v_ids, t_ids, true)){ + er_queue.pop(); + continue; + } + + std::array v_ids=ele.v_ids; + er_queue.pop(); + +// logger().debug("{} {} {} ", v_ids[0], v_ids[1], t_ids.size()); + + while(!er_queue.empty()){ + std::array tmp_v_ids = er_queue.top().v_ids; + if(tmp_v_ids==v_ids) + er_queue.pop(); + else + break; + } + + bool is_fail=false; + if(removeAnEdge_32(v_ids[0], v_ids[1], t_ids)) + suc_counter++; + else if(removeAnEdge_44(v_ids[0], v_ids[1], t_ids)) + suc_counter++; + else if(removeAnEdge_56(v_ids[0], v_ids[1], t_ids)) { + suc_counter++; + cnt5++; + } else{ + is_fail=true; + } + +// if(is_fail){ +// logger().debug("f"); +// } else +// logger().debug("s"); + + counter++; + } + logger().debug("tmp_cnt3 = {}", tmp_cnt3); + logger().debug("tmp_cnt4 = {}", tmp_cnt4); + logger().debug("tmp_cnt5 = {}", tmp_cnt5); + logger().debug("tmp_cnt6 = {}", tmp_cnt6); + logger().debug("{}", cnt5); + + logger().debug("energy_time = {}", energy_time); +} + +bool EdgeRemover::removeAnEdge_32(int v1_id, int v2_id, const std::vector& old_t_ids) { + if(old_t_ids.size() >= 6) tmp_cnt6++; + if(old_t_ids.size() == 5) tmp_cnt5++; + if(old_t_ids.size() == 4) tmp_cnt4++; + if(old_t_ids.size() == 3) tmp_cnt3++; + + if (old_t_ids.size() != 3) + return false; + + //new_tets + std::array v_ids; + std::vector> new_tets; + std::array t_ids; + int cnt = 0; + for (int i = 0; i < 4; i++) { + if (tets[old_t_ids[0]][i] != v1_id && tets[old_t_ids[0]][i] != v2_id) { + v_ids[cnt++] = tets[old_t_ids[0]][i]; + } + } + auto it = std::find(tets[old_t_ids[1]].begin(), tets[old_t_ids[1]].end(), v_ids[0]); + if (it != tets[old_t_ids[1]].end()) { + new_tets.push_back(tets[old_t_ids[1]]); + new_tets.push_back(tets[old_t_ids[2]]); + t_ids = {{old_t_ids[1], old_t_ids[2]}}; + } else { + new_tets.push_back(tets[old_t_ids[2]]); + new_tets.push_back(tets[old_t_ids[1]]); + t_ids = {{old_t_ids[2], old_t_ids[1]}}; + } + it = std::find(new_tets[0].begin(), new_tets[0].end(), v1_id); + *it = v_ids[1]; + it = std::find(new_tets[1].begin(), new_tets[1].end(), 
v2_id); + *it = v_ids[0]; + + //check is_valid + std::vector tet_qs; + if(isFlip(new_tets)) + return false; + TetQuality old_tq, new_tq; + getCheckQuality(old_t_ids, old_tq); + tmp_timer.start(); + calTetQualities(new_tets, tet_qs); + energy_time+=tmp_timer.getElapsedTime(); + getCheckQuality(tet_qs, new_tq); + if(equal_buget>0) { + equal_buget--; + if (!new_tq.isBetterOrEqualThan(old_tq, energy_type, state)) + return false; + } else { + if (!new_tq.isBetterThan(old_tq, energy_type, state)) + return false; + } + + //real update + std::vector> fs; + std::vector is_sf_fs; + for(int i=0;i tmp = {{tets[old_t_ids[i]][(j + 1) % 4], tets[old_t_ids[i]][(j + 2) % 4], + tets[old_t_ids[i]][(j + 3) % 4]}}; + std::sort(tmp.begin(), tmp.end()); + fs.push_back(tmp); + is_sf_fs.push_back(is_surface_fs[old_t_ids[i]][j]); + } + } + } + + t_is_removed[old_t_ids[0]] = true; + tets[t_ids[0]] = new_tets[0];//v2 + tets[t_ids[1]] = new_tets[1];//v1 + + for(int i=0;i<4;i++) { + if (tets[t_ids[0]][i] != v2_id) { + std::array tmp = {{tets[t_ids[0]][(i + 1) % 4], tets[t_ids[0]][(i + 2) % 4], + tets[t_ids[0]][(i + 3) % 4]}}; + std::sort(tmp.begin(), tmp.end()); + auto it = std::find(fs.begin(), fs.end(), tmp); + is_surface_fs[t_ids[0]][i] = is_sf_fs[it - fs.begin()]; + } else + is_surface_fs[t_ids[0]][i] = state.NOT_SURFACE; + + if (tets[t_ids[1]][i] != v1_id) { + std::array tmp = {{tets[t_ids[1]][(i + 1) % 4], tets[t_ids[1]][(i + 2) % 4], + tets[t_ids[1]][(i + 3) % 4]}}; + std::sort(tmp.begin(), tmp.end()); + auto it = std::find(fs.begin(), fs.end(), tmp); + is_surface_fs[t_ids[1]][i] = is_sf_fs[it - fs.begin()]; + } else + is_surface_fs[t_ids[1]][i] = state.NOT_SURFACE; + } + + tet_vertices[v_ids[0]].conn_tets.erase(std::find(tet_vertices[v_ids[0]].conn_tets.begin(), + tet_vertices[v_ids[0]].conn_tets.end(), old_t_ids[0])); + tet_vertices[v_ids[1]].conn_tets.erase(std::find(tet_vertices[v_ids[1]].conn_tets.begin(), + tet_vertices[v_ids[1]].conn_tets.end(), old_t_ids[0])); + + tet_vertices[v_ids[0]].conn_tets.insert(t_ids[1]); + tet_vertices[v_ids[1]].conn_tets.insert(t_ids[0]); + + tet_vertices[v1_id].conn_tets.erase(std::find(tet_vertices[v1_id].conn_tets.begin(), + tet_vertices[v1_id].conn_tets.end(), old_t_ids[0])); + tet_vertices[v2_id].conn_tets.erase(std::find(tet_vertices[v2_id].conn_tets.begin(), + tet_vertices[v2_id].conn_tets.end(), old_t_ids[0])); + + tet_vertices[v1_id].conn_tets.erase(std::find(tet_vertices[v1_id].conn_tets.begin(), + tet_vertices[v1_id].conn_tets.end(), t_ids[0])); + tet_vertices[v2_id].conn_tets.erase(std::find(tet_vertices[v2_id].conn_tets.begin(), + tet_vertices[v2_id].conn_tets.end(), t_ids[1])); + + for (int i = 0; i < 2; i++) { + tet_qualities[t_ids[i]] = tet_qs[i]; + } + + //repush new edges + //Note that you need to pop out the current element first!! 
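+    // After the 3-2 swap, collect every edge of the two replacement tets
+    // (skipping user-locked ones), de-duplicate them, and feed them back
+    // through addNewEdge() so the removal queue stays up to date.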
+ std::unordered_set n12_v_ids; + for(int i=0;i({{*it, v1_id}})); +// addNewEdge(std::array({{*it, v2_id}})); +// } + + std::vector> es; + es.reserve(new_tets.size()*6); + for(int i=0;i e = {{new_tets[i][0], new_tets[i][j + 1]}}; + if(e[0]>e[1]) e={{e[1], e[0]}}; + if(!isLocked_ui(e)) + es.push_back(e); + e = {{new_tets[i][j + 1], new_tets[i][(j + 1) % 3 + 1]}}; + if(e[0]>e[1]) e={{e[1], e[0]}}; + if(!isLocked_ui(e)) + es.push_back(e); + } + } + std::sort(es.begin(), es.end()); + es.erase(std::unique(es.begin(), es.end()), es.end()); + for(int i=0;i& old_t_ids) { + const int N = 4; + if (old_t_ids.size() != N) + return false; + + std::vector> n12_es; + n12_es.reserve(old_t_ids.size()); + for (int i = 0; i < old_t_ids.size(); i++) { + std::array e; + int cnt = 0; + for (int j = 0; j < 4; j++) + if (tets[old_t_ids[i]][j] != v1_id && tets[old_t_ids[i]][j] != v2_id) { + e[cnt++] = tets[old_t_ids[i]][j]; + } + e[cnt] = old_t_ids[i]; + n12_es.push_back(e); + } + + std::vector n12_v_ids; + std::vector n12_t_ids; + n12_v_ids.push_back(n12_es[0][0]); + n12_v_ids.push_back(n12_es[0][1]); + n12_t_ids.push_back(n12_es[0][2]); + std::vector is_visited(N, false); + is_visited[0] = true; + for (int i = 0; i < N - 2; i++) { + for (int j = 0; j < N; j++) { + if (!is_visited[j]) { + if (n12_es[j][0] == n12_v_ids.back()) { + is_visited[j] = true; + n12_v_ids.push_back(n12_es[j][1]); + } else if (n12_es[j][1] == n12_v_ids.back()) {//else if!!!!!!!!!! + is_visited[j] = true; + n12_v_ids.push_back(n12_es[j][0]); + } + if (is_visited[j]) { + n12_t_ids.push_back(n12_es[j][2]); + break; + } + } + } + } + n12_t_ids.push_back(n12_es[std::find(is_visited.begin(), is_visited.end(), false) - is_visited.begin()][2]); + + bool is_valid = false; + std::vector> new_tets; + new_tets.reserve(4); + std::vector tags; + std::vector tet_qs; + std::array v_ids; + TetQuality old_tq, new_tq; + getCheckQuality(old_t_ids, old_tq); + for (int i = 0; i < 2; i++) { + std::vector> tmp_new_tets; + std::vector tmp_tags; + std::vector tmp_tet_qs; + std::array tmp_v_ids; + tmp_v_ids = {{n12_v_ids[0 + i], n12_v_ids[2 + i]}}; + for (int j = 0; j < old_t_ids.size(); j++) { + std::array t = tets[old_t_ids[j]]; + auto it = std::find(t.begin(), t.end(), tmp_v_ids[0]); + if (it != t.end()) { + auto jt = std::find(t.begin(), t.end(), v2_id); + *jt = tmp_v_ids[1]; + tmp_tags.push_back(1); + } else { + auto jt = std::find(t.begin(), t.end(), v1_id); + *jt = tmp_v_ids[0]; + tmp_tags.push_back(0); + } + tmp_new_tets.push_back(t); + } + + if (isFlip(tmp_new_tets)) + continue; + tmp_timer.start(); + calTetQualities(tmp_new_tets, tmp_tet_qs); + energy_time+=tmp_timer.getElapsedTime(); + getCheckQuality(tmp_tet_qs, new_tq); + if(equal_buget>0) { + equal_buget--; + if (!new_tq.isBetterOrEqualThan(old_tq, energy_type, state)) + return false; + } else { + if (!new_tq.isBetterThan(old_tq, energy_type, state)) + return false; + } + + is_valid = true; + old_tq = new_tq; + new_tets = tmp_new_tets; + tags = tmp_tags; + tet_qs = tmp_tet_qs; + v_ids = tmp_v_ids; + } + if (!is_valid) + return false; + + //real update + std::vector> fs; + std::vector is_sf_fs; + for (int i = 0; i < old_t_ids.size(); i++) { + for (int j = 0; j < 4; j++) { + if (tets[old_t_ids[i]][j] == v1_id || tets[old_t_ids[i]][j] == v2_id) { + std::array tmp = {{tets[old_t_ids[i]][(j + 1) % 4], tets[old_t_ids[i]][(j + 2) % 4], + tets[old_t_ids[i]][(j + 3) % 4]}}; + std::sort(tmp.begin(), tmp.end()); + fs.push_back(tmp); + is_sf_fs.push_back(is_surface_fs[old_t_ids[i]][j]); + } + } + } + + for (int j 
= 0; j < new_tets.size(); j++) { + if (tags[j] == 0) { + tet_vertices[v1_id].conn_tets.erase( + std::find(tet_vertices[v1_id].conn_tets.begin(), + tet_vertices[v1_id].conn_tets.end(), old_t_ids[j])); + tet_vertices[v_ids[0]].conn_tets.insert(old_t_ids[j]); + } else { + tet_vertices[v2_id].conn_tets.erase( + std::find(tet_vertices[v2_id].conn_tets.begin(), + tet_vertices[v2_id].conn_tets.end(), old_t_ids[j])); + tet_vertices[v_ids[1]].conn_tets.insert(old_t_ids[j]); + } + tets[old_t_ids[j]] = new_tets[j]; + tet_qualities[old_t_ids[j]] = tet_qs[j]; + } + + for (int i = 0; i < old_t_ids.size(); i++) {//old_t_ids contains new tets + for (int j = 0; j < 4; j++) { + is_surface_fs[old_t_ids[i]][j] = state.NOT_SURFACE; + if (tets[old_t_ids[i]][j] == v_ids[0] || tets[old_t_ids[i]][j] == v_ids[1]) { + std::array tmp = {{tets[old_t_ids[i]][(j + 1) % 4], tets[old_t_ids[i]][(j + 2) % 4], + tets[old_t_ids[i]][(j + 3) % 4]}}; + std::sort(tmp.begin(), tmp.end()); + auto it = std::find(fs.begin(), fs.end(), tmp); + is_surface_fs[old_t_ids[i]][j] = is_sf_fs[it - fs.begin()]; + } + } + } + + //repush + std::vector> es; + es.reserve(new_tets.size()*6); + for (int i = 0; i < new_tets.size(); i++) { + for (int j = 0; j < 3; j++) { + std::array e = {{new_tets[i][0], new_tets[i][j + 1]}}; + if(e[0]>e[1]) e={{e[1], e[0]}}; + if(!isLocked_ui(e)) + es.push_back(e); + e = {{new_tets[i][j + 1], new_tets[i][(j + 1) % 3 + 1]}}; + if(e[0]>e[1]) e={{e[1], e[0]}}; + if(!isLocked_ui(e)) + es.push_back(e); + } + } + std::sort(es.begin(), es.end()); + es.erase(std::unique(es.begin(), es.end()), es.end()); + for (int i = 0; i < es.size(); i++) { +// if (es[i][0] != v_ids[0] && es[i][1] != v_ids[0] && es[i][0] != v_ids[1] && es[i][1] != v_ids[1]) + addNewEdge(es[i]); + } + + return true; +} + +//void getMinQuality(const std::vector& qs, double& d_min, double& d_max, double& r_max) { +// d_min = 10; +// d_max = 0; +// r_max = 0; +// for (int i = 0; i < qs.size(); i++) { +// if (qs[i].min_d_angle < d_min) +// d_min = qs[i].min_d_angle; +// if (qs[i].max_d_angle > d_max) +// d_max = qs[i].max_d_angle; +// if (qs[i].asp_ratio_2 > r_max) +// r_max = qs[i].asp_ratio_2; +// } +//} + +bool EdgeRemover::removeAnEdge_56(int v1_id, int v2_id, const std::vector& old_t_ids) { + if (old_t_ids.size() != 5) + return false; + + //oriented the n12_v_ids + std::vector> n12_es; + n12_es.reserve(old_t_ids.size()); + for (int i = 0; i < old_t_ids.size(); i++) { + std::array e; + int cnt = 0; + for (int j = 0; j < 4; j++) + if (tets[old_t_ids[i]][j] != v1_id && tets[old_t_ids[i]][j] != v2_id) { + e[cnt++] = tets[old_t_ids[i]][j]; + } + e[cnt] = old_t_ids[i]; + n12_es.push_back(e); + } + + std::vector n12_v_ids; + std::vector n12_t_ids; + n12_v_ids.push_back(n12_es[0][0]); + n12_v_ids.push_back(n12_es[0][1]); + n12_t_ids.push_back(n12_es[0][2]); + std::vector is_visited(5, false); + is_visited[0] = true; + for (int i = 0; i < 3; i++) { + for (int j = 0; j < 5; j++) { + if (!is_visited[j]) { + if (n12_es[j][0] == n12_v_ids.back()) { + is_visited[j] = true; + n12_v_ids.push_back(n12_es[j][1]); + } else if (n12_es[j][1] == n12_v_ids.back()) {//else if!!!!!!!!!! 
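+                    // The 'else if' is essential: if the first branch just pushed
+                    // n12_es[j][1], that value is now n12_v_ids.back(), so a plain
+                    // 'if' here would match immediately and also push n12_es[j][0],
+                    // breaking the ring ordering of the link vertices.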
+ is_visited[j] = true; + n12_v_ids.push_back(n12_es[j][0]); + } + if (is_visited[j]) { + n12_t_ids.push_back(n12_es[j][2]); + break; + } + } + } + } + n12_t_ids.push_back(n12_es[std::find(is_visited.begin(), is_visited.end(), false) - is_visited.begin()][2]); + + + //check valid + TetQuality old_tq, new_tq; + getCheckQuality(old_t_ids, old_tq); + std::unordered_map> tet_qs; + std::unordered_map, 2>> new_tets; + std::vector is_v_valid(5, true); + for (int i = 0; i < n12_v_ids.size(); i++) { + if (!is_v_valid[(i + 1) % 5] && !is_v_valid[(i - 1 + 5) % 5]) + continue; + + std::vector> new_ts; + new_ts.reserve(6); + std::array t = tets[n12_t_ids[i]]; + auto it = std::find(t.begin(), t.end(), v1_id); + *it = n12_v_ids[(i - 1 + 5) % 5]; + new_ts.push_back(t); + + t = tets[n12_t_ids[i]]; + it = std::find(t.begin(), t.end(), v2_id); + *it = n12_v_ids[(i - 1 + 5) % 5]; + new_ts.push_back(t); + if (isFlip(new_ts)) { + is_v_valid[(i + 1) % 5] = false; + is_v_valid[(i - 1 + 5) % 5] = false; + continue; + } + + std::vector qs; + tmp_timer.start(); + calTetQualities(new_ts, qs); + energy_time+=tmp_timer.getElapsedTime(); + tet_qs[i] = std::array({{qs[0], qs[1]}}); + new_tets[i] = std::array, 2>({{new_ts[0], new_ts[1]}}); + +// std::vector qs; +// calTetQualities(new_ts, qs); +// getCheckQuality(qs, new_tq); +// if(new_tq.isBetterThan(old_tq, energy_type)){ +// tet_qs[i] = std::array({{qs[0], qs[1]}}); +// new_tets[i] = std::array, 2>({{new_ts[0], new_ts[1]}}); +// } else { +// is_v_valid[(i + 1) % 5] = false; +// is_v_valid[(i - 1 + 5) % 5] = false; +// } + } + if (std::count(is_v_valid.begin(), is_v_valid.end(), true) == 0) + return false; + + + int selected_id = -1; + for (int i = 0; i < is_v_valid.size(); i++) { + if (!is_v_valid[i]) + continue; + + std::vector> new_ts; + new_ts.reserve(6); + std::array t = tets[n12_t_ids[(i + 2) % 5]]; + auto it = std::find(t.begin(), t.end(), v1_id); + *it = n12_v_ids[i]; + new_ts.push_back(t); + t = tets[n12_t_ids[(i + 2) % 5]]; + it = std::find(t.begin(), t.end(), v2_id); + *it = n12_v_ids[i]; + new_ts.push_back(t); + if (isFlip(new_ts)) + continue; + + std::vector qs; + tmp_timer.start(); + calTetQualities(new_ts, qs); + energy_time+=tmp_timer.getElapsedTime(); + for (int j = 0; j < 2; j++) { + qs.push_back(tet_qs[(i + 1) % 5][j]); + qs.push_back(tet_qs[(i - 1 + 5) % 5][j]); + } + if(qs.size() != 6){ + log_and_throw("qs.size() != 6"); + } + getCheckQuality(qs, new_tq); + if(equal_buget>0) { + equal_buget--; + if (!new_tq.isBetterOrEqualThan(old_tq, energy_type, state)) + continue; + } else { + if (!new_tq.isBetterThan(old_tq, energy_type, state)) + continue; + } + + old_tq = new_tq; + selected_id = i; + tet_qs[i + 5] = std::array({{qs[0], qs[1]}}); + new_tets[i + 5] = std::array, 2>({{new_ts[0], new_ts[1]}}); + } + if (selected_id < 0) + return false; + + //real update + //update on surface -- 1 + std::vector> fs; + std::vector is_sf_fs; + for (int i = 0; i < old_t_ids.size(); i++) { + for (int j = 0; j < 4; j++) { + if (tets[old_t_ids[i]][j] == v1_id || tets[old_t_ids[i]][j] == v2_id) { + std::array tmp = {{tets[old_t_ids[i]][(j + 1) % 4], tets[old_t_ids[i]][(j + 2) % 4], + tets[old_t_ids[i]][(j + 3) % 4]}}; + std::sort(tmp.begin(), tmp.end()); + fs.push_back(tmp); + is_sf_fs.push_back(is_surface_fs[old_t_ids[i]][j]); + } + } + } + + std::vector new_t_ids = old_t_ids; + getNewTetSlots(1, new_t_ids); + t_is_removed[new_t_ids.back()] = false; + for (int i = 0; i < 2; i++) { + tets[new_t_ids[i]] = new_tets[(selected_id + 1) % 5][i]; + tets[new_t_ids[i + 2]] = 
new_tets[(selected_id - 1 + 5) % 5][i]; + tets[new_t_ids[i + 4]] = new_tets[selected_id + 5][i]; + + tet_qualities[new_t_ids[i]] = tet_qs[(selected_id + 1) % 5][i]; + tet_qualities[new_t_ids[i + 2]] = tet_qs[(selected_id - 1 + 5) % 5][i]; + tet_qualities[new_t_ids[i + 4]] = tet_qs[selected_id + 5][i]; + } + + //update on_surface -- 2 + for (int i = 0; i < new_t_ids.size(); i++) { + for (int j = 0; j < 4; j++) { + is_surface_fs[new_t_ids[i]][j] = state.NOT_SURFACE; + if (tets[new_t_ids[i]][j] != v1_id && tets[new_t_ids[i]][j] != v2_id + && tets[new_t_ids[i]][j] != n12_v_ids[(selected_id + 1) % 5] + && tets[new_t_ids[i]][j] != n12_v_ids[(selected_id - 1 + 5) % 5]) { + std::array tmp = {{tets[new_t_ids[i]][(j + 1) % 4], tets[new_t_ids[i]][(j + 2) % 4], + tets[new_t_ids[i]][(j + 3) % 4]}}; + std::sort(tmp.begin(), tmp.end()); + auto it = std::find(fs.begin(), fs.end(), tmp); + if (it != fs.end()) + is_surface_fs[new_t_ids[i]][j] = is_sf_fs[it - fs.begin()]; + } + } + } + + //update conn_tets + for (int i = 0; i < n12_v_ids.size(); i++) { + tet_vertices[n12_v_ids[i]].conn_tets.erase(n12_t_ids[i]); + tet_vertices[n12_v_ids[i]].conn_tets.erase(n12_t_ids[(i - 1 + 5) % 5]); + } + for (int i = 0; i < n12_t_ids.size(); i++) { + tet_vertices[v1_id].conn_tets.erase(n12_t_ids[i]); + tet_vertices[v2_id].conn_tets.erase(n12_t_ids[i]); + } + + //add + for (int i = 0; i < new_t_ids.size(); i++) { + for (int j = 0; j < 4; j++) + tet_vertices[tets[new_t_ids[i]][j]].conn_tets.insert(new_t_ids[i]); + } + + //repush +// addNewEdge(std::array({{n12_v_ids[selected_id], n12_v_ids[(selected_id + 2) % 5]}})); +// addNewEdge(std::array({{n12_v_ids[selected_id], n12_v_ids[(selected_id - 2 + 5) % 5]}})); +// +// addNewEdge(std::array({{v1_id, n12_v_ids[(selected_id + 1) % 5]}})); +// addNewEdge(std::array({{v1_id, n12_v_ids[(selected_id - 1 + 5) % 5]}})); +// addNewEdge(std::array({{v2_id, n12_v_ids[(selected_id + 1) % 5]}})); +// addNewEdge(std::array({{v2_id, n12_v_ids[(selected_id - 1 + 5) % 5]}})); + + std::vector> es; + es.reserve(new_t_ids.size()*6); + for(int i=0;i e = {{tets[new_t_ids[i]][0], tets[new_t_ids[i]][j + 1]}}; + if(e[0]>e[1]) e={{e[1], e[0]}}; + if(!isLocked_ui(e)) + es.push_back(e); + e = {{tets[new_t_ids[i]][j + 1], tets[new_t_ids[i]][(j + 1) % 3 + 1]}}; + if(e[0]>e[1]) e={{e[1], e[0]}}; + if(!isLocked_ui(e)) + es.push_back(e); + } + } + std::sort(es.begin(), es.end()); + es.erase(std::unique(es.begin(), es.end()), es.end()); + for(int i=0;i& v_ids){ + std::vector t_ids; + setIntersection(tet_vertices[v_ids[0]].conn_tets, tet_vertices[v_ids[1]].conn_tets, t_ids); + + if(isEdgeOnSurface(v_ids[0], v_ids[1], t_ids)) + return false; + if(isEdgeOnBbox(v_ids[0], v_ids[1], t_ids)) + return false; + + return true; +} + +bool EdgeRemover::isSwappable_cd1(const std::array& v_ids, std::vector& t_ids, bool is_check_conn_tet_num){ +// std::vector t_ids; + setIntersection(tet_vertices[v_ids[0]].conn_tets, tet_vertices[v_ids[1]].conn_tets, t_ids); + + if(is_check_conn_tet_num) + if(t_ids.size()<3 || t_ids.size()>5) + return false; + if(isEdgeOnSurface(v_ids[0], v_ids[1], t_ids)) + return false; + if(isEdgeOnBbox(v_ids[0], v_ids[1], t_ids)) + return false; + + return true; +} + +bool EdgeRemover::isSwappable_cd2(double weight){ + return true; + + if(weight>ideal_weight) + return true; + return false; +} + +bool EdgeRemover::isEdgeValid(const std::array& v_ids){ + if(v_is_removed[v_ids[0]] || v_is_removed[v_ids[1]]) + return false; + if(!isHaveCommonEle(tet_vertices[v_ids[0]].conn_tets, 
tet_vertices[v_ids[1]].conn_tets)) + return false; + return true; +} + +void EdgeRemover::getNewTetSlots(int n, std::vector& new_conn_tets) { + unsigned int cnt = 0; + for (unsigned int i = t_empty_start; i < t_is_removed.size(); i++) { + if (t_is_removed[i]) { + new_conn_tets.push_back(i); + cnt++; + if (cnt == n) { + t_empty_start = i + 1; + break; + } + } + } + if (cnt < n) { + for (unsigned int i = 0; i < n - cnt; i++) + new_conn_tets.push_back(tets.size() + i); + + tets.resize(tets.size() + n - cnt); + t_is_removed.resize(t_is_removed.size() + n - cnt); + tet_qualities.resize(tet_qualities.size() + n - cnt); + is_surface_fs.resize(is_surface_fs.size() + n - cnt); + t_empty_start = tets.size(); + } +} + +void EdgeRemover::addNewEdge(const std::array& e){ + if (isSwappable_cd1(e)) { + double weight = calEdgeLength(e); + if (isSwappable_cd2(weight)) { + if (e[0] > e[1]) { + ElementInQueue_er ele(std::array({{e[1], e[0]}}), weight); + er_queue.push(ele); + } else { + ElementInQueue_er ele(e, weight); + er_queue.push(ele); + } + } + } +} + +} // namespace tetwild diff --git a/contrib/NeRF-Editing/TetWild/src/tetwild/EdgeRemover.h b/contrib/NeRF-Editing/TetWild/src/tetwild/EdgeRemover.h new file mode 100644 index 00000000..3c5c5023 --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/src/tetwild/EdgeRemover.h @@ -0,0 +1,78 @@ +// This file is part of TetWild, a software for generating tetrahedral meshes. +// +// Copyright (C) 2018 Yixin Hu +// +// This Source Code Form is subject to the terms of the Mozilla Public License +// v. 2.0. If a copy of the MPL was not distributed with this file, You can +// obtain one at http://mozilla.org/MPL/2.0/. +// +// Created by Yixin Hu on 4/17/17. +// + +#ifndef NEW_GTET_EDGEREMOVER_H +#define NEW_GTET_EDGEREMOVER_H + +#include +#include + +namespace tetwild { + +class ElementInQueue_er{ +public: + std::array v_ids; + double weight; + + ElementInQueue_er(){} + ElementInQueue_er(const std::array& ids, double w): v_ids(ids), weight(w){} +}; + +struct cmp_er { + bool operator()(const ElementInQueue_er &e1, const ElementInQueue_er &e2) { + if (e1.weight == e2.weight) + return e1.v_ids < e2.v_ids; + return e1.weight < e2.weight;///choose larger edge for removal + } +}; + +class EdgeRemover:public LocalOperations { +public: + std::priority_queue, cmp_er> er_queue; + + double ideal_weight; + + int v_empty_start=0; + int t_empty_start=0; + + int flag_cnt=0; + + int tmp_cnt3=0; + int tmp_cnt4=0; + int tmp_cnt5=0; + int tmp_cnt6=0; + + int equal_buget = 100; + + EdgeRemover(LocalOperations lo, double i_weight): LocalOperations(lo), ideal_weight(i_weight){} + + void init(); + void swap(); + bool removeAnEdge_32(int v1_id, int v2_id, const std::vector& old_t_ids); + bool removeAnEdge_44(int v1_id, int v2_id, const std::vector& old_t_ids); + bool removeAnEdge_56(int v1_id, int v2_id, const std::vector& old_t_ids); + + bool isSwappable_cd1(const std::array& v_ids, std::vector& t_ids, bool is_check_conn_tet_num=false); + bool isSwappable_cd1(const std::array& v_ids); + + bool isSwappable_cd2(double weight); + bool isEdgeValid(const std::array& v_ids); + void getNewTetSlots(int n, std::vector& new_conn_tets); + + void addNewEdge(const std::array& e); + + igl::Timer tmp_timer; + double energy_time = 0; +}; + +} // namespace tetwild + +#endif //NEW_GTET_EDGEREMOVER_H diff --git a/contrib/NeRF-Editing/TetWild/src/tetwild/EdgeSplitter.cpp b/contrib/NeRF-Editing/TetWild/src/tetwild/EdgeSplitter.cpp new file mode 100644 index 00000000..4bfe7ce5 --- /dev/null +++ 
b/contrib/NeRF-Editing/TetWild/src/tetwild/EdgeSplitter.cpp @@ -0,0 +1,413 @@ +// This file is part of TetWild, a software for generating tetrahedral meshes. +// +// Copyright (C) 2018 Yixin Hu +// +// This Source Code Form is subject to the terms of the Mozilla Public License +// v. 2.0. If a copy of the MPL was not distributed with this file, You can +// obtain one at http://mozilla.org/MPL/2.0/. +// +// Created by Yixin Hu on 4/11/17. +// + +#include +#include +#include + +namespace tetwild { + +void EdgeSplitter::getMesh_ui(const std::vector>& tets, Eigen::MatrixXd& V, Eigen::MatrixXi& F) { + ///get V, F, C + V.resize(tets.size() * 4, 3); + F.resize(tets.size() * 4, 3); + Eigen::VectorXd Z(F.rows()); + int i = 0; + for (unsigned j = 0; j < tets.size(); ++j) { + for (int k = 0; k < 4; k++) { + for (int r = 0; r < 3; r++) + V(i * 4 + k, r) = tet_vertices[tets[j][k]].posf[r]; + } + F.row(i * 4 + 0) << (i * 4) + 0, (i * 4) + 1, (i * 4) + 3; + F.row(i * 4 + 1) << (i * 4) + 0, (i * 4) + 2, (i * 4) + 1; + F.row(i * 4 + 2) << (i * 4) + 3, (i * 4) + 2, (i * 4) + 0; + F.row(i * 4 + 3) << (i * 4) + 1, (i * 4) + 2, (i * 4) + 3; + i++; + } +} + +void EdgeSplitter::init() { + std::vector> edges; + for (unsigned int i = 0; i < tets.size(); i++) { + if (t_is_removed[i]) + continue; + for (int j = 0; j < 3; j++) { + std::array e = {{tets[i][0], tets[i][j + 1]}}; + if(e[0]>e[1]) e={{e[1], e[0]}}; + if(!isLocked_ui(e)) + edges.push_back(e); + e = {{tets[i][j + 1], tets[i][(j + 1) % 3 + 1]}}; + if(e[0]>e[1]) e={{e[1], e[0]}}; + if(!isLocked_ui(e)) + edges.push_back(e); + } + } + std::sort(edges.begin(), edges.end()); + edges.erase(std::unique(edges.begin(), edges.end()), edges.end()); + + for (unsigned int i = 0; i < edges.size(); i++) { + double weight = calEdgeLength(edges[i][0], edges[i][1]); + if (isSplittable_cd1(edges[i][0], edges[i][1], weight)) { + ElementInQueue_es ele(edges[i], weight); + es_queue.push(ele); + } + } + + t_empty_start = 0; + v_empty_start = 0; + +// if(budget > 0) +// is_cal_quality_end = true; + + counter=0; + suc_counter=0; + +} + +void EdgeSplitter::split() { + + if(budget >0) { + int v_slots = std::count(v_is_removed.begin(), v_is_removed.end(), true); + v_slots = budget - v_slots; + if (v_slots > 0) { + tet_vertices.reserve(tet_vertices.size() + v_slots); + v_is_removed.reserve(tet_vertices.size() + v_slots); + } + int t_slots = std::count(t_is_removed.begin(), t_is_removed.end(), true); + t_slots = budget * 6 - t_slots; + if (t_slots > 0) { + tet_vertices.reserve(tet_vertices.size() + t_slots); + v_is_removed.reserve(tet_vertices.size() + t_slots); + } + } else { + // reserve space + int v_slot_size = std::count(v_is_removed.begin(), v_is_removed.end(), true); + int t_slot_size = std::count(t_is_removed.begin(), t_is_removed.end(), true); + if (v_slot_size < es_queue.size() * 2) + tet_vertices.reserve(es_queue.size() * 2 - v_slot_size); + if (t_slot_size < es_queue.size() * 6 * 2) + tets.reserve(es_queue.size() * 6 * 2 - t_slot_size + 1); + } + logger().debug("{}", es_queue.size()); + logger().debug("ideal_weight = {}", ideal_weight); + + while (!es_queue.empty()) { + const ElementInQueue_es &ele = es_queue.top(); + + std::array v_ids = ele.v_ids; +// if (state.is_print_tmp) +// logger().debug("{}{}{} {} {} {} {}", v_ids[0], ' ', v_ids[1] +//, std::sqrt(calEdgeLength(v_ids)) +//, std::sqrt(ideal_weight) * +// (tet_vertices[v_ids[0]].adaptive_scale + tet_vertices[v_ids[1]].adaptive_scale) / 2.0 +//, tet_vertices[v_ids[0]].adaptive_scale +//, 
tet_vertices[v_ids[1]].adaptive_scale +//); + es_queue.pop(); + if (splitAnEdge(v_ids)) + suc_counter++; + counter++; + + if (budget > 0) { + budget--; + if(budget == 0) + break; + } + } + + //cal the qualities in the very end + //not really, in the last few pass, only some of the edges are splitted + //maybe, we can marked all the splitted tets in this pass and the update their quality in the end? -- Try it. + //todo ... + if (is_cal_quality_end) { + const int tets_size = tets.size(); + std::vector> tmp_tets; + for (int i = 0; i < tets_size; i++) { + if (t_is_removed[i]) + continue; + tmp_tets.push_back(tets[i]); + } + + std::vector tet_qs; + calTetQualities(tmp_tets, tet_qs); + int cnt = 0; + for (int i = 0; i < tets_size; i++) { + if (t_is_removed[i]) + continue; + tet_qualities[i] = tet_qs[cnt++]; + } + } + +} + +bool EdgeSplitter::splitAnEdge(const std::array& edge) { + int v1_id = edge[0]; + int v2_id = edge[1]; + + //add new vertex + TetVertex v;//tet_vertices[v_id] is actually be reset + bool is_found = false; + for(int i=v_empty_start;i old_t_ids; + setIntersection(tet_vertices[v1_id].conn_tets, tet_vertices[v2_id].conn_tets, old_t_ids); + + //new_tets + std::vector new_t_ids; + std::vector n12_v_ids; + std::vector> new_tets; + new_tets.reserve(old_t_ids.size() * 2); + for (int i = 0; i < old_t_ids.size(); i++) { + for (int j = 0; j < 4; j++) { + if (tets[old_t_ids[i]][j] != v1_id && tets[old_t_ids[i]][j] != v2_id) + n12_v_ids.push_back(tets[old_t_ids[i]][j]); + } + + std::array tet1 = tets[old_t_ids[i]], tet2 = tets[old_t_ids[i]]; + auto it = std::find(tet1.begin(), tet1.end(), v2_id); + *it = v_id; + it = std::find(tet2.begin(), tet2.end(), v1_id); + *it = v_id; + new_tets.push_back(tet1); + new_tets.push_back(tet2); + } + + //check is_valid + tet_vertices[v_id].adaptive_scale = (tet_vertices[v1_id].adaptive_scale + tet_vertices[v2_id].adaptive_scale) / 2; + if(tet_vertices[v1_id].is_locked && tet_vertices[v2_id].is_locked) + tet_vertices[v_id].is_locked = true; + + tet_vertices[v_id].posf = CGAL::midpoint(tet_vertices[v1_id].posf, tet_vertices[v2_id].posf); + tet_vertices[v_id].pos = Point_3(tet_vertices[v_id].posf[0], tet_vertices[v_id].posf[1], tet_vertices[v_id].posf[2]); + std::vector tet_qs; + if(!is_cal_quality_end) { + calTetQualities(new_tets, tet_qs); + } + + if (isFlip(new_tets)) { + tet_vertices[v_id].pos = CGAL::midpoint(tet_vertices[v1_id].pos, tet_vertices[v2_id].pos); + tet_vertices[v_id].posf = Point_3f(CGAL::to_double(tet_vertices[v_id].pos[0]), CGAL::to_double(tet_vertices[v_id].pos[1]), + CGAL::to_double(tet_vertices[v_id].pos[2])); + tet_vertices[v_id].is_rounded = false; + } else { + tet_vertices[v_id].is_rounded = true; + } + +// if(!is_cal_quality_end) +// calTetQualities(new_tets, tet_qs); + + ////real update// + //update boundary tags + if(isEdgeOnBoundary(v1_id, v2_id)) { + tet_vertices[v_id].is_on_boundary = true; + } + + //update surface tags + if (state.eps != state.EPSILON_INFINITE) { + if (isEdgeOnSurface(v1_id, v2_id)) { + tet_vertices[v_id].is_on_surface = true; + if (state.eps == state.EPSILON_NA) { + setIntersection(tet_vertices[v1_id].on_edge, tet_vertices[v2_id].on_edge, tet_vertices[v_id].on_edge); + setIntersection(tet_vertices[v1_id].on_face, tet_vertices[v2_id].on_face, tet_vertices[v_id].on_face); + } + } else + tet_vertices[v_id].is_on_surface = false; + } + + //get new tet ids + getNewTetSlots(old_t_ids.size(), new_t_ids); + for (int i = 0; i < old_t_ids.size(); i++) { + tets[old_t_ids[i]] = new_tets[i * 2]; + tets[new_t_ids[i]] = 
new_tets[i * 2 + 1]; + if(!is_cal_quality_end) { + tet_qualities[old_t_ids[i]] = tet_qs[i * 2]; + tet_qualities[new_t_ids[i]] = tet_qs[i * 2 + 1]; + } + t_is_removed[new_t_ids[i]] = false; + is_surface_fs[new_t_ids[i]] = is_surface_fs[old_t_ids[i]]; + } + + //track surface + for (int i = 0; i < new_t_ids.size(); i++) { + for (int j = 0; j < 4; j++) {//v1->v + if (tets[new_t_ids[i]][j] == v2_id) + is_surface_fs[new_t_ids[i]][j] = state.NOT_SURFACE; +// else if(tets[new_t_ids[i]][j]==v_id)//no need to change +// is_surface_fs[new_t_ids[i]][j]=is_surface_fs[old_t_ids[i]][j]; + } + } + for (int i = 0; i < old_t_ids.size(); i++) { + for (int j = 0; j < 4; j++) {//v2->v + if (tets[old_t_ids[i]][j] == v1_id) + is_surface_fs[old_t_ids[i]][j] = state.NOT_SURFACE; + } + } + + //update bbox tags //Note that no matter what the epsilon is, the bbox has to be preserved anyway + if (tet_vertices[v1_id].is_on_bbox && tet_vertices[v2_id].is_on_bbox) { + setIntersection(tet_vertices[v1_id].on_face, tet_vertices[v2_id].on_face, tet_vertices[v_id].on_face); + if (tet_vertices[v_id].on_face.size() == 0) + tet_vertices[v_id].is_on_bbox = false; + else { + tet_vertices[v_id].is_on_bbox = true; + setIntersection(tet_vertices[v1_id].on_edge, tet_vertices[v2_id].on_edge, tet_vertices[v_id].on_edge); + } + } + + //update the connection + for (int i = 0; i < old_t_ids.size(); i++) { + tet_vertices[v2_id].conn_tets.erase(old_t_ids[i]); + tet_vertices[v2_id].conn_tets.insert(new_t_ids[i]); + } + for (int i = 0; i < n12_v_ids.size(); i++) { + tet_vertices[n12_v_ids[i]].conn_tets.insert(new_t_ids[i / 2]); + } + tet_vertices[v_id].conn_tets.insert(old_t_ids.begin(), old_t_ids.end()); + tet_vertices[v_id].conn_tets.insert(new_t_ids.begin(), new_t_ids.end()); + + //push new ele into queue + double weight = calEdgeLength(v1_id, v_id); + if (isSplittable_cd1(v1_id, v_id, weight)) { + std::array e={{v1_id, v_id}}; + if(!isLocked_ui(e)) { + ElementInQueue_es ele(e, weight); + es_queue.push(ele); + } + } + + weight = calEdgeLength(v2_id, v_id); + if (isSplittable_cd1(v2_id, v_id, weight)) { + std::array e={{v2_id, v_id}}; + if(!isLocked_ui(e)) { + ElementInQueue_es ele(e, weight); + es_queue.push(ele); + } + } + + std::sort(n12_v_ids.begin(), n12_v_ids.end()); + n12_v_ids.erase(std::unique(n12_v_ids.begin(), n12_v_ids.end()), n12_v_ids.end()); + for (auto it = n12_v_ids.begin(); it != n12_v_ids.end(); it++) { + weight = calEdgeLength(*it, v_id); + if (isSplittable_cd1(*it, v_id, weight)) { + std::array e = {{*it, v_id}}; + if(!isLocked_ui(e)) { + ElementInQueue_es ele(e, weight); + es_queue.push(ele); + } + } + } + + return true; +} + +int EdgeSplitter::getOverRefineScale(int v1_id, int v2_id){ + return 1; + + if(is_over_refine) { + std::vector n12_t_ids; + setIntersection(tet_vertices[v1_id].conn_tets, tet_vertices[v2_id].conn_tets, n12_t_ids); + for(int i=0;i 500) {//todo: add || for other types of energy + int scale = 1; + scale = (tet_qualities[n12_t_ids[i]].slim_energy - 500) / 500.0; + if (scale < 1) + scale = 1; + else if (scale > 5) + scale = 5; + return 1 + scale; + } + } + } + return 1; +} + +bool EdgeSplitter::isSplittable_cd1(double weight) { + if(is_check_quality) + return true; + + if (weight > ideal_weight) + return true; + return false; +} + +bool EdgeSplitter::isSplittable_cd1(int v1_id, int v2_id, double weight) { + double adaptive_scale = (tet_vertices[v1_id].adaptive_scale + tet_vertices[v2_id].adaptive_scale) / 2.0; +// if(adaptive_scale==0){ +// logger().debug("adaptive_scale==0!!!"); +// } + if (weight > 
ideal_weight * adaptive_scale * adaptive_scale) + return true; +// if (tet_vertices[v1_id].is_on_surface || tet_vertices[v2_id].is_on_surface) { +// if (weight > ideal_weight * adaptive_scale * adaptive_scale) +// return true; +// } else { +// if (weight > ideal_weight) +// return true; +// } + return false; +} + +void EdgeSplitter::getNewTetSlots(int n, std::vector& new_conn_tets) { + int cnt = 0; + for (int i = t_empty_start; i < t_is_removed.size(); i++) { + if (t_is_removed[i]) { + new_conn_tets.push_back(i); + cnt++; + if (cnt == n) { + t_empty_start = i + 1; + break; + } + } + } + if (cnt < n) { + for (int i = 0; i < n - cnt; i++) + new_conn_tets.push_back(tets.size() + i); + + tets.resize(tets.size() + n - cnt); + t_is_removed.resize(t_is_removed.size() + n - cnt); + tet_qualities.resize(tet_qualities.size() + n - cnt); + is_surface_fs.resize(is_surface_fs.size() + n - cnt); + t_empty_start = tets.size(); + } +} + +} // namespace tetwild diff --git a/contrib/NeRF-Editing/TetWild/src/tetwild/EdgeSplitter.h b/contrib/NeRF-Editing/TetWild/src/tetwild/EdgeSplitter.h new file mode 100644 index 00000000..d7398dd7 --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/src/tetwild/EdgeSplitter.h @@ -0,0 +1,69 @@ +// This file is part of TetWild, a software for generating tetrahedral meshes. +// +// Copyright (C) 2018 Yixin Hu +// +// This Source Code Form is subject to the terms of the Mozilla Public License +// v. 2.0. If a copy of the MPL was not distributed with this file, You can +// obtain one at http://mozilla.org/MPL/2.0/. +// +// Created by Yixin Hu on 4/11/17. +// + +#ifndef NEW_GTET_EDGESPLITTER_H +#define NEW_GTET_EDGESPLITTER_H + +#include +#include + +namespace tetwild { + +class ElementInQueue_es{ +public: + std::array v_ids; + double weight; + + ElementInQueue_es(){} + ElementInQueue_es(const std::array& ids, double w): + v_ids(ids), weight(w){} +}; + +struct cmp_es { + bool operator()(const ElementInQueue_es &e1, const ElementInQueue_es &e2) { + return e1.weight < e2.weight; + } +}; + +class EdgeSplitter:public LocalOperations { +public: + bool is_check_quality = false; + bool is_cal_quality_end = false; + + std::priority_queue, cmp_es> es_queue; + + int t_empty_start=0; + int v_empty_start=0; + + double max_weight=0; + double ideal_weight=0; + + EdgeSplitter(LocalOperations lo, double ideal_w): LocalOperations(lo), ideal_weight(ideal_w){} + + void init(); + void split(); + + bool is_over_refine=false; + int getOverRefineScale(int v1_id, int v2_id); + bool splitAnEdge(const std::array& edge); + + bool isSplittable_cd1(double weight); + bool isSplittable_cd1(int v1_id, int v2_id, double weight); + void getNewTetSlots(int n, std::vector& new_conn_tets); +// igl::viewer::Viewer viewer; + void getMesh_ui(const std::vector>& tets, Eigen::MatrixXd& V, Eigen::MatrixXi& F); + + unsigned int budget = 0; +}; + +} // namespace tetwild + +#endif //NEW_GTET_EDGESPLITTER_H diff --git a/contrib/NeRF-Editing/TetWild/src/tetwild/EnableWarnings.h b/contrib/NeRF-Editing/TetWild/src/tetwild/EnableWarnings.h new file mode 100644 index 00000000..3467f9fa --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/src/tetwild/EnableWarnings.h @@ -0,0 +1,33 @@ +// Reenable the warnings disabled before including third party code +#if defined(__clang__) +#pragma clang diagnostic pop // -Wshadow +#pragma clang diagnostic pop // -Wsign-compare +#pragma clang diagnostic pop // -Wswitch-default +#pragma clang diagnostic pop // -Wformat-nonliteral +#pragma clang diagnostic pop // -Wswitch-enum +#pragma clang diagnostic 
pop // -Wstrict-overflow +// #pragma clang diagnostic pop // -Wnoexcept +#pragma clang diagnostic pop // -Wctor-dtor-privacy +#pragma clang diagnostic pop // -Wnull-dereference +#pragma clang diagnostic pop // -Wcast-qual +#pragma clang diagnostic pop // -Wmissing-noreturn +#pragma clang diagnostic pop // -Woverloaded-virtual +#pragma clang diagnostic pop // "-Wsign-promo" +#pragma clang diagnostic pop // "-Wcast-align" +#pragma clang diagnostic pop // "-Wnull-pointer-arithmetic" +#pragma clang diagnostic pop // "-Wc++17-extensions" +#elif (defined(__GNUC__) || defined(__GNUG__)) && !(defined(__clang__) || defined(__INTEL_COMPILER)) +#pragma GCC diagnostic pop // -Wshadow +#pragma GCC diagnostic pop // -Wsign-compare +#pragma GCC diagnostic pop // -Wswitch-default +#pragma GCC diagnostic pop // -Wformat-nonliteral +#pragma GCC diagnostic pop // -Wswitch-enum +#pragma GCC diagnostic pop // -Wstrict-overflow +#pragma GCC diagnostic pop // -Wnoexcept +#pragma GCC diagnostic pop // -Wctor-dtor-privacy +#pragma GCC diagnostic pop // -Wnull-dereference +#pragma GCC diagnostic pop // -Wcast-qual +#pragma GCC diagnostic pop // -Wmissing-noreturn +#pragma GCC diagnostic pop // -Woverloaded-virtual +#pragma GCC diagnostic pop // "-Wsign-promo" +#endif diff --git a/contrib/NeRF-Editing/TetWild/src/tetwild/ForwardDecls.h b/contrib/NeRF-Editing/TetWild/src/tetwild/ForwardDecls.h new file mode 100644 index 00000000..45da4132 --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/src/tetwild/ForwardDecls.h @@ -0,0 +1,28 @@ +// This file is part of TetWild, a software for generating tetrahedral meshes. +// +// Copyright (C) 2018 Jeremie Dumas +// +// This Source Code Form is subject to the terms of the Mozilla Public License +// v. 2.0. If a copy of the MPL was not distributed with this file, You can +// obtain one at http://mozilla.org/MPL/2.0/. +// +// Created by Jeremie Dumas on 09/04/18. +// + +#pragma once + +#include + +namespace tetwild { + +struct Args; +struct State; +struct MeshRecord; +class BSPFace; +class MeshConformer; +class EdgeCollapser; +class EdgeSplitter; +class EdgeRemover; +class VertexSmoother; + +} // namespace tetwild diff --git a/contrib/NeRF-Editing/TetWild/src/tetwild/InoutFiltering.cpp b/contrib/NeRF-Editing/TetWild/src/tetwild/InoutFiltering.cpp new file mode 100644 index 00000000..8d57c87a --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/src/tetwild/InoutFiltering.cpp @@ -0,0 +1,164 @@ +// This file is part of TetWild, a software for generating tetrahedral meshes. +// +// Copyright (C) 2018 Yixin Hu +// +// This Source Code Form is subject to the terms of the Mozilla Public License +// v. 2.0. If a copy of the MPL was not distributed with this file, You can +// obtain one at http://mozilla.org/MPL/2.0/. +// +// Created by yihu on 6/13/17. 
+// + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace tetwild { + +void InoutFiltering::filter() { + logger().debug("In/out filtering..."); + + Eigen::MatrixXd C(std::count(t_is_removed.begin(), t_is_removed.end(), false), 3); + int cnt = 0; + for (int i = 0; i < tets.size(); i++) { + if (t_is_removed[i]) + continue; + std::vector vs; + vs.reserve(4); + for (int j = 0; j < 4; j++) + vs.push_back(tet_vertices[tets[i][j]].posf); + Point_3f p = CGAL::centroid(vs.begin(), vs.end(), CGAL::Dimension_tag<0>()); + for (int j = 0; j < 3; j++) + C(cnt, j) = p[j]; + cnt++; + } + + Eigen::MatrixXd V; + Eigen::MatrixXi F; + getSurface(V, F); + Eigen::VectorXd W; + igl::winding_number(V, F, C, W); + + std::vector tmp_t_is_removed = t_is_removed; + cnt = 0; + for (int i = 0; i < tets.size(); i++) { + if (tmp_t_is_removed[i]) + continue; + tmp_t_is_removed[i] = !(W(cnt) > 0.5); + cnt++; + } + + //if the surface is totally reversed + //TODO: test the correctness + if(std::count(tmp_t_is_removed.begin(), tmp_t_is_removed.end(), false)==0) { + logger().debug("Winding number gives a empty mesh! trying again"); + for (int i = 0; i < F.rows(); i++) { + int tmp = F(i, 1); + F(i, 1) = F(i, 2); + F(i, 2) = tmp; + } +// igl::writeSTL(state.working_dir+state.postfix_str+"_debug.stl", V, F); + igl::winding_number(V, F, C, W); + + tmp_t_is_removed = t_is_removed; + cnt = 0; + for (int i = 0; i < tets.size(); i++) { + if (tmp_t_is_removed[i]) + continue; + tmp_t_is_removed[i] = !(W(cnt) > 0.5); + cnt++; + } + } + +// outputWindingNumberField(W); + + t_is_removed = tmp_t_is_removed; + logger().debug("In/out Filtered!"); +} + +void InoutFiltering::getSurface(Eigen::MatrixXd& V, Eigen::MatrixXi& F){ + std::vector> fs; + std::vector vs; + for(int i=0;i 0) {//outside + std::array v_ids = {{tets[i][(j + 1) % 4], tets[i][(j + 2) % 4], tets[i][(j + 3) % 4]}}; + if (CGAL::orientation(tet_vertices[v_ids[0]].pos, tet_vertices[v_ids[1]].pos, + tet_vertices[v_ids[2]].pos, tet_vertices[tets[i][j]].pos) != CGAL::POSITIVE) { + int tmp = v_ids[0]; + v_ids[0] = v_ids[2]; + v_ids[2] = tmp; + } + for (int k = 0; k < is_surface_fs[i][j]; k++) + fs.push_back(v_ids); + for (int k = 0; k < 3; k++) + vs.push_back(v_ids[k]); + } + } + } + std::sort(vs.begin(), vs.end()); + vs.erase(std::unique(vs.begin(), vs.end()), vs.end()); + + V.resize(vs.size(), 3); + std::map map_ids; + for(int i=0;i v_ids; + for (int i = 0; i < tets.size(); i++) { + if (t_is_removed[i]) + continue; + for (int j = 0; j < 4; j++) + v_ids.push_back(tets[i][j]); + } + std::sort(v_ids.begin(), v_ids.end()); + v_ids.erase(std::unique(v_ids.begin(), v_ids.end()), v_ids.end()); + std::unordered_map map_ids; + for (int i = 0; i < v_ids.size(); i++) + map_ids[v_ids[i]] = i; + + PyMesh::MshSaver mSaver(state.working_dir+state.postfix+"_wn.msh", true); + Eigen::VectorXd oV(v_ids.size() * 3); + Eigen::VectorXi oT(t_cnt * 4); + for (int i = 0; i < v_ids.size(); i++) { + for (int j = 0; j < 3; j++) + oV(i * 3 + j) = tet_vertices[v_ids[i]].posf[j]; + } + int cnt = 0; + for (int i = 0; i < tets.size(); i++) { + if (t_is_removed[i]) + continue; + for (int j = 0; j < 4; j++) + oT(cnt * 4 + j) = map_ids[tets[i][j]]; + cnt++; + } + mSaver.save_mesh(oV, oT, 3, mSaver.TET); + logger().debug("#v = {}", oV.rows() / 3); + logger().debug("#t = {}", oT.rows() / 4); + + mSaver.save_elem_scalar_field("winding number", W); +} + +} // namespace tetwild diff --git a/contrib/NeRF-Editing/TetWild/src/tetwild/InoutFiltering.h 
b/contrib/NeRF-Editing/TetWild/src/tetwild/InoutFiltering.h new file mode 100644 index 00000000..ff267980 --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/src/tetwild/InoutFiltering.h @@ -0,0 +1,47 @@ +// This file is part of TetWild, a software for generating tetrahedral meshes. +// +// Copyright (C) 2018 Yixin Hu +// +// This Source Code Form is subject to the terms of the Mozilla Public License +// v. 2.0. If a copy of the MPL was not distributed with this file, You can +// obtain one at http://mozilla.org/MPL/2.0/. +// +// Created by yihu on 6/13/17. +// + +#ifndef NEW_GTET_INOUTFILTERING_H +#define NEW_GTET_INOUTFILTERING_H + +#include +#include + +namespace tetwild { + +class InoutFiltering { +public: + const State &state; + std::vector& tet_vertices; + std::vector>& tets; + std::vector>& is_surface_fs; + std::vector& v_is_removed; + std::vector& t_is_removed; + std::vector& tet_qualities; + + std::vector is_inside; + InoutFiltering(std::vector& t_vs, std::vector>& ts, + std::vector>& is_sf_fs, + std::vector& v_is_rm, std::vector& t_is_rm, std::vector& tet_qs, + const State &st): + tet_vertices(t_vs), tets(ts), is_surface_fs(is_sf_fs), v_is_removed(v_is_rm), t_is_removed(t_is_rm), + tet_qualities(tet_qs), state(st) + { } + + void getSurface(Eigen::MatrixXd& V_sf, Eigen::MatrixXi& F_sf); + void filter(); + + void outputWindingNumberField(const Eigen::VectorXd& W); +}; + +} // namespace tetwild + +#endif //NEW_GTET_INOUTFILTERING_H diff --git a/contrib/NeRF-Editing/TetWild/src/tetwild/LocalOperations.cpp b/contrib/NeRF-Editing/TetWild/src/tetwild/LocalOperations.cpp new file mode 100644 index 00000000..d688a79f --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/src/tetwild/LocalOperations.cpp @@ -0,0 +1,1525 @@ +// This file is part of TetWild, a software for generating tetrahedral meshes. +// +// Copyright (C) 2018 Yixin Hu +// +// This Source Code Form is subject to the terms of the Mozilla Public License +// v. 2.0. If a copy of the MPL was not distributed with this file, You can +// obtain one at http://mozilla.org/MPL/2.0/. +// +// Created by Yixin Hu on 5/6/17. 
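+//
+// Editor's note: the comformalAMIPS*_new routines below are closed-form (apparently
+// machine-generated) evaluations of the conformal AMIPS energy of a single tetrahedron,
+// together with its gradient (3 entries) and Hessian (9 entries), which appear to be
+// taken with respect to the first vertex's coordinates. The input array T packs the 12
+// vertex coordinates as x0,y0,z0,...,x3,y3,z3 (see calTetQuality_AMIPS in this file,
+// which fills T that way). Inverted, degenerate, or non-finite-energy tets are assigned
+// state.MAX_ENERGY instead of a finite value.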
+// + +#include +#include +#include +#include +#include +#include +#include +#include +//#include +//#include + +#define FULL_LOG false +#define CHECK_ENVELOP true + +namespace tetwild { + +double LocalOperations::comformalAMIPSEnergy_new(const double * T) { + double helper_0[12]; + helper_0[0] = T[0]; + helper_0[1] = T[1]; + helper_0[2] = T[2]; + helper_0[3] = T[3]; + helper_0[4] = T[4]; + helper_0[5] = T[5]; + helper_0[6] = T[6]; + helper_0[7] = T[7]; + helper_0[8] = T[8]; + helper_0[9] = T[9]; + helper_0[10] = T[10]; + helper_0[11] = T[11]; + double helper_1 = helper_0[2]; + double helper_2 = helper_0[11]; + double helper_3 = helper_0[0]; + double helper_4 = helper_0[3]; + double helper_5 = helper_0[9]; + double helper_6 = 0.577350269189626 * helper_3 - 1.15470053837925 * helper_4 + 0.577350269189626 * helper_5; + double helper_7 = helper_0[1]; + double helper_8 = helper_0[4]; + double helper_9 = helper_0[7]; + double helper_10 = helper_0[10]; + double helper_11 = 0.408248290463863 * helper_10 + 0.408248290463863 * helper_7 + 0.408248290463863 * helper_8 - + 1.22474487139159 * helper_9; + double helper_12 = 0.577350269189626 * helper_10 + 0.577350269189626 * helper_7 - 1.15470053837925 * helper_8; + double helper_13 = helper_0[6]; + double helper_14 = -1.22474487139159 * helper_13 + 0.408248290463863 * helper_3 + 0.408248290463863 * helper_4 + + 0.408248290463863 * helper_5; + double helper_15 = helper_0[5]; + double helper_16 = helper_0[8]; + double helper_17 = 0.408248290463863 * helper_1 + 0.408248290463863 * helper_15 - 1.22474487139159 * helper_16 + + 0.408248290463863 * helper_2; + double helper_18 = 0.577350269189626 * helper_1 - 1.15470053837925 * helper_15 + 0.577350269189626 * helper_2; + double helper_19 = 0.5 * helper_13 + 0.5 * helper_4; + double helper_20 = 0.5 * helper_8 + 0.5 * helper_9; + double helper_21 = 0.5 * helper_15 + 0.5 * helper_16; + return -(helper_1 * (-1.5 * helper_1 + 0.5 * helper_2 + helper_21) + + helper_10 * (-1.5 * helper_10 + helper_20 + 0.5 * helper_7) + + helper_13 * (-1.5 * helper_13 + 0.5 * helper_3 + 0.5 * helper_4 + 0.5 * helper_5) + + helper_15 * (0.5 * helper_1 - 1.5 * helper_15 + 0.5 * helper_16 + 0.5 * helper_2) + + helper_16 * (0.5 * helper_1 + 0.5 * helper_15 - 1.5 * helper_16 + 0.5 * helper_2) + + helper_2 * (0.5 * helper_1 - 1.5 * helper_2 + helper_21) + + helper_3 * (helper_19 - 1.5 * helper_3 + 0.5 * helper_5) + + helper_4 * (0.5 * helper_13 + 0.5 * helper_3 - 1.5 * helper_4 + 0.5 * helper_5) + + helper_5 * (helper_19 + 0.5 * helper_3 - 1.5 * helper_5) + + helper_7 * (0.5 * helper_10 + helper_20 - 1.5 * helper_7) + + helper_8 * (0.5 * helper_10 + 0.5 * helper_7 - 1.5 * helper_8 + 0.5 * helper_9) + + helper_9 * (0.5 * helper_10 + 0.5 * helper_7 + 0.5 * helper_8 - 1.5 * helper_9)) * + pow(pow((helper_1 - helper_2) * (helper_11 * helper_6 - helper_12 * helper_14) - + (-helper_10 + helper_7) * (-helper_14 * helper_18 + helper_17 * helper_6) + + (helper_3 - helper_5) * (-helper_11 * helper_18 + helper_12 * helper_17), 2), -0.333333333333333); +} + +void LocalOperations::comformalAMIPSJacobian_new(const double * T, double *result_0) { + double helper_0[12]; + helper_0[0] = T[0]; + helper_0[1] = T[1]; + helper_0[2] = T[2]; + helper_0[3] = T[3]; + helper_0[4] = T[4]; + helper_0[5] = T[5]; + helper_0[6] = T[6]; + helper_0[7] = T[7]; + helper_0[8] = T[8]; + helper_0[9] = T[9]; + helper_0[10] = T[10]; + helper_0[11] = T[11]; + double helper_1 = helper_0[1]; + double helper_2 = helper_0[10]; + double helper_3 = helper_1 - helper_2; + double 
helper_4 = helper_0[0]; + double helper_5 = helper_0[3]; + double helper_6 = helper_0[9]; + double helper_7 = 0.577350269189626*helper_4 - 1.15470053837925*helper_5 + 0.577350269189626*helper_6; + double helper_8 = helper_0[2]; + double helper_9 = 0.408248290463863*helper_8; + double helper_10 = helper_0[5]; + double helper_11 = 0.408248290463863*helper_10; + double helper_12 = helper_0[8]; + double helper_13 = 1.22474487139159*helper_12; + double helper_14 = helper_0[11]; + double helper_15 = 0.408248290463863*helper_14; + double helper_16 = helper_11 - helper_13 + helper_15 + helper_9; + double helper_17 = 0.577350269189626*helper_8; + double helper_18 = 1.15470053837925*helper_10; + double helper_19 = 0.577350269189626*helper_14; + double helper_20 = helper_17 - helper_18 + helper_19; + double helper_21 = helper_0[6]; + double helper_22 = -1.22474487139159*helper_21 + 0.408248290463863*helper_4 + 0.408248290463863*helper_5 + 0.408248290463863*helper_6; + double helper_23 = helper_16*helper_7 - helper_20*helper_22; + double helper_24 = -helper_14 + helper_8; + double helper_25 = 0.408248290463863*helper_1; + double helper_26 = helper_0[4]; + double helper_27 = 0.408248290463863*helper_26; + double helper_28 = helper_0[7]; + double helper_29 = 1.22474487139159*helper_28; + double helper_30 = 0.408248290463863*helper_2; + double helper_31 = helper_25 + helper_27 - helper_29 + helper_30; + double helper_32 = helper_31*helper_7; + double helper_33 = 0.577350269189626*helper_1; + double helper_34 = 1.15470053837925*helper_26; + double helper_35 = 0.577350269189626*helper_2; + double helper_36 = helper_33 - helper_34 + helper_35; + double helper_37 = helper_22*helper_36; + double helper_38 = helper_4 - helper_6; + double helper_39 = helper_23*helper_3 - helper_24*(helper_32 - helper_37) - helper_38*(helper_16*helper_36 - helper_20*helper_31); + double helper_40 = pow(pow(helper_39, 2), -0.333333333333333); + double helper_41 = 0.707106781186548*helper_10 - 0.707106781186548*helper_12; + double helper_42 = 0.707106781186548*helper_26 - 0.707106781186548*helper_28; + double helper_43 = 0.5*helper_21 + 0.5*helper_5; + double helper_44 = 0.5*helper_26 + 0.5*helper_28; + double helper_45 = 0.5*helper_10 + 0.5*helper_12; + double helper_46 = 0.666666666666667*(helper_1*(-1.5*helper_1 + 0.5*helper_2 + helper_44) + helper_10*(-1.5*helper_10 + 0.5*helper_12 + 0.5*helper_14 + 0.5*helper_8) + helper_12*(0.5*helper_10 - 1.5*helper_12 + 0.5*helper_14 + 0.5*helper_8) + helper_14*(-1.5*helper_14 + helper_45 + 0.5*helper_8) + helper_2*(0.5*helper_1 - 1.5*helper_2 + helper_44) + helper_21*(-1.5*helper_21 + 0.5*helper_4 + 0.5*helper_5 + 0.5*helper_6) + helper_26*(0.5*helper_1 + 0.5*helper_2 - 1.5*helper_26 + 0.5*helper_28) + helper_28*(0.5*helper_1 + 0.5*helper_2 + 0.5*helper_26 - 1.5*helper_28) + helper_4*(-1.5*helper_4 + helper_43 + 0.5*helper_6) + helper_5*(0.5*helper_21 + 0.5*helper_4 - 1.5*helper_5 + 0.5*helper_6) + helper_6*(0.5*helper_4 + helper_43 - 1.5*helper_6) + helper_8*(0.5*helper_14 + helper_45 - 1.5*helper_8))/helper_39; + double helper_47 = -0.707106781186548*helper_21 + 0.707106781186548*helper_5; + result_0[0] = -helper_40*(1.0*helper_21 - 3.0*helper_4 + helper_46*(helper_41*(-helper_1 + helper_2) - helper_42*(helper_14 - helper_8) - (-helper_17 + helper_18 - helper_19)*(-helper_25 - helper_27 + helper_29 - helper_30) + (-helper_33 + helper_34 - helper_35)*(-helper_11 + helper_13 - helper_15 - helper_9)) + 1.0*helper_5 + 1.0*helper_6); + result_0[1] = helper_40*(3.0*helper_1 - 1.0*helper_2 - 
1.0*helper_26 - 1.0*helper_28 + helper_46*(helper_23 + helper_24*helper_47 - helper_38*helper_41)); + result_0[2] = helper_40*(-1.0*helper_10 - 1.0*helper_12 - 1.0*helper_14 + helper_46*(-helper_3*helper_47 - helper_32 + helper_37 + helper_38*helper_42) + 3.0*helper_8); +} + +void LocalOperations::comformalAMIPSHessian_new(const double * T, double *result_0){ + double helper_0[12]; + helper_0[0] = T[0]; + helper_0[1] = T[1]; + helper_0[2] = T[2]; + helper_0[3] = T[3]; + helper_0[4] = T[4]; + helper_0[5] = T[5]; + helper_0[6] = T[6]; + helper_0[7] = T[7]; + helper_0[8] = T[8]; + helper_0[9] = T[9]; + helper_0[10] = T[10]; + helper_0[11] = T[11]; + double helper_1 = helper_0[2]; + double helper_2 = helper_0[11]; + double helper_3 = helper_1 - helper_2; + double helper_4 = helper_0[0]; + double helper_5 = 0.577350269189626*helper_4; + double helper_6 = helper_0[3]; + double helper_7 = 1.15470053837925*helper_6; + double helper_8 = helper_0[9]; + double helper_9 = 0.577350269189626*helper_8; + double helper_10 = helper_5 - helper_7 + helper_9; + double helper_11 = helper_0[1]; + double helper_12 = 0.408248290463863*helper_11; + double helper_13 = helper_0[4]; + double helper_14 = 0.408248290463863*helper_13; + double helper_15 = helper_0[7]; + double helper_16 = 1.22474487139159*helper_15; + double helper_17 = helper_0[10]; + double helper_18 = 0.408248290463863*helper_17; + double helper_19 = helper_12 + helper_14 - helper_16 + helper_18; + double helper_20 = helper_10*helper_19; + double helper_21 = 0.577350269189626*helper_11; + double helper_22 = 1.15470053837925*helper_13; + double helper_23 = 0.577350269189626*helper_17; + double helper_24 = helper_21 - helper_22 + helper_23; + double helper_25 = 0.408248290463863*helper_4; + double helper_26 = 0.408248290463863*helper_6; + double helper_27 = helper_0[6]; + double helper_28 = 1.22474487139159*helper_27; + double helper_29 = 0.408248290463863*helper_8; + double helper_30 = helper_25 + helper_26 - helper_28 + helper_29; + double helper_31 = helper_24*helper_30; + double helper_32 = helper_3*(helper_20 - helper_31); + double helper_33 = helper_4 - helper_8; + double helper_34 = 0.408248290463863*helper_1; + double helper_35 = helper_0[5]; + double helper_36 = 0.408248290463863*helper_35; + double helper_37 = helper_0[8]; + double helper_38 = 1.22474487139159*helper_37; + double helper_39 = 0.408248290463863*helper_2; + double helper_40 = helper_34 + helper_36 - helper_38 + helper_39; + double helper_41 = helper_24*helper_40; + double helper_42 = 0.577350269189626*helper_1; + double helper_43 = 1.15470053837925*helper_35; + double helper_44 = 0.577350269189626*helper_2; + double helper_45 = helper_42 - helper_43 + helper_44; + double helper_46 = helper_19*helper_45; + double helper_47 = helper_41 - helper_46; + double helper_48 = helper_33*helper_47; + double helper_49 = helper_11 - helper_17; + double helper_50 = helper_10*helper_40; + double helper_51 = helper_30*helper_45; + double helper_52 = helper_50 - helper_51; + double helper_53 = helper_49*helper_52; + double helper_54 = helper_32 + helper_48 - helper_53; + double helper_55 = pow(helper_54, 2); + double helper_56 = pow(helper_55, -0.333333333333333); + double helper_57 = 1.0*helper_27 - 3.0*helper_4 + 1.0*helper_6 + 1.0*helper_8; + double helper_58 = 0.707106781186548*helper_13; + double helper_59 = 0.707106781186548*helper_15; + double helper_60 = helper_58 - helper_59; + double helper_61 = helper_3*helper_60; + double helper_62 = 0.707106781186548*helper_35 - 
0.707106781186548*helper_37; + double helper_63 = helper_49*helper_62; + double helper_64 = helper_47 + helper_61 - helper_63; + double helper_65 = 1.33333333333333/helper_54; + double helper_66 = 1.0/helper_55; + double helper_67 = 0.5*helper_27 + 0.5*helper_6; + double helper_68 = -1.5*helper_4 + helper_67 + 0.5*helper_8; + double helper_69 = 0.5*helper_4 + helper_67 - 1.5*helper_8; + double helper_70 = -1.5*helper_27 + 0.5*helper_4 + 0.5*helper_6 + 0.5*helper_8; + double helper_71 = 0.5*helper_27 + 0.5*helper_4 - 1.5*helper_6 + 0.5*helper_8; + double helper_72 = 0.5*helper_13 + 0.5*helper_15; + double helper_73 = -1.5*helper_11 + 0.5*helper_17 + helper_72; + double helper_74 = 0.5*helper_11 - 1.5*helper_17 + helper_72; + double helper_75 = 0.5*helper_11 + 0.5*helper_13 - 1.5*helper_15 + 0.5*helper_17; + double helper_76 = 0.5*helper_11 - 1.5*helper_13 + 0.5*helper_15 + 0.5*helper_17; + double helper_77 = 0.5*helper_35 + 0.5*helper_37; + double helper_78 = -1.5*helper_1 + 0.5*helper_2 + helper_77; + double helper_79 = 0.5*helper_1 - 1.5*helper_2 + helper_77; + double helper_80 = 0.5*helper_1 + 0.5*helper_2 + 0.5*helper_35 - 1.5*helper_37; + double helper_81 = 0.5*helper_1 + 0.5*helper_2 - 1.5*helper_35 + 0.5*helper_37; + double helper_82 = helper_1*helper_78 + helper_11*helper_73 + helper_13*helper_76 + helper_15*helper_75 + helper_17*helper_74 + helper_2*helper_79 + helper_27*helper_70 + helper_35*helper_81 + helper_37*helper_80 + helper_4*helper_68 + helper_6*helper_71 + helper_69*helper_8; + double helper_83 = 0.444444444444444*helper_66*helper_82; + double helper_84 = helper_66*helper_82; + double helper_85 = -helper_32 - helper_48 + helper_53; + double helper_86 = 1.0/helper_85; + double helper_87 = helper_86*pow(pow(helper_85, 2), -0.333333333333333); + double helper_88 = 0.707106781186548*helper_6; + double helper_89 = 0.707106781186548*helper_27; + double helper_90 = helper_88 - helper_89; + double helper_91 = 0.666666666666667*helper_10*helper_40 + 0.666666666666667*helper_3*helper_90 - 0.666666666666667*helper_30*helper_45 - 0.666666666666667*helper_33*helper_62; + double helper_92 = -3.0*helper_11 + 1.0*helper_13 + 1.0*helper_15 + 1.0*helper_17; + double helper_93 = -helper_11 + helper_17; + double helper_94 = -helper_1 + helper_2; + double helper_95 = -helper_21 + helper_22 - helper_23; + double helper_96 = -helper_34 - helper_36 + helper_38 - helper_39; + double helper_97 = -helper_42 + helper_43 - helper_44; + double helper_98 = -helper_12 - helper_14 + helper_16 - helper_18; + double helper_99 = -0.666666666666667*helper_60*helper_94 + 0.666666666666667*helper_62*helper_93 + 0.666666666666667*helper_95*helper_96 - 0.666666666666667*helper_97*helper_98; + double helper_100 = helper_3*helper_90; + double helper_101 = helper_33*helper_62; + double helper_102 = helper_100 - helper_101 + helper_52; + double helper_103 = -helper_60*helper_94 + helper_62*helper_93 + helper_95*helper_96 - helper_97*helper_98; + double helper_104 = 0.444444444444444*helper_102*helper_103*helper_82*helper_86 + helper_57*helper_91 - helper_92*helper_99; + double helper_105 = 1.85037170770859e-17*helper_1*helper_78 + 1.85037170770859e-17*helper_11*helper_73 + 1.85037170770859e-17*helper_13*helper_76 + 1.85037170770859e-17*helper_15*helper_75 + 1.85037170770859e-17*helper_17*helper_74 + 1.85037170770859e-17*helper_2*helper_79 + 1.85037170770859e-17*helper_27*helper_70 + 1.85037170770859e-17*helper_35*helper_81 + 1.85037170770859e-17*helper_37*helper_80 + 1.85037170770859e-17*helper_4*helper_68 + 
1.85037170770859e-17*helper_6*helper_71 + 1.85037170770859e-17*helper_69*helper_8; + double helper_106 = helper_64*helper_82*helper_86; + double helper_107 = -0.666666666666667*helper_10*helper_19 + 0.666666666666667*helper_24*helper_30 + 0.666666666666667*helper_33*helper_60 - 0.666666666666667*helper_49*helper_90; + double helper_108 = -3.0*helper_1 + 1.0*helper_2 + 1.0*helper_35 + 1.0*helper_37; + double helper_109 = -helper_20 + helper_31 + helper_33*helper_60 - helper_49*helper_90; + double helper_110 = 0.444444444444444*helper_109*helper_82*helper_86; + double helper_111 = helper_103*helper_110 + helper_107*helper_57 - helper_108*helper_99; + double helper_112 = -helper_4 + helper_8; + double helper_113 = -helper_88 + helper_89; + double helper_114 = -helper_5 + helper_7 - helper_9; + double helper_115 = -helper_25 - helper_26 + helper_28 - helper_29; + double helper_116 = helper_82*helper_86*(helper_112*helper_62 + helper_113*helper_94 + helper_114*helper_96 - helper_115*helper_97); + double helper_117 = -helper_100 + helper_101 - helper_50 + helper_51; + double helper_118 = -helper_102*helper_110 + helper_107*helper_92 + helper_108*helper_91; + double helper_119 = helper_82*helper_86*(helper_112*(-helper_58 + helper_59) - helper_113*helper_93 - helper_114*helper_98 + helper_115*helper_95); + result_0[0] = helper_56*(helper_57*helper_64*helper_65 - pow(helper_64, 2)*helper_83 + 0.666666666666667*helper_64*helper_84*(-helper_41 + helper_46 - helper_61 + helper_63) + 3.0); + result_0[1] = helper_87*(helper_104 - helper_105*helper_35 + helper_106*helper_91); + result_0[2] = helper_87*(helper_106*helper_107 + helper_111); + result_0[3] = helper_87*(helper_104 + helper_116*helper_99); + result_0[4] = helper_56*(-pow(helper_117, 2)*helper_83 + helper_117*helper_65*helper_92 + helper_117*helper_84*helper_91 + 3.0); + result_0[5] = helper_87*(-helper_105*helper_6 - helper_107*helper_116 + helper_118); + result_0[6] = helper_87*(-helper_105*helper_13 + helper_111 + helper_119*helper_99); + result_0[7] = helper_87*(helper_118 - helper_119*helper_91); + result_0[8] = helper_56*(-helper_108*helper_109*helper_65 - 1.11111111111111*pow(helper_109, 2)*helper_84 + 3.0); +} + +void LocalOperations::check() { + ///check correctness + int n_size=0; + int d_size=0; + for (int i = 0; i < tet_vertices.size(); i++) { + if (v_is_removed[i]) + continue; + + for (auto it = tet_vertices[i].conn_tets.begin(); it != tet_vertices[i].conn_tets.end(); it++) { + if (t_is_removed[*it]) + logger().debug("t {} is removed!", *it); + auto jt=std::find(tets[*it].begin(), tets[*it].end(), i); + if(jt==tets[*it].end()){ + logger().debug("t {} is not a conn_tet for v {}", *it, i); + } + } + + if(tet_vertices[i].conn_tets.size()==0) { + logger().debug("empty conn_tets: v {}", i); + assert(tet_vertices[i].conn_tets.size()>0); + } + +// for(int j=0;j<3;j++) { +// int tmp_n_size = CGAL::exact(tet_vertices[i].pos[j]).numerator().bit_size(); +// int tmp_d_size = CGAL::exact(tet_vertices[i].pos[j]).denominator().bit_size(); +// if(tmp_n_size>n_size) +// n_size=tmp_n_size; +// if(tmp_d_size>d_size) +// d_size=tmp_d_size; +// } + } + + for (int i = 0; i < tets.size(); i++) { + if (t_is_removed[i]) + continue; + for (int j = 0; j < 4; j++) { + bool is_found = false; + for (auto it = tet_vertices[tets[i][j]].conn_tets.begin(); + it != tet_vertices[tets[i][j]].conn_tets.end(); it++) { + if (*it == i) { + is_found = true; + } + if (t_is_removed[*it]) + logger().debug("tet {} is removed!", *it); + } + if (!is_found) { + 
logger().debug("{} {} {} {}", tets[i][0], tets[i][1], tets[i][2], tets[i][3]); + logger().debug("tet {} should be conn to v {}", i, tets[i][j]); + } + } + } + + +} + +void LocalOperations::outputInfo(int op_type, double time, bool is_log) { + logger().debug("outputing info"); + //update min/max dihedral angle infos + for (int i = 0; i < tets.size(); i++) { + if (!t_is_removed[i]) + calTetQuality_AD(tets[i], tet_qualities[i]); + } + + if(args.is_quiet) + return; + + //some tmp checks for experiments +// for (int i = 0; i < tets.size(); i++) { +// if (t_is_removed[i]) +// continue; +// CGAL::Orientation ori = CGAL::orientation(tet_vertices[tets[i][0]].pos, +// tet_vertices[tets[i][1]].pos, +// tet_vertices[tets[i][2]].pos, +// tet_vertices[tets[i][3]].pos); +// if (ori != CGAL::POSITIVE) { +// logger().debug("outputInfo(): tet flipped!!"); +// pausee(); +// } +// +// //check quality +//// TetQuality tq; +//// calTetQuality_AMIPS(tets[i], tq); +//// if (tq.slim_energy != tet_qualities[i].slim_energy) { +//// logger().debug("quality incorrect!"); +//// logger().debug("{} {}", tq.slim_energy, tet_qualities[i].slim_energy); +//// pausee(); +//// } +// } + +// int cnt = 0; +// for (int i = 0; i < tet_vertices.size(); i++) { +// if (v_is_removed[i]) +// continue; +// +// for (int j = 0; j < 3; j++) { +// if (std::isnan(tet_vertices[i].posf[j])) { +// logger().debug("v {} is nan", i); +// pausee(); +// } +// } +// +// if (tet_vertices[i].is_on_bbox && tet_vertices[i].is_on_surface) { +// logger().debug("ERROR: tet_vertices[i].is_on_bbox && tet_vertices[i].is_on_surface"); +// pausee(); +// } +// +// if (tet_vertices[i].is_on_bbox) +// cnt++; +// } +// logger().debug("on bbox = {}", cnt); + +// std::vector> fs; +// for (int i = 0; i < tets.size(); i++) { +// if (t_is_removed[i]) +// continue; +// for (int j = 0; j < 4; j++) { +// if (is_surface_fs[i][j] != state.NOT_SURFACE) { +// std::array f = {tets[i][(j + 1) % 4], tets[i][(j + 2) % 4], tets[i][(j + 3) % 4]}; +// std::sort(f.begin(), f.end()); +// fs.push_back(std::array({{f[0], f[1], f[2], is_surface_fs[i][j]}})); +// } +// } +// } +// if (fs.size() % 2 != 0) { +// logger().debug("fs.size()%2!=0"); +// } +// std::sort(fs.begin(), fs.end()); +// for (int i = 0; i < fs.size() - 1; i += 2) { +// if (fs[i][0] == fs[i + 1][0] && fs[i][1] == fs[i + 1][1] && fs[i][2] == fs[i + 1][2] && +// fs[i][3] + fs[i + 1][3] == 0);//good +// else { +// logger().debug("{}", i); +// logger().debug("hehehehe"); +// for (int j = 0; j < 4; j++) +// logger().debug("{}{}{}{}#vertices outside of envelop = {}", fs[i][j], " "; +// for (int j = 0; j < 4; j++) +// std::cout, fs[i + 1][j], " "; +// pausee(); +// } +// } + + //check envelop +// if(op_type != MeshRecord::OpType::OP_OPT_INIT) { +// cnt = 0; +// for (int i = 0; i < tet_vertices.size(); i++) { +// if (!v_is_removed[i] && tet_vertices[i].is_on_surface) { +// double dis = geo_sf_tree.squared_distance( +// GEO::vec3(tet_vertices[i].posf[0], tet_vertices[i].posf[1], tet_vertices[i].posf[2])); +// if (dis > state.eps_2) +// cnt++; +// } +// } +// std::cout, cnt); +// } + + int cnt = 0; + int r_cnt = 0; + for (int i = 0; i < tet_vertices.size(); i++) { + if (!v_is_removed[i]) { + cnt++; + if (tet_vertices[i].is_rounded) { +// if (tet_vertices[i].pos[0] != tet_vertices[i].posf[0] +// || tet_vertices[i].pos[1] != tet_vertices[i].posf[1] +// || tet_vertices[i].pos[2] != tet_vertices[i].posf[2]) { +// logger().debug("tet_vertices[i].pos!=tet_vertices[i].posf"); +// logger().debug("{}{}{}{}{}", tet_vertices[i].pos[0] - 
tet_vertices[i].posf[0], " " +//, tet_vertices[i].pos[1] - tet_vertices[i].posf[1], " " +//, tet_vertices[i].pos[2] - tet_vertices[i].posf[2]); +// +// } + r_cnt++; + } +// else { +// if (CGAL::to_double(tet_vertices[i].pos[0]) != tet_vertices[i].posf[0] +// || CGAL::to_double(tet_vertices[i].pos[1]) != tet_vertices[i].posf[1] +// || CGAL::to_double(tet_vertices[i].pos[2]) != tet_vertices[i].posf[2]) { +// logger().debug("CGAL::to_double(tet_vertices[i].pos)!=tet_vertices[i].posf"); +// logger().debug("{}{}{}{}{}", CGAL::to_double(tet_vertices[i].pos[0]) - tet_vertices[i].posf[0], " " +//, CGAL::to_double(tet_vertices[i].pos[1]) - tet_vertices[i].posf[1], " " +//, CGAL::to_double(tet_vertices[i].pos[2]) - tet_vertices[i].posf[2]); +// } +// } + } + } + + + logger().debug("# vertices = {}({}) {}(r)", cnt, tet_vertices.size(), r_cnt); + + cnt = 0; + for (int i = 0; i < tets.size(); i++) { + if (!t_is_removed[i]) + cnt++; + } + logger().debug("# tets = {}({})", cnt, tets.size()); + logger().debug("# total operations = {}", counter); + logger().debug("# accepted operations = {}", suc_counter); + + + double min = 10, max = 0; + double min_avg = 0, max_avg = 0; + double max_slim_energy = 0, avg_slim_energy = 0; + std::array cmp_cnt = {{0, 0, 0, 0, 0, 0}}; + cnt = 0; + + for (int i = 0; i < tet_qualities.size(); i++) { + if (t_is_removed[i]) + continue; + if (isTetLocked_ui(i)) + continue; + + cnt++; + if (tet_qualities[i].min_d_angle < min) + min = tet_qualities[i].min_d_angle; + if (tet_qualities[i].max_d_angle > max) + max = tet_qualities[i].max_d_angle; + if (tet_qualities[i].slim_energy > max_slim_energy) + max_slim_energy = tet_qualities[i].slim_energy; + min_avg += tet_qualities[i].min_d_angle; + max_avg += tet_qualities[i].max_d_angle; + avg_slim_energy += tet_qualities[i].slim_energy; + + for (int j = 0; j < 3; j++) { + if (tet_qualities[i].min_d_angle < cmp_d_angles[j]) + cmp_cnt[j]++; + } + for (int j = 0; j < 3; j++) { + if (tet_qualities[i].max_d_angle > cmp_d_angles[j + 3]) + cmp_cnt[j + 3]++; + } + } + + logger().debug("min_d_angle = {}, max_d_angle = {}, max_slim_energy = {}", min, max, max_slim_energy); + logger().debug("avg_min_d_angle = {}, avg_max_d_angle = {}, avg_slim_energy = {}", min_avg / cnt, max_avg / cnt, avg_slim_energy / cnt); + logger().debug("min_d_angle: <6 {}; <12 {}; <18 {}", cmp_cnt[0] / cnt, cmp_cnt[1] / cnt, cmp_cnt[2] / cnt); + logger().debug("max_d_angle: >174 {}; >168 {}; >162 {}", cmp_cnt[5] / cnt, cmp_cnt[4] / cnt, cmp_cnt[3] / cnt); + + if(is_log) { + addRecord(MeshRecord(op_type, time, std::count(v_is_removed.begin(), v_is_removed.end(), false), cnt, + min, min_avg / cnt, max, max_avg / cnt, max_slim_energy, avg_slim_energy / cnt), args, state); + } +} + +bool LocalOperations::isTetFlip(const std::array& t) { + CGAL::Orientation ori; + bool is_rounded = true; + for (int j = 0; j < 4; j++) + if (!tet_vertices[t[j]].is_rounded) { + is_rounded = false; + break; + } + if (is_rounded) + ori = CGAL::orientation(tet_vertices[t[0]].posf, tet_vertices[t[1]].posf, tet_vertices[t[2]].posf, + tet_vertices[t[3]].posf); + else + ori = CGAL::orientation(tet_vertices[t[0]].pos, tet_vertices[t[1]].pos, tet_vertices[t[2]].pos, + tet_vertices[t[3]].pos); + + if (ori != CGAL::POSITIVE) + return true; + return false; +} + +bool LocalOperations::isTetFlip(int t_id){ + return isTetFlip(tets[t_id]); +} + +bool LocalOperations::isFlip(const std::vector>& new_tets) { + ////check orientation + for (int i = 0; i < new_tets.size(); i++) { +// CGAL::Orientation ori = 
CGAL::orientation(tet_vertices[new_tets[i][0]].pos, +// tet_vertices[new_tets[i][1]].pos, +// tet_vertices[new_tets[i][2]].pos, +// tet_vertices[new_tets[i][3]].pos); +// if (ori != CGAL::POSITIVE) +// return true; + if(isTetFlip(new_tets[i])) + return true; + } + + return false; +} + +void LocalOperations::getCheckQuality(const std::vector& tet_qs, TetQuality& tq) { + double slim_sum = 0, slim_max = 0; + for (int i = 0; i < tet_qs.size(); i++) { + if (state.use_energy_max) { + if (tet_qs[i].slim_energy > slim_max) + slim_max = tet_qs[i].slim_energy; + } else + slim_sum += tet_qs[i].slim_energy * tet_qs[i].volume; + } + if (state.use_energy_max) + tq.slim_energy = slim_max; + else + tq.slim_energy = slim_sum; +} + +void LocalOperations::getCheckQuality(const std::vector& t_ids, TetQuality& tq){ + double slim_sum = 0, slim_max = 0; + for (int i = 0; i < t_ids.size(); i++) { + if (state.use_energy_max) { + if (tet_qualities[t_ids[i]].slim_energy > slim_max) + slim_max = tet_qualities[t_ids[i]].slim_energy; + } else + slim_sum += tet_qualities[t_ids[i]].slim_energy * tet_qualities[t_ids[i]].volume; + } + if (state.use_energy_max) + tq.slim_energy = slim_max; + else + tq.slim_energy = slim_sum; +} + +void LocalOperations::getAvgMaxEnergy(double& avg_tq, double& max_tq) { + avg_tq = 0; + max_tq = 0; + int cnt = 0; + for (unsigned int i = 0; i < tet_qualities.size(); i++) { + if (t_is_removed[i]) + continue; + if(isTetLocked_ui(i)) + continue; + if (tet_qualities[i].slim_energy > max_tq) + max_tq = tet_qualities[i].slim_energy; + avg_tq += tet_qualities[i].slim_energy; + cnt++; + } + avg_tq /= cnt; + if(std::isinf(avg_tq)) + avg_tq = state.MAX_ENERGY; +} + +double LocalOperations::getMaxEnergy(){ + double max_tq = 0; + for (unsigned int i = 0; i < tet_qualities.size(); i++) { + if (t_is_removed[i]) + continue; + if(isTetLocked_ui(i)) + continue; + if (tet_qualities[i].slim_energy > max_tq) + max_tq = tet_qualities[i].slim_energy; + } + return max_tq; +} + +double LocalOperations::getSecondMaxEnergy(double max_energy){ + double max_tq = 0; + for (unsigned int i = 0; i < tet_qualities.size(); i++) { + if (t_is_removed[i]) + continue; + if(tet_qualities[i].slim_energy == state.MAX_ENERGY) + continue; + if(isTetLocked_ui(i)) + continue; + if (tet_qualities[i].slim_energy > max_tq) + max_tq = tet_qualities[i].slim_energy; + } + return max_tq; +} + +double LocalOperations::getFilterEnergy(bool& is_clean_up) { + std::array buckets; + for (int i = 0; i < 11; i++) + buckets[i] = 0; + for (unsigned int i = 0; i < tet_qualities.size(); i++) { + if (t_is_removed[i]) + continue; + if (tet_qualities[i].slim_energy > args.filter_energy_thres - 1 + 1e10) + buckets[10]++; + else { + for (int j = 0; j < 10; j++) { + if (tet_qualities[i].slim_energy > args.filter_energy_thres - 1 + pow(10, j) + && tet_qualities[i].slim_energy <= args.filter_energy_thres - 1 + pow(10, j + 1)) { + buckets[j]++; + break; + } + } + } + } + + std::array tmps1; + std::array tmps2; + for (int i = 0; i < 10; i++) { + tmps1[i] = std::accumulate(buckets.begin(), buckets.begin() + i + 1, 0); + tmps2[i] = std::accumulate(buckets.begin() + i + 1, buckets.end(), 0); + } + + if(tmps1[0]>=tmps2[0]) { + is_clean_up = (tmps2[5] > 0); + return 8; + } + if(tmps1[8]<=tmps2[8]) + return 1e11; + + for (int i = 0; i < 8; i++) { + if (tmps1[i] < tmps2[i] && tmps1[i + 1] > tmps2[i + 1]){ + return args.filter_energy_thres - 1 + 5 * pow(10, i+1); + } + } + + return 8;//would never be execuate, it's fine +} + +void LocalOperations::calTetQualities(const 
std::vector>& new_tets, std::vector& tet_qs, + bool all_measure) { + tet_qs.resize(new_tets.size()); +#ifdef TETWILD_WITH_ISPC + int n = new_tets.size(); + + static thread_local std::vector T0; + static thread_local std::vector T1; + static thread_local std::vector T2; + static thread_local std::vector T3; + static thread_local std::vector T4; + static thread_local std::vector T5; + static thread_local std::vector T6; + static thread_local std::vector T7; + static thread_local std::vector T8; + static thread_local std::vector T9; + static thread_local std::vector T10; + static thread_local std::vector T11; + static thread_local std::vector energy; + + if (T0.empty()) { + // logger().trace("Initial ISPC allocation: n = {}", n); + } else if (T0.size() != n) { + // logger().trace("ISPC reallocation: n = {}", n); + } + + T0.resize(n); + T1.resize(n); + T2.resize(n); + T3.resize(n); + T4.resize(n); + T5.resize(n); + T6.resize(n); + T7.resize(n); + T8.resize(n); + T9.resize(n); + T10.resize(n); + T11.resize(n); + energy.resize(n); + + for (int i = 0; i < n; i++) { + T0[i] = tet_vertices[new_tets[i][0]].posf[0]; + T1[i] = tet_vertices[new_tets[i][0]].posf[1]; + T2[i] = tet_vertices[new_tets[i][0]].posf[2]; + T3[i] = tet_vertices[new_tets[i][1]].posf[0]; + T4[i] = tet_vertices[new_tets[i][1]].posf[1]; + T5[i] = tet_vertices[new_tets[i][1]].posf[2]; + T6[i] = tet_vertices[new_tets[i][2]].posf[0]; + T7[i] = tet_vertices[new_tets[i][2]].posf[1]; + T8[i] = tet_vertices[new_tets[i][2]].posf[2]; + T9[i] = tet_vertices[new_tets[i][3]].posf[0]; + T10[i] = tet_vertices[new_tets[i][3]].posf[1]; + T11[i] = tet_vertices[new_tets[i][3]].posf[2]; + } + + ispc::energy_ispc(T0.data(), T1.data(), T2.data(), T3.data(), T4.data(), + T5.data(), T6.data(), T7.data(), T8.data(), + T9.data(), T10.data(), T11.data(), energy.data(), n); + + for (int i = 0; i < new_tets.size(); i++) { + CGAL::Orientation ori = CGAL::orientation(tet_vertices[new_tets[i][0]].posf, + tet_vertices[new_tets[i][1]].posf, + tet_vertices[new_tets[i][2]].posf, + tet_vertices[new_tets[i][3]].posf); + if (ori != CGAL::POSITIVE) { + tet_qs[i].slim_energy = state.MAX_ENERGY; + continue; + } else + tet_qs[i].slim_energy = energy[i]; + + if (std::isinf(energy[i]) || std::isnan(energy[i])) + tet_qs[i].slim_energy = state.MAX_ENERGY; + } +#else + for (int i = 0; i < new_tets.size(); i++) { + calTetQuality_AMIPS(new_tets[i], tet_qs[i]); + } +#endif +} + +double LocalOperations::calEdgeLength(const std::array& v_ids){ + return CGAL::squared_distance(tet_vertices[v_ids[0]].posf, tet_vertices[v_ids[1]].posf); +} + +double LocalOperations::calEdgeLength(int v1_id,int v2_id, bool is_over_refine) { + return CGAL::squared_distance(tet_vertices[v1_id].posf, tet_vertices[v2_id].posf); +} + +void LocalOperations::calTetQuality_AD(const std::array& tet, TetQuality& t_quality) { + std::array nv; + std::array nv_length; + std::array heights; + for (int i = 0; i < 4; i++) { + Plane_3f pln(tet_vertices[tet[(i + 1) % 4]].posf, + tet_vertices[tet[(i + 2) % 4]].posf, + tet_vertices[tet[(i + 3) % 4]].posf); + if(pln.is_degenerate()){ + t_quality.min_d_angle = 0; + t_quality.max_d_angle = M_PI; + return; + } + Point_3f tmp_p = pln.projection(tet_vertices[tet[i]].posf); + if(tmp_p == tet_vertices[tet[i]].posf){ + t_quality.min_d_angle = 0; + t_quality.max_d_angle = M_PI; + return; + } + nv[i] = tet_vertices[tet[i]].posf - tmp_p; + heights[i] = CGAL::squared_distance(tet_vertices[tet[i]].posf, tmp_p); + +// if(std::isnan(heights[i])){//because pln is degenerate +// 
logger().debug("{}", tet_vertices[tet[i]].posf); +// logger().debug("{}", tet_vertices[tet[(i + 1) % 4]].posf); +// logger().debug("{}", tet_vertices[tet[(i + 2) % 4]].posf); +// logger().debug("{}", tet_vertices[tet[(i + 3) % 4]].posf); +// logger().debug("{}", pln.is_degenerate()); +// +// logger().debug("{}", tmp_p); +// logger().debug("{}", nv[i]); +// logger().debug("{}", heights[i]); +// pausee(); +// } + + //re-scale + std::array tmp_nv = {{CGAL::abs(nv[i][0]), CGAL::abs(nv[i][1]), CGAL::abs(nv[i][2])}}; + auto tmp = std::max_element(tmp_nv.begin(), tmp_nv.end()); + if(*tmp == 0 || heights[i] == 0){ + t_quality.min_d_angle = 0; + t_quality.max_d_angle = M_PI; +// t_quality.asp_ratio_2 = state.MAX_ENERGY; + return; + } else if (*tmp < 1e-5) { + nv[i] = Vector_3f(nv[i][0] / *tmp, nv[i][1] / *tmp, nv[i][2] / *tmp); + nv_length[i] = sqrt(heights[i] / ((*tmp) * (*tmp))); + } else { + nv_length[i] = sqrt(heights[i]); + } + } + + std::vector> opp_edges; + for (int i = 0; i < 3; i++) { + opp_edges.push_back(std::array({{0, i + 1}})); + opp_edges.push_back(std::array({{i + 1, (i + 1) % 3 + 1}})); + } + + ////compute dihedral angles + std::array dihedral_angles; + for (int i = 0; i < (int) opp_edges.size(); i++) { + double dihedral_angle = -nv[opp_edges[i][0]] * nv[opp_edges[i][1]] / + (nv_length[opp_edges[i][0]] * nv_length[opp_edges[i][1]]); + if (dihedral_angle > 1) + dihedral_angles[i] = 0; + else if (dihedral_angle < -1) + dihedral_angles[i] = M_PI; + else + dihedral_angles[i] = std::acos(dihedral_angle); + } +// std::sort(dihedral_angles.begin(), dihedral_angles.end()); + auto it=std::minmax_element(dihedral_angles.begin(), dihedral_angles.end()); + t_quality.min_d_angle = *(it.first); + t_quality.max_d_angle = *(it.second); + +// std::sort(heights.begin(), heights.end()); +// auto h = std::min_element(heights.begin(), heights.end()); +// t_quality.asp_ratio_2 = max_e_l / *h; +} + +void LocalOperations::calTetQuality_AMIPS(const std::array& tet, TetQuality& t_quality) { + if (energy_type == state.ENERGY_AMIPS) { + CGAL::Orientation ori = CGAL::orientation(tet_vertices[tet[0]].posf, + tet_vertices[tet[1]].posf, + tet_vertices[tet[2]].posf, + tet_vertices[tet[3]].posf); + if (ori != CGAL::POSITIVE) {//degenerate in floats + t_quality.slim_energy = state.MAX_ENERGY; + } else { + std::array T; + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 3; j++) { + T[i*3+j] = tet_vertices[tet[i]].posf[j]; + } + } + t_quality.slim_energy = comformalAMIPSEnergy_new(T.data()); + if (std::isinf(t_quality.slim_energy) || std::isnan(t_quality.slim_energy)) + t_quality.slim_energy = state.MAX_ENERGY; + } + } + if(std::isinf(t_quality.slim_energy) || std::isnan(t_quality.slim_energy) || t_quality.slim_energy <= 0) + t_quality.slim_energy = state.MAX_ENERGY; +} + +bool LocalOperations::isEdgeOnSurface(int v1_id, int v2_id) { + if (!tet_vertices[v1_id].is_on_surface || !tet_vertices[v2_id].is_on_surface) + return false; + + std::vector t_ids; + setIntersection(tet_vertices[v1_id].conn_tets, tet_vertices[v2_id].conn_tets, t_ids); + assert(t_ids.size()!=0); + return isEdgeOnSurface(v1_id, v2_id, t_ids); +} + +bool LocalOperations::isEdgeOnBbox(int v1_id, int v2_id){ + if(!tet_vertices[v1_id].is_on_bbox || !tet_vertices[v2_id].is_on_bbox) + return false; + + std::vector t_ids; + setIntersection(tet_vertices[v1_id].conn_tets, tet_vertices[v2_id].conn_tets, t_ids); + return isEdgeOnBbox(v1_id, v2_id, t_ids); +} + +bool LocalOperations::isEdgeOnSurface(int v1_id, int v2_id, const std::vector& t_ids){ + for (int i 
= 0; i < t_ids.size(); i++) { + for (int j = 0; j < 4; j++) { + if (tets[t_ids[i]][j] != v1_id && tets[t_ids[i]][j] != v2_id) { + if (is_surface_fs[t_ids[i]][j]!=state.NOT_SURFACE) + return true; + } + } + } + return false; +} + +bool LocalOperations::isEdgeOnBbox(int v1_id, int v2_id, const std::vector& t_ids){ + std::unordered_set v_ids; + for (int i = 0; i < t_ids.size(); i++) { + for (int j = 0; j < 4; j++) { + if (tets[t_ids[i]][j] != v1_id && tets[t_ids[i]][j] != v2_id) { + v_ids.insert(tets[t_ids[i]][j]); + } + } + } + if(v_ids.size()!=t_ids.size()) + return true; + return false; +} + +bool LocalOperations::isEdgeOnBoundary(int v1_id, int v2_id) { +// if (boundary_points.size() == 0)//if it's a closed mesh, then there cannot be any boundary edges. +// return false; + + if(state.is_mesh_closed) + return false; + + if (!tet_vertices[v1_id].is_on_boundary || !tet_vertices[v2_id].is_on_boundary) + return false; + +// return true; + + int cnt = 0; + for (int t_id: tet_vertices[v1_id].conn_tets) { + std::array opp_js; + int ii = 0; + for (int j = 0; j < 4; j++) { + if (tets[t_id][j] == v1_id || tets[t_id][j] == v2_id) + continue; + opp_js[ii++] = j; + } + if (ii == 2) { + if (is_surface_fs[t_id][opp_js[0]] != state.NOT_SURFACE) + cnt++; + if (is_surface_fs[t_id][opp_js[1]] != state.NOT_SURFACE) + cnt++; + if (cnt > 2) + return false; + } + } + if (cnt == 2) //is boundary edge + return true; + + return false; +} + +bool LocalOperations::isFaceOutEnvelop(const Triangle_3f& tri) { +#if CHECK_ENVELOP + if(state.use_sampling){ + return isFaceOutEnvelop_sampling(tri); + } + return true; +#else + return false; +#endif + +// EnvelopSide side = getUpperLowerBounds(tri); +// if (side == EnvelopSide::OUTSIDE) +// return true; +// else if (side == EnvelopSide::INSIDE) +// return false; +// else { +// int depth = 0; +// int cnt = 0, sub_cnt = 0; +// std::queue tris_queue; +// tris_queue.push(tri); +// cnt++; +// while (!tris_queue.empty()) { +//// logger().debug("depth = {}{}cnt = {}{}sub_cnt = {}", depth, ", " +////, cnt, ", " +////, sub_cnt); +// if (depth == 6) +// return true; +//// return false; +// Triangle_3f &cur_tri = tris_queue.front(); +// +// //subdivide +// std::array mps; +// for (int j = 0; j < 3; j++) +// mps[j] = CGAL::midpoint(cur_tri[j], cur_tri[(j + 1) % 3]); +// +// std::array tris; +// for (int j = 0; j < 3; j++) +// tris[j] = Triangle_3f(cur_tri[j], mps[(j + 1) % 3], mps[(j - 1 + 3) % 3]); +// tris[3] = Triangle_3f(mps[0], mps[1], mps[2]); +// +// for (int j = 0; j < 4; j++) { +//// logger().debug("{}{}depth = {}{}cnt = {}{}sub_cnt = {}", j, ": "; +// side = getUpperLowerBounds(tris[j]); +// if (side == EnvelopSide::OUTSIDE) +// return true; +// else if (side == EnvelopSide::UNCERTAIN) { +// tris_queue.push(tris[j]); +// sub_cnt++; +// } +// } +// +// tris_queue.pop(); +// cnt--; +//// std::cout, depth, ", " +////, cnt, ", " +////, sub_cnt); +// if (cnt == 0) { +// cnt = sub_cnt; +// sub_cnt = 0; +// depth++; +// } +//// pausee(); +// } +// return false; +// } +} + +bool LocalOperations::isPointOutEnvelop(const Point_3f& p) { +#if CHECK_ENVELOP + GEO::vec3 geo_p(p[0], p[1], p[2]); + if (geo_sf_tree.squared_distance(geo_p) > state.eps_2) + return true; + + return false; +#else + return false; +#endif +} + +bool LocalOperations::isFaceOutEnvelop_sampling(const Triangle_3f& tri) { +#if CHECK_ENVELOP + if (tri.is_degenerate()) + return false; + +#if TIMING_BREAKDOWN + igl_timer0.start(); +#endif + std::array vs = {{GEO::vec3(tri[0][0], tri[0][1], tri[0][2]), + GEO::vec3(tri[1][0], 
tri[1][1], tri[1][2]), + GEO::vec3(tri[2][0], tri[2][1], tri[2][2])}}; + static thread_local std::vector ps; + ps.clear(); + sampleTriangle(vs, ps, state.sampling_dist); +#if TIMING_BREAKDOWN + breakdown_timing0[id_sampling] += igl_timer0.getElapsedTime(); +#endif + + size_t num_queries = 0; + size_t num_samples = ps.size(); + + //decide in/out +#if TIMING_BREAKDOWN + igl_timer0.start(); +#endif + + GEO::vec3 current_point = ps[0]; + GEO::vec3 nearest_point; + double sq_dist = std::numeric_limits::max(); + GEO::index_t prev_facet = GEO::NO_FACET; + int cnt = 0; + const unsigned int ps_size = ps.size(); + for (unsigned int i = ps_size / 2; i < ps.size(); i = (i + 1) % ps_size) {//check from the middle + GEO::vec3 ¤t_point = ps[i]; + if (prev_facet != GEO::NO_FACET) { + get_point_facet_nearest_point(geo_sf_mesh, current_point, prev_facet, nearest_point, sq_dist); + } + if (sq_dist > state.eps_2) { + geo_sf_tree.facet_in_envelope_with_hint( + current_point, state.eps_2, prev_facet, nearest_point, sq_dist); + } + ++num_queries; + if (sq_dist > state.eps_2) { +#if TIMING_BREAKDOWN + breakdown_timing0[id_aabb] += igl_timer0.getElapsedTime(); +#endif + logger().trace("num_queries {} / {}", num_queries, num_samples); + return true; + } + cnt++; + if (cnt >= ps_size) + break; + } + +#if TIMING_BREAKDOWN + breakdown_timing0[id_aabb] += igl_timer0.getElapsedTime(); +#endif + + logger().trace("num_queries {} / {}", num_queries, num_samples); + return false; +#else + return false; +#endif +} + +bool LocalOperations::isPointOutBoundaryEnvelop(const Point_3f& p) { +#if CHECK_ENVELOP + GEO::vec3 geo_p(p[0], p[1], p[2]); + if (geo_b_tree.squared_distance(geo_p) > state.eps_2) { + return true; + } + return false; +#else + return false; +#endif +} + +bool LocalOperations::isBoundarySlide(int v1_id, int v2_id, Point_3f& old_pf){ + return false; + +#if CHECK_ENVELOP + if(state.is_mesh_closed) + return false; + + std::unordered_set n_v_ids; + for(int t_id:tet_vertices[v1_id].conn_tets){ + for(int j=0;j<4;j++) + if(tets[t_id][j]!=v1_id && tets[t_id][j]!=v2_id && tet_vertices[tets[t_id][j]].is_on_boundary) + n_v_ids.insert(tets[t_id][j]); + } + if(n_v_ids.size()==0) + return false; + +#if TIMING_BREAKDOWN + igl_timer0.start(); +#endif + static thread_local std::vector b_points; + static thread_local std::vector ps; + b_points.clear(); + for(int v_id:n_v_ids) { + if (!isEdgeOnBoundary(v1_id, v_id)) + continue; + //sample the edge (v1, v) and push the sampling points into vector + GEO::vec3 p1(tet_vertices[v1_id].posf[0], tet_vertices[v1_id].posf[1], tet_vertices[v1_id].posf[2]); + GEO::vec3 p2(tet_vertices[v_id].posf[0], tet_vertices[v_id].posf[1], tet_vertices[v_id].posf[2]); + b_points.push_back(p1); + b_points.push_back(p2); + int n = GEO::distance(p1, p2) / state.sampling_dist + 1; + if (n == 1) + continue; + b_points.reserve(b_points.size() + n + 1); + for (int k = 1; k <= n - 1; k++) + b_points.push_back(p1 * ((double) k / (double) n) + p2 * ((double) (n - k) / (double) n)); + } + + //sampling faces + if(v2_id>=0 && tet_vertices[v2_id].is_on_boundary) { + std::vector n12_t_ids; + setIntersection(tet_vertices[v1_id].conn_tets, tet_vertices[v2_id].conn_tets, n12_t_ids); + std::unordered_set n12_v_ids; + for (int t_id:n12_t_ids) { + for (int j = 0; j < 4; j++) + if (tets[t_id][j] != v1_id && tets[t_id][j] != v2_id && tet_vertices[tets[t_id][j]].is_on_boundary) + n12_v_ids.insert(tets[t_id][j]); + } + bool is_12_on_boundary = false; + if(n12_v_ids.size()!=0) { + is_12_on_boundary = isEdgeOnBoundary(v1_id, 
v2_id); + } + for(int v_id:n12_v_ids) { + if (!isEdgeOnBoundary(v1_id, v_id) || !isEdgeOnBoundary(v2_id, v_id)) + continue; + if (!is_12_on_boundary) { + GEO::vec3 p1(tet_vertices[v1_id].posf[0], tet_vertices[v1_id].posf[1], tet_vertices[v1_id].posf[2]); + GEO::vec3 p2(old_pf[0], old_pf[1], old_pf[2]); + int n = GEO::distance(p1, p2) / state.sampling_dist + 1; + b_points.reserve(b_points.size() + n + 1); + b_points.push_back(p1); + for (int k = 1; k <= n - 1; k++) + b_points.push_back(p1 * ((double) k / (double) n) + p2 * ((double) (n - k) / (double) n)); + b_points.push_back(p2); + } else { + Triangle_3f tri(tet_vertices[v_id].posf, tet_vertices[v2_id].posf, old_pf); + std::array vs = {{GEO::vec3(tri[0][0], tri[0][1], tri[0][2]), + GEO::vec3(tri[1][0], tri[1][1], tri[1][2]), + GEO::vec3(tri[2][0], tri[2][1], tri[2][2])}}; + ps.clear(); + sampleTriangle(vs, ps, state.sampling_dist); + +// sampleTriangle(tri, ps);//CANNOT directly push the sampling points into b_points + + b_points.reserve(b_points.size() + ps.size()); // preallocate memory + b_points.insert(b_points.end(), ps.begin(), ps.end()); + } + } + } +#if TIMING_BREAKDOWN + breakdown_timing0[id_sampling] += igl_timer0.getElapsedTime(); +#endif + if(b_points.size()==0) + return false; + +#if TIMING_BREAKDOWN + igl_timer0.start(); +#endif + GEO::vec3 current_point = b_points[0]; + GEO::vec3 nearest_point; + double sq_dist; + GEO::index_t prev_facet = geo_b_tree.nearest_facet(current_point, nearest_point, sq_dist); + int cnt = 0; + const unsigned int b_points_size = b_points.size(); + for (unsigned int i = b_points_size / 2; ; i = (i + 1) % b_points_size) { + GEO::vec3 ¤t_point = b_points[i]; + sq_dist = current_point.distance2(nearest_point); + geo_b_tree.nearest_facet_with_hint(current_point, prev_facet, nearest_point, sq_dist); + double dis = current_point.distance2(nearest_point); + if (dis > state.eps_2) { +#if TIMING_BREAKDOWN + breakdown_timing0[id_aabb] += igl_timer0.getElapsedTime(); +#endif + return true; + } + cnt++; + if (cnt >= b_points.size()) + break; + } +#if TIMING_BREAKDOWN + breakdown_timing0[id_aabb] += igl_timer0.getElapsedTime(); +#endif + + return false; +#else + return false; +#endif +} + +bool LocalOperations::isTetOnSurface(int t_id){ + for(int i=0;i<4;i++){ + if(is_surface_fs[t_id][i]!=state.NOT_SURFACE) + return false; + } + return true; +} + +bool LocalOperations::isTetRounded(int t_id){ + for(int i=0;i<4;i++){ + if(!tet_vertices[tets[t_id][i]].is_rounded) + return false; + } + return true; +} + +void LocalOperations::getFaceConnTets(int v1_id, int v2_id, int v3_id, std::vector& t_ids){ + std::vector v1, v2, v3, tmp; + v1.reserve(tet_vertices[v1_id].conn_tets.size()); + for(auto it=tet_vertices[v1_id].conn_tets.begin();it!=tet_vertices[v1_id].conn_tets.end();it++) + v1.push_back(*it); + v2.reserve(tet_vertices[v2_id].conn_tets.size()); + for(auto it=tet_vertices[v2_id].conn_tets.begin();it!=tet_vertices[v2_id].conn_tets.end();it++) + v2.push_back(*it); + v3.reserve(tet_vertices[v3_id].conn_tets.size()); + for(auto it=tet_vertices[v3_id].conn_tets.begin();it!=tet_vertices[v3_id].conn_tets.end();it++) + v3.push_back(*it); + + std::sort(v1.begin(), v1.end()); + std::sort(v2.begin(), v2.end()); + std::sort(v3.begin(), v3.end()); + + std::set_intersection(v1.begin(), v1.end(), v2.begin(), v2.end(), std::back_inserter(tmp)); + std::set_intersection(v3.begin(), v3.end(), tmp.begin(), tmp.end(), std::back_inserter(t_ids)); +} + +bool LocalOperations::isIsolated(int v_id) { + for (auto it = 
tet_vertices[v_id].conn_tets.begin(); it != tet_vertices[v_id].conn_tets.end(); it++) { + for (int j = 0; j < 4; j++) { + if (tets[*it][j] != v_id && is_surface_fs[*it][j] != state.NOT_SURFACE) + return false; + } + } + + return true; +} + +bool LocalOperations::isBoundaryPoint(int v_id) { + if(state.is_mesh_closed) + return false; + std::unordered_set n_v_ids; + for (int t_id:tet_vertices[v_id].conn_tets) { + for (int j = 0; j < 4; j++) + if (tets[t_id][j] != v_id && tet_vertices[tets[t_id][j]].is_on_boundary) + n_v_ids.insert(tets[t_id][j]); + } + for (int n_v_id:n_v_ids) { + if (isEdgeOnBoundary(n_v_id, v_id)) + return true; + } + return false; +} + +void LocalOperations::checkUnrounded() { + bool is_output = false; + for (unsigned int i = 0; i < tet_vertices.size(); i++) { + if (v_is_removed[i]) + continue; + if (!tet_vertices[i].is_rounded) { + is_output = true; + break; + } + } + if(!is_output) + return; + + std::ofstream of; + of.open(state.working_dir + "unrounded_check.txt"); + int cnt_sf = 0; + int cnt_b = 0; + int cnt_all = 0; + int cnt_sf1 = 0; + std::vector diss; + for (unsigned int i = 0; i < tet_vertices.size(); i++) { + if (v_is_removed[i]) + continue; + if (tet_vertices[i].is_rounded) + continue; + + cnt_all++; + + if (tet_vertices[i].is_on_boundary) + cnt_b++; + if (tet_vertices[i].is_on_surface) { + cnt_sf++; + continue; + } + + bool is_found = false; + for (int t_id:tet_vertices[i].conn_tets) { + for (int j = 0; j < 4; j++) { + if (tets[t_id][j] == i) { + if (is_surface_fs[t_id][j] != state.NOT_SURFACE) { + cnt_sf1++; + is_found = true; + } + break; + } + } + if (is_found) + break; + } + if (is_found) + continue; + + GEO::vec3 geo_p(tet_vertices[i].posf[0], tet_vertices[i].posf[1], tet_vertices[i].posf[2]); + double dis = sqrt(geo_sf_tree.squared_distance(geo_p)); + diss.push_back(dis); + } + + of << "# all = " << cnt_all << std::endl; + of << "# surface = " << cnt_sf << std::endl; + of << "# boundary = " << cnt_b << std::endl; +// of<<"Is closed? "< +// +// This Source Code Form is subject to the terms of the Mozilla Public License +// v. 2.0. If a copy of the MPL was not distributed with this file, You can +// obtain one at http://mozilla.org/MPL/2.0/. +// +// Created by Yixin Hu on 5/6/17. 
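+//
+// LocalOperations owns references to the shared mesh state (tet_vertices,
+// tets, surface/boundary flags, per-tet qualities) and provides the
+// predicates and measures the local operations build on: flip/orientation
+// checks, AMIPS energy and dihedral-angle quality, edge-on-surface/bbox/
+// boundary tests, and envelope containment queries against the GEO AABB
+// trees (geo_sf_tree / geo_b_tree) with squared tolerance state.eps_2.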
+// + +#ifndef NEW_GTET_LOCALOPERATIONS_H +#define NEW_GTET_LOCALOPERATIONS_H + +#include +#include +#include +#include +#include + +#ifdef TETWILD_WITH_ISPC +#include +#endif + +namespace tetwild { + +enum class EnvelopSide{ + OUTSIDE=0, + INSIDE=1, + UNCERTAIN=2 +}; + +class LocalOperations { +public: + const Args & args; + State & state; + + std::vector& tet_vertices; + std::vector>& tets; + std::vector>& is_surface_fs; + std::vector& v_is_removed; + std::vector& t_is_removed; + std::vector& tet_qualities; + + int energy_type; + + const GEO::Mesh &geo_sf_mesh; + const GEO::MeshFacetsAABBWithEps& geo_sf_tree; + const GEO::MeshFacetsAABBWithEps& geo_b_tree; + + int counter=0; + int suc_counter=0; + + std::array cmp_d_angles = {{6/180.0*M_PI, 12/180.0*M_PI, 18/180.0*M_PI, 162/180.0*M_PI, 168/180.0*M_PI, 174/180.0*M_PI}}; + + LocalOperations(std::vector& t_vs, std::vector>& ts, std::vector>& is_sf_fs, + std::vector& v_is_rm, std::vector& t_is_rm, std::vector& tet_qs, + int e_type, const GEO::Mesh &geo_mesh, const GEO::MeshFacetsAABBWithEps& geo_tree, const GEO::MeshFacetsAABBWithEps& b_t, + const Args &ar, State &st) : + tet_vertices(t_vs), tets(ts), is_surface_fs(is_sf_fs), v_is_removed(v_is_rm), t_is_removed(t_is_rm), + tet_qualities(tet_qs), energy_type(e_type), + geo_sf_mesh(geo_mesh), geo_sf_tree(geo_tree), geo_b_tree(b_t), + args(ar), state(st) + { } + + void check(); + void outputInfo(int op_type, double time, bool is_log = true); + + void calTetQualities(const std::vector>& new_tets, std::vector& tet_qs, bool all_measure = false); + void calTetQualities(const std::vector& t_ids, bool all_measure = false); + + double calEdgeLength(const std::array& v_ids); + double calEdgeLength(int v1_id, int v2_id, bool is_over_refine=false); + void calTetQuality_AD(const std::array& tet, TetQuality& t_quality); + void calTetQuality_AMIPS(const std::array& tet, TetQuality& t_quality); + + bool isFlip(const std::vector>& new_tets); + bool isTetFlip(const std::array& t); + bool isTetFlip(int t_id); + +// void getWorstQuality(TetQuality& tq); + void getAvgMaxEnergy(double& avg_tq, double& max_tq); + double getMaxEnergy(); + double getSecondMaxEnergy(double max_energy); + double getFilterEnergy(bool& is_clean_up); + + void getCheckQuality(const std::vector& tet_qs, TetQuality& tq); + void getCheckQuality(const std::vector& t_ids, TetQuality& tq); + + bool isEdgeOnSurface(int v1_id, int v2_id); + bool isEdgeOnBbox(int v1_id, int v2_id); + bool isEdgeOnSurface(int v1_id, int v2_id, const std::vector& t_ids); + bool isEdgeOnBbox(int v1_id, int v2_id, const std::vector& t_ids); + bool isEdgeOnBoundary(int v1_id, int v2_id); + +// EnvelopSide getUpperLowerBounds(const Triangle_3f& tri); + bool isFaceOutEnvelop(const Triangle_3f& tri); + bool isPointOutEnvelop(const Point_3f& p); + bool isFaceOutEnvelop_sampling(const Triangle_3f& tri); + bool isPointOutBoundaryEnvelop(const Point_3f& p); + bool isBoundarySlide(int v1_id, int v2_id, Point_3f& pf); + + bool isTetOnSurface(int t_id); + bool isTetRounded(int t_id); + void getFaceConnTets(int v1_id, int v2_id, int v3_id, std::vector& t_ids); + bool isIsolated(int v_id); + bool isBoundaryPoint(int v_id); + + static double comformalAMIPSEnergy_new(const double * T); + static void comformalAMIPSJacobian_new(const double * T, double *result_0); + static void comformalAMIPSHessian_new(const double * T, double *result_0); + + igl::Timer igl_timer0; + int id_sampling=0; + int id_aabb=1; + std::array breakdown_timing0; + std::array breakdown_name0={{"Envelop_sampling", 
"Envelop_AABBtree"}}; + + void checkUnrounded(); + int mid_id=0; + void outputSurfaceColormap(const Eigen::MatrixXd& V_in, const Eigen::MatrixXi& F_in, double old_eps); + + bool isLocked_ui(const std::array& e); + bool isTetLocked_ui(int tid); +}; + +} // namespace tetwild + +#endif //NEW_GTET_LOCALOPERATIONS_H diff --git a/contrib/NeRF-Editing/TetWild/src/tetwild/Logger.cpp b/contrib/NeRF-Editing/TetWild/src/tetwild/Logger.cpp new file mode 100644 index 00000000..83888cb0 --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/src/tetwild/Logger.cpp @@ -0,0 +1,51 @@ +// This file is part of TetWild, a software for generating tetrahedral meshes. +// +// Copyright (C) 2018 Jeremie Dumas +// +// This Source Code Form is subject to the terms of the Mozilla Public License +// v. 2.0. If a copy of the MPL was not distributed with this file, You can +// obtain one at http://mozilla.org/MPL/2.0/. +// +// Created by Jeremie Dumas on 09/04/18. +// + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace tetwild { + +std::shared_ptr Logger::logger_; + +// Some code was copied over from +void Logger::init(bool use_cout, const std::string &filename, bool truncate) { + std::vector sinks; + if (use_cout) { + sinks.emplace_back(std::make_shared()); + } + if (!filename.empty()) { + sinks.emplace_back(std::make_shared(filename, truncate)); + } + + auto ®istry_inst = spdlog::details::registry::instance(); + + // create global thread pool if not already exists.. + std::lock_guard tp_lock(registry_inst.tp_mutex()); + auto tp = registry_inst.get_tp(); + if (tp == nullptr) { + tp = std::make_shared(spdlog::details::default_async_q_size, 1); + registry_inst.set_tp(tp); + } + + logger_ = std::make_shared("tetwild", sinks.begin(), sinks.end(), std::move(tp), spdlog::async_overflow_policy::block); + registry_inst.register_and_init(logger_); +} + +} // namespace tetwild diff --git a/contrib/NeRF-Editing/TetWild/src/tetwild/MeshConformer.cpp b/contrib/NeRF-Editing/TetWild/src/tetwild/MeshConformer.cpp new file mode 100644 index 00000000..cbdb2286 --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/src/tetwild/MeshConformer.cpp @@ -0,0 +1,385 @@ +// This file is part of TetWild, a software for generating tetrahedral meshes. +// +// Copyright (C) 2018 Yixin Hu +// +// This Source Code Form is subject to the terms of the Mozilla Public License +// v. 2.0. If a copy of the MPL was not distributed with this file, You can +// obtain one at http://mozilla.org/MPL/2.0/. +// +// Created by Yixin Hu on 3/29/17. 
+// + +#include +#include + +namespace tetwild { + +void MeshConformer::match() { + is_matched.assign(m_faces.size(), false); + matchDivFaces(); + return; +} + +void MeshConformer::matchVertexIndices(int x, const std::vector>& seed_v_list, + std::vector& f_list) { + int seed_id; + int l = 0, r = seed_v_list.size() - 1; + while (l < r) { + int mid = (l + r) / 2; + if (seed_v_list[mid][0] < x) + l = mid + 1; + else + r = mid; + } + if (r >= l && seed_v_list[l][0]==x) { + f_list.push_back(seed_v_list[l][1]); + int s = l - 1; + while (s >= 0) { + if (seed_v_list[s][0] == x) + f_list.push_back(seed_v_list[s][1]); + else + break; + s--; + } + s = l + 1; + while (s < seed_v_list.size()) { + if (seed_v_list[s][0] == x) + f_list.push_back(seed_v_list[s][1]); + else + break; + s++; + } + } +} + + +void MeshConformer::matchDivFaces() { + std::vector> seed_v_list; + seed_v_list.reserve(bsp_faces.size() * 3); + for (int i = 0; i < bsp_faces.size(); i++) { + for (int j = 0; j < 3; j++) + seed_v_list.push_back(std::array({{bsp_faces[i].vertices[j], i}})); + } + std::sort(seed_v_list.begin(), seed_v_list.end(), [](const std::array &a, const std::array &b) { + return a[0] < b[0];//todo: there should be a O(n) algorithm + }); + + const int m_faces_size = m_faces.size(); + for (int i = 0; i < m_faces_size; i++) { + int m_f_id = i; + std::array tri1 = {{m_vertices[m_faces[i][0]], m_vertices[m_faces[i][1]], m_vertices[m_faces[i][2]]}}; + + ///find seed info + std::unordered_set seed_fids, seed_nids; + std::vector> f_lists; + is_matched[i] = true; + for (int j = 0; j < 3; j++) { + std::vector f_list; + matchVertexIndices(m_faces[i][j], seed_v_list, f_list); + if (f_list.size() == 0) + is_matched[i] = false; + for (int k = 0; k < f_list.size(); k++) + seed_fids.insert(f_list[k]); + if (is_matched[i])//possibly matched + f_lists.push_back(f_list); + } + if (is_matched[i]) { + is_matched[i] = false; + for (int j = 0; j < f_lists.size(); j++) + std::sort(f_lists[j].begin(), f_lists[j].end()); + std::vector tmp, tmp1; + std::set_intersection(f_lists[0].begin(), f_lists[0].end(), f_lists[1].begin(), f_lists[1].end(), + std::back_inserter(tmp)); + std::set_intersection(tmp.begin(), tmp.end(), f_lists[2].begin(), f_lists[2].end(), + std::back_inserter(tmp1));//todo: find a better way to cal intersection + //todo: you can use unordered_set and set a *good* hash function to find identical triangles on bsp tree + if (tmp1.size() == 1) { + std::array tmp_bsp = {{bsp_faces[tmp1[0]].vertices[0], bsp_faces[tmp1[0]].vertices[1], + bsp_faces[tmp1[0]].vertices[2]}}; + std::array tmp_m = m_faces[i]; + std::sort(tmp_bsp.begin(), tmp_bsp.end()); + std::sort(tmp_m.begin(), tmp_m.end()); + if (tmp_bsp == tmp_m) { + is_matched[i] = true; + bsp_faces[tmp1[0]].matched_f_id = i; + continue; + } + } + } + for (auto it = seed_fids.begin(); it != seed_fids.end(); it++) + for (auto jt = bsp_faces[*it].conn_nodes.begin(); jt != bsp_faces[*it].conn_nodes.end(); jt++) + seed_nids.insert(*jt); + + for (auto it = seed_fids.begin(); it != seed_fids.end(); it++) { + ////cal intersection type + std::array tri2 = {{bsp_vertices[bsp_faces[*it].vertices[0]], + bsp_vertices[bsp_faces[*it].vertices[1]], + bsp_vertices[bsp_faces[*it].vertices[2]]}}; + int int_type = triangleIntersection3d(tri1, tri2, true); + if (int_type == COPLANAR_INT) { + bsp_faces[*it].div_faces.insert(i); + } else if (int_type == CROSS_INT) { + for (auto nit = bsp_faces[*it].conn_nodes.begin(); nit != bsp_faces[*it].conn_nodes.end(); nit++) + bsp_nodes[*nit].div_faces.insert(i); + } 
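+                // A coplanar hit is recorded on the BSP face itself; a crossing
+                // hit is recorded on every BSP node adjacent to that face (the
+                // div_faces sets presumably drive the later subdivision of those
+                // cells/faces).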
+ } + + ///dfs all the info + std::unordered_set new_fids; + std::unordered_set new_nids = seed_nids; + while (true) { + new_fids.clear(); + for (auto it = new_nids.begin(); it != new_nids.end(); it++) { + for (int j = 0; j < bsp_nodes[*it].faces.size(); j++) { + int bsp_f_id = bsp_nodes[*it].faces[j]; + auto fit = std::find(seed_fids.begin(), seed_fids.end(), bsp_f_id); + if (fit == seed_fids.end()) { + ///check if the plane-coplanar or plane-crossing + ///check if intersecting (coplanar -> do_intersection / crossing -> sort 4 interseting points) + ///if intersected -> insert into new_fids + /////if coplanar-intersecting -> divface for face + /////else if crossing-intersecting -> divface for node + std::array tri2 = {{bsp_vertices[bsp_faces[bsp_f_id].vertices[0]], + bsp_vertices[bsp_faces[bsp_f_id].vertices[1]], + bsp_vertices[bsp_faces[bsp_f_id].vertices[2]]}}; + int int_type = triangleIntersection3d(tri1, tri2, false); + if (int_type != NONE_INT) + new_fids.insert(bsp_f_id); + + if (int_type == COPLANAR_INT) { + bsp_faces[bsp_f_id].div_faces.insert(i); + } else if (int_type == CROSS_INT) { + for (auto nit = bsp_faces[bsp_f_id].conn_nodes.begin(); + nit != bsp_faces[bsp_f_id].conn_nodes.end(); nit++) + bsp_nodes[*nit].div_faces.insert(i); + } + } + } + } + + if (new_fids.size() == 0) + break; + + new_nids.clear(); + for (auto it = new_fids.begin(); it != new_fids.end(); it++) { + for (auto jt = bsp_faces[*it].conn_nodes.begin(); jt != bsp_faces[*it].conn_nodes.end(); jt++) { + auto nid = std::find(seed_nids.begin(), seed_nids.end(), *jt); + if (nid == seed_nids.end()) { + new_nids.insert(*jt); + } + } + } + if (new_nids.size() == 0) + break; + + seed_fids.insert(new_fids.begin(), new_fids.end()); + seed_nids.insert(new_nids.begin(), new_nids.end());//c++11 + } + } + logger().debug("{} faces matched!", std::count(is_matched.begin(), is_matched.end(), true)); +} + +void MeshConformer::getOrientedVertices(int bsp_f_id){ + if(bsp_faces[bsp_f_id].vertices.size()==3) + return; + + std::vector vertices; + int begin=bsp_edges[bsp_faces[bsp_f_id].edges[0]].vertices[0]; + int end=bsp_edges[bsp_faces[bsp_f_id].edges[0]].vertices[1]; + vertices.push_back(begin); + + std::vector is_visited(bsp_faces[bsp_f_id].edges.size(), false); + is_visited[0]=true; + for(int i=0;i& tri, const Plane_3& pln, + std::vector& pos_vs, std::vector& neg_vs, std::vector& on_vs) { + for (int i = 0; i < 3; i++) { + CGAL::Oriented_side side = pln.oriented_side(tri[i]); + if (side == CGAL::ON_POSITIVE_SIDE) { + pos_vs.push_back(tri[i]); + } else if (side == CGAL::ON_NEGATIVE_SIDE) { + neg_vs.push_back(tri[i]); + } else { + on_vs.push_back(tri[i]); + } + } +} + +int MeshConformer::triangleIntersection3d(const std::array& tri1, const std::array& tri2, + bool intersect_known) { + if (!intersect_known) { + Triangle_3 t1(tri1[0], tri1[1], tri1[2]); + Triangle_3 t2(tri2[0], tri2[1], tri2[2]); + if (!do_intersect(t1, t2)) + return NONE_INT; + } + + Plane_3 pln1(tri1[0], tri1[1], tri1[2]); + Plane_3 pln2(tri2[0], tri2[1], tri2[2]); + + std::vector pos_vs1, neg_vs1, on_vs1; + triangleSideofPlane(tri1, pln2, pos_vs1, neg_vs1, on_vs1); + + ///coplanar + if (on_vs1.size() == 3) { +// if (!intersect_known) { +// Triangle_3 t1(tri1[0], tri1[1], tri1[2]); +// Triangle_3 t2(tri2[0], tri2[1], tri2[2]); +// if (do_intersect(t1, t2)) +// return COPLANAR_INT; +// else +// return NONE_INT; +// } else + return COPLANAR_INT; + } + + ///on one side + if (pos_vs1.size() == 0 || neg_vs1.size() == 0) { +// if(intersect_known) + return POINT_INT; +// 
+// Triangle_3 t1(tri1[0], tri1[1], tri1[2]); +// Triangle_3 t2(tri2[0], tri2[1], tri2[2]); +// if (do_intersect(t1, t2)) +// return POINT_INT; +// else +// return NONE_INT; + } + + ///cross + std::vector pos_vs2, neg_vs2, on_vs2; + triangleSideofPlane(tri2, pln1, pos_vs2, neg_vs2, on_vs2); + if (pos_vs2.size() == 0 || neg_vs2.size() == 0) { +// if(intersect_known) + return POINT_INT; +// +// Triangle_3 t1(tri1[0], tri1[1], tri1[2]); +// Triangle_3 t2(tri2[0], tri2[1], tri2[2]); +// if (do_intersect(t1, t2)) +// return POINT_INT; +// else +// return NONE_INT; + } + + std::vector> sorted_vs; + ///cal intersecting points for tri1 + for (int i = 0; i < pos_vs1.size(); i++) { + for (int j = 0; j < neg_vs1.size(); j++) { + Segment_3 seg1(pos_vs1[i], neg_vs1[j]); + auto result = intersection(seg1, pln2); + assert(!(!result)); + if (result) { + if (const Point_3 *p = boost::get(&*result)) + on_vs1.push_back(*p); + else + throw TetWildError("MeshConformer::triangleIntersection3d"); + } + } + } + assert(!(on_vs1.size() != 2)); + for (int i = 0; i < 2; i++) + sorted_vs.push_back(std::make_pair(on_vs1[i], 1)); + + ///cal intersecting points for tri2 + for (int i = 0; i < pos_vs2.size(); i++) { + for (int j = 0; j < neg_vs2.size(); j++) { + Segment_3 seg2(pos_vs2[i], neg_vs2[j]); + auto result = intersection(seg2, pln1); + assert(!(!result)); + if (result) { + if (const Point_3 *p = boost::get(&*result)) + on_vs2.push_back(*p); + else + throw TetWildError("MeshConformer::triangleIntersection3d"); + } + } + } + assert(!(on_vs2.size() != 2)); + for (int i = 0; i < 2; i++) + sorted_vs.push_back(std::make_pair(on_vs2[i], 2)); + std::sort(sorted_vs.begin(), sorted_vs.end(), [](const std::pair &p1, + const std::pair &p2) { + return p1.first < p2.first; + }); + +// if (!intersect_known) { +// if (sorted_vs[1].first != sorted_vs[2].first){ +// if(sorted_vs[0].second != sorted_vs[1].second) +// return CROSS_INT; +// else +// return NONE_INT; +// } +// else +// return POINT_INT; +// } else { + if (sorted_vs[1].first != sorted_vs[2].first) + return CROSS_INT; + else + return POINT_INT; +// } +} + +void MeshConformer::initT(const Vector_3& nv) { + std::vector vs {Vector_3(1, 0, 0), Vector_3(0, 1, 0), Vector_3(0, 0, 1)}; + std::vector is_ppd(3, false); + + for (int i = 0; i < 3; i++) { + if (nv * vs[i] == 0) + is_ppd[i] = true; + } + int i = std::find(is_ppd.begin(), is_ppd.end(), false) - is_ppd.begin(); + t = i; +} + +Point_2 MeshConformer::to2d(const Point_3& p){ + int x=(t+1)%3; + int y=(t+2)%3; + return Point_2(p[x], p[y]); +} + +Point_3 MeshConformer::to3d(const Point_2& p, const Plane_3& pln) { + Line_3 l; + switch (t) { + case 0: + l = Line_3(Point_3(0, p[0], p[1]), Direction_3(1, 0, 0)); + break; + case 1: + l = Line_3(Point_3(p[1], 0, p[0]), Direction_3(0, 1, 0)); + break; + case 2: + l = Line_3(Point_3(p[0], p[1], 0), Direction_3(0, 0, 1)); + } + + auto result = intersection(l, pln); + if (result) { + const Point_3 *p = boost::get(&*result); + return *p; + } else { + log_and_throw("error to3d!"); + } +} + +} // namespace tetwild diff --git a/contrib/NeRF-Editing/TetWild/src/tetwild/MeshConformer.h b/contrib/NeRF-Editing/TetWild/src/tetwild/MeshConformer.h new file mode 100644 index 00000000..5a09f7fd --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/src/tetwild/MeshConformer.h @@ -0,0 +1,55 @@ +// This file is part of TetWild, a software for generating tetrahedral meshes. +// +// Copyright (C) 2018 Yixin Hu +// +// This Source Code Form is subject to the terms of the Mozilla Public License +// v. 2.0. 
If a copy of the MPL was not distributed with this file, You can +// obtain one at http://mozilla.org/MPL/2.0/. +// +// Created by Yixin Hu on 3/29/17. +// + +#ifndef GTET_MESHCONFORMER_H +#define GTET_MESHCONFORMER_H + +#include +#include + +namespace tetwild { + +class MeshConformer { +public: + const std::vector &m_vertices; + const std::vector> &m_faces; + std::vector is_matched; + + std::vector& bsp_vertices; + std::vector& bsp_edges; + std::vector& bsp_faces; + std::vector& bsp_nodes; + + MeshConformer(const std::vector &m_vs, const std::vector> &m_fs, std::vector& bsp_vs, + std::vector& bsp_es, std::vector& bsp_fs, std::vector& bsp_ns) : + m_vertices(m_vs), m_faces(m_fs), bsp_vertices(bsp_vs), bsp_edges(bsp_es), bsp_faces(bsp_fs), bsp_nodes(bsp_ns){} + + void match(); + void matchVertexIndices(int x, const std::vector>& seed_v_list, std::vector& f_list); + void matchDivFaces(); + void getOrientedVertices(int bsp_f_id); + + const int COPLANAR_INT=0; + const int CROSS_INT=1; + const int POINT_INT=2; + const int NONE_INT=3; + int triangleIntersection3d(const std::array& tri1, const std::array& tri2, + bool intersect_known=true); + + int t = 0; + void initT(const Vector_3 &nv); + Point_2 to2d(const Point_3 &p); + Point_3 to3d(const Point_2 &p, const Plane_3 &pln); +}; + +} // namespace tetwild + +#endif //GTET_MESHCONFORMER_H diff --git a/contrib/NeRF-Editing/TetWild/src/tetwild/MeshRefinement.cpp b/contrib/NeRF-Editing/TetWild/src/tetwild/MeshRefinement.cpp new file mode 100644 index 00000000..12c5f1db --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/src/tetwild/MeshRefinement.cpp @@ -0,0 +1,1410 @@ +// This file is part of TetWild, a software for generating tetrahedral meshes. +// +// Copyright (C) 2018 Yixin Hu +// +// This Source Code Form is subject to the terms of the Mozilla Public License +// v. 2.0. If a copy of the MPL was not distributed with this file, You can +// obtain one at http://mozilla.org/MPL/2.0/. +// +// Created by Yixin Hu on 4/11/17. 
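+//
+// MeshRefinement drives the mesh-optimization stage. Roughly (see refine()):
+//
+//   prepareData();                        // round vertices, compute tet qualities
+//   for (pass = 0; pass < args.max_num_passes; ++pass) {
+//       doOperations(splitter, collapser, edge_remover, smoother, ops);
+//       if (max AMIPS energy < args.filter_energy_thres) break;
+//       // on stagnation: tighten the adaptive sizing field (updateScalarField)
+//       // and/or advance the envelope epsilon stage (state.eps += state.eps_delta).
+//   }
+//   // optional post steps: targeted vertex count, background sizing field,
+//   // winding-number in/out filtering, open-boundary smoothing.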
+// + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace tetwild { + +void MeshRefinement::prepareData(bool is_init) { + igl_timer.start(); + if (is_init) { + t_is_removed = std::vector(tets.size(), false);//have to + v_is_removed = std::vector(tet_vertices.size(), false); + for (int i = 0; i < tet_vertices.size(); i++) { + if (tet_vertices[i].is_rounded) + continue; + tet_vertices[i].round(); + } + round(); + } + + GEO::Mesh simple_mesh; + getSimpleMesh(simple_mesh); + GEO::MeshFacetsAABBWithEps simple_tree(simple_mesh); + LocalOperations localOperation(tet_vertices, tets, is_surface_fs, v_is_removed, t_is_removed, tet_qualities, + state.ENERGY_AMIPS, simple_mesh, simple_tree, simple_tree, args, state); + localOperation.calTetQualities(tets, tet_qualities, true);//cal all measure + double tmp_time = igl_timer.getElapsedTime(); + logger().debug("{}s", tmp_time); + localOperation.outputInfo(MeshRecord::OpType::OP_OPT_INIT, tmp_time); +} + +void MeshRefinement::round() { + int cnt = 0; + int sub_cnt = 0; + for (int i = 0; i < tet_vertices.size(); i++) { + if (v_is_removed[i]) + continue; + if (tet_vertices[i].is_rounded) + continue; + tet_vertices[i].is_rounded = true; + Point_3 old_p = tet_vertices[i].pos; + tet_vertices[i].pos = Point_3(tet_vertices[i].posf[0], tet_vertices[i].posf[1], tet_vertices[i].posf[2]); + + for (auto it = tet_vertices[i].conn_tets.begin(); it != tet_vertices[i].conn_tets.end(); it++) { + CGAL::Orientation ori; +// bool is_rounded = true; +// for (int j = 0; j < 4; j++) +// if (!tet_vertices[tets[*it][j]].is_rounded) { +// is_rounded = false; +// break; +// } +// if (is_rounded) +// ori = CGAL::orientation(tet_vertices[tets[*it][0]].posf, tet_vertices[tets[*it][1]].posf, +// tet_vertices[tets[*it][2]].posf, tet_vertices[tets[*it][3]].posf); +// else + ori = CGAL::orientation(tet_vertices[tets[*it][0]].pos, tet_vertices[tets[*it][1]].pos, + tet_vertices[tets[*it][2]].pos, tet_vertices[tets[*it][3]].pos); + + if (ori != CGAL::POSITIVE) { + tet_vertices[i].is_rounded = false; + break; + } + } + if (!tet_vertices[i].is_rounded) + tet_vertices[i].pos = old_p; + else { + cnt++; + sub_cnt++; + } + } + logger().debug("round: {}({})", cnt, tet_vertices.size()); + + //for check +// for (int i = 0; i < tets.size(); i++) { +// if (t_is_removed[i]) +// continue; +// CGAL::Orientation ori = CGAL::orientation(tet_vertices[tets[i][0]].pos, tet_vertices[tets[i][1]].pos, +// tet_vertices[tets[i][2]].pos, tet_vertices[tets[i][3]].pos); +// if (ori != CGAL::POSITIVE) { +// logger().debug("round hehe"); +// } +// } +} + +void MeshRefinement::clear() { + tet_vertices.clear(); + tets.clear(); + + t_is_removed.clear(); + v_is_removed.clear(); + is_surface_fs.clear(); + tet_qualities.clear(); +} + +int MeshRefinement::doOperations(EdgeSplitter& splitter, EdgeCollapser& collapser, EdgeRemover& edge_remover, + VertexSmoother& smoother, const std::array& ops){ + int cnt0=0; + for(int i=0;i& ops) +{ + double avg_energy, max_energy; + splitter.getAvgMaxEnergy(avg_energy, max_energy); + + int loop_cnt = 0; + for (int i = 0; i < max_pass; i++) { + doOperations(splitter, collapser, edge_remover, smoother, ops); + loop_cnt++; + + double tmp_avg_energy, tmp_max_energy; + splitter.getAvgMaxEnergy(tmp_avg_energy, tmp_max_energy); + if (std::abs(tmp_avg_energy - avg_energy) < args.delta_energy_thres + && std::abs(tmp_max_energy - max_energy) < 
args.delta_energy_thres) + break; + avg_energy = tmp_avg_energy; + max_energy = tmp_max_energy; + } + + return loop_cnt; +} + +void MeshRefinement::refine(int energy_type, const std::array& ops, bool is_pre, bool is_post, int scalar_update) { + GEO::MeshFacetsAABBWithEps geo_sf_tree(geo_sf_mesh); + if (geo_b_mesh.vertices.nb() == 0) { + getSimpleMesh(geo_b_mesh);//for constructing aabb tree, the mesh cannot be empty + } + GEO::MeshFacetsAABBWithEps geo_b_tree(geo_b_mesh); + + if (is_dealing_unrounded) + min_adaptive_scale = state.eps / state.initial_edge_len * 0.5; //min to eps/2 + else +// min_adaptive_scale = state.eps_input / state.initial_edge_len; // state.eps_input / state.initial_edge_len * 0.5 is too small + min_adaptive_scale = (state.bbox_diag / 1000) / state.initial_edge_len; // set min_edge_length to diag / 1000 would be better + + LocalOperations localOperation(tet_vertices, tets, is_surface_fs, v_is_removed, t_is_removed, tet_qualities, + energy_type, geo_sf_mesh, geo_sf_tree, geo_b_tree, args, state); + EdgeSplitter splitter(localOperation, state.initial_edge_len * (4.0 / 3.0) * state.initial_edge_len * (4.0 / 3.0)); + EdgeCollapser collapser(localOperation, state.initial_edge_len * (4.0 / 5.0) * state.initial_edge_len * (4.0 / 5.0)); + EdgeRemover edge_remover(localOperation, state.initial_edge_len * (4.0 / 3.0) * state.initial_edge_len * (4.0 / 3.0)); + VertexSmoother smoother(localOperation); + + collapser.is_check_quality = true; + + if (args.save_mid_result == 1) + outputMidResult(false, 1); + +// double old_state.eps = state.eps; +// state.eps = 0.5 * old_state.eps; +// state.eps_2 = state.eps * state.eps; + + if (is_pre) + refine_pre(splitter, collapser, edge_remover, smoother); + + /// apply the local operations + if (is_dealing_unrounded) { + for (int i = 0; i < tet_vertices.size(); i++) { + if (v_is_removed[i] || tet_vertices[i].is_rounded) + continue; + smoother.outputOneRing(i, ""); + } + } + + double avg_energy0, max_energy0; + localOperation.getAvgMaxEnergy(avg_energy0, max_energy0); + double target_energy0 = 1e6; + int update_buget = 2; + int update_cnt = 0; + int is_output = true; +// const double eps_s = 0.8; +// state.eps *= eps_s; +// state.eps_2 *= eps_s*eps_s; + bool is_split = true; + for (int pass = old_pass; pass < old_pass + args.max_num_passes; pass++) { + if (is_dealing_unrounded && pass == old_pass) { + updateScalarField(false, false, args.filter_energy_thres); + } + + logger().info("//////////////// Pass {} ////////////////", pass); + if (is_dealing_unrounded) + collapser.is_limit_length = false; + doOperations(splitter, collapser, edge_remover, smoother, + std::array({{is_split, ops[1], ops[2], ops[3]}})); + update_cnt++; + + if (is_dealing_unrounded) { + bool is_finished = true; + for (int i = 0; i < tet_vertices.size(); i++) { + if (v_is_removed[i]) + continue; + if (!tet_vertices[i].is_rounded) + is_finished = false; + } + if (is_finished) { + logger().debug("all vertices rounded!!"); +// break; + } + } + + if (localOperation.getMaxEnergy() < args.filter_energy_thres) + break; + + //check and mark is_bad_element + double avg_energy, max_energy; + localOperation.getAvgMaxEnergy(avg_energy, max_energy); + if (pass > 0 && pass < old_pass + args.max_num_passes - 1 + && avg_energy0 - avg_energy < args.delta_energy_thres && max_energy0 - max_energy < args.delta_energy_thres) { + +// if (args.target_num_vertices > 0 && getInsideVertexSize() > 1.05 * args.target_num_vertices && isRegionFullyRounded()) { +// if (state.sub_stage < args.stage) { +// 
state.eps += state.eps_delta; +// state.eps_2 = state.eps * state.eps; +// state.sub_stage++; +// avg_energy0 = avg_energy; +// max_energy0 = max_energy; +// continue; +// } else { +// is_split = false; +// continue; +//// break; +// } +// } +// is_split = true; + + if (update_cnt == 1) { + if (is_hit_min) { + update_buget--; + } else + continue; + } + if (update_buget == 0) { + if (state.sub_stage > 1 && state.sub_stage < args.stage) { + state.eps += state.eps_delta; + state.eps_2 = state.eps * state.eps; + state.sub_stage++; + update_buget = 2; +// logger().debug("[[[[[[[[[[[[[[UPDATE EPSILON {}]]]]]]]]]]]]]]]]", state.eps); + } else + break; + } + update_cnt = 0; + +// if(is_start_adaptive && !isRegionFullyRounded()) { +// if(is_hit_min && !isRegionFullyRounded()) { +// refine_unrounded(splitter, collapser, edge_remover, smoother); +// } + + //get target energy + double target_energy = localOperation.getMaxEnergy() / 100; + target_energy = std::min(target_energy, target_energy0 / 10); + target_energy = std::max(target_energy, args.filter_energy_thres * 0.8); + target_energy0 = target_energy; + updateScalarField(false, false, target_energy); + + if (state.sub_stage == 1 && state.sub_stage < args.stage + && target_energy < args.filter_energy_thres) { + state.eps += state.eps_delta; + state.eps_2 = state.eps * state.eps; + state.sub_stage++; +// logger().debug("[[[[[[[[[[[[[[UPDATE EPSILON {}]]]]]]]]]]]]]]]]", state.eps); + } + + if (is_output && args.save_mid_result == 1) { + outputMidResult(false, 1.5); + is_output = false; + } + +// collapser.is_soft = true; + +// if(is_hit_min) { +// Eigen::MatrixXd V_tmp; +// Eigen::MatrixXi F_tmp; +// getSurface(V_tmp, F_tmp); +// localOperation.outputSurfaceColormap(V_tmp, F_tmp, old_state.eps); +// } + } + avg_energy0 = avg_energy; + max_energy0 = max_energy; + } + + old_pass = old_pass + args.max_num_passes; + +// if (!isRegionFullyRounded()) { +// refine_unrounded(splitter, collapser, edge_remover, smoother); +// } +// if (max_energy0 > 1e3) { +// refine_local(splitter, collapser, edge_remover, smoother, args.filter_energy_thres); +// } + +// if (!isRegionFullyRounded() || max_energy0 > 1e3) +// serialization(state.working_dir + state.postfix_str + ".slz"); + + if (!args.is_quiet) { + double max_e = localOperation.getMaxEnergy(); + if (max_e > 100) { + bool is_print = false; + std::ofstream f; + f.open(state.working_dir + args.postfix + ".tmp"); + for (int i = 0; i < tet_qualities.size(); i++) { + if (t_is_removed[i]) + continue; + if (tet_qualities[i].slim_energy > max_e * 0.9) { + is_print = true; + f << "tet " << i << ": energy = " << tet_qualities[i].slim_energy << "; "; + std::array l; + for (int j = 0; j < 3; j++) { + l[j * 2] = CGAL::squared_distance(tet_vertices[tets[i][0]].posf, + tet_vertices[tets[i][j + 1]].posf); + l[j * 2 + 1] = CGAL::squared_distance(tet_vertices[tets[i][j + 1]].posf, + tet_vertices[tets[i][(j + 1) % 3 + 1]].posf); + } + auto it = std::min_element(l.begin(), l.end()); + f << "min_el = " << std::sqrt(*it) << "; "; + int n = it - l.begin(); + int v1_id, v2_id; + if (n % 2 == 0) { + v1_id = 0; + v2_id = n / 2 + 1; + } else { + v1_id = (n - 1) / 2 + 1; + v2_id = ((n - 1) / 2 + 1) % 3 + 1; + } + f << "v1 " << tets[i][v1_id] << " " << tet_vertices[tets[i][v1_id]].is_on_surface << " " + << tet_vertices[tets[i][v1_id]].is_on_boundary << " " + << localOperation.isPointOutEnvelop(tet_vertices[tets[i][v1_id]].posf) << " " + << localOperation.isPointOutBoundaryEnvelop(tet_vertices[tets[i][v1_id]].posf) << "; " + + << "v2 " << 
tets[i][v2_id] << " " << tet_vertices[tets[i][v2_id]].is_on_surface << " " + << tet_vertices[tets[i][v2_id]].is_on_boundary << " " + << localOperation.isPointOutEnvelop(tet_vertices[tets[i][v2_id]].posf) << " " + << localOperation.isPointOutBoundaryEnvelop(tet_vertices[tets[i][v2_id]].posf) << std::endl; + } + } + if (is_print) + f << state.eps << std::endl; + f.close(); + } + } + + if (is_post) { + if (args.target_num_vertices > 0) { + double n = getInsideVertexSize(); + if (n > args.target_num_vertices) { + collapser.is_limit_length = false; + collapser.is_soft = true; + collapser.soft_energy = localOperation.getMaxEnergy(); + collapser.budget = + (n - args.target_num_vertices) * std::count(v_is_removed.begin(), v_is_removed.end(), false) / n * + 1.5; + } + } + refine_post(splitter, collapser, edge_remover, smoother); + } + + + if (args.target_num_vertices > 0) + applyTargetedVertexNum(splitter, collapser, edge_remover, smoother); + + if (args.background_mesh != "") { + applySizingField(splitter, collapser, edge_remover, smoother); + } + + if (args.save_mid_result == 2) + outputMidResult(true, 2);//mark in/out + + +// if (!args.is_quiet) { +//// Eigen::MatrixXd V_tmp; +//// Eigen::MatrixXi F_tmp; +//// getTrackedSurface(V_tmp, F_tmp); +//// igl::writeOBJ(state.g_working_dir + state.postfix + "_tracked_sf1.obj", V_tmp, F_tmp); +//// getSurface(V_tmp, F_tmp); +//// igl::writeOBJ(state.g_working_dir + state.postfix + "_tracked_sf2.obj", V_tmp, F_tmp); +////// localOperation.outputSurfaceColormap(V_tmp, F_tmp, state.g_eps_input);//compared with user input eps +//// localOperation.checkUnrounded(); +// } + + if (args.smooth_open_boundary) + postProcess(smoother); +} + +void MeshRefinement::refine_pre(EdgeSplitter& splitter, EdgeCollapser& collapser, EdgeRemover& edge_remover, + VertexSmoother& smoother){ + logger().info("////////////////// Pre-processing //////////////////"); + collapser.is_limit_length = false; + doOperations(splitter, collapser, edge_remover, smoother, std::array{{false, true, false, false}}); + collapser.is_limit_length = true; +} + +void MeshRefinement::refine_post(EdgeSplitter& splitter, EdgeCollapser& collapser, EdgeRemover& edge_remover, + VertexSmoother& smoother){ + logger().info("////////////////// Post-processing //////////////////"); + collapser.is_limit_length = true; + for (int i = 0; i < tet_vertices.size(); i++) { + tet_vertices[i].adaptive_scale = 1; + } + + doOperations(splitter, collapser, edge_remover, smoother, std::array{{false, true, false, false}}); +} + +void MeshRefinement::refine_local(EdgeSplitter& splitter, EdgeCollapser& collapser, EdgeRemover& edge_remover, + VertexSmoother& smoother, double target_energy) { + EdgeSplitter &localOperation = splitter; + double old_min_adaptive_scale = min_adaptive_scale; + min_adaptive_scale = state.eps / state.initial_edge_len * 0.5; + + double avg_energy0, max_energy0; + localOperation.getAvgMaxEnergy(avg_energy0, max_energy0); + if(target_energy<0) { + target_energy = max_energy0 / 100; + target_energy = std::max(target_energy, args.filter_energy_thres); + } + updateScalarField(false, true, target_energy * 0.8, true); + for (int pass = 0; pass < 20; pass++) { + logger().info("////////////////// Local Pass {} //////////////////", pass); + doOperations(splitter, collapser, edge_remover, smoother); + + double avg_energy, max_energy; + localOperation.getAvgMaxEnergy(avg_energy, max_energy); + if (max_energy < target_energy) + break; + avg_energy0 = avg_energy; + max_energy0 = max_energy; + + if (pass > 0 && pass < 
args.max_num_passes - 1 + && avg_energy0 - avg_energy < args.delta_energy_thres && max_energy - max_energy0 < args.delta_energy_thres) { + updateScalarField(false, true, target_energy); + } + } + min_adaptive_scale = old_min_adaptive_scale; + refine_revert(splitter, collapser, edge_remover, smoother); + + for(int i=0;i 0 && pass % scalar_update == scalar_update - 1 && pass < args.max_num_passes * scalar_update - 1) { + updateScalarField(true, false, -1); + } + } + collapser.is_limit_length = true; + min_adaptive_scale = old_min_adaptive_scale; + refine_revert(splitter, collapser, edge_remover, smoother); + + for(int i=0;i({{false, true, true, true}})); +// doOperations(splitter, collapser, edge_remover, smoother); + + int n_v = std::count(v_is_removed.begin(), v_is_removed.end(), false); + if (n_v0 - n_v < 1) //when number of vertices becomes stable + break; + n_v0 = n_v; + } + + collapser.is_limit_length = true; + collapser.is_soft = false; +} + +int MeshRefinement::getInsideVertexSize(){ + std::vector tmp_t_is_removed; + markInOut(tmp_t_is_removed); + std::unordered_set inside_vs; + for (int i = 0; i < tets.size(); i++) { + if (tmp_t_is_removed[i]) + continue; + for (int j = 0; j < 4; j++) + inside_vs.insert(tets[i][j]); + } + return inside_vs.size(); +} + +void MeshRefinement::markInOut(std::vector& tmp_t_is_removed){ + tmp_t_is_removed = t_is_removed; + Eigen::MatrixXd C(std::count(tmp_t_is_removed.begin(), tmp_t_is_removed.end(), false), 3); + int cnt = 0; + for (int i = 0; i < tets.size(); i++) { + if (tmp_t_is_removed[i]) + continue; + std::vector vs; + vs.reserve(4); + for (int j = 0; j < 4; j++) + vs.push_back(tet_vertices[tets[i][j]].posf); + Point_3f p = CGAL::centroid(vs.begin(), vs.end(), CGAL::Dimension_tag<0>()); + for (int j = 0; j < 3; j++) + C(cnt, j) = p[j]; + cnt++; + } + + Eigen::MatrixXd V; + Eigen::MatrixXi F; + getSurface(V, F); + Eigen::VectorXd W; + logger().debug("winding number..."); + igl::winding_number(V, F, C, W); + logger().debug("winding number done"); + + cnt = 0; + for (int i = 0; i < tets.size(); i++) { + if (tmp_t_is_removed[i]) + continue; + tmp_t_is_removed[i] = !(W(cnt) > 0.5); + cnt++; + } +} + +void MeshRefinement::applySizingField(EdgeSplitter& splitter, EdgeCollapser& collapser, EdgeRemover& edge_remover, + VertexSmoother& smoother) { + PyMesh::MshLoader mshLoader(args.background_mesh); + Eigen::VectorXd V_in = mshLoader.get_nodes(); + Eigen::VectorXi T_in = mshLoader.get_elements(); + Eigen::VectorXd values = mshLoader.get_node_field("values"); + if (V_in.rows() == 0 || T_in.rows() == 0 || values.rows() == 0) + return; + + logger().debug("Applying sizing field..."); + + GEO::Mesh bg_mesh; + bg_mesh.vertices.clear(); + bg_mesh.vertices.create_vertices((int) V_in.rows() / 3); + for (int i = 0; i < V_in.rows() / 3; i++) { + GEO::vec3 &p = bg_mesh.vertices.point(i); + for (int j = 0; j < 3; j++) + p[j] = V_in(i * 3 + j); + } + bg_mesh.cells.clear(); + bg_mesh.cells.create_tets((int) T_in.rows() / 4); + for (int i = 0; i < T_in.rows() / 4; i++) { + for (int j = 0; j < 4; j++) + bg_mesh.cells.set_vertex(i, j, T_in(i * 4 + j)); + } + + // background_mesh.cells.compute_borders(); + // background_mesh.cells.connect(); + + GEO::MeshCellsAABB bg_aabb(bg_mesh, false); + for (int i = 0; i < tet_vertices.size(); i++) { + if (v_is_removed[i]) + continue; + GEO::vec3 p(tet_vertices[i].posf[0], tet_vertices[i].posf[1], tet_vertices[i].posf[2]); + int bg_t_id = bg_aabb.containing_tet(p); + if (bg_t_id == GEO::MeshCellsAABB::NO_TET) + continue; + + //compute 
barycenter + double value = 0; + std::array vs; + for (int j = 0; j < 4; j++) + vs[j] = Point_3f(V_in(T_in(bg_t_id * 4 + j) * 3), V_in(T_in(bg_t_id * 4 + j) * 3 + 1), + V_in(T_in(bg_t_id * 4 + j) * 3 + 2)); + + std::array weights; + for (int j = 0; j < 4; j++) { + Plane_3f pln(vs[j], vs[(j + 1) % 4], vs[(j + 2) % 4]); + double weight = std::sqrt( + CGAL::squared_distance(tet_vertices[i].posf, pln) / CGAL::squared_distance(vs[(j + 3) % 4], pln)); + weights[j] = weight; + value += weight * values(T_in(bg_t_id * 4 + (j + 3) % 4)); + } + + tet_vertices[i].adaptive_scale = value / state.initial_edge_len; //we allow .adaptive_scale > 1 + } + +// for debugging + outputMidResult(true, -1); + + //do more refinement + collapser.is_limit_length = true; + collapser.is_soft = true; + collapser.soft_energy = splitter.getMaxEnergy(); +// state.is_print_tmp = true;//debugging splitting + doOperationLoops(splitter, collapser, edge_remover, smoother, 20); +} + +void MeshRefinement::applyTargetedVertexNum(EdgeSplitter& splitter, EdgeCollapser& collapser, EdgeRemover& edge_remover, + VertexSmoother& smoother) { + if (args.target_num_vertices < 0) + return; + if (args.target_num_vertices == 0) + for (int i = 0; i < t_is_removed.size(); i++) + t_is_removed[i] = true; + + double N = args.target_num_vertices; //targeted #v + + //marking in/out + std::vector tmp_t_is_removed; + markInOut(tmp_t_is_removed); + + for (int i = 0; i < tet_vertices.size(); i++) + tet_vertices[i].is_locked = true; + + for (int i = 0; i < tets.size(); i++) { + if (tmp_t_is_removed[i]) + continue; + for (int j = 0; j < 4; j++) + tet_vertices[tets[i][j]].is_locked = false; + } + + int cnt = 0; + for (int i = 0; i < tet_vertices.size(); i++) + if (!v_is_removed[i] && !tet_vertices[i].is_locked) + cnt++; + + const double size_threshold = 0.05; + if (std::abs(cnt - N) / N < size_threshold) + return; + + logger().debug("{} -> target {}", cnt, N); + + if (cnt > N) {//reduce vertices + double max_energy = splitter.getMaxEnergy(); + for (int i = 0; i < tet_vertices.size(); i++) + tet_vertices[i].adaptive_scale = 10; + + collapser.is_soft = true; + collapser.soft_energy = max_energy; +// state.eps *= 1.5; +// state.eps_2 *= 1.5 * 1.5; +// for (int i = 0; i < tet_vertices.size(); i++) +// tet_vertices[i].is_locked = false; + + collapser.budget = cnt - N; + for (int pass = 0; pass < 10; pass++) { + doOperations(splitter, collapser, edge_remover, smoother, std::array({{false, true, false, false}})); + doOperationLoops(splitter, collapser, edge_remover, smoother, 5, std::array({{false, false, true, true}})); + if (collapser.budget / N < size_threshold) + break; +// collapser.soft_energy *= 1.5; + } + } else {//increase vertices + for (int i = 0; i < tet_vertices.size(); i++) + tet_vertices[i].adaptive_scale = 0; + + splitter.budget = N - cnt; + while(splitter.budget / N >= size_threshold) { + doOperations(splitter, collapser, edge_remover, smoother, std::array({{true, false, false, false}})); + doOperationLoops(splitter, collapser, edge_remover, smoother, 5, std::array({{false, false, true, true}})); + splitter.budget = N - getInsideVertexSize(); + } + } +} + +bool MeshRefinement::isRegionFullyRounded(){ + for(int i=0;i adap_tmp(tet_vertices.size(), 1.5); + double dynamic_adaptive_scale = args.adaptive_scalar; + + const int N = -int(std::log2(min_adaptive_scale) - 1); + std::vector> v_ids(N, std::vector()); + for (int i = 0; i < tet_vertices.size(); i++) { + if (v_is_removed[i] || tet_vertices[i].is_locked) + continue; + + if (is_clean_up_unrounded) { 
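+                // When cleaning up unrounded vertices, only vertices that are
+                // still unrounded seed the sizing-field reduction; otherwise the
+                // seeds are vertices incident to a tet whose energy exceeds
+                // filter_energy.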
+ if (tet_vertices[i].is_rounded) + continue; + } else { + bool is_refine = false; + for (int t_id: tet_vertices[i].conn_tets) { + if (tet_qualities[t_id].slim_energy > filter_energy) + is_refine = true; + } + if (!is_refine) + continue; + } + + int n = -int(std::log2(tet_vertices[i].adaptive_scale) - 0.5); + if (n >= N) + n = N - 1; + v_ids[n].push_back(i); + } + + for (int n = 0; n < N; n++) { + if(v_ids[n].size() == 0) + continue; + + double radius = radius0 / std::pow(2, n); +// double radius = radius0 / 1.5; + + std::unordered_set is_visited; + std::queue v_queue; + + std::vector pts; + pts.reserve(v_ids[n].size() * 3); + for (int i = 0; i < v_ids[n].size(); i++) { + for (int j = 0; j < 3; j++) + pts.push_back(tet_vertices[v_ids[n][i]].posf[j]); + + v_queue.push(v_ids[n][i]); + is_visited.insert(v_ids[n][i]); + adap_tmp[v_ids[n][i]] = dynamic_adaptive_scale; + } + // construct the kdtree + GEO::NearestNeighborSearch_var nnsearch = GEO::NearestNeighborSearch::create(3, "BNN"); + nnsearch->set_points(int(v_ids[n].size()), pts.data()); + + while (!v_queue.empty()) { + int v_id = v_queue.front(); + v_queue.pop(); + + for (int t_id:tet_vertices[v_id].conn_tets) { + for (int k = 0; k < 4; k++) { + if (is_visited.find(tets[t_id][k]) != is_visited.end()) + continue; + GEO::index_t _; + double sq_dist; + const double p[3] = {tet_vertices[tets[t_id][k]].posf[0], tet_vertices[tets[t_id][k]].posf[1], + tet_vertices[tets[t_id][k]].posf[2]}; + nnsearch->get_nearest_neighbors(1, p, &_, &sq_dist); + double dis = sqrt(sq_dist); + + if (dis < radius && !tet_vertices[tets[t_id][k]].is_locked) { + v_queue.push(tets[t_id][k]); + double new_ss = + (dis / radius) * (1 - dynamic_adaptive_scale) + dynamic_adaptive_scale; + if (new_ss < adap_tmp[tets[t_id][k]]) + adap_tmp[tets[t_id][k]] = new_ss; + } + is_visited.insert(tets[t_id][k]); + } + } + } + } + + // update scalars + int cnt = 0; + for (int i = 0; i < tet_vertices.size(); i++) { + if (v_is_removed[i]) + continue; + if (is_clean_up_unrounded && is_lock && adap_tmp[i] > 1) { + tet_vertices[i].is_locked = true; + cnt++; + } + double new_scale = tet_vertices[i].adaptive_scale * adap_tmp[i]; + if (new_scale > 1) + tet_vertices[i].adaptive_scale = 1; + else if (new_scale < min_adaptive_scale) { + if (!is_clean_up_unrounded) + is_hit_min = true; + tet_vertices[i].adaptive_scale = min_adaptive_scale; + } else + tet_vertices[i].adaptive_scale = new_scale; + } + if (is_clean_up_unrounded && is_lock) + logger().debug("{} vertices locked", cnt); + + logger().debug("marked!"); + tmp_time = igl_timer.getElapsedTime(); + logger().debug("time = {}s", tmp_time); + addRecord(MeshRecord(MeshRecord::OpType::OP_ADAP_UPDATE, tmp_time, -1, -1), args, state); +// outputMidResult(true); +} + +void MeshRefinement::getSimpleMesh(GEO::Mesh& mesh){ + mesh.vertices.clear(); + mesh.vertices.create_vertices(1); + mesh.vertices.point(0)=GEO::vec3(0,0,0); + + mesh.facets.clear(); + mesh.facets.create_triangles(1); + mesh.facets.set_vertex(0, 0, 0); + mesh.facets.set_vertex(0, 1, 0); + mesh.facets.set_vertex(0, 2, 0); + + mesh.facets.compute_borders();//for what?? 
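+    // This is a degenerate placeholder mesh: a single zero-area triangle at
+    // the origin. It appears to exist only so that a GEO::MeshFacetsAABBWithEps
+    // can be constructed when no real surface/boundary mesh is available yet
+    // (see prepareData() and refine(), which note that the AABB tree cannot be
+    // built from an empty mesh).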
+} + +void MeshRefinement::postProcess(VertexSmoother& smoother) { + igl_timer.start(); + + std::vector tmp_t_is_removed; + markInOut(tmp_t_is_removed); + + //get final surface and do smoothing + std::vector b_v_ids; + std::vector tmp_is_on_surface(tet_vertices.size(), false); + for (int i = 0; i < tet_vertices.size(); i++) { + if (v_is_removed[i]) + continue; + if (tet_vertices[i].is_on_bbox) + continue; + bool has_removed = false; + bool has_unremoved = false; + for (int t_id:tet_vertices[i].conn_tets) { + if (tmp_t_is_removed[t_id]) { + has_removed = true; + if (has_unremoved) + break; + } + if (!tmp_t_is_removed[t_id]) { + has_unremoved = true; + if (has_removed) + break; + } + } + if (!has_removed || !has_unremoved) + continue; + tmp_is_on_surface[i] = true; +// if(tet_vertices[i].is_on_boundary && !tet_vertices[i].is_on_surface) +// pausee(); + if (!tet_vertices[i].is_on_surface) + b_v_ids.push_back(i); + } + logger().debug("tmp_is_on_surface.size = {}", tmp_is_on_surface.size()); + logger().debug("b_v_ids.size = {}", b_v_ids.size()); + for (int i = 0; i < 20; i++) { + if (smoother.laplacianBoundary(b_v_ids, tmp_is_on_surface, tmp_t_is_removed) == 0) { + break; + } + } + smoother.outputInfo(MeshRecord::OpType::OP_SMOOTH, igl_timer.getElapsedTime()); + + t_is_removed = tmp_t_is_removed; +} + +/////just for check + +void MeshRefinement::check() { + for (int i = 0; i < tet_vertices.size(); i++) { + if (v_is_removed[i]) + continue; + for (auto it = tet_vertices[i].conn_tets.begin(); it != tet_vertices[i].conn_tets.end(); it++) + if (t_is_removed[*it]) + logger().debug("v {} should have conn_tet t{}", i, *it); + if (tet_vertices[i].conn_tets.size() == 0) { + logger().debug("empty conn_tets: v {}", i); + } + } + + std::vector> tet_faces; + for (int i = 0; i < tets.size(); i++) { + if (t_is_removed[i]) + continue; + + CGAL::Orientation ori = CGAL::orientation(tet_vertices[tets[i][0]].pos, + tet_vertices[tets[i][1]].pos, + tet_vertices[tets[i][2]].pos, + tet_vertices[tets[i][3]].pos); + + if (ori == CGAL::COPLANAR) { + logger().debug("tet {} is degenerate!", i); + } else if (ori == CGAL::NEGATIVE) { + logger().debug("tet {} is flipped!", i); + } + + for (int j = 0; j < 4; j++) { + bool is_found = false; + for (auto it = tet_vertices[tets[i][j]].conn_tets.begin(); + it != tet_vertices[tets[i][j]].conn_tets.end(); it++) { + if (*it == i) { + is_found = true; + } + if (t_is_removed[*it]) + logger().debug("tet {} is removed!", *it); + } + if (!is_found) { + logger().debug("{} {} {} {}", tets[i][0], tets[i][1], tets[i][2], tets[i][3]); + logger().debug("tet {} should be conn to v {}", i, tets[i][j]); + } + + std::array f = {{tets[i][j], tets[i][(j + 1) % 4], tets[i][(j + 2) % 4]}}; + std::sort(f.begin(), f.end()); + tet_faces.push_back(f); + } + } + std::sort(tet_faces.begin(), tet_faces.end()); + tet_faces.erase(std::unique(tet_faces.begin(), tet_faces.end()), tet_faces.end()); + + for (int i = 0; i < tet_faces.size(); i++) { + std::unordered_set tmp; + setIntersection(tet_vertices[tet_faces[i][0]].conn_tets, tet_vertices[tet_faces[i][1]].conn_tets, tmp); + setIntersection(tet_vertices[tet_faces[i][2]].conn_tets, tmp, tmp); + + if (tmp.size() != 1 && tmp.size() != 2) + logger().debug("{}", tmp.size()); + } +} + +void MeshRefinement::outputMidResult(bool is_with_bbox, double id) { + std::vector tmp_t_is_removed = t_is_removed; + Eigen::VectorXd in_out(std::count(t_is_removed.begin(), t_is_removed.end(), false)); +// if (!is_with_bbox) { + Eigen::MatrixXd C(std::count(tmp_t_is_removed.begin(), 
tmp_t_is_removed.end(), false), 3); + int cnt = 0; + for (int i = 0; i < tets.size(); i++) { + if (tmp_t_is_removed[i]) + continue; + std::vector vs; + vs.reserve(4); + for (int j = 0; j < 4; j++) + vs.push_back(tet_vertices[tets[i][j]].posf); + Point_3f p = CGAL::centroid(vs.begin(), vs.end(), CGAL::Dimension_tag<0>()); + for (int j = 0; j < 3; j++) + C(cnt, j) = p[j]; + cnt++; + } + + Eigen::MatrixXd V; + Eigen::MatrixXi F; + getSurface(V, F); + Eigen::VectorXd W; + logger().debug("winding number..."); + igl::winding_number(V, F, C, W); + logger().debug("winding number done"); + + cnt = 0; + for (int i = 0; i < tets.size(); i++) { + if (tmp_t_is_removed[i]) + continue; + if(!is_with_bbox) + tmp_t_is_removed[i] = !(W(cnt) > 0.5); + else + in_out(cnt) = !(W(cnt) > 0.5); + cnt++; + } +// } + + int t_cnt = 0; + std::vector v_ids; + for (int i = 0; i < tets.size(); i++) { + if (tmp_t_is_removed[i]) + continue; + for (int j = 0; j < 4; j++) + v_ids.push_back(tets[i][j]); + t_cnt++; + } + std::sort(v_ids.begin(), v_ids.end()); + v_ids.erase(std::unique(v_ids.begin(), v_ids.end()), v_ids.end()); + std::unordered_map map_ids; + for (int i = 0; i < v_ids.size(); i++) + map_ids[v_ids[i]] = i; + + PyMesh::MshSaver mSaver(state.working_dir + state.postfix + "_mid" + std::to_string(id) + ".msh", true); + Eigen::VectorXd oV(v_ids.size() * 3); + Eigen::VectorXi oT(t_cnt * 4); + for (int i = 0; i < v_ids.size(); i++) { + for (int j = 0; j < 3; j++) + oV(i * 3 + j) = tet_vertices[v_ids[i]].posf[j]; + } +// int cnt = 0; + cnt = 0; + for (int i = 0; i < tets.size(); i++) { + if (tmp_t_is_removed[i]) + continue; + for (int j = 0; j < 4; j++) + oT(cnt * 4 + j) = map_ids[tets[i][j]]; + cnt++; + } + mSaver.save_mesh(oV, oT, 3, mSaver.TET); + logger().debug("#v = {}", oV.rows() / 3); + logger().debug("#t = {}", oT.rows() / 4); + + Eigen::VectorXd angle(t_cnt); + Eigen::VectorXd energy(t_cnt); + cnt = 0; + for (int i = 0; i < tet_qualities.size(); i++) { + if (tmp_t_is_removed[i]) + continue; + angle(cnt) = tet_qualities[i].min_d_angle; + energy(cnt) = tet_qualities[i].slim_energy; + cnt++; + } + mSaver.save_elem_scalar_field("min_dihedral_angle", angle); + mSaver.save_elem_scalar_field("energy", energy); + if(is_with_bbox) + mSaver.save_elem_scalar_field("in/out", in_out); + + // for debugging + Eigen::VectorXd scalar(v_ids.size()); + cnt = 0; + for (int i = 0; i < v_ids.size(); i++) { + scalar(cnt) = tet_vertices[v_ids[i]].adaptive_scale; + cnt++; + } + mSaver.save_scalar_field("scalar field", scalar); +} + +void MeshRefinement::getSurface(Eigen::MatrixXd& V, Eigen::MatrixXi& F){ + std::vector> fs; + std::vector vs; + for(int i=0;i 0) {//outside + std::array v_ids = {{tets[i][(j + 1) % 4], tets[i][(j + 2) % 4], tets[i][(j + 3) % 4]}}; + if (CGAL::orientation(tet_vertices[v_ids[0]].pos, tet_vertices[v_ids[1]].pos, + tet_vertices[v_ids[2]].pos, tet_vertices[tets[i][j]].pos) != CGAL::POSITIVE) { + int tmp = v_ids[0]; + v_ids[0] = v_ids[2]; + v_ids[2] = tmp; + } + for (int k = 0; k < is_surface_fs[i][j]; k++) + fs.push_back(v_ids); + for (int k = 0; k < 3; k++) + vs.push_back(v_ids[k]); + } + } + } + std::sort(vs.begin(), vs.end()); + vs.erase(std::unique(vs.begin(), vs.end()), vs.end()); + + V.resize(vs.size(), 3); + std::map map_ids; + for(int i=0;i> fs; + std::vector vs; + for (int i = 0; i < tets.size(); i++) { + if (t_is_removed[i]) + continue; + for (int j = 0; j < 4; j++) { + if (is_surface_fs[i][j] != state.NOT_SURFACE && is_surface_fs[i][j] >= 0) {//outside + std::array v_ids = {{tets[i][(j + 1) % 4], 
tets[i][(j + 2) % 4], tets[i][(j + 3) % 4]}}; + if (CGAL::orientation(tet_vertices[v_ids[0]].pos, tet_vertices[v_ids[1]].pos, + tet_vertices[v_ids[2]].pos, tet_vertices[tets[i][j]].pos) != CGAL::POSITIVE) { + int tmp = v_ids[0]; + v_ids[0] = v_ids[2]; + v_ids[2] = tmp; + } + std::array v_ids1 = v_ids; + std::sort(v_ids1.begin(), v_ids1.end()); + fs.push_back(std::array({{v_ids1[0], v_ids1[1], v_ids1[2], v_ids[0], v_ids[1], v_ids[2]}})); + for (int k = 0; k < 3; k++) + vs.push_back(v_ids[k]); + } + } + } + std::sort(vs.begin(), vs.end()); + vs.erase(std::unique(vs.begin(), vs.end()), vs.end()); + + V.resize(vs.size(), 3); + std::unordered_map map_ids; + for (int i = 0; i < vs.size(); i++) { + map_ids[vs[i]] = i; + for (int j = 0; j < 3; j++) + V(i, j) = tet_vertices[vs[i]].posf[j]; + } + + F.resize(fs.size(), 3); + for (int i = 0; i < fs.size(); i++) + for (int j = 0; j < 3; j++) { + F(i, j) = map_ids[fs[i][j + 3]]; + } + + return; + + std::sort(fs.begin(), fs.end()); + int nf = 0; + for (int i = 0; i < fs.size(); i++) { + if (i > 0 && fs[i][0] == fs[i - 1][0] && fs[i][1] == fs[i - 1][1] && fs[i][2] == fs[i - 1][2]) + continue; + nf++; + } + + F.resize(nf, 3); + int cnt = 0; + for (int i = 0; i < fs.size(); i++) { + if (i > 0 && fs[i][0] == fs[i - 1][0] && fs[i][1] == fs[i - 1][1] && fs[i][2] == fs[i - 1][2]) + continue; + for (int j = 0; j < 3; j++) { + F(cnt, j) = map_ids[fs[i][j + 3]]; + cnt++; + } + } +} + +namespace { + +bool getSurfaceMesh(const Eigen::MatrixXd& V_in, const Eigen::MatrixXi& F_in, GEO::Mesh& geo_sf_mesh){ + geo_sf_mesh.vertices.clear(); + geo_sf_mesh.vertices.create_vertices((int) V_in.rows()); + for (int i = 0; i < V_in.rows(); i++) { + GEO::vec3 &p = geo_sf_mesh.vertices.point(i); + for (int j = 0; j < 3; j++) { + p[j] = V_in(i, j); + } + } + geo_sf_mesh.facets.clear(); + geo_sf_mesh.facets.create_triangles((int) F_in.rows()); + for (int i = 0; i < F_in.rows(); i++) { + for (int j = 0; j < 3; j++) { + geo_sf_mesh.facets.set_vertex(i, j, F_in(i, j)); + } + } + geo_sf_mesh.facets.compute_borders(); + return true; +} + +void getBoundaryMesh(const Eigen::MatrixXd& V_sf, const Eigen::MatrixXi& F_sf, GEO::Mesh& b_mesh){ + std::vector> conn_f4v(V_sf.rows(), std::vector()); + for (int i = 0; i < F_sf.rows(); i++) { + for (int j = 0; j < 3; j++) + conn_f4v[F_sf(i, j)].push_back(i); + } + + std::vector> b_edges; + for (int i = 0; i < F_sf.rows(); i++) { + for (int j = 0; j < 3; j++) { + std::vector tmp; + std::set_intersection(conn_f4v[F_sf(i, j)].begin(), conn_f4v[F_sf(i, j)].end(), + conn_f4v[F_sf(i, (j + 1) % 3)].begin(), conn_f4v[F_sf(i, (j + 1) % 3)].end(), + std::back_inserter(tmp)); + if (tmp.size() == 1) + b_edges.push_back(std::array({{F_sf(i, j), F_sf(i, (j + 1) % 3)}})); + } + } + + if(b_edges.size()==0){ + b_mesh.vertices.clear(); + return; + } + + std::unordered_set v_ids; + for (int i = 0; i < b_edges.size(); i++) { + v_ids.insert(b_edges[i][0]); + v_ids.insert(b_edges[i][1]); + } + std::unordered_map v_ids_map; + int cnt = 0; + for (int v_id : v_ids) { + v_ids_map[v_id] = cnt; + cnt++; + } + + b_mesh.vertices.clear(); + b_mesh.vertices.create_vertices((int) v_ids.size()); + for (int v_id : v_ids) { + GEO::vec3 &p = b_mesh.vertices.point(v_ids_map[v_id]); + for (int j = 0; j < 3; j++) + p[j] = V_sf(v_id, j); + } + b_mesh.facets.clear(); + b_mesh.facets.create_triangles((int) b_edges.size()); + for (int i = 0; i < b_edges.size(); i++) { + b_mesh.facets.set_vertex(i, 0, v_ids_map[b_edges[i][0]]); + b_mesh.facets.set_vertex(i, 1, v_ids_map[b_edges[i][1]]); + 
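+        // the third vertex intentionally duplicates the second: each open-boundary edge is stored
+        // as a degenerate triangle, presumably so the boundary can still be queried through
+        // GEO facet-based data structures.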
b_mesh.facets.set_vertex(i, 2, v_ids_map[b_edges[i][1]]); + } +} + +} // anonymous namespace + +bool MeshRefinement::deserialization(const Eigen::MatrixXd& V_in, const Eigen::MatrixXi& F_in, + const std::string& slz_file) +{ + logger().debug("deserializing ..."); + + //process sf_file + if(!getSurfaceMesh(V_in, F_in, geo_sf_mesh)) + return false; + getBoundaryMesh(V_in, F_in, geo_b_mesh); + state.is_mesh_closed = (geo_b_mesh.vertices.nb() == 0); + + //deserialization + igl::deserialize(state.bbox_diag, "bbox_diag", slz_file); + igl::deserialize(state.eps, "eps", slz_file); + igl::deserialize(state.eps_2, "eps_2", slz_file); + igl::deserialize(state.sampling_dist, "sampling_dist", slz_file); + igl::deserialize(state.initial_edge_len, "initial_edge_len", slz_file); + // igl::deserialize(state.NOT_SURFACE, "NOT_SURFACE", slz_file); + igl::deserialize(old_pass, "old_pass", slz_file); + + igl::deserialize(tet_vertices, "tet_vertices", slz_file); + igl::deserialize(tets, "tets", slz_file); + igl::deserialize(is_surface_fs, "is_surface_fs", slz_file); + + t_is_removed = std::vector(tets.size(), false); + v_is_removed = std::vector(tet_vertices.size(), false); + for (int i = 0; i < tets.size(); i++) { + for (int j = 0; j < 4; j++) { + tet_vertices[tets[i][j]].conn_tets.insert(i); + } + } + + prepareData(false); + +// for (int i = 0; i < tets.size(); i++) { +// for (int j = 0; j < 4; j++) +// tet_vertices[tets[i][j]].conn_tets.insert(i); +// } + logger().debug("deserialization done"); + + return true; +} + +void MeshRefinement::serialization(const std::string& slz_file) { + logger().debug("serializing ..."); + igl::serialize(state.bbox_diag, "bbox_diag", slz_file, true); + igl::serialize(state.eps, "eps", slz_file); + igl::serialize(state.eps_2, "eps_2", slz_file); + igl::serialize(state.sampling_dist, "sampling_dist", slz_file); + igl::serialize(state.initial_edge_len, "initial_edge_len", slz_file); + // igl::serialize(state.NOT_SURFACE, "NOT_SURFACE", slz_file); + igl::serialize(old_pass, "old_pass", slz_file); + + //relabel + std::vector new_v_ids(tet_vertices.size(), -1); + int cnt = 0; + for (int i = 0; i < tet_vertices.size(); i++) { + if (v_is_removed[i]) + continue; + new_v_ids[i] = cnt++; + } + + // serialize + std::vector slz_tet_vertices; + slz_tet_vertices.reserve(std::count(v_is_removed.begin(), v_is_removed.end(), false)); + for (int i = 0; i < tet_vertices.size(); i++) { + if (v_is_removed[i]) + continue; + slz_tet_vertices.push_back(tet_vertices[i]); + slz_tet_vertices.back().conn_tets.clear(); + slz_tet_vertices.back().on_face.clear(); + slz_tet_vertices.back().on_edge.clear(); + } + igl::serialize(slz_tet_vertices, "tet_vertices", slz_file); + slz_tet_vertices.clear(); + + std::vector > slz_tets; + slz_tets.reserve(std::count(t_is_removed.begin(), t_is_removed.end(), false)); + for (int i = 0; i < tets.size(); i++) { + if (t_is_removed[i]) + continue; + slz_tets.push_back(tets[i]); + for (int j = 0; j < 4; j++) + slz_tets.back()[j] = new_v_ids[tets[i][j]]; + } + igl::serialize(slz_tets, "tets", slz_file); + slz_tets.clear(); + + std::vector> slz_is_surface_fs; + slz_is_surface_fs.reserve(std::count(t_is_removed.begin(), t_is_removed.end(), false)); + for (int i = 0; i < is_surface_fs.size(); i++) { + if (t_is_removed[i]) + continue; + slz_is_surface_fs.push_back(is_surface_fs[i]); + } + igl::serialize(slz_is_surface_fs, "is_surface_fs", slz_file); + slz_is_surface_fs.clear(); + + logger().debug("serialization done"); +} + +} // namespace tetwild diff --git 
a/contrib/NeRF-Editing/TetWild/src/tetwild/MeshRefinement.h b/contrib/NeRF-Editing/TetWild/src/tetwild/MeshRefinement.h new file mode 100644 index 00000000..c3dcbbcc --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/src/tetwild/MeshRefinement.h @@ -0,0 +1,100 @@ +// This file is part of TetWild, a software for generating tetrahedral meshes. +// +// Copyright (C) 2018 Yixin Hu +// +// This Source Code Form is subject to the terms of the Mozilla Public License +// v. 2.0. If a copy of the MPL was not distributed with this file, You can +// obtain one at http://mozilla.org/MPL/2.0/. +// +// Created by Yixin Hu on 4/11/17. +// + +#ifndef NEW_GTET_MESHREFINEMENT_H +#define NEW_GTET_MESHREFINEMENT_H + +#include +#include +#include +#include + +namespace tetwild { + +class MeshRefinement { +public: + const Args &args; + State &state; + + GEO::Mesh &geo_sf_mesh; + GEO::Mesh &geo_b_mesh; + + //init + std::vector tet_vertices; + std::vector> tets; + //prepare data + std::vector v_is_removed; + std::vector t_is_removed; + std::vector tet_qualities; + std::vector> is_surface_fs; + + igl::Timer igl_timer; + + int old_pass = 0; + + MeshRefinement(GEO::Mesh & sf_mesh, GEO::Mesh & b_mesh, const Args &ar, State &st) + : geo_sf_mesh(sf_mesh), geo_b_mesh(b_mesh), args(ar), state(st) + { } + + void prepareData(bool is_init=true); + void round(); + void clear(); + + int sf_id = 0; + int doOperations(EdgeSplitter& splitter, EdgeCollapser& collapser, EdgeRemover& edge_remover, + VertexSmoother& smoother, const std::array& ops={{true, true, true, true}}); + int doOperationLoops(EdgeSplitter& splitter, EdgeCollapser& collapser, EdgeRemover& edge_remover, + VertexSmoother& smoother, int max_pass, const std::array& ops={{true, true, true, true}}); + bool is_dealing_unrounded = false; + bool is_dealing_local = false; + + void refine(int energy_type, const std::array& ops={{true, true, true, true}}, + bool is_pre = true, bool is_post = true, int scalar_update = 3); + void refine_pre(EdgeSplitter& splitter, EdgeCollapser& collapser, EdgeRemover& edge_remover, + VertexSmoother& smoother); + void refine_post(EdgeSplitter& splitter, EdgeCollapser& collapser, EdgeRemover& edge_remover, + VertexSmoother& smoother); + void refine_local(EdgeSplitter& splitter, EdgeCollapser& collapser, EdgeRemover& edge_remover, + VertexSmoother& smoother, double target_energy = -1); + bool refine_unrounded(EdgeSplitter& splitter, EdgeCollapser& collapser, EdgeRemover& edge_remover, + VertexSmoother& smoother); + void refine_revert(EdgeSplitter& splitter, EdgeCollapser& collapser, EdgeRemover& edge_remover, + VertexSmoother& smoother); + bool isRegionFullyRounded(); + + double min_adaptive_scale; + bool is_hit_min = false; + void updateScalarField(bool is_clean_up_unrounded, bool is_clean_up_local, double filter_energy, bool is_lock = false); + + void getSimpleMesh(GEO::Mesh& mesh); + void postProcess(VertexSmoother& smoother);//for lapacian smoothing + + int getInsideVertexSize(); + void markInOut(std::vector& tmp_t_is_removed); + void applySizingField(EdgeSplitter& splitter, EdgeCollapser& collapser, EdgeRemover& edge_remover, + VertexSmoother& smoother); + void applyTargetedVertexNum(EdgeSplitter& splitter, EdgeCollapser& collapser, EdgeRemover& edge_remover, + VertexSmoother& smoother); + + ////for check + void check(); + int mid_result=0; + void outputMidResult(bool is_with_bbox, double id); + void getSurface(Eigen::MatrixXd& V, Eigen::MatrixXi& F); + void getTrackedSurface(Eigen::MatrixXd& V, Eigen::MatrixXi& F); + + bool 
deserialization(const Eigen::MatrixXd& V, const Eigen::MatrixXi& F, const std::string& slz_file); + void serialization(const std::string& slz_file); +}; + +} // namespace tetwild + +#endif //NEW_GTET_MESHREFINEMENT_H diff --git a/contrib/NeRF-Editing/TetWild/src/tetwild/Preprocess.cpp b/contrib/NeRF-Editing/TetWild/src/tetwild/Preprocess.cpp new file mode 100644 index 00000000..b0b9141b --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/src/tetwild/Preprocess.cpp @@ -0,0 +1,1009 @@ +// This file is part of TetWild, a software for generating tetrahedral meshes. +// +// Copyright (C) 2018 Yixin Hu +// +// This Source Code Form is subject to the terms of the Mozilla Public License +// v. 2.0. If a copy of the MPL was not distributed with this file, You can +// obtain one at http://mozilla.org/MPL/2.0/. +// +// Created by Yixin Hu on 10/12/17. +// + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace tetwild { + +namespace { + +void checkBoundary(const Eigen::MatrixXd& V, const Eigen::MatrixXi& F, const State &state) { + PyMesh::MshSaver mSaver(state.working_dir+state.postfix+"_boundary.msh", true); + Eigen::VectorXd oV; + Eigen::VectorXi oF; + oV.resize(V.rows() * 3); + for (int i = 0; i < V.rows(); i++) { + for (int j = 0; j < 3; j++) + oV(i * 3 + j) = V(i, j); + } + oF.resize(F.rows() * 3); + for (int i = 0; i < F.rows(); i++) { + for (int j = 0; j < 3; j++) + oF(i * 3 + j) = F(i, j); + } + + if (oV.rows() > 0) { + mSaver.save_mesh(oV, oF, 3, mSaver.TRI); + + Eigen::VectorXd bF(F.rows()); + for (int i = 0; i < bF.rows(); i++) + bF(i) = 0; + Eigen::VectorXd bV(V.rows()); + for (int i = 0; i < bV.rows(); i++) + bV(i) = 0; + + std::vector> conn_f4v(V.rows(), std::vector()); + for (int i = 0; i < F.rows(); i++) { + for (int j = 0; j < 3; j++) + conn_f4v[F(i, j)].push_back(i); + } + + for (int i = 0; i < F.rows(); i++) { + for (int j = 0; j < 3; j++) { + std::vector tmp; + std::set_intersection(conn_f4v[F(i, j)].begin(), conn_f4v[F(i, j)].end(), + conn_f4v[F(i, (j + 1) % 3)].begin(), conn_f4v[F(i, (j + 1) % 3)].end(), + std::back_inserter(tmp)); + if (tmp.size() == 1) { + bF(tmp[0]) = 1; +// logger().debug("boundary tri! 
{}", tmp[0]); +// Triangle_3f tri(Point_3f(V(F(i, 0), 0), V(F(i, 0), 1), V(F(i, 0), 2)), +// Point_3f(V(F(i, 1), 0), V(F(i, 1), 1), V(F(i, 1), 2)), +// Point_3f(V(F(i, 2), 0), V(F(i, 2), 1), V(F(i, 2), 2))); +// if(tri.is_degenerate()) +// logger().debug("degenerate"); + bV(F(i, j)) = 1; + bV(F(i, (j + 1) % 3)) = 1; + } + } + } + + mSaver.save_elem_scalar_field("boundary faces", bF); + mSaver.save_scalar_field("boundary vertices", bV); + } + + logger().debug("boundary checked!"); +} + +} // anonymous namespace + +bool Preprocess::init(const Eigen::MatrixXd& V_tmp, const Eigen::MatrixXi& F_tmp, + GEO::Mesh& geo_b_mesh, GEO::Mesh& geo_sf_mesh, const Args &args) { + + logger().debug("{} {}", V_tmp.rows(), F_tmp.rows()); + + Eigen::VectorXi IV, _; +// igl::unique_rows(V_tmp, V_in, _, IV); + igl::remove_duplicate_vertices(V_tmp, F_tmp, 1e-10, V_in, IV, _, F_in); + + if (V_in.rows() == 0 || F_in.rows() == 0) + return false; + +// for (int i = 0; i < F_in.rows(); i++) { +// for (int j = 0; j < 3; j++) { +// F_in(i, j) = IV(F_in(i, j)); +// } +// } + logger().debug("#v = {} -> {}", V_tmp.rows(), V_in.rows()); + logger().debug("#f = {} -> {}", F_tmp.rows(), F_in.rows()); +// checkBoundary(V_in, F_in, state); + + ////get GEO meshes + geo_sf_mesh.vertices.clear(); + geo_sf_mesh.vertices.create_vertices((int) V_in.rows()); + for (int i = 0; i < V_in.rows(); i++) { + GEO::vec3 &p = geo_sf_mesh.vertices.point(i); + for (int j = 0; j < 3; j++) + p[j] = V_in(i, j); + } + geo_sf_mesh.facets.clear(); + geo_sf_mesh.facets.create_triangles((int) F_in.rows()); + for (int i = 0; i < F_in.rows(); i++) { + for (int j = 0; j < 3; j++) + geo_sf_mesh.facets.set_vertex(i, j, F_in(i, j)); + } + geo_sf_mesh.facets.compute_borders(); + + getBoundaryMesh(geo_b_mesh); + state.is_mesh_closed = (geo_b_mesh.vertices.nb() == 0); + + return true; +} + +void Preprocess::getBoundaryMesh(GEO::Mesh& b_mesh) { + Eigen::MatrixXd& V_sf = V_in; + Eigen::MatrixXi& F_sf = F_in; + + std::vector> conn_f4v(V_sf.rows(), std::vector()); + for (int i = 0; i < F_sf.rows(); i++) { + for (int j = 0; j < 3; j++) + conn_f4v[F_sf(i, j)].push_back(i); + } + //check isolated vertices +// for(int i=0;i> b_edges; + for (int i = 0; i < F_sf.rows(); i++) { + for (int j = 0; j < 3; j++) { + std::vector tmp; + std::set_intersection(conn_f4v[F_sf(i, j)].begin(), conn_f4v[F_sf(i, j)].end(), + conn_f4v[F_sf(i, (j + 1) % 3)].begin(), conn_f4v[F_sf(i, (j + 1) % 3)].end(), + std::back_inserter(tmp)); + if (tmp.size() == 1) + b_edges.push_back(std::array({{F_sf(i, j), F_sf(i, (j + 1) % 3)}})); + } + } + + if(b_edges.size()==0){ + b_mesh.vertices.clear(); + return; + } + + std::unordered_set v_ids; + for (int i = 0; i < b_edges.size(); i++) { + v_ids.insert(b_edges[i][0]); + v_ids.insert(b_edges[i][1]); + } + std::unordered_map v_ids_map; + int cnt = 0; + for (int v_id : v_ids) { + v_ids_map[v_id] = cnt; + cnt++; + } + + b_mesh.vertices.clear(); + b_mesh.vertices.create_vertices((int) v_ids.size()); + for (int v_id : v_ids) { + GEO::vec3 &p = b_mesh.vertices.point(v_ids_map[v_id]); + for (int j = 0; j < 3; j++) + p[j] = V_sf(v_id, j); + } + b_mesh.facets.clear(); + b_mesh.facets.create_triangles((int) b_edges.size()); + for (int i = 0; i < b_edges.size(); i++) { + b_mesh.facets.set_vertex(i, 0, v_ids_map[b_edges[i][0]]); + b_mesh.facets.set_vertex(i, 1, v_ids_map[b_edges[i][1]]); + b_mesh.facets.set_vertex(i, 2, v_ids_map[b_edges[i][1]]); + } +} + +void Preprocess::process(GEO::Mesh& geo_sf_mesh, std::vector& m_vertices, std::vector>& m_faces, const Args &args) { 
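+    // overview: the envelope is temporarily tightened (eps scaled by 0.8), the input surface is
+    // simplified by edge collapses and edge swaps inside the tightened envelope, and the surviving
+    // vertices/faces are exported as CGAL points/triangles (degenerate triangles dropped)
+    // before eps is restored to its original value.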
+ double eps_scalar = 0.8; + double eps_scalar_2 = eps_scalar*eps_scalar; + + state.eps *= eps_scalar; + state.eps_2 *= eps_scalar_2; +// state.sampling_dist *= eps_scalar*2; + + conn_fs.resize(V_in.size()); + for (int i = 0; i < F_in.rows(); i++) { + for (int j = 0; j < 3; j++) + conn_fs[F_in(i, j)].insert(i); + } + v_is_removed = std::vector(V_in.rows(), false); + f_is_removed = std::vector(F_in.rows(), false); + + // mesh_reorder(geo_sf_mesh, GEO::MESH_ORDER_HILBERT); + GEO::MeshFacetsAABBWithEps geo_face_tree(geo_sf_mesh); + + std::vector> edges; + edges.reserve(F_in.rows()*6); + for (int i = 0; i < F_in.rows(); i++) { + for (int j = 0; j < 3; j++) { + std::array e = {{F_in(i, j), F_in(i, (j + 1) % 3)}}; + if (e[0] > e[1]) e = {{e[1], e[0]}}; + edges.push_back(e); + } + } + std::sort(edges.begin(), edges.end()); + edges.erase(std::unique(edges.begin(), edges.end()), edges.end()); + + const int edges_size = edges.size(); + for (int i = 0; i < edges_size; i++) { + double weight = getEdgeLength(edges[i]); + sm_queue.push(ElementInQueue_sm(edges[i], weight)); + sm_queue.push(ElementInQueue_sm(std::array({{edges[i][1], edges[i][0]}}), weight)); + } + + //simplification + ts = 0; + f_tss.resize(F_in.size()); + simplify(geo_sf_mesh, geo_face_tree); + + ////get CGAL surface mesh + int cnt = 0; + std::unordered_map new_v_ids; + + Eigen::MatrixXd V_out; + Eigen::MatrixXi F_out; + V_out.resize(std::count(v_is_removed.begin(), v_is_removed.end(), false), 3); + F_out.resize(std::count(f_is_removed.begin(), f_is_removed.end(), false), 3); + for (int i = 0; i < V_in.rows(); i++) { + if (v_is_removed[i]) + continue; + new_v_ids[i] = cnt; + V_out.row(cnt++) = V_in.row(i); + } + + cnt = 0; + for (int i = 0; i < F_in.rows(); i++) { + if (f_is_removed[i]) + continue; + for (int j = 0; j < 3; j++) + F_out(cnt, j) = new_v_ids[F_in(i, j)]; + cnt++; + } +// igl::writeSTL(state.working_dir+args.postfix+"_simplified.stl", V_out, F_out); + logger().debug("#v = {}", V_out.rows()); + logger().debug("#f = {}", F_out.rows()); + + V_in = V_out; + F_in = F_out; + conn_fs.clear(); + conn_fs.resize(V_in.size()); + for (int i = 0; i < F_in.rows(); i++) { + for (int j = 0; j < 3; j++) + conn_fs[F_in(i, j)].insert(i); + } + swap(geo_sf_mesh, geo_face_tree); + if(args.save_mid_result == 0) + igl::writeSTL(state.working_dir+state.postfix+"_simplified.stl", V_in, F_in); + +// checkBoundary(V_in, F_in); + + m_vertices.reserve(V_in.rows()); + m_faces.reserve(F_in.rows()); + for (int i = 0; i < V_in.rows(); i++) { + m_vertices.push_back(Point_3(V_in(i, 0), V_in(i, 1), V_in(i, 2))); + } + for (int i = 0; i < F_in.rows(); i++) { + std::array f = {{F_in(i, 0), F_in(i, 1), F_in(i, 2)}}; + Triangle_3 tr(m_vertices[f[0]], m_vertices[f[1]], m_vertices[f[2]]); + if (!tr.is_degenerate())//delete all degenerate triangles + m_faces.push_back(f); + } + logger().debug("#v = {}", m_vertices.size()); + logger().debug("#f = {}->{}", F_in.rows(), m_faces.size()); + + state.eps /= eps_scalar; + state.eps_2 /= eps_scalar_2; +// state.sampling_dist /= eps_scalar*2; + + // igl::write_triangle_mesh("tmp.obj", V_in, F_in); + + // igl::write_triangle_mesh("tmp.obj", V_in, F_in); + + //output colormap + // outputSurfaceColormap(geo_face_tree, geo_sf_mesh); +} + +void Preprocess::swap(const GEO::Mesh &geo_mesh, const GEO::MeshFacetsAABBWithEps& face_aabb_tree) { + int cnt = 0; + for (int i = 0; i < F_in.rows(); i++) { + bool is_swapped = false; + for (int j = 0; j < 3; j++) { + int v_id = F_in(i, j); + int v1_id = F_in(i, (j + 1) % 3); + int v2_id = 
F_in(i, (j + 2) % 3); + + // manifold + std::vector n12_f_ids; + setIntersection(conn_fs[v1_id], conn_fs[v2_id], n12_f_ids); + if (n12_f_ids.size() != 2) { + continue; + } + if (n12_f_ids[1] == i) + n12_f_ids = {{n12_f_ids[1], n12_f_ids[0]}}; + int v3_id = -1; + for (int k = 0; k < 3; k++) + if (F_in(n12_f_ids[1], k) != v1_id && F_in(n12_f_ids[1], k) != v2_id) { + v3_id = F_in(n12_f_ids[1], k); + break; + } + + // check quality + double cos_a = getCosAngle(v_id, v1_id, v2_id); + double cos_a1 = getCosAngle(v3_id, v1_id, v2_id); + std::array old_nvs; + for (int f = 0; f < 2; f++) { + std::array vs; + for (int k = 0; k < 3; k++) { + vs[k] = GEO::vec3(V_in(F_in(n12_f_ids[f], k), 0), V_in(F_in(n12_f_ids[f], k), 1), + V_in(F_in(n12_f_ids[f], k), 2)); + } + old_nvs[f] = GEO::Geom::triangle_normal(vs[0], vs[1], vs[2]); + } + if (cos_a > -0.999) { +// continue; + if (GEO::Geom::cos_angle(old_nvs[0], old_nvs[1]) < 1-1e-6)//not coplanar + continue; + } + double cos_a_new = getCosAngle(v1_id, v_id, v3_id); + double cos_a1_new = getCosAngle(v2_id, v_id, v3_id); + if (std::min(cos_a_new, cos_a1_new) <= std::min(cos_a, cos_a1)) + continue; + + // non flipping + Eigen::RowVectorXi f1_old = F_in.row(n12_f_ids[0]); + Eigen::RowVectorXi f2_old = F_in.row(n12_f_ids[1]); + for (int k = 0; k < 3; k++) { + if (F_in(n12_f_ids[0], k) == v2_id) + F_in(n12_f_ids[0], k) = v3_id; + if (F_in(n12_f_ids[1], k) == v1_id) + F_in(n12_f_ids[1], k) = v_id; + } + GEO::vec3 old_nv = cos_a1 < cos_a ? old_nvs[0] : old_nvs[1]; + bool is_filp = false; + for (int f_id:n12_f_ids) { + std::array vs; + for (int k = 0; k < 3; k++) { + vs[k] = GEO::vec3(V_in(F_in(f_id, k), 0), V_in(F_in(f_id, k), 1), V_in(F_in(f_id, k), 2)); + } + GEO::vec3 new_nv = GEO::Geom::triangle_normal(vs[0], vs[1], vs[2]); + if (GEO::dot(old_nv, new_nv) < 0) { + is_filp = true; + break; + } + } + if (is_filp) { + F_in.row(n12_f_ids[0]) = f1_old; + F_in.row(n12_f_ids[1]) = f2_old; + continue; + } + + // non outside envelop + std::unordered_set new_f_ids; + new_f_ids.insert(n12_f_ids.begin(), n12_f_ids.end()); + if (isOutEnvelop(new_f_ids, geo_mesh, face_aabb_tree)) { + F_in.row(n12_f_ids[0]) = f1_old; + F_in.row(n12_f_ids[1]) = f2_old; + continue; + } + + // real update + conn_fs[v1_id].erase(n12_f_ids[1]); + conn_fs[v2_id].erase(n12_f_ids[0]); + conn_fs[v_id].insert(n12_f_ids[1]); + conn_fs[v3_id].insert(n12_f_ids[0]); + is_swapped = true; + break; + } + if (is_swapped) + cnt++; + } + logger().debug("{} faces are swapped!!", cnt); +} + +double Preprocess::getCosAngle(int v_id, int v1_id, int v2_id) { + return GEO::Geom::cos_angle(GEO::vec3(V_in(v1_id, 0), V_in(v1_id, 1), V_in(v1_id, 2)) - + GEO::vec3(V_in(v_id, 0), V_in(v_id, 1), V_in(v_id, 2)), + GEO::vec3(V_in(v2_id, 0), V_in(v2_id, 1), V_in(v2_id, 2)) - + GEO::vec3(V_in(v_id, 0), V_in(v_id, 1), V_in(v_id, 2))); +} + +void Preprocess::simplify(const GEO::Mesh &geo_mesh, const GEO::MeshFacetsAABBWithEps& face_aabb_tree) { + int cnt = 0; +// logger().debug("queue.size() = {}", sm_queue.size()); + while (!sm_queue.empty()) { + std::array v_ids = sm_queue.top().v_ids; + double old_weight = sm_queue.top().weight; + sm_queue.pop(); + + if (!isEdgeValid(v_ids, old_weight)) + continue; + + if (!removeAnEdge(v_ids[0], v_ids[1], geo_mesh, face_aabb_tree)) { + inf_es.push_back(v_ids); + inf_e_tss.push_back(ts); + } else { + cnt++; + if (cnt % 1000 == 0) + logger().debug("1000 vertices removed"); + } + } + logger().debug("{}", cnt); + logger().debug("{}", c); + + if (cnt > 0) + postProcess(geo_mesh, face_aabb_tree); +} + 
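+// simplify() drains the queue of candidate collapses; edges that could not be collapsed are
+// remembered in inf_es together with the timestamp ts. postProcess() re-queues those whose
+// one-ring has changed since (f_tss newer than the recorded timestamp) and calls simplify()
+// again, so the two functions recurse until no further collapse succeeds.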
+void Preprocess::postProcess(const GEO::Mesh &geo_mesh, const GEO::MeshFacetsAABBWithEps& face_aabb_tree){ + logger().debug("postProcess!"); + + std::vector> tmp_inf_es; + const int inf_es_size = inf_es.size(); + for (int i = 0; i < inf_es_size; i++) { + if (!isEdgeValid(inf_es[i])) + continue; + bool is_recal = false; + for (int f_id:conn_fs[inf_es[i][0]]) { + if (f_tss[f_id] > inf_e_tss[i]) { + is_recal = true; + break; + } + } + if (is_recal) + sm_queue.push(ElementInQueue_sm(inf_es[i], getEdgeLength(inf_es[i]))); + else + tmp_inf_es.push_back(inf_es[i]); + } + std::sort(tmp_inf_es.begin(), tmp_inf_es.end()); + tmp_inf_es.erase(std::unique(tmp_inf_es.begin(), tmp_inf_es.end()), tmp_inf_es.end()); + inf_es = tmp_inf_es; + ts++; + inf_e_tss = std::vector(inf_es.size(), ts); + + simplify(geo_mesh, face_aabb_tree); +} + +bool Preprocess::removeAnEdge(int v1_id, int v2_id, const GEO::Mesh &geo_mesh, const GEO::MeshFacetsAABBWithEps& face_aabb_tree) { + if (!isOneRingClean(v1_id) || !isOneRingClean(v2_id)) + return false; + + //check if flip after collapsing + std::vector n12_f_ids; + setIntersection(conn_fs[v1_id], conn_fs[v2_id], n12_f_ids); + if (n12_f_ids.size() != 2) {//!!! +// logger().debug("error: n12_f_ids.size()!=2"); + return false; + } + + std::unordered_set new_f_ids; + for (int f_id:conn_fs[v1_id]) { + if (f_id != n12_f_ids[0] && f_id != n12_f_ids[1]) { + new_f_ids.insert(f_id); + } + } + for (int f_id:conn_fs[v2_id]) { + if (f_id != n12_f_ids[0] && f_id != n12_f_ids[1]) { + new_f_ids.insert(f_id); + } + } + + //check euclidean characteristics (delete degenerate and duplicate elements + if(!isEuclideanValid(v1_id, v2_id)) + return false; + + //computing normal for checking flipping + for (int f_id:new_f_ids) { +// for(int f_id:conn_fs[v1_id]) { +// if (f_id != n12_f_ids[0] && f_id != n12_f_ids[1]) { + std::array vs; + for (int j = 0; j < 3; j++) { + vs[j] = GEO::vec3(V_in(F_in(f_id, j), 0), V_in(F_in(f_id, j), 1), V_in(F_in(f_id, j), 2)); + } + GEO::vec3 old_nv = GEO::Geom::triangle_normal(vs[0], vs[1], vs[2]); + + for (int j = 0; j < 3; j++) { +// if (F_in(f_id, j) == v1_id) { + if (F_in(f_id, j) == v1_id || F_in(f_id, j) == v2_id) { + vs[j] = (GEO::vec3(V_in(v1_id, 0), V_in(v1_id, 1), V_in(v1_id, 2)) + + GEO::vec3(V_in(v2_id, 0), V_in(v2_id, 1), V_in(v2_id, 2))) / 2; + break; + } + } + GEO::vec3 new_nv = GEO::Geom::triangle_normal(vs[0], vs[1], vs[2]); + if (GEO::dot(old_nv, new_nv) < 0) + return false; +// } + } + + //check if go outside of envelop + Eigen::VectorXd v1_old_p = V_in.row(v1_id); + Eigen::VectorXd v2_old_p = V_in.row(v2_id); + V_in.row(v1_id) = (V_in.row(v1_id) + V_in.row(v2_id)) / 2; + GEO::vec3 mid_p(V_in(v1_id, 0), V_in(v1_id, 1), V_in(v1_id, 2)); + GEO::vec3 nearest_p; + double _; + face_aabb_tree.nearest_facet(mid_p, nearest_p, _);//project back to surface + for(int j=0;j<3;j++) + V_in(v1_id, j) = nearest_p[j]; + V_in.row(v2_id) = V_in.row(v1_id); + if (isOutEnvelop(new_f_ids, geo_mesh, face_aabb_tree)) { + V_in.row(v1_id) = v1_old_p; + V_in.row(v2_id) = v2_old_p; + return false; + } + c++; + + //real update + std::unordered_set n_v_ids;//get this info before real update for later usage + for (int f_id:new_f_ids) { + for (int j = 0; j < 3; j++) { + if (F_in(f_id, j) != v1_id && F_in(f_id, j) != v2_id) + n_v_ids.insert(F_in(f_id, j)); + } + } + + v_is_removed[v1_id] = true; + for (int f_id:n12_f_ids) { + f_is_removed[f_id] = true; + for (int j = 0; j < 3; j++) {//rm conn_fs + if (F_in(f_id, j) != v1_id) { + conn_fs[F_in(f_id, j)].erase(f_id); +// auto it = 
std::find(conn_fs[F_in(f_id, j)].begin(), conn_fs[F_in(f_id, j)].end(), f_id); +// if (it != conn_fs[F_in(f_id, j)].end()) +// conn_fs[F_in(f_id, j)].erase(it); + } + } + } + for (int f_id:conn_fs[v1_id]) {//add conn_fs + if (f_is_removed[f_id]) + continue; + conn_fs[v2_id].insert(f_id); + for (int j = 0; j < 3; j++) { + if (F_in(f_id, j) == v1_id) + F_in(f_id, j) = v2_id; + } + } + + //update timestamps + ts++; + for (int f_id:conn_fs[v2_id]) { + f_tss[f_id] = ts; + } + + //push new edges into the queue + for (int v_id:n_v_ids) { + double weight = getEdgeLength(v2_id, v_id); + sm_queue.push(ElementInQueue_sm(std::array({{v2_id, v_id}}), weight)); + sm_queue.push(ElementInQueue_sm(std::array({{v_id, v2_id}}), weight)); + } + + return true; +} + +bool Preprocess::isEdgeValid(const std::array& v_ids){ + if(v_is_removed[v_ids[0]] || v_is_removed[v_ids[1]]) + return false; + + if(!isHaveCommonEle(conn_fs[v_ids[0]], conn_fs[v_ids[1]])) + return false; + + return true; +} + +bool Preprocess::isEdgeValid(const std::array& v_ids, double old_weight){ + if(v_is_removed[v_ids[0]] || v_is_removed[v_ids[1]]) + return false; + + if(!isHaveCommonEle(conn_fs[v_ids[0]], conn_fs[v_ids[1]])) + return false; + + if(old_weight!=getEdgeLength(v_ids))//the edge is outdated + return false; + + return true; +} + +double Preprocess::getEdgeLength(const std::array& v_ids){ + return (V_in.row(v_ids[0]) - V_in.row(v_ids[1])).squaredNorm(); +} + +double Preprocess::getEdgeLength(int v1_id, int v2_id){ + return (V_in.row(v1_id) - V_in.row(v2_id)).squaredNorm(); +} + +bool Preprocess::isOneRingClean(int v1_id){ + std::vector> n1_es; + for (int f_id:conn_fs[v1_id]) { + for (int j = 0; j < 3; j++) { + if (F_in(f_id, j) != v1_id) { + if (F_in(f_id, (j + 1) % 3) == v1_id) + n1_es.push_back(std::array({{F_in(f_id, j), 0}})); + else if (F_in(f_id, (j + 2) % 3) == v1_id) + n1_es.push_back(std::array({{F_in(f_id, j), 1}})); + } + } + } + if (n1_es.size() % 2 != 0) + return true; + std::sort(n1_es.begin(), n1_es.end()); + for (int i = 0; i < n1_es.size(); i += 2) { + if (n1_es[i][0] == n1_es[i + 1][0] && n1_es[i][1] != n1_es[i + 1][1]); + else + return false; + } + + return true; +} + + +bool Preprocess::isOutEnvelop(const std::unordered_set& new_f_ids, + const GEO::Mesh &geo_sf_mesh, const GEO::MeshFacetsAABBWithEps& geo_face_tree) +{ + size_t num_querried = 0; + size_t num_tris = new_f_ids.size(); + size_t num_samples = 0; + size_t tri_idx = 0; + + static thread_local std::vector ps; + for (int f_id:new_f_ids) { + //sample triangles except one-ring of v1v2 + std::array vs = {{ + GEO::vec3(V_in(F_in(f_id, 0), 0), V_in(F_in(f_id, 0), 1), V_in(F_in(f_id, 0), 2)), + GEO::vec3(V_in(F_in(f_id, 1), 0), V_in(F_in(f_id, 1), 1), V_in(F_in(f_id, 1), 2)), + GEO::vec3(V_in(F_in(f_id, 2), 0), V_in(F_in(f_id, 2), 1), V_in(F_in(f_id, 2), 2))}}; + ps.clear(); + sampleTriangle(vs, ps, state.sampling_dist); + ++tri_idx; + num_samples += ps.size(); + +// logger().debug("ps.size = {}", ps.size()); +// logger().debug("is output samples?"); +// int anw = 0; +// cin >> anw; +// if (anw != 0) { +//// if (true) { +// Eigen::MatrixXd V_tmp(ps.size() * 3 + 3, 3); +// Eigen::MatrixXi F_tmp(ps.size() + 1, 3); +// for (int i = 0; i < 3; i++) { +// for (int j = 0; j < 3; j++) +// V_tmp(i, j) = vs[i][j]; +// F_tmp(0, i) = i; +// } +// +// for (int i = 0; i < ps.size(); i++) { +// for (int k = 0; k < 3; k++) { +// for (int j = 0; j < 3; j++) +// V_tmp((1 + i) * 3 + k, j) = ps[i][j]; +// F_tmp(1 + i, k) = (1 + i) * 3 + k; +// } +// } +// 
igl::writeSTL(state.working_dir + "_sample.stl", V_tmp, F_tmp); +// } + + +// std::array ls; +// for (int i = 0; i < 3; i++) { +// ls[i] = GEO::length(vs[i] - vs[(i + 1) % 3]); +// } +// auto min_max = std::minmax_element(ls.begin(), ls.end()); +// int min_i = min_max.first - ls.begin(); +// int max_i = min_max.second - ls.begin(); +// +// double n = ls[max_i] / state.sampling_dist; +// if (n <= 1) { +// for (int i = 0; i < 3; i++) +// ps.push_back(vs[i]); +// } else { +// n = int(n) + 1; +// ps.reserve(n + n + 1); +// for (int j = 0; j <= n; j++) { +// ps.push_back(j / n * vs[(min_i + 2) % 3] + (n - j) / n * vs[min_i]); +// if (j == n) +// break; +// ps.push_back(j / n * vs[(min_i + 2) % 3] + (n - j) / n * vs[(min_i + 1) % 3]); +// } +// if (ls[min_i] > state.sampling_dist) { +// const int ps_size = ps.size(); +// for (int i = 0; i < ps_size - 1; i += 2) { +// double m = GEO::length(ps[i] - ps[i + 1]) / state.sampling_dist; +// if (m < 1) +// break; +// m = int(m) + 1; +// for (int j = 1; j < m; j++) +// ps.push_back(j / m * ps[i] + (m - j) / m * ps[i + 1]); +// } +// } +// } + + //check sampling points + GEO::vec3 nearest_point; + double sq_dist = std::numeric_limits::max(); + GEO::index_t prev_facet = GEO::NO_FACET; + + for (const GEO::vec3 ¤t_point:ps) { + if (prev_facet != GEO::NO_FACET) { + get_point_facet_nearest_point(geo_sf_mesh, current_point, prev_facet, nearest_point, sq_dist); + } + if (sq_dist > state.eps_2) { + geo_face_tree.facet_in_envelope_with_hint( + current_point, state.eps_2, prev_facet, nearest_point, sq_dist); + } + ++num_querried; + if (sq_dist > state.eps_2) { + logger().trace("num_triangles {} / {} num_queries {} / {}", + tri_idx - 1, num_tris, num_querried, num_samples); + return true; + } + } + } + logger().trace("num_triangles {} / {} num_queries {} / {}", + tri_idx - 1, num_tris, num_querried, num_samples); + + return false; +} + +bool Preprocess::isPointOutEnvelop(int v_id, const GEO::MeshFacetsAABBWithEps& geo_face_tree){ + if (geo_face_tree.squared_distance(GEO::vec3(V_in(v_id, 0), V_in(v_id, 1), V_in(v_id, 2))) > state.eps_2) + return true; + return false; +} + +int calEuclidean(const std::vector>& fs){ + std::vector> es; + es.reserve(fs.size()*3); + std::unordered_set vs; + for(int i=0;i e={{fs[i][j], fs[i][(j+1)%3]}}; + if(e[0]>e[1]) + e={{e[1], e[0]}}; + es.push_back(e); + } + } + std::sort(es.begin(), es.end()); + es.erase(std::unique(es.begin(), es.end()), es.end()); + + return vs.size()-es.size()+fs.size(); +} + +bool Preprocess::isEuclideanValid(int v1_id, int v2_id){ +// logger().debug("v1:{}", v1_id); +// for (int f_id:conn_fs[v1_id]) { +// logger().debug("{}{}{} {}", F_in(f_id, 0), ' ', F_in(f_id, 1), F_in(f_id, 2)); +// } +// logger().debug("v2:{}", v2_id); +// for (int f_id:conn_fs[v2_id]) { +// logger().debug("{}{}{} {}", F_in(f_id, 0), ' ', F_in(f_id, 1), F_in(f_id, 2)); +// } + + std::vector> fs; + for(int I=0;I<2;I++) { + int v_id = I == 0 ? 
v1_id : v2_id; + for (int f_id:conn_fs[v_id]) { + if (F_in(f_id, 0) != F_in(f_id, 1) && F_in(f_id, 1) != F_in(f_id, 2) && F_in(f_id, 0) != F_in(f_id, 2)) { + std::array f = {{F_in(f_id, 0), F_in(f_id, 1), F_in(f_id, 2)}}; + std::sort(f.begin(), f.end()); + fs.push_back(f); + } + } + } + std::sort(fs.begin(), fs.end()); + fs.erase(std::unique(fs.begin(), fs.end()), fs.end()); +// logger().debug("fs.size() = {}", fs.size()); + int ec0=calEuclidean(fs); +// logger().debug("{}", ec0); + + std::vector> fs1; + for(int i=0;i f = {{fs[i][0], fs[i][1], fs[i][2]}}; + std::sort(f.begin(), f.end()); + fs1.push_back(f); + } + } + std::sort(fs1.begin(), fs1.end()); + fs1.erase(std::unique(fs1.begin(), fs1.end()), fs1.end()); +// logger().debug("fs1.size() = {}", fs1.size()); + int ec1=calEuclidean(fs1); +// logger().debug("{}", ec1); + +// pausee(); + + if(ec0!=ec1) + return false; + return true; +} + +void Preprocess::outputSurfaceColormap(const GEO::MeshFacetsAABBWithEps& geo_face_tree, const GEO::Mesh& geo_sf_mesh) { + Eigen::VectorXd eps_dis(F_in.rows()); + for(int f_id=0;f_id ps; + std::array vs = {{ + geo_sf_mesh.vertices.point(geo_sf_mesh.facets.vertex(f_id, 0)), + geo_sf_mesh.vertices.point(geo_sf_mesh.facets.vertex(f_id, 1)), + geo_sf_mesh.vertices.point(geo_sf_mesh.facets.vertex(f_id, 2))}}; +// GEO::vec3(V_in(F_in(f_id, 0), 0), V_in(F_in(f_id, 0), 1), V_in(F_in(f_id, 0), 2)), +// GEO::vec3(V_in(F_in(f_id, 1), 0), V_in(F_in(f_id, 1), 1), V_in(F_in(f_id, 1), 2)), +// GEO::vec3(V_in(F_in(f_id, 2), 0), V_in(F_in(f_id, 2), 1), V_in(F_in(f_id, 2), 2))}; + std::array ls; + for (int i = 0; i < 1; i++) { + ls[i] = GEO::length(vs[i] - vs[(i + 1) % 3]); + // + double n = int(ls[i] / state.sampling_dist + 1); + for (int j = 1; j < n; j++) { + ps.push_back(double(j) / n * vs[i] + (n - double(j)) / n * vs[(i + 1) % 3]); + } + // + } +// auto min_max = std::minmax_element(ls.begin(), ls.end()); +// int min_i = min_max.first - ls.begin(); +// int max_i = min_max.second - ls.begin(); +// +// double n = int(ls[max_i] / state.sampling_dist + 1); +// ps.reserve(2*n); +// for (int j = 0; j <= n; j++) { +// ps.push_back(j / n * vs[(min_i + 2) % 3] + (n - j) / n * vs[min_i]); +// ps.push_back(j / n * vs[(min_i + 2) % 3] + (n - j) / n * vs[(min_i + 1) % 3]); +// } + +// if(ls[min_i] > state.sampling_dist) { +// int ps_size = ps.size(); +// for (int i = 0; i < ps_size; i += 2) { +// double m = int(GEO::length(ps[i] - ps[i + 1]) / state.sampling_dist + 1); +// if(m==0) +// break; +// for (int j = 1; j < m; j++) +// ps.push_back(j / m * ps[i] + (m - j) / m * ps[i + 1]); +// } +// } +// ps.push_back(vs[(min_i + 2) % 3]); + for(int i=0;i<3;i++) { + ps.push_back(vs[i]); + } +// ps.push_back((vs[0]+vs[1]+vs[2])/3); + + //check sampling points + GEO::vec3 current_point = ps[0]; + GEO::vec3 nearest_point; + double sq_dist; + GEO::index_t prev_facet = geo_face_tree.nearest_facet(current_point, nearest_point, sq_dist); + + double max_dis = 0; + int cnt=0; + GEO::vec3 pp; + std::vector fs; + for (const GEO::vec3 ¤t_point:ps) { + double dis; + int n_facet = geo_face_tree.nearest_facet(current_point, nearest_point, dis); +// sq_dist = current_point.distance2(nearest_point); +// geo_face_tree.nearest_facet_with_hint(current_point, prev_facet, nearest_point, sq_dist); +// double dis = current_point.distance2(nearest_point); +// if(f_id==2514) +// logger().debug("{}: {} {} {}", cnt, dis, sq_dist, int(prev_facet)); + if (dis > max_dis) { + max_dis = dis; + pp=current_point; + } + cnt++; + fs.push_back(int(n_facet)); + } + cnt = 0; + 
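+        // the hard-coded face ids below (1681, and 2514 in the commented code above) look like
+        // leftover per-face debugging probes for one specific input mesh.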
if(f_id==1681) { + for (const GEO::vec3 &p:ps) { + logger().debug("{}: {}, {}, {}; {}; {}", cnt, p[0], p[1], p[2], fs[cnt], geo_face_tree.squared_distance(p)); + cnt++; + } + } + + eps_dis(f_id) = sqrt(max_dis / state.eps_2); + if(eps_dis(f_id)>1) { + logger().debug("ERROR: simplified input goes outside of the envelop"); + logger().debug("{}", f_id); + logger().debug("{}", eps_dis(f_id)); + logger().debug("{} {}", max_dis, state.eps_2); + cnt = 0; + for (const GEO::vec3 &p:ps) { + logger().debug("{}: {}, {}, {}; {}; {}", cnt, p[0], p[1], p[2], fs[cnt], geo_face_tree.squared_distance(p)); + cnt++; + } +// logger().debug("{}", geo_face_tree.squared_distance(pp)); +// double dd; +// logger().debug("{}", int(geo_face_tree.nearest_facet(pp, nearest_point, dd))); +// logger().debug("{}", dd); + + std::vector vf={{1681, 1675, 1671, 1666}}; + for(int j=0;j v_ids = {{v1_id ,v2_id, v3_id}}; + for (int k = 0; k < 3; k++) { + logger().debug("{}: {} {} {}", v_ids[k], geo_sf_mesh.vertices.point(v_ids[k])[0], geo_sf_mesh.vertices.point(v_ids[k])[1], geo_sf_mesh.vertices.point(v_ids[k])[2]); + } + } + + + double min_dis=0; + int vf_id = 0; + std::vector diss; + GEO::vec3 nearest_p; + double _1, _2, _3; + for(int i=0;i +// +// This Source Code Form is subject to the terms of the Mozilla Public License +// v. 2.0. If a copy of the MPL was not distributed with this file, You can +// obtain one at http://mozilla.org/MPL/2.0/. +// +// Created by Yixin Hu on 10/12/17. +// + +#ifndef NEW_GTET_PREPROCESS_H +#define NEW_GTET_PREPROCESS_H + +#include +#include +#include +#include +#include +#include +#include + +namespace tetwild { + +class ElementInQueue_sm{ +public: + std::array v_ids; + double weight; + + ElementInQueue_sm(){} + ElementInQueue_sm(const std::array& ids, double w): + v_ids(ids), weight(w){} +}; + +struct cmp_sm { + bool operator()(const ElementInQueue_sm &e1, const ElementInQueue_sm &e2) { + if (e1.weight == e2.weight) + return e1.v_ids < e2.v_ids; + return e1.weight > e2.weight; + } +}; + +class Preprocess { + std::priority_queue, cmp_sm> sm_queue; + int c=0; +public: + State &state; + + Eigen::MatrixXd V_in; + Eigen::MatrixXi F_in; + std::vector v_is_removed; + std::vector f_is_removed; + std::vector> conn_fs; + + Preprocess(State &st) : state(st) { } + + bool init(const Eigen::MatrixXd& V_tmp, const Eigen::MatrixXi& F_tmp, GEO::Mesh& geo_b_mesh, GEO::Mesh& geo_sf_mesh, const Args &args); + + void getBoundaryMesh(GEO::Mesh& b_mesh); + void process(GEO::Mesh& geo_sf_mesh, std::vector& m_vertices, std::vector>& m_faces, const Args &args); + + void simplify(const GEO::Mesh &geo_mesh, const GEO::MeshFacetsAABBWithEps& face_aabb_tree); + void postProcess(const GEO::Mesh &geo_mesh, const GEO::MeshFacetsAABBWithEps& face_aabb_tree); + bool removeAnEdge(int v1_id, int v2_id, const GEO::Mesh &geo_mesh, const GEO::MeshFacetsAABBWithEps& face_aabb_tree); + + void swap(const GEO::Mesh &geo_mesh, const GEO::MeshFacetsAABBWithEps& face_aabb_tree); + double getCosAngle(int v_id, int v1_id, int v2_id); + + double getEdgeLength(const std::array& v_ids); + double getEdgeLength(int v1_id, int v2_id); + + bool isEdgeValid(const std::array& v_ids, double old_weight); + bool isEdgeValid(const std::array& v_ids); + bool isOneRingClean(int v_id); + bool isOutEnvelop(const std::unordered_set& new_f_ids, const GEO::Mesh &geo_mesh, const GEO::MeshFacetsAABBWithEps& face_aabb_tree); + bool isPointOutEnvelop(int v_id, const GEO::MeshFacetsAABBWithEps& face_aabb_tree); + bool isEuclideanValid(int v1_id, int v2_id); + //when 
call this function, the coordinate of v1 has already been changed + + int ts=0; + std::vector> inf_es; + std::vector inf_e_tss; + std::vector f_tss; + + void outputSurfaceColormap(const GEO::MeshFacetsAABBWithEps& geo_face_tree, const GEO::Mesh& geo_sf_mesh); +}; + +} // namespace tetwild + +#endif //NEW_GTET_PREPROCESS_H diff --git a/contrib/NeRF-Editing/TetWild/src/tetwild/Serialization.h b/contrib/NeRF-Editing/TetWild/src/tetwild/Serialization.h new file mode 100644 index 00000000..0823b6d5 --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/src/tetwild/Serialization.h @@ -0,0 +1,154 @@ +// This file is part of TetWild, a software for generating tetrahedral meshes. +// +// Copyright (C) 2018 Jeremie Dumas +// +// This Source Code Form is subject to the terms of the Mozilla Public License +// v. 2.0. If a copy of the MPL was not distributed with this file, You can +// obtain one at http://mozilla.org/MPL/2.0/. +// +// Created by Jeremie Dumas on 09/04/18. +// + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +//for serialization +namespace igl { + namespace serialization { + template<> + inline void serialize(const tetwild::Point_3 &p, std::vector &buffer) { + ::igl::serialize(STR(CGAL::exact(p[0])), std::string("x"), buffer); + ::igl::serialize(STR(CGAL::exact(p[1])), std::string("y"), buffer); + ::igl::serialize(STR(CGAL::exact(p[2])), std::string("z"), buffer); + } + + template<> + inline void deserialize(tetwild::Point_3 &p, const std::vector &buffer) { + using namespace tetwild; + std::string s1, s2, s3; + ::igl::deserialize(s1, std::string("x"), buffer); + ::igl::deserialize(s2, std::string("y"), buffer); + ::igl::deserialize(s3, std::string("z"), buffer); + p=Point_3(CGAL_FT(s1), CGAL_FT(s2), CGAL_FT(s3)); + } + + template<> + inline void serialize(const tetwild::Point_3f &p, std::vector &buffer) { + ::igl::serialize(p[0], std::string("x"), buffer); + ::igl::serialize(p[1], std::string("y"), buffer); + ::igl::serialize(p[2], std::string("z"), buffer); + } + + template<> + inline void deserialize(tetwild::Point_3f &p, const std::vector &buffer) { + double x, y, z; + ::igl::deserialize(x, std::string("x"), buffer); + ::igl::deserialize(y, std::string("y"), buffer); + ::igl::deserialize(z, std::string("z"), buffer); + p=tetwild::Point_3f(x, y, z); + } + + template<> + inline void serialize(const std::array &arr, std::vector &buffer) { + for(int i=0;i<3;i++) + ::igl::serialize(arr[i], std::to_string(i), buffer); + } + + template<> + inline void deserialize(std::array &arr, const std::vector &buffer) { + for(int i=0;i<3;i++) + ::igl::deserialize(arr[i], std::to_string(i), buffer); + } + + template<> + inline void serialize(const std::array &arr, std::vector &buffer) { + for(int i=0;i<4;i++) + ::igl::serialize(arr[i], std::to_string(i), buffer); + } + + template<> + inline void deserialize(std::array &arr, const std::vector &buffer) { + for(int i=0;i<4;i++) + ::igl::deserialize(arr[i], std::to_string(i), buffer); + } + template<> + inline void serialize(const tetwild::TetVertex &v, std::vector &buffer) { + ::igl::serialize(v.pos, std::string("pos"), buffer); + ::igl::serialize(v.posf, std::string("posf"), buffer); + + ::igl::serialize(v.is_rounded, std::string("is_rounded"), buffer); + ::igl::serialize(v.is_on_surface, std::string("is_on_surface"), buffer); + ::igl::serialize(v.is_on_bbox, std::string("is_on_bbox"), buffer); + ::igl::serialize(v.is_on_boundary, std::string("is_on_boundary"), buffer); + + ::igl::serialize(v.adaptive_scale, 
std::string("adaptive_scale"), buffer); + +// ::igl::serialize(v.on_fixed_vertex, std::string("on_fixed_vertex"), buffer); +// std::vector tmp; +// for(auto it=v.on_edge.begin();it!=v.on_edge.end();it++) +// tmp.push_back(*it); +// ::igl::serialize(tmp, std::string("on_edge"), buffer); +// tmp.clear(); +// for(auto it=v.on_face.begin();it!=v.on_face.end();it++) +// tmp.push_back(*it); +// ::igl::serialize(tmp, std::string("on_face"), buffer); +// tmp.clear(); +// for(auto it=v.conn_tets.begin();it!=v.conn_tets.end();it++) +// tmp.push_back(*it); +// ::igl::serialize(tmp, std::string("conn_tets"), buffer); + } + + template<> + inline void deserialize(tetwild::TetVertex &v, const std::vector &buffer) { + ::igl::deserialize(v.pos, std::string("pos"), buffer); + ::igl::deserialize(v.posf, std::string("posf"), buffer); + + ::igl::deserialize(v.is_rounded, std::string("is_rounded"), buffer); + ::igl::deserialize(v.is_on_surface, std::string("is_on_surface"), buffer); + ::igl::deserialize(v.is_on_bbox, std::string("is_on_bbox"), buffer); + ::igl::deserialize(v.is_on_boundary, std::string("is_on_boundary"), buffer); + + ::igl::deserialize(v.adaptive_scale, std::string("adaptive_scale"), buffer); + +// ::igl::deserialize(v.on_fixed_vertex, std::string("on_fixed_vertex"), buffer); +// std::vector tmp; +// ::igl::deserialize(tmp, std::string("on_edge"), buffer); +// for(int i=0;i +// inline void serialize(const TetQuality &q, std::vector &buffer) { +// ::igl::serialize(q.min_d_angle, std::string("min_d_angle"), buffer); +// ::igl::serialize(q.max_d_angle, std::string("max_d_angle"), buffer); +// ::igl::serialize(q.asp_ratio_2, std::string("asp_ratio_2"), buffer); +// ::igl::serialize(q.slim_energy, std::string("slim_energy"), buffer); +// ::igl::serialize(q.volume, std::string("volume"), buffer); +// +// } +// +// template<> +// inline void deserialize(TetQuality &q, const std::vector &buffer) { +// ::igl::deserialize(q.min_d_angle, std::string("min_d_angle"), buffer); +// ::igl::deserialize(q.max_d_angle, std::string("max_d_angle"), buffer); +// ::igl::deserialize(q.asp_ratio_2, std::string("asp_ratio_2"), buffer); +// ::igl::deserialize(q.slim_energy, std::string("slim_energy"), buffer); +// ::igl::deserialize(q.volume, std::string("volume"), buffer); +// +// } + } +} diff --git a/contrib/NeRF-Editing/TetWild/src/tetwild/SimpleTetrahedralization.cpp b/contrib/NeRF-Editing/TetWild/src/tetwild/SimpleTetrahedralization.cpp new file mode 100644 index 00000000..01c566d8 --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/src/tetwild/SimpleTetrahedralization.cpp @@ -0,0 +1,742 @@ +// This file is part of TetWild, a software for generating tetrahedral meshes. +// +// Copyright (C) 2018 Yixin Hu +// +// This Source Code Form is subject to the terms of the Mozilla Public License +// v. 2.0. If a copy of the MPL was not distributed with this file, You can +// obtain one at http://mozilla.org/MPL/2.0/. +// +// Created by Yixin Hu on 3/31/17. 
+// + +#include +#include +#include +#include +#include +#include + +//centroid +#include +#include +#include + +//triangulation +#include +#include +#include +#include +#include +typedef CGAL::Constrained_Delaunay_triangulation_2 CDT; +typedef CDT::Point Point_cdt_2; +typedef CGAL::Polygon_2 Polygon_2; + +//arrangement +#include +#include +#include +#include +typedef CGAL::Arr_segment_traits_2 Traits_2; +typedef Traits_2::Point_2 Point_arr_2; +typedef Traits_2::X_monotone_curve_2 Segment_arr_2; +typedef CGAL::Arrangement_2 Arrangement_2; + +namespace tetwild { + +void SimpleTetrahedralization::tetra(std::vector& tet_vertices, std::vector>& tets) { + std::vector &faces = MC.bsp_faces; + std::vector &vertices = MC.bsp_vertices; + + ///cal arrangement & tetrahedralization + triangulation(tet_vertices, tets); + logger().debug("#v = {} #t = {}", tet_vertices.size(), tets.size()); + + for (int i = 0; i < tets.size(); i++) { + for (int j = 0; j < 4; j++) { + tet_vertices[tets[i][j]].conn_tets.insert(i); + } + } +} + +void SimpleTetrahedralization::triangulation(std::vector& tet_vertices, std::vector>& tets) { + std::vector &bsp_nodes = MC.bsp_nodes; + std::vector &bsp_faces = MC.bsp_faces; + std::vector &bsp_edges = MC.bsp_edges; + std::vector &bsp_vertices = MC.bsp_vertices; + const std::vector& m_vertices=MC.m_vertices; + const std::vector>& m_faces=MC.m_faces; + + igl::Timer tmp_timer; + + tmp_timer.start(); + ///init + m_vertices_size = MC.m_vertices.size(); + + int original_v_size = bsp_vertices.size(); + + ///cal arrangement + for (int i = 0; i < bsp_faces.size(); i++) { + if (bsp_faces[i].div_faces.size() == 0) + continue; + + ///cal arrangement + Plane_3 pln; + constructPlane(i, pln); + MC.initT(pln.orthogonal_vector()); + MC.getOrientedVertices(i); + Polygon_2 poly; + for (int j = 0; j < bsp_faces[i].vertices.size(); j++) { + poly.push_back(MC.to2d(bsp_vertices[bsp_faces[i].vertices[j]])); + } + assert(poly.is_simple()); + + Arrangement_2 arr; + std::vector arr_segs; + for (int j = 0; j < poly.size(); j++) + arr_segs.push_back(Segment_arr_2(poly[j], poly[(j + 1) % poly.size()])); + for (auto it = bsp_faces[i].div_faces.begin(); it != bsp_faces[i].div_faces.end(); it++) { + std::array df_vs; + for (int j = 0; j < 3; j++) + df_vs[j] = MC.to2d(MC.m_vertices[MC.m_faces[*it][j]]); + for (int j = 0; j < 3; j++) { + Line_2 l(df_vs[j], df_vs[(j + 1) % 3]); + int cnt_pos = 0, cnt_neg = 0; + for (int k = 0; k < poly.size(); k++) { + CGAL::Oriented_side side=l.oriented_side(poly[k]); + if(side==CGAL::ON_POSITIVE_SIDE) + cnt_pos++; + if(side==CGAL::ON_NEGATIVE_SIDE) + cnt_neg++; + } + if(cnt_pos>0 && cnt_neg>0) + arr_segs.push_back(Segment_arr_2(df_vs[j], df_vs[(j + 1) % 3])); + } + } + CGAL::insert(arr, arr_segs.begin(), arr_segs.end()); + + std::map vs_arr2bsp; + std::vector> new_v_ids(bsp_faces[i].edges.size(), std::vector()); + std::vector segs(bsp_faces[i].edges.size(), Segment_3()); + for (int j = 0; j < bsp_faces[i].edges.size(); j++) { + segs[j] = Segment_3(bsp_vertices[bsp_edges[bsp_faces[i].edges[j]].vertices[0]], + bsp_vertices[bsp_edges[bsp_faces[i].edges[j]].vertices[1]]); + } + for (auto it = arr.vertices_begin(); it != arr.vertices_end(); it++) { + if (poly.has_on_unbounded_side(it->point())) { + vs_arr2bsp[it->point()] = -1; + continue; + } + auto vit = std::find(poly.vertices_begin(), poly.vertices_end(), it->point());//todo + if (vit != poly.vertices_end()) { + int n = vit - poly.vertices_begin(); + vs_arr2bsp[it->point()] = bsp_faces[i].vertices[n]; + continue; + } + Point_3 p = 
MC.to3d(it->point(), pln); + int on_e_local_id = -1; + for (int j = 0; j < bsp_faces[i].edges.size(); j++) { + if (segs[j].has_on(p)) { + on_e_local_id = j; + break; + } + } + if (on_e_local_id >= 0) {//if the vertex is on the edges of this bsp_face + bsp_vertices.push_back(p); + vs_arr2bsp[it->point()] = bsp_vertices.size() - 1; + new_v_ids[on_e_local_id].push_back(bsp_vertices.size() - 1); + bsp_faces[i].vertices.push_back(bsp_vertices.size() - 1); + continue; + } + //else + bsp_vertices.push_back(p); + vs_arr2bsp[it->point()] = bsp_vertices.size() - 1; + bsp_faces[i].vertices.push_back(bsp_vertices.size() - 1); + } + + std::vector> es; + for (auto it = arr.edges_begin(); it != arr.edges_end(); it++) { + Point_2 &p1 = it->source()->point(); + Point_2 &p2 = it->target()->point(); + if (vs_arr2bsp[p1] < 0 || vs_arr2bsp[p2] < 0) + continue; + std::array e = {{vs_arr2bsp[p1], vs_arr2bsp[p2]}}; + std::sort(e.begin(), e.end()); + es.push_back(e); + } + std::sort(es.begin(), es.end()); + + std::vector> tmp_es; + int old_e_size = bsp_faces[i].edges.size(); + for (int j = 0; j < old_e_size; j++) { + if (new_v_ids[j].size() == 0) + continue; + std::vector new_es = bsp_edges[bsp_faces[i].edges[j]].vertices; + new_es.insert(new_es.end(), new_v_ids[j].begin(), new_v_ids[j].end()); + std::sort(new_es.begin(), new_es.end(), [&](int a, int b) { + return bsp_vertices[a] < bsp_vertices[b]; + }); + + std::vector new_e_ids; + for (int k = 0; k < new_es.size() - 1; k++) { + BSPEdge new_bsp_e(new_es[k], new_es[k + 1]); + new_bsp_e.conn_faces = bsp_edges[bsp_faces[i].edges[j]].conn_faces; + if (k == 0) + bsp_edges[bsp_faces[i].edges[j]] = new_bsp_e; + else { + bsp_edges.push_back(new_bsp_e); + new_e_ids.push_back(bsp_edges.size() - 1); + } + + std::array e = {{new_es[k], new_es[k + 1]}}; + std::sort(e.begin(), e.end()); + tmp_es.push_back(e); + } + + for (auto it = bsp_edges[bsp_faces[i].edges[j]].conn_faces.begin(); + it != bsp_edges[bsp_faces[i].edges[j]].conn_faces.end(); it++) { + bsp_faces[*it].edges.insert(bsp_faces[*it].edges.end(), new_e_ids.begin(), new_e_ids.end()); + if (*it == i) + continue; + bsp_faces[*it].vertices.insert(bsp_faces[*it].vertices.end(), new_v_ids[j].begin(), new_v_ids[j].end()); + } + } + std::sort(tmp_es.begin(), tmp_es.end()); + + std::vector> diff_es; + std::set_difference(es.begin(), es.end(), tmp_es.begin(), tmp_es.end(), + std::back_inserter(diff_es)); + for (int j = 0; j < diff_es.size(); j++) { + BSPEdge e(diff_es[j][0], diff_es[j][1]); + bsp_edges.push_back(e); + bsp_faces[i].edges.push_back(bsp_edges.size() - 1); + } + } + logger().debug("2D arr {}", tmp_timer.getElapsedTime()); + tmp_timer.start(); + + tet_vertices.reserve(bsp_vertices.size() + bsp_nodes.size()); + for (unsigned int i = 0; i < bsp_vertices.size(); i++) { + TetVertex v(bsp_vertices[i]); + if (i < m_vertices_size) + v.is_on_surface=true; + tet_vertices.push_back(v); + } + + ///improvement + std::vector is_tets(bsp_nodes.size(), false); + std::unordered_map centroids_for_nodes; + tets.reserve(bsp_nodes.size()*6);//approx + for(unsigned int i = 0; i < bsp_nodes.size(); i++) { + BSPtreeNode &node = bsp_nodes[i]; + std::vector v_ids; + for (int j = 0; j < node.faces.size(); j++) { + for (int k = 0; k < bsp_faces[node.faces[j]].vertices.size(); k++) + v_ids.push_back(bsp_faces[node.faces[j]].vertices[k]); + } + std::sort(v_ids.begin(), v_ids.end()); + v_ids.erase(std::unique(v_ids.begin(), v_ids.end()), v_ids.end()); + + bool is_tet = false; + if (bsp_nodes[i].faces.size() == 4) { + is_tet = true; + for (int j 
= 0; j < bsp_nodes[i].faces.size(); j++) { + if (bsp_faces[bsp_nodes[i].faces[j]].vertices.size() != 3) { + is_tet = false; + break; + } + } + } + + if (is_tet) { + is_tets[i] = true; + std::array t = {{v_ids[0], v_ids[1], v_ids[2], v_ids[3]}}; + if (CGAL::orientation(tet_vertices[t[0]].pos, tet_vertices[t[1]].pos, tet_vertices[t[2]].pos, + tet_vertices[t[3]].pos) != CGAL::POSITIVE) { + int tmp = t[1]; + t[1] = t[3]; + t[3] = tmp; + } + tets.push_back(t); + } else { + TetVertex v; + tet_vertices.push_back(v); + centroids_for_nodes[i] = tet_vertices.size() - 1; + } + } + + logger().debug("improvement {}", tmp_timer.getElapsedTime()); + tmp_timer.start(); + ///cal CDT & insert tets + std::vector>> cdt_faces(bsp_faces.size(), std::vector>()); + CDT cdt; + for (unsigned int i = 0; i < bsp_faces.size(); i++) { + if (bsp_faces[i].vertices.size() == 3) { + cdt_faces[i].push_back(std::array({{bsp_faces[i].vertices[0], bsp_faces[i].vertices[1], + bsp_faces[i].vertices[2]}})); + if (bsp_faces[i].conn_nodes.size() == 1) { + for (int j = 0; j < bsp_faces[i].vertices.size(); j++) + tet_vertices[bsp_faces[i].vertices[j]].is_on_bbox = true; + } + continue; + } + + cdt.clear(); + Plane_3 pln; + constructPlane(i, pln); + MC.initT(pln.orthogonal_vector()); + for (int j = 0; j < bsp_faces[i].edges.size(); j++) { + cdt.insert_constraint(MC.to2d(bsp_vertices[bsp_edges[bsp_faces[i].edges[j]].vertices[0]]), + MC.to2d(bsp_vertices[bsp_edges[bsp_faces[i].edges[j]].vertices[1]])); + } + if(cdt.number_of_vertices() != bsp_faces[i].vertices.size()){ + logger().debug("error: cdt.number_of_vertices() != bsp_faces[i].vertices.size()"); + } + std::map vs_cdt2bsp; + for (int j = 0; j < bsp_faces[i].vertices.size(); j++) { + vs_cdt2bsp[MC.to2d(bsp_vertices[bsp_faces[i].vertices[j]])] = bsp_faces[i].vertices[j];//todo: improve to2d + } + for (CDT::Finite_faces_iterator fit = cdt.finite_faces_begin(); fit != cdt.finite_faces_end(); ++fit) { + cdt_faces[i].push_back(std::array({{vs_cdt2bsp[fit->vertex(0)->point()], + vs_cdt2bsp[fit->vertex(1)->point()], + vs_cdt2bsp[fit->vertex(2)->point()]}})); + if (bsp_faces[i].conn_nodes.size() == 1) { + for (int j = 0; j < bsp_faces[i].vertices.size(); j++) + tet_vertices[bsp_faces[i].vertices[j]].is_on_bbox = true; + } + } + } + + int rounded_cnt = 0; + int all_cnt = 0; + for(unsigned int i=0;i v_ids; + for (int j = 0; j < bsp_nodes[i].faces.size(); j++) { + for (int v_id:bsp_faces[bsp_nodes[i].faces[j]].vertices) { + if (v_id < original_v_size) + v_ids.insert(v_id); + } + } + + std::vector vs; + vs.reserve(v_ids.size()); + for (int v_id:v_ids) + vs.push_back(bsp_vertices[v_id]); + tet_vertices[c_id].pos = CGAL::centroid(vs.begin(), vs.end(), CGAL::Dimension_tag<0>()); + + //insert new tets + int t_cnt = 0; + for (int j = 0; j < bsp_nodes[i].faces.size(); j++) { + for (const std::array &f_ids:cdt_faces[bsp_nodes[i].faces[j]]) { + std::array t = {{c_id, f_ids[0], f_ids[1], f_ids[2]}}; + if (CGAL::orientation(tet_vertices[t[0]].pos, tet_vertices[t[1]].pos, tet_vertices[t[2]].pos, + tet_vertices[t[3]].pos) != CGAL::POSITIVE) { + int tmp = t[1]; + t[1] = t[3]; + t[3] = tmp; + } + tets.push_back(t); + t_cnt++; + } + } + + //round into float + Point_3 old_p = tet_vertices[c_id].pos; + tet_vertices[c_id].posf = Point_3f(CGAL::to_double(old_p[0]), CGAL::to_double(old_p[1]), + CGAL::to_double(old_p[2])); + tet_vertices[c_id].pos = Point_3(tet_vertices[c_id].posf[0], tet_vertices[c_id].posf[1], + tet_vertices[c_id].posf[2]); + int tets_size = tets.size(); + bool is_rounded = true; + for (int j = 
0; j < t_cnt; j++) { + if (CGAL::orientation(tet_vertices[tets[tets_size - 1 - j][0]].pos, + tet_vertices[tets[tets_size - 1 - j][1]].pos, + tet_vertices[tets[tets_size - 1 - j][2]].pos, + tet_vertices[tets[tets_size - 1 - j][3]].pos) != CGAL::POSITIVE) { + is_rounded = false; + break; + } + } + + if (is_rounded) { + tet_vertices[c_id].is_rounded = true; + rounded_cnt++; + } else { + tet_vertices[c_id].pos = old_p; + //todo: calculate a new position + } + } + logger().debug("all_cnt = {}", all_cnt); + logger().debug("rounded_cnt = {}", rounded_cnt); + + logger().debug("CDT {}", tmp_timer.getElapsedTime()); +} + +void SimpleTetrahedralization::labelSurface(const std::vector& m_f_tags, const std::vector& m_e_tags, + const std::vector>& conn_e4v, + std::vector& tet_vertices, std::vector>& tets, + std::vector>& is_surface_fs) { + std::vector &bsp_faces = MC.bsp_faces; + std::vector &bsp_vertices = MC.bsp_vertices; + const std::vector &m_vertices = MC.m_vertices; + const std::vector> &m_faces = MC.m_faces; + + std::vector m_triangles; + m_triangles.reserve(m_faces.size()); + for (unsigned int i = 0; i < m_faces.size(); i++) + m_triangles.push_back(Triangle_3(m_vertices[m_faces[i][0]], m_vertices[m_faces[i][1]], + m_vertices[m_faces[i][2]])); + + std::vector> track_on_faces; + track_on_faces.resize(bsp_vertices.size()); + for (unsigned int i = 0; i < m_faces.size(); i++) { + for (int j = 0; j < 3; j++) { +// tet_vertices[centroid_size+m_faces[i][j]].on_face.insert(i); + tet_vertices[m_faces[i][j]].on_face.insert(i); + } + } + + std::vector> track_on_edges = conn_e4v; + track_on_edges.resize(bsp_vertices.size()); + for (unsigned int i = 0; i < m_vertices.size(); i++) { + for (int j = 0; j < track_on_edges[i].size(); j++) { + if (m_e_tags[track_on_edges[i][j]] >= 0) { +// tet_vertices[centroid_size+i].on_edge.insert(m_e_tags[track_on_edges[i][j]]); + tet_vertices[i].on_edge.insert(m_e_tags[track_on_edges[i][j]]); + } + } + } + + for (unsigned int i = 0; i < bsp_faces.size(); i++) { + for (int j = 0; j < bsp_faces[i].vertices.size(); j++) { + int v_id = bsp_faces[i].vertices[j]; + for (auto it = bsp_faces[i].div_faces.begin(); it != bsp_faces[i].div_faces.end(); it++) + track_on_faces[v_id].push_back(*it); + if (bsp_faces[i].matched_f_id >= 0) + track_on_faces[v_id].push_back(bsp_faces[i].matched_f_id); + } + } + for (unsigned int i = 0; i < track_on_faces.size(); i++) { + std::sort(track_on_faces[i].begin(), track_on_faces[i].end()); + track_on_faces[i].erase(std::unique(track_on_faces[i].begin(), track_on_faces[i].end()), + track_on_faces[i].end()); + } + + for (unsigned int i = 0; i < bsp_vertices.size(); i++) { + for (int j = 0; j < track_on_faces[i].size(); j++) { + if (i < m_vertices.size() && +// std::find(tet_vertices[centroid_size+i].on_face.begin(), tet_vertices[centroid_size+i].on_face.end(), +// track_on_faces[i][j]) != tet_vertices[centroid_size+i].on_face.end()) { + std::find(tet_vertices[i].on_face.begin(), tet_vertices[i].on_face.end(), track_on_faces[i][j]) != tet_vertices[i].on_face.end()) { + continue; + } + if (m_triangles[track_on_faces[i][j]].has_on(bsp_vertices[i])) { + //if bsp_vertices[i] is not a vertex of the triangle +// tet_vertices[centroid_size+i].on_face.insert(track_on_faces[i][j]); + tet_vertices[i].on_face.insert(track_on_faces[i][j]); + + ///check on_edge + std::array v_ids = {{m_faces[track_on_faces[i][j]][0], m_faces[track_on_faces[i][j]][1], + m_faces[track_on_faces[i][j]][2]}}; + std::vector e_ids; + bool is_already_on_edge = false; + for (int k = 0; k < 3; 
k++) { + std::set_intersection(conn_e4v[v_ids[k]].begin(), conn_e4v[v_ids[k]].end(), + conn_e4v[v_ids[(k + 1) % 3]].begin(), conn_e4v[v_ids[(k + 1) % 3]].end(), + std::back_inserter(e_ids)); +// if (std::find(tet_vertices[centroid_size+i].on_edge.begin(), tet_vertices[centroid_size+i].on_edge.end(), +// m_e_tags[e_ids.back()]) != tet_vertices[centroid_size+i].on_edge.end()) { + if (std::find(tet_vertices[i].on_edge.begin(), tet_vertices[i].on_edge.end(), m_e_tags[e_ids.back()]) != tet_vertices[i].on_edge.end()) { + is_already_on_edge = true; + break; + } + } + if (is_already_on_edge) + continue; + + assert(e_ids.size() == 3); + for (int k = 0; k < 3; k++) { + if (m_e_tags[e_ids[k]] < 0) + continue; + Segment_3 seg(m_vertices[v_ids[k]], m_vertices[v_ids[(k + 1) % 3]]); + if (seg.has_on(bsp_vertices[i])) { +// tet_vertices[centroid_size+i].on_edge.insert(m_e_tags[e_ids[k]]); + tet_vertices[i].on_edge.insert(m_e_tags[e_ids[k]]); + break; + } + } + } + } + } + + for(unsigned int i=0;i0) + tet_vertices[i].is_on_surface=true; + } + + ////is face on surface//// + // state.NOT_SURFACE = m_faces.size()+1; + is_surface_fs=std::vector>(tets.size(), + std::array({{state.NOT_SURFACE, state.NOT_SURFACE, state.NOT_SURFACE, state.NOT_SURFACE}})); +// std::vector> is_visited(tets.size(), std::array({{false, false, false, false}})); + + for(unsigned int i = 0; i < tets.size(); i++) { + for (int j = 0; j < 4; j++) { +// if (is_visited[i][j]) +// continue; + + ///mark visited +// int opp_i = getFaceOppoTets(tets[i][(j + 1) % 4], tets[i][(j + 2) % 4], tets[i][(j + 3) % 4], +// i, tet_vertices); +// int opp_i = -1; +// +// int opp_j = 0; +// if (opp_i >= 0) { +// for (int k = 0; k < 4; k++) { +// if (tets[opp_i][k] != tets[i][(j + 1) % 4] && tets[opp_i][k] != tets[i][(j + 2) % 4] +// && tets[opp_i][k] != tets[i][(j + 3) % 4]) { +// opp_j = k; +// break; +// } +// } +// is_visited[opp_i][opp_j] = true; +// } +// is_visited[i][j] = true; + + if (!tet_vertices[tets[i][(j + 1) % 4]].is_on_surface || !tet_vertices[tets[i][(j + 2) % 4]].is_on_surface + || !tet_vertices[tets[i][(j + 3) % 4]].is_on_surface) { + is_surface_fs[i][j] = state.NOT_SURFACE; +// if (opp_i >= 0) +// is_visited[opp_i][opp_j] = state.NOT_SURFACE; + continue; + } + std::unordered_set sf_faces_tmp; + setIntersection(tet_vertices[tets[i][(j + 1) % 4]].on_face, tet_vertices[tets[i][(j + 2) % 4]].on_face, + sf_faces_tmp); + if (sf_faces_tmp.size() == 0) { + is_surface_fs[i][j] = state.NOT_SURFACE; +// if (opp_i >= 0) +// is_visited[opp_i][opp_j] = state.NOT_SURFACE; + continue; + } + std::vector sf_faces; + setIntersection(sf_faces_tmp, tet_vertices[tets[i][(j + 3) % 4]].on_face, sf_faces); + if (sf_faces.size() == 0) { + is_surface_fs[i][j] = state.NOT_SURFACE; +// if (opp_i >= 0) +// is_visited[opp_i][opp_j] = state.NOT_SURFACE; + continue; + } + +// if (tmp.size() > 1) { +// std::array f = {{tets[i][(j + 1) % 4], tets[i][(j + 2) % 4], tets[i][(j + 3) % 4]}}; +// std::sort(f.begin(), f.end()); +// folding_fs.push_back(std::array({{f[0], f[1], f[2], i}})); +// continue; +// } + + ////get the first ori + is_surface_fs[i][j] = 0; + Plane_3 pln(m_vertices[m_faces[sf_faces[0]][0]], m_vertices[m_faces[sf_faces[0]][1]], + m_vertices[m_faces[sf_faces[0]][2]]); + CGAL::Oriented_side side = pln.oriented_side(tet_vertices[tets[i][j]].pos); + + if (side == CGAL::ON_ORIENTED_BOUNDARY) { + log_and_throw("ERROR: side == CGAL::ON_ORIENTED_BOUNDARY!!"); + } + if (side == CGAL::ON_POSITIVE_SIDE)//outside + is_surface_fs[i][j]++; + else//inside + 
is_surface_fs[i][j]--; + + ////if there are more than one sf_faces + int delta = is_surface_fs[i][j]; + if (sf_faces.size() > 1) { + //cal normal vec for [0] +// Vector_3 nv = CGAL::cross_product( +// m_vertices[m_faces[sf_faces[0]][0]] - m_vertices[m_faces[sf_faces[0]][1]], +// m_vertices[m_faces[sf_faces[0]][1]] - m_vertices[m_faces[sf_faces[0]][2]]); +// Direction_3 dir = nv.direction(); + Direction_3 dir = pln.orthogonal_direction(); + + for (int f_id = 1; f_id < sf_faces.size(); f_id++) { + Vector_3 nv1 = CGAL::cross_product( + m_vertices[m_faces[sf_faces[f_id]][0]] - m_vertices[m_faces[sf_faces[f_id]][1]], + m_vertices[m_faces[sf_faces[f_id]][1]] - m_vertices[m_faces[sf_faces[f_id]][2]]); + Direction_3 dir1 = nv1.direction(); + if (dir1 == dir) + is_surface_fs[i][j] += delta; + else + is_surface_fs[i][j] -= delta; +// else { +// logger().debug("wrong direction!!"); +// pausee(); +// } + } + } + +// for (int tri_id : sf_faces) { +// Plane_3 pln(m_vertices[m_faces[tri_id][0]], m_vertices[m_faces[tri_id][1]], +// m_vertices[m_faces[tri_id][2]]); +// CGAL::Oriented_side side = pln.oriented_side(tet_vertices[tets[i][j]].pos); +// +// if (side == CGAL::ON_ORIENTED_BOUNDARY) { +// log_and_throw("ERROR: side == CGAL::ON_ORIENTED_BOUNDARY!!"); +// } +// if (side == CGAL::ON_POSITIVE_SIDE)//outside +// is_surface_fs[i][j]++; +// else//inside +// is_surface_fs[i][j]--; +// } + +// if (opp_i >= 0) +// is_surface_fs[opp_i][opp_j] = -is_surface_fs[i][j]; + } + } + + //tag the surface + for(unsigned int i=0;i tmp; + for(auto it=tet_vertices[i].on_face.begin();it!=tet_vertices[i].on_face.end();it++) + tmp.insert(m_f_tags[*it]); + tet_vertices[i].on_face=tmp; + } +} + +void SimpleTetrahedralization::labelBbox(std::vector& tet_vertices, std::vector>& tets){ + std::vector &bsp_vertices = MC.bsp_vertices; + + //label bbox + std::vector bbx_f_tags; + std::vector bbx_e_tags; + for (int i = 0; i < 6; i++) + bbx_f_tags.push_back(-(i + 1)); + for (int i = 0; i < 12; i++) + bbx_e_tags.push_back(-(i + 1)); + std::vector bbx_faces={{0, 1, + 2, 3, + 4, 5}}; + std::vector bbx_edges={{0, 1, 2, 3, + 4, 5, 6, 7, + 8, 9, 10, 11}}; + + int i=0; + int i0=0, i7=0; + for (int I = 0; I < tet_vertices.size(); I++) { + if (!tet_vertices[I].is_on_bbox) + continue; + if (i < 8) { + tet_vertices[I].on_fixed_vertex = -2 - i; + + std::bitset<3> a(i); + for (int j = 0; j < 3; j++) { + tet_vertices[I].on_face.insert(bbx_f_tags[bbx_faces[j * 2 + a[j]]]); + tet_vertices[I].on_edge.insert(bbx_e_tags[bbx_edges[j * 4 + a[j] * 2 + a[(j + 1) % 3]]]); + } + } else { + //和bbx_v_ids[0], bbx_v_ids[7]比较 + std::array a = {{-1, -1, -1}}; + for (int j = 0; j < 3; j++) { +// if (bsp_vertices[I - centroid_size][j] == bsp_vertices[i0 - centroid_size][j]) + if (bsp_vertices[I][j] == bsp_vertices[i0][j]) + a[j] = 0; +// else if (bsp_vertices[I - centroid_size][j] == bsp_vertices[i7 - centroid_size][j]) + else if (bsp_vertices[I][j] == bsp_vertices[i7][j]) + a[j] = 1; + } + for (int j = 0; j < 3; j++) { + if (a[j] < 0) + continue; + tet_vertices[I].on_face.insert(bbx_f_tags[bbx_faces[j * 2 + a[j]]]); + if (a[(j + 1) % 3] >= 0) { + tet_vertices[I].on_edge.insert(bbx_e_tags[bbx_edges[j * 4 + a[j] * 2 + a[(j + 1) % 3]]]); + } + } + } + if (i == 0) + i0 = I; + else if (i == 7) + i7 = I; + i++; + } + logger().debug("#v on bbox = {}", i); +} + +void SimpleTetrahedralization::labelBoundary(std::vector& tet_vertices, std::vector>& tets, + const std::vector>& is_surface_fs) { + + std::vector> edges_tmp; + for (int i = 0; i < tets.size(); i++) { + for (int j = 
1; j < 4; j++) { + if (tet_vertices[tets[i][0]].is_on_surface && tet_vertices[tets[i][j]].is_on_surface) { + std::array e = {{tets[i][0], tets[i][j]}}; + if (e[1] < e[0]) + e = {{e[1], e[0]}}; + edges_tmp.push_back(e); + } + if (tet_vertices[tets[i][j]].is_on_surface && tet_vertices[tets[i][j % 3 + 1]].is_on_surface) { + std::array e = {{tets[i][j], tets[i][j % 3 + 1]}}; + if (e[1] < e[0]) + e = {{e[1], e[0]}}; + edges_tmp.push_back(e); + } + } + } + std::sort(edges_tmp.begin(), edges_tmp.end()); + edges_tmp.erase(std::unique(edges_tmp.begin(), edges_tmp.end()), edges_tmp.end()); + + for (int i = 0; i < edges_tmp.size(); i++) { + int cnt = 0; + for (int t_id: tet_vertices[edges_tmp[i][0]].conn_tets) { + std::vector opp_js; + for (int j = 0; j < 4; j++) { + if (tets[t_id][j] == edges_tmp[i][0] || tets[t_id][j] == edges_tmp[i][1]) + continue; + opp_js.push_back(j); + } + if (opp_js.size() == 2) { + if (is_surface_fs[t_id][opp_js[0]] != state.NOT_SURFACE) + cnt++; + if (is_surface_fs[t_id][opp_js[1]] != state.NOT_SURFACE) + cnt++; + if (cnt > 2) + break; + } + } + if (cnt == 2) {//is boundary edge + tet_vertices[edges_tmp[i][0]].is_on_boundary = true; + tet_vertices[edges_tmp[i][1]].is_on_boundary = true; + } + } + + int cnt_boundary = 0, cnt_surface = 0; + for (int i = 0; i < tet_vertices.size(); i++) { + if (tet_vertices[i].is_on_boundary) + cnt_boundary++; + if (tet_vertices[i].is_on_surface) + cnt_surface++; + } + logger().debug("{} vertices on boundary", cnt_boundary); + logger().debug("{} vertices on surface", cnt_surface); +} + +void SimpleTetrahedralization::constructPlane(int bsp_f_id, Plane_3& pln) { + pln = Plane_3(MC.bsp_vertices[MC.bsp_faces[bsp_f_id].vertices[0]], + MC.bsp_vertices[MC.bsp_faces[bsp_f_id].vertices[1]], + MC.bsp_vertices[MC.bsp_faces[bsp_f_id].vertices[2]]); + int i = 3; + while (pln.is_degenerate() && i < MC.bsp_faces[bsp_f_id].vertices.size()) { + pln = Plane_3(MC.bsp_vertices[MC.bsp_faces[bsp_f_id].vertices[0]], + MC.bsp_vertices[MC.bsp_faces[bsp_f_id].vertices[1]], + MC.bsp_vertices[MC.bsp_faces[bsp_f_id].vertices[i++]]); + } + assert(!(pln.is_degenerate())); +} + +} // namespace tetwild diff --git a/contrib/NeRF-Editing/TetWild/src/tetwild/SimpleTetrahedralization.h b/contrib/NeRF-Editing/TetWild/src/tetwild/SimpleTetrahedralization.h new file mode 100644 index 00000000..9397d030 --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/src/tetwild/SimpleTetrahedralization.h @@ -0,0 +1,49 @@ +// This file is part of TetWild, a software for generating tetrahedral meshes. +// +// Copyright (C) 2018 Yixin Hu +// +// This Source Code Form is subject to the terms of the Mozilla Public License +// v. 2.0. If a copy of the MPL was not distributed with this file, You can +// obtain one at http://mozilla.org/MPL/2.0/. +// +// Created by Yixin Hu on 3/31/17. 
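+// SimpleTetrahedralization converts the BSP partition built by MeshConformer into the
+// initial tetrahedral mesh and labels its surface / bounding-box / boundary elements.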
+// + +#ifndef NEW_GTET_SIMPLETETRAHEDRALIZATION_H +#define NEW_GTET_SIMPLETETRAHEDRALIZATION_H + +#include +#include + +namespace tetwild { + +class SimpleTetrahedralization { +public: + const State & state; + MeshConformer& MC; + std::vector centers; + + //useful infos +// int centroid_size; + int m_vertices_size; +// std::vector is_visited; + + SimpleTetrahedralization(const State &st, MeshConformer& mc) : state(st), MC(mc) { } + + void tetra(std::vector& tet_vertices, std::vector>& tets); + void triangulation(std::vector& tet_vertices, std::vector>& tets); + + void labelSurface(const std::vector& m_f_tags, const std::vector& m_e_tags, + const std::vector>& conn_e4v, + std::vector& tet_vertices, std::vector>& tets, + std::vector>& is_surface_fs); + void labelBbox(std::vector& tet_vertices, std::vector>& tets); + void labelBoundary(std::vector& tet_vertices, std::vector>& tets, + const std::vector>& is_surface_fs); + + void constructPlane(int bsp_f_id, Plane_3& pln); +}; + +} // namespace tetwild + +#endif //NEW_GTET_SIMPLETETRAHEDRALIZATION_H diff --git a/contrib/NeRF-Editing/TetWild/src/tetwild/State.cpp b/contrib/NeRF-Editing/TetWild/src/tetwild/State.cpp new file mode 100644 index 00000000..547c02db --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/src/tetwild/State.cpp @@ -0,0 +1,48 @@ +// This file is part of TetWild, a software for generating tetrahedral meshes. +// +// Copyright (C) 2018 Jeremie Dumas +// +// This Source Code Form is subject to the terms of the Mozilla Public License +// v. 2.0. If a copy of the MPL was not distributed with this file, You can +// obtain one at http://mozilla.org/MPL/2.0/. +// +// Created by Jeremie Dumas on 09/04/18. +// + +#include +#include +#include +#include + +namespace tetwild { + +State::State(const Args &args, const Eigen::MatrixXd &V) + : working_dir(args.working_dir) + , postfix(args.postfix) + , stat_file(args.csv_file) + , bbox_diag(igl::bounding_box_diagonal(V)) + , eps_input(bbox_diag * args.eps_rel) + , eps_delta(args.sampling_dist_rel > 0 ? 0 : eps_input / args.stage / std::sqrt(3)) + , initial_edge_len(args.getAbsoluteEdgeLength(bbox_diag)) +{ + if (args.sampling_dist_rel > 0) { + //for testing only + sampling_dist = bbox_diag * args.sampling_dist_rel / 100.0; + eps = bbox_diag * args.eps_rel; + eps_2 = eps * eps; + if (args.stage != 1) { + throw TetWildError("args.stage should be equal to 1."); + } + } else { + // d_err = d/sqrt(3) + sampling_dist = eps_input / args.stage; + eps = eps_input - sampling_dist / std::sqrt(3) * (args.stage + 1 - sub_stage); + eps_2 = eps * eps; + // eps_delta = sampling_dist / std::sqrt(3); + } + + // logger().debug("eps = {}", eps); + // logger().debug("ideal_l = {}", initial_edge_len); +} + +} // namespace tetwild diff --git a/contrib/NeRF-Editing/TetWild/src/tetwild/State.h b/contrib/NeRF-Editing/TetWild/src/tetwild/State.h new file mode 100644 index 00000000..0d340223 --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/src/tetwild/State.h @@ -0,0 +1,122 @@ +// This file is part of TetWild, a software for generating tetrahedral meshes. +// +// Copyright (C) 2018 Jeremie Dumas +// +// This Source Code Form is subject to the terms of the Mozilla Public License +// v. 2.0. If a copy of the MPL was not distributed with this file, You can +// obtain one at http://mozilla.org/MPL/2.0/. +// +// Created by Jeremie Dumas on 09/04/18. 
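+// Declares the State struct (global constants and envelope parameters shared by the
+// meshing stages) and the MeshRecord used for per-operation statistics.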
+// + +#pragma once + +#include +#include +#include +#include + +namespace tetwild { + +// Global values computed from the user input +struct State { + const int EPSILON_INFINITE=-2; + const int EPSILON_NA=-1; + const int ENERGY_NA=0; + const int ENERGY_AD=1; + const int ENERGY_AMIPS=2; + const int ENERGY_DIRICHLET=3; + const double MAX_ENERGY = 1e50; + const int NOT_SURFACE = std::numeric_limits::max(); + + // paths used for i/o + const std::string working_dir; + const std::string stat_file; + const std::string postfix; + + double bbox_diag = 0; // bbox diagonal + double eps = 0; // effective epsilon at the current stage (see \hat{\epsilon} in the paper) + double eps_2 = 0; + double sampling_dist = 0; // sampling distance for triangles at the current stage (see d_k p.8 of the paper) + double initial_edge_len = 0; // initial target edge-length defined by the user (the final lengths can be lower, depending on mesh quality and feature size) + bool is_mesh_closed = 0; // open mesh or closed mesh? + + const double eps_input = 0; // target epsilon entered by the user + const double eps_delta = 0; // increment for the envelope at each sub-stage of the mesh optimization (see (3) p.8 of the paper) + int sub_stage = 1; // sub-stage within the stage that tetwild was called with + + /////////////// + // [testing] // + /////////////// + + // Whether to use the max or the total energy when checking improvements in local operations + const bool use_energy_max = true; + + // Use sampling to determine whether a face lies outside the envelope during mesh optimization + // (if false, then only its vertices are tested) + const bool use_sampling = true; + + // Project vertices to the plane of their one-ring instead of the original surface during vertex smoothing + const bool use_onering_projection = false; + + // [debug] + const bool is_print_tmp = false; + + // Set program constants given user parameters and input mesh + State(const Args &args, const Eigen::MatrixXd &V); +}; + + +struct MeshRecord { + enum OpType { + OP_INIT = 0, + OP_PREPROCESSING, + OP_DELAUNEY_TETRA, + OP_DIVFACE_MATCH, + OP_BSP, + OP_SIMPLE_TETRA, + + OP_OPT_INIT, + OP_SPLIT, + OP_COLLAPSE, + OP_SWAP, + OP_SMOOTH, + OP_ADAP_UPDATE, + OP_WN, + OP_UNROUNDED + }; + + int op; + double timing; + int n_v; + int n_t; + double min_min_d_angle = -1; + double avg_min_d_angle = -1; + double max_max_d_angle = -1; + double avg_max_d_angle = -1; + double max_energy = -1; + double avg_energy = -1; + + MeshRecord(int op_, double timing_, int n_v_, int n_t_, double min_min_d_angle_, double avg_min_d_angle_, + double max_max_d_angle_, double avg_max_d_angle_, double max_energy_, double avg_energy_) { + this->op = op_; + this->timing = timing_; + this->n_v = n_v_; + this->n_t = n_t_; + this->min_min_d_angle = min_min_d_angle_; + this->avg_min_d_angle = avg_min_d_angle_; + this->max_max_d_angle = max_max_d_angle_; + this->avg_max_d_angle = avg_max_d_angle_; + this->max_energy = max_energy_; + this->avg_energy = avg_energy_; + } + + MeshRecord(int op_, double timing_, int n_v_, int n_t_) { + this->op = op_; + this->timing = timing_; + this->n_v = n_v_; + this->n_t = n_t_; + } +}; + +} // namespace tetwild diff --git a/contrib/NeRF-Editing/TetWild/src/tetwild/TetmeshElements.cpp b/contrib/NeRF-Editing/TetWild/src/tetwild/TetmeshElements.cpp new file mode 100644 index 00000000..3b1690de --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/src/tetwild/TetmeshElements.cpp @@ -0,0 +1,48 @@ +// This file is part of TetWild, a software for generating tetrahedral 
meshes. +// +// Copyright (C) 2018 Jeremie Dumas +// +// This Source Code Form is subject to the terms of the Mozilla Public License +// v. 2.0. If a copy of the MPL was not distributed with this file, You can +// obtain one at http://mozilla.org/MPL/2.0/. +// +// Created by Jeremie Dumas on 09/10/18. +// + +#include +#include +#include + +namespace tetwild { + +void TetVertex::printInfo() const { + logger().debug("is_on_surface = {}", is_on_surface); + logger().debug("is_on_bbox = {}", is_on_bbox); + logger().debug("conn_tets = {}", conn_tets); +} + +void Stage::serialize(std::string serialize_file) { + igl::serialize(tet_vertices, "tet_vertices", serialize_file, true); + igl::serialize(tets, "tets", serialize_file); + igl::serialize(is_surface_fs, "tets", serialize_file); + igl::serialize(v_is_removed, "v_is_removed", serialize_file); + igl::serialize(t_is_removed, "t_is_removed", serialize_file); + igl::serialize(tet_qualities, "tet_qualities", serialize_file); + + igl::serialize(is_shown, "is_shown", serialize_file); + igl::serialize(resolution, "resolution", serialize_file); +} + +void Stage::deserialize(std::string serialize_file) { + igl::deserialize(tet_vertices, "tet_vertices", serialize_file); + igl::deserialize(tets, "tets", serialize_file); + igl::deserialize(is_surface_fs, "tets", serialize_file); + igl::deserialize(v_is_removed, "v_is_removed", serialize_file); + igl::deserialize(t_is_removed, "t_is_removed", serialize_file); + igl::deserialize(tet_qualities, "tet_qualities", serialize_file); + + igl::deserialize(is_shown, "is_shown", serialize_file); + igl::deserialize(resolution, "resolution", serialize_file); +} + +} // namespace tetwild diff --git a/contrib/NeRF-Editing/TetWild/src/tetwild/TetmeshElements.h b/contrib/NeRF-Editing/TetWild/src/tetwild/TetmeshElements.h new file mode 100644 index 00000000..b1997d4d --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/src/tetwild/TetmeshElements.h @@ -0,0 +1,140 @@ +// This file is part of TetWild, a software for generating tetrahedral meshes. +// +// Copyright (C) 2018 Yixin Hu +// +// This Source Code Form is subject to the terms of the Mozilla Public License +// v. 2.0. If a copy of the MPL was not distributed with this file, You can +// obtain one at http://mozilla.org/MPL/2.0/. +// +// Created by yihu on 8/22/17. +// + +#ifndef NEW_GTET_TETMESHELEMENTS_H +#define NEW_GTET_TETMESHELEMENTS_H + +#include +#include +#include + +namespace tetwild { + +//const int ON_SURFACE_FALSE = 0;//delete +//const int ON_SURFACE_TRUE_INSIDE = 1;//delete +//const int ON_SURFACE_TRUE_OUTSIDE = 2;//delete +class TetVertex { +public: + Point_3 pos;//todo: how to remove it? 
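+    // exact (rational) coordinates; the double approximation posf below is only trusted once is_rounded is set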
+ + ///for surface conforming + int on_fixed_vertex = -1; + std::unordered_set on_edge;//fixed points can be on more than one edges + std::unordered_set on_face; + bool is_on_surface = false; + + ///for local operations + std::unordered_set conn_tets; + + ///for hybrid rationals + Point_3f posf; + bool is_rounded = false; + + void round() { + posf = Point_3f(CGAL::to_double(pos[0]), CGAL::to_double(pos[1]), CGAL::to_double(pos[2])); + } + + ///for bbox + bool is_on_bbox = false; + + ///for boundary + bool is_on_boundary = false; + + //for adaptive refinement + double adaptive_scale = 1.0; + + TetVertex() = default; + + TetVertex(const Point_3& p) { + pos = p; + } + + void printInfo() const; + + bool is_locked = false; + bool is_inside = false; +}; + +class TetQuality { +public: + double min_d_angle = 0; + double max_d_angle = 0; +// double asp_ratio_2; + + double slim_energy = 0; + double volume = 0; + + TetQuality() = default; + TetQuality(double d_min, double d_max, double r) + : min_d_angle(d_min), max_d_angle(d_max) + { } + +// bool operator < (const TetQuality& tq) { +// if (min_d_angle < tq.min_d_angle) +// return true; +// if (max_d_angle > tq.max_d_angle) +// return true; +// if (asp_ratio_2 > tq.asp_ratio_2) +// return true; +// return false; +// } + + bool isBetterThan(const TetQuality& tq, int energy_type, const State &state) { + if (energy_type == state.ENERGY_AMIPS || energy_type == state.ENERGY_DIRICHLET) { + return slim_energy < tq.slim_energy; + } + else if (energy_type == state.ENERGY_AD) { + return min_d_angle > tq.min_d_angle && max_d_angle < tq.max_d_angle; + } + else + return false; + } + + bool isBetterOrEqualThan(const TetQuality& tq, int energy_type, const State &state) { + if (energy_type == state.ENERGY_AMIPS || energy_type == state.ENERGY_DIRICHLET) { + return slim_energy <= tq.slim_energy; + } + else if (energy_type == state.ENERGY_AD) { + return min_d_angle >= tq.min_d_angle && max_d_angle <= tq.max_d_angle; + } + else + return false; + } +}; + +///for visualization +class Stage { +public: + std::vector tet_vertices; + std::vector> tets; + std::vector> is_surface_fs; + std::vector t_is_removed; + std::vector v_is_removed; + std::vector tet_qualities; + + std::vector is_shown; + double resolution; + + Stage() = default; + Stage(const std::vector& tet_vs, const std::vector>& ts, + const std::vector>& is_sf_fs, + const std::vector& v_is_rd, const std::vector& t_is_rd, const std::vector& tet_qs) + : tet_vertices(tet_vs), tets(ts), is_surface_fs(is_sf_fs) + , v_is_removed(v_is_rd), t_is_removed(t_is_rd), tet_qualities(tet_qs) + { } + + void serialize(std::string serialize_file); + void deserialize(std::string serialize_file); +}; + +} // namespace tetwild + +#endif //NEW_GTET_TETMESHELEMENTS_H diff --git a/contrib/NeRF-Editing/TetWild/src/tetwild/VertexSmoother.cpp b/contrib/NeRF-Editing/TetWild/src/tetwild/VertexSmoother.cpp new file mode 100644 index 00000000..499fa873 --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/src/tetwild/VertexSmoother.cpp @@ -0,0 +1,882 @@ +// This file is part of TetWild, a software for generating tetrahedral meshes. +// +// Copyright (C) 2018 Yixin Hu +// +// This Source Code Form is subject to the terms of the Mozilla Public License +// v. 2.0. If a copy of the MPL was not distributed with this file, You can +// obtain one at http://mozilla.org/MPL/2.0/. +// +// Created by Yixin Hu on 4/11/17. 
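+// VertexSmoother improves element quality by Newton-based smoothing of interior vertices and by
+// re-projecting surface vertices onto the input surface (or their one-ring) before accepting a move.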
+// + +#include +#include +#include +#include + +namespace tetwild { + +void VertexSmoother::smooth() { + tets_tss = std::vector(tets.size(), 1); + tet_vertices_tss = std::vector(tet_vertices.size(), 0); + ts = 1; + + igl::Timer tmp_timer0; + int max_pass = 1; + double v_cnt = std::count(v_is_removed.begin(), v_is_removed.end(), false); + for (int i = 0; i < max_pass; i++) { + double suc_in = 0; + double suc_surface = 0; + smoothSingle(); + suc_in = suc_counter; + if (state.eps >= 0) { + smoothSurface(); + suc_surface = suc_counter; + } + logger().debug("{}", (suc_in + suc_surface) / v_cnt); + if (suc_in + suc_surface < v_cnt * 0.1) { + logger().debug("{}", i); + break; + } + } + for (int i = 0; i < breakdown_timing.size(); i++) { + logger().debug("{}: {}s", breakdown_name[i], breakdown_timing[i]); + breakdown_timing[i] = 0;//reset + } +} + +bool VertexSmoother::smoothSingleVertex(int v_id, bool is_cal_energy){ + std::vector> new_tets; + std::vector t_ids; + for (int t_id:tet_vertices[v_id].conn_tets) { + new_tets.push_back(tets[t_id]); + t_ids.push_back(t_id); + } + + ///try to round the vertex + if(!tet_vertices[v_id].is_rounded) { + Point_3 old_p = tet_vertices[v_id].pos; + tet_vertices[v_id].pos = Point_3(tet_vertices[v_id].posf[0], tet_vertices[v_id].posf[1], + tet_vertices[v_id].posf[2]); + if(isFlip(new_tets)) + tet_vertices[v_id].pos = old_p; + else + tet_vertices[v_id].is_rounded = true; + } + + ///check if should use exact smoothing + bool is_valid = true; + for (auto it = tet_vertices[v_id].conn_tets.begin(); it != tet_vertices[v_id].conn_tets.end(); it++) { + CGAL::Orientation ori = CGAL::orientation(tet_vertices[tets[*it][0]].posf, tet_vertices[tets[*it][1]].posf, + tet_vertices[tets[*it][2]].posf, tet_vertices[tets[*it][3]].posf); + if (ori != CGAL::POSITIVE) { + is_valid = false; + break; + } + } + if (!is_valid) { + return false; + } else { + Point_3f pf; + if(energy_type == state.ENERGY_AMIPS) { + if (!NewtonsMethod(t_ids, new_tets, v_id, pf)) + return false; + } + + //assign new coordinate and try to round it + Point_3 old_p = tet_vertices[v_id].pos; + Point_3f old_pf = tet_vertices[v_id].posf; + bool old_is_rounded = tet_vertices[v_id].is_rounded; + Point_3 p = Point_3(pf[0], pf[1], pf[2]); + tet_vertices[v_id].pos = p; + tet_vertices[v_id].posf = pf; + tet_vertices[v_id].is_rounded = true; + if (isFlip(new_tets)) {//TODO: why it happens? 
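+        // defensive re-check: if the accepted position still inverts an incident tet, roll the vertex back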
+ logger().debug("flip in the end"); + tet_vertices[v_id].pos = old_p; + tet_vertices[v_id].posf = old_pf; + tet_vertices[v_id].is_rounded = old_is_rounded; + } + } + + if(is_cal_energy){ + std::vector tet_qs; + calTetQualities(new_tets, tet_qs); + int cnt = 0; + for (int t_id:tet_vertices[v_id].conn_tets) { + tet_qualities[t_id] = tet_qs[cnt++]; + } + } + + return true; +} + +void VertexSmoother::smoothSingle() { + double old_ts = ts; + counter = 0; + suc_counter = 0; + for (int v_id = 0; v_id < tet_vertices.size(); v_id++) { + if (v_is_removed[v_id]) + continue; + if (tet_vertices[v_id].is_on_bbox) + continue; + if (state.eps != state.EPSILON_INFINITE && tet_vertices[v_id].is_on_surface) + continue; + + if (tet_vertices[v_id].is_locked) + continue; + + ///check if its one-ring is changed +// bool is_changed=false; +// for(auto it=tet_vertices[v_id].conn_tets.begin();it!=tet_vertices[v_id].conn_tets.end();it++){ +// if(tets_tss[*it]>tet_vertices_tss[v_id]){ +// is_changed=true; +// break; +// } +// } +// if(!is_changed) +// continue; + + counter++; + +#if TIMING_BREAKDOWN + igl_timer.start(); +#endif + std::vector> new_tets; + std::vector t_ids; + for (auto it = tet_vertices[v_id].conn_tets.begin(); it != tet_vertices[v_id].conn_tets.end(); it++) { + new_tets.push_back(tets[*it]); + t_ids.push_back(*it); + } + + ///try to round the vertex + if (!tet_vertices[v_id].is_rounded) { + Point_3 old_p = tet_vertices[v_id].pos; + tet_vertices[v_id].pos = Point_3(tet_vertices[v_id].posf[0], tet_vertices[v_id].posf[1], + tet_vertices[v_id].posf[2]); + if (isFlip(new_tets)) + tet_vertices[v_id].pos = old_p; + else + tet_vertices[v_id].is_rounded = true; + } + + ///check if should use exact smoothing + bool is_valid = true; + for (auto it = tet_vertices[v_id].conn_tets.begin(); it != tet_vertices[v_id].conn_tets.end(); it++) { + CGAL::Orientation ori = CGAL::orientation(tet_vertices[tets[*it][0]].posf, tet_vertices[tets[*it][1]].posf, + tet_vertices[tets[*it][2]].posf, tet_vertices[tets[*it][3]].posf); + if (ori != CGAL::POSITIVE) { + is_valid = false; + break; + } + } +#if TIMING_BREAKDOWN + breakdown_timing[id_round] += igl_timer.getElapsedTime(); +#endif + + if (!is_valid) { + continue; + } else { + Point_3f pf; + if (energy_type == state.ENERGY_AMIPS) { + if (!NewtonsMethod(t_ids, new_tets, v_id, pf)) + continue; + } +#if TIMING_BREAKDOWN + igl_timer.start(); +#endif + //assign new coordinate and try to round it + Point_3 old_p = tet_vertices[v_id].pos; + Point_3f old_pf = tet_vertices[v_id].posf; + bool old_is_rounded = tet_vertices[v_id].is_rounded; + Point_3 p = Point_3(pf[0], pf[1], pf[2]); + tet_vertices[v_id].pos = p; + tet_vertices[v_id].posf = pf; + tet_vertices[v_id].is_rounded = true; + if (isFlip(new_tets)) {//TODO: why it happens? 
+ logger().debug("flip in the end"); + tet_vertices[v_id].pos = old_p; + tet_vertices[v_id].posf = old_pf; + tet_vertices[v_id].is_rounded = old_is_rounded; + } +#if TIMING_BREAKDOWN + breakdown_timing[id_round] += igl_timer.getElapsedTime(); +#endif + } + + ///update timestamps + ts++; + for(auto it=tet_vertices[v_id].conn_tets.begin();it!=tet_vertices[v_id].conn_tets.end();it++) + tets_tss[*it]=ts; + tet_vertices_tss[v_id]=ts; + + suc_counter++; + } + + //calculate the quality for all tets + std::vector> new_tets;//todo: can be improve + new_tets.reserve(std::count(t_is_removed.begin(), t_is_removed.end(), false)); + for (int i = 0; i < tets.size(); i++) { + if (t_is_removed[i]) + continue; +// if(tets_tss[i]<=old_ts) +// continue; + new_tets.push_back(tets[i]); + } + std::vector tet_qs; +#if TIMING_BREAKDOWN + igl_timer.start(); +#endif + calTetQualities(new_tets, tet_qs); +#if TIMING_BREAKDOWN + breakdown_timing[id_value_e] += igl_timer.getElapsedTime(); +#endif + int cnt = 0; + for (int i = 0; i < tets.size(); i++) { + if (t_is_removed[i]) + continue; +// if(tets_tss[i]<=old_ts) +// continue; + tet_qualities[i] = tet_qs[cnt++]; + } +} + +void VertexSmoother::smoothSurface() {//smoothing surface using two methods +// suc_counter = 0; +// counter = 0; + int sf_suc_counter = 0; + int sf_counter = 0; + + for (int v_id = 0; v_id < tet_vertices.size(); v_id++) { + if (v_is_removed[v_id]) + continue; + if (!tet_vertices[v_id].is_on_surface) + continue; + + if (tet_vertices[v_id].is_locked) + continue; + + if (isIsolated(v_id)) { + tet_vertices[v_id].is_on_surface = false; + tet_vertices[v_id].is_on_boundary = false; + tet_vertices[v_id].on_fixed_vertex = -1; + tet_vertices[v_id].on_face.clear(); + tet_vertices[v_id].on_edge.clear(); + continue; + } + if (!isBoundaryPoint(v_id)) + tet_vertices[v_id].is_on_boundary = false; + + counter++; + sf_counter++; + + std::vector> new_tets; + std::vector old_t_ids; + for (auto it = tet_vertices[v_id].conn_tets.begin(); it != tet_vertices[v_id].conn_tets.end(); it++) { + new_tets.push_back(tets[*it]); + old_t_ids.push_back(*it); + } + + if (!tet_vertices[v_id].is_rounded) { + Point_3 old_p = tet_vertices[v_id].pos; + tet_vertices[v_id].pos = Point_3(tet_vertices[v_id].posf[0], tet_vertices[v_id].posf[1], + tet_vertices[v_id].posf[2]); + if (isFlip(new_tets)) + tet_vertices[v_id].pos = old_p; + else + tet_vertices[v_id].is_rounded = true; + } + + bool is_valid = true; + for (auto it = tet_vertices[v_id].conn_tets.begin(); it != tet_vertices[v_id].conn_tets.end(); it++) { + CGAL::Orientation ori = CGAL::orientation(tet_vertices[tets[*it][0]].posf, tet_vertices[tets[*it][1]].posf, + tet_vertices[tets[*it][2]].posf, tet_vertices[tets[*it][3]].posf); + if (ori != CGAL::POSITIVE) { + is_valid = false; + break; + } + } + + Point_3 p_out; + Point_3f pf_out; + if (!is_valid) { + continue; + } else { + if (energy_type == state.ENERGY_AMIPS) { + if (!NewtonsMethod(old_t_ids, new_tets, v_id, pf_out)) + continue; + } + p_out = Point_3(pf_out[0], pf_out[1], pf_out[2]); + } + + ///find one-ring surface faces +#if TIMING_BREAKDOWN + igl_timer.start(); +#endif + std::vector> tri_ids; + for (auto it = tet_vertices[v_id].conn_tets.begin(); it != tet_vertices[v_id].conn_tets.end(); it++) { + for (int j = 0; j < 4; j++) { + if (tets[*it][j] != v_id && is_surface_fs[*it][j] != state.NOT_SURFACE) { + std::array tri = {{tets[*it][(j + 1) % 4], tets[*it][(j + 2) % 4], tets[*it][(j + 3) % 4]}}; + std::sort(tri.begin(), tri.end()); + tri_ids.push_back(tri); + } + } + } + 
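+            // deduplicate the one-ring surface triangles collected from the incident tets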
std::sort(tri_ids.begin(), tri_ids.end()); + tri_ids.erase(std::unique(tri_ids.begin(), tri_ids.end()), tri_ids.end()); + + Point_3f pf; + Point_3 p; + if (state.use_onering_projection) {//we have to use exact construction here. Or the projecting points may be not exactly on the plane. + std::vector tris; + for (int i = 0; i < tri_ids.size(); i++) { + tris.push_back(Triangle_3(tet_vertices[tri_ids[i][0]].pos, tet_vertices[tri_ids[i][1]].pos, + tet_vertices[tri_ids[i][2]].pos)); + } + + is_valid = false; + for (int i = 0; i < tris.size(); i++) { + if (tris[i].is_degenerate()) + continue; + Plane_3 pln = tris[i].supporting_plane(); + p = pln.projection(p_out); + if (tris[i].has_on(p)) { + is_valid = true; + break; + } + } + if (!is_valid) + continue; + pf = Point_3f(CGAL::to_double(p[0]), CGAL::to_double(p[1]), CGAL::to_double(p[2])); + p = Point_3(pf[0], pf[1], pf[2]); + } else { + GEO::vec3 geo_pf(pf_out[0], pf_out[1], pf_out[2]); + GEO::vec3 nearest_pf; + double _; + if (tet_vertices[v_id].is_on_boundary) + geo_b_tree.nearest_facet(geo_pf, nearest_pf, _); + else + geo_sf_tree.nearest_facet(geo_pf, nearest_pf, _); + pf = Point_3f(nearest_pf[0], nearest_pf[1], nearest_pf[2]); + p = Point_3(nearest_pf[0], nearest_pf[1], nearest_pf[2]); + } +#if TIMING_BREAKDOWN + breakdown_timing[id_project] += igl_timer.getElapsedTime(); +#endif + + Point_3 old_p = tet_vertices[v_id].pos; + Point_3f old_pf = tet_vertices[v_id].posf; + std::vector tet_qs; + bool is_found = false; + + tet_vertices[v_id].posf = pf; + tet_vertices[v_id].pos = p; + if (isFlip(new_tets)) { + tet_vertices[v_id].pos = old_p; + tet_vertices[v_id].posf = old_pf; + continue; + } + TetQuality old_tq, new_tq; + getCheckQuality(old_t_ids, old_tq); + calTetQualities(new_tets, tet_qs); + getCheckQuality(tet_qs, new_tq); + if (!new_tq.isBetterThan(old_tq, energy_type, state)) { + tet_vertices[v_id].pos = old_p; + tet_vertices[v_id].posf = old_pf; + continue; + } + is_found = true; + + if (!is_found) { + tet_vertices[v_id].pos = old_p; + tet_vertices[v_id].posf = old_pf; + continue; + } + +#if TIMING_BREAKDOWN + igl_timer.start(); +#endif + ///check if the boundary is sliding + if (tet_vertices[v_id].is_on_boundary) { + if (isBoundarySlide(v_id, -1, old_pf)) { + tet_vertices[v_id].pos = old_p; + tet_vertices[v_id].posf = old_pf; +#if TIMING_BREAKDOWN + breakdown_timing[id_aabb] += igl_timer.getElapsedTime(); +#endif + continue; + } + } + + ///check if tris outside the envelop + std::vector trisf; + for (int i = 0; i < tri_ids.size(); i++) { + auto jt = std::find(tri_ids[i].begin(), tri_ids[i].end(), v_id); + int k = jt - tri_ids[i].begin(); + Triangle_3f tri(Point_3f(CGAL::to_double(p[0]), CGAL::to_double(p[1]), CGAL::to_double(p[2])), + tet_vertices[tri_ids[i][(k + 1) % 3]].posf, tet_vertices[tri_ids[i][(k + 2) % 3]].posf); + if (!tri.is_degenerate()) + trisf.push_back(tri); + } + + is_valid = true; + for (int i = 0; i < trisf.size(); i++) { + if (isFaceOutEnvelop(trisf[i])) { + is_valid = false; + break; + } + } +#if TIMING_BREAKDOWN + breakdown_timing[id_aabb] += igl_timer.getElapsedTime(); +#endif + if (!is_valid) { + tet_vertices[v_id].pos = old_p; + tet_vertices[v_id].posf = old_pf; + continue; + } + + ///real update + ///update timestamps + ts++; + for (auto it = tet_vertices[v_id].conn_tets.begin(); it != tet_vertices[v_id].conn_tets.end(); it++) + tets_tss[*it] = ts; + tet_vertices_tss[v_id] = ts; + + if (!tet_vertices[v_id].is_rounded) { + tet_vertices[v_id].pos = Point_3(pf[0], pf[1], pf[2]); + if (isFlip(new_tets)) { + 
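+                // rounding would create an inverted tet: fall back to the previous exact position and leave the vertex unrounded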
tet_vertices[v_id].pos = old_p; + tet_vertices[v_id].is_rounded = false; + } else + tet_vertices[v_id].is_rounded = true; + } + for (int i = 0; i < old_t_ids.size(); i++) + tet_qualities[old_t_ids[i]] = tet_qs[i]; + + suc_counter++; + sf_suc_counter++; + if (sf_suc_counter % 1000 == 0) + logger().debug("1000 accepted!"); + } + logger().debug("Totally {}({}) vertices on surface are smoothed.", sf_suc_counter, sf_counter); +} + +bool VertexSmoother::NewtonsMethod(const std::vector& t_ids, const std::vector>& new_tets, + int v_id, Point_3f& p) { +// bool is_moved=true; + bool is_moved = false; + const int MAX_STEP = 15; + const int MAX_IT = 20; + Point_3f pf0 = tet_vertices[v_id].posf; + Point_3 p0 = tet_vertices[v_id].pos; + + double old_energy = 0; + Eigen::Vector3d J; + Eigen::Matrix3d H; + Eigen::Vector3d X0; + for (int step = 0; step < MAX_STEP; step++) { + if (NewtonsUpdate(t_ids, v_id, old_energy, J, H, X0) == false) + break; + Point_3f old_pf = tet_vertices[v_id].posf; + Point_3 old_p = tet_vertices[v_id].pos; + double a = 1; + bool step_taken = false; + double new_energy; + + for (int it = 0; it < MAX_IT; it++) { + //solve linear system + //check flip + //check energy + igl_timer.start(); + Eigen::Vector3d X = H.colPivHouseholderQr().solve(H * X0 - a * J); + breakdown_timing[id_solve] += igl_timer.getElapsedTime(); + if (!X.allFinite()) { + a /= 2.0; + continue; + } + + tet_vertices[v_id].posf = Point_3f(X(0), X(1), X(2)); + tet_vertices[v_id].pos = Point_3(X(0), X(1), X(2)); +// tet_vertices[v_id].is_rounded=true;//need to remember old value? + + //check flipping + if (isFlip(new_tets)) { + tet_vertices[v_id].posf = old_pf; + tet_vertices[v_id].pos = old_p; + a /= 2.0; + continue; + } + + //check quality + igl_timer.start(); + new_energy = getNewEnergy(t_ids); + breakdown_timing[id_value_e] += igl_timer.getElapsedTime(); + if (new_energy >= old_energy || std::isinf(new_energy) || std::isnan(new_energy)) { + tet_vertices[v_id].posf = old_pf; + tet_vertices[v_id].pos = old_p; + a /= 2.0; + continue; + } + + step_taken = true; + break; + } + if (std::abs(new_energy - old_energy) < 1e-5) + step_taken = false; + + if (!step_taken) { + if (step == 0) + is_moved = false; + else + is_moved = true; + break; + } else + is_moved = true; + } + p = tet_vertices[v_id].posf; + tet_vertices[v_id].posf = pf0; + tet_vertices[v_id].pos = p0; + + return is_moved; +} + +double VertexSmoother::getNewEnergy(const std::vector& t_ids) { + double s_energy = 0; + +#ifdef TETWILD_WITH_ISPC + int n = t_ids.size(); + + static thread_local std::vector T0; + static thread_local std::vector T1; + static thread_local std::vector T2; + static thread_local std::vector T3; + static thread_local std::vector T4; + static thread_local std::vector T5; + static thread_local std::vector T6; + static thread_local std::vector T7; + static thread_local std::vector T8; + static thread_local std::vector T9; + static thread_local std::vector T10; + static thread_local std::vector T11; + static thread_local std::vector energy; + + if (T0.empty()) { + // logger().trace("Initial ISPC allocation: n = {}", n); + } else if (T0.size() != n) { + // logger().trace("ISPC reallocation: n = {}", n); + } + + T0.resize(n); + T1.resize(n); + T2.resize(n); + T3.resize(n); + T4.resize(n); + T5.resize(n); + T6.resize(n); + T7.resize(n); + T8.resize(n); + T9.resize(n); + T10.resize(n); + T11.resize(n); + energy.resize(n); + + for (int i = 0; i < n; i++) { + T0[i] = tet_vertices[tets[t_ids[i]][0]].posf[0]; + T1[i] = 
tet_vertices[tets[t_ids[i]][0]].posf[1]; + T2[i] = tet_vertices[tets[t_ids[i]][0]].posf[2]; + T3[i] = tet_vertices[tets[t_ids[i]][1]].posf[0]; + T4[i] = tet_vertices[tets[t_ids[i]][1]].posf[1]; + T5[i] = tet_vertices[tets[t_ids[i]][1]].posf[2]; + T6[i] = tet_vertices[tets[t_ids[i]][2]].posf[0]; + T7[i] = tet_vertices[tets[t_ids[i]][2]].posf[1]; + T8[i] = tet_vertices[tets[t_ids[i]][2]].posf[2]; + T9[i] = tet_vertices[tets[t_ids[i]][3]].posf[0]; + T10[i] = tet_vertices[tets[t_ids[i]][3]].posf[1]; + T11[i] = tet_vertices[tets[t_ids[i]][3]].posf[2]; + } + + ispc::energy_ispc(T0.data(), T1.data(), T2.data(), T3.data(), T4.data(), + T5.data(), T6.data(), T7.data(), T8.data(), + T9.data(), T10.data(), T11.data(), energy.data(), n); + + for (int i = 0; i < n; i++) { + s_energy += energy[i]; //s_energy intialized in the beginning + } +#else + for (int i = 0; i < t_ids.size(); i++) { + std::array t; + for (int j = 0; j < 4; j++) { + for (int k = 0; k < 3; k++) { + t[j*3 + k] = tet_vertices[tets[t_ids[i]][j]].posf[k]; + } + } + if (energy_type == state.ENERGY_AMIPS) { + s_energy += comformalAMIPSEnergy_new(t.data()); + } + } +#endif + if (std::isinf(s_energy) || std::isnan(s_energy) || s_energy <= 0 || s_energy > state.MAX_ENERGY) { + logger().debug("new E inf"); + s_energy = state.MAX_ENERGY; + } + + return s_energy; +} + +bool VertexSmoother::NewtonsUpdate(const std::vector& t_ids, int v_id, + double& energy, Eigen::Vector3d& J, Eigen::Matrix3d& H, Eigen::Vector3d& X0) { + energy = 0; + for (int i = 0; i < 3; i++) { + J(i) = 0; + for (int j = 0; j < 3; j++) { + H(i, j) = 0; + } + X0(i) = tet_vertices[v_id].posf[i]; + } + + for (int i = 0; i < t_ids.size(); i++) { + std::array t; + int start = 0; + for (int j = 0; j < 4; j++) { + if (tets[t_ids[i]][j] == v_id) { + start = j; + break; + } + } + for (int j = 0; j < 4; j++) { + for (int k = 0; k < 3; k++) { + t[j*3+k] = tet_vertices[tets[t_ids[i]][(start + j) % 4]].posf[k]; + } + } +#ifndef TETWILD_WITH_ISPC + igl_timer.start(); + energy += comformalAMIPSEnergy_new(t.data()); + breakdown_timing[id_value_e] += igl_timer.getElapsedTime(); +#endif + + double J_1[3]; + double H_1[9]; + igl_timer.start(); + comformalAMIPSJacobian_new(t.data(), J_1); + breakdown_timing[id_value_j] += igl_timer.getElapsedTime(); + igl_timer.start(); + comformalAMIPSHessian_new(t.data(), H_1); + breakdown_timing[id_value_h] += igl_timer.getElapsedTime(); + + for (int j = 0; j < 3; j++) { + J(j) += J_1[j]; + H(j, 0) += H_1[j * 3 + 0]; + H(j, 1) += H_1[j * 3 + 1]; + H(j, 2) += H_1[j * 3 + 2]; + } + } +#ifdef TETWILD_WITH_ISPC + igl_timer.start(); + energy = getNewEnergy(t_ids); + breakdown_timing[id_value_e] += igl_timer.getElapsedTime(); +#endif + + if (std::isinf(energy)) { + logger().debug("{} E inf", v_id); + energy = state.MAX_ENERGY; + } + if (std::isnan(energy)) { + logger().debug("{} E nan", v_id); + return false; + } + if (energy <= 0) { + logger().debug("{} E < 0", v_id); + return false; + } + if (!J.allFinite()) { + logger().debug("{} J inf/nan", v_id); + return false; + } + if (!H.allFinite()) { + logger().debug("{} H inf/nan", v_id); + return false; + } + + return true; +} + +int VertexSmoother::laplacianBoundary(const std::vector& b_v_ids, const std::vector& tmp_is_on_surface, + const std::vector& tmp_t_is_removed){ + int cnt_suc = 0; + double max_slim_evergy = 0; + for(unsigned int i=0;i max_slim_evergy) + max_slim_evergy = tet_qualities[i].slim_energy; + } + + for(int v_id:b_v_ids){ + // do laplacian on v_id + std::vector> new_tets; + std::unordered_set n_v_ids; 
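+        // interior one-ring neighbours; they are re-smoothed with smoothSingleVertex after this boundary vertex moves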
+// std::unordered_set n_v_ids2; + std::unordered_set tmp_n_sf_v_ids; + std::unordered_set n_sf_v_ids; + for(int t_id:tet_vertices[v_id].conn_tets){ + for(int j=0;j<4;j++){ + if(tmp_is_on_surface[tets[t_id][j]]) { + tmp_n_sf_v_ids.insert(tets[t_id][j]); + continue; + } + if(!tet_vertices[tets[t_id][j]].is_on_surface) + n_v_ids.insert(tets[t_id][j]); + } + new_tets.push_back(tets[t_id]); + } + for(int n_sf_v_id:tmp_n_sf_v_ids){ + std::vector t_ids; + setIntersection(tet_vertices[v_id].conn_tets, tet_vertices[n_sf_v_id].conn_tets, t_ids); + bool has_removed = false; + bool has_unremoved = false; + for(int t_id:t_ids){ + if(tmp_t_is_removed[t_id]) + has_removed=true; + if(!tmp_t_is_removed[t_id]) + has_unremoved=true; + } + if(has_removed && has_unremoved) + n_sf_v_ids.insert(n_sf_v_id); + } +// for(int n_v_id:n_v_ids){ +// for(int t_id:tet_vertices[n_v_id].conn_tets){ +// for(int j=0;j<4;j++) +// if(!tmp_is_on_surface[tets[t_id][j]] && !tet_vertices[tets[t_id][j]].is_on_surface) +// n_v_ids2.insert(tets[t_id][j]); +// } +// } + std::array vec ={{0, 0, 0}}; + for(int n_sf_v_id:n_sf_v_ids) { + for (int j = 0; j < 3; j++) + vec[j] += tet_vertices[n_sf_v_id].posf[j]; + } + for(int j=0;j<3;j++) { + vec[j] = (vec[j] / n_sf_v_ids.size()) - tet_vertices[v_id].posf[j]; + } + + // do bisection and check flipping + Point_3 old_p = tet_vertices[v_id].pos; + Point_3f old_pf = tet_vertices[v_id].posf; + double a = 1; + bool is_suc = false; + while(true) { + //give stop condition + bool is_stop = true; + for (int j = 0; j < 3; j++) + if (vec[j] * a > state.eps) + is_stop = false; + if (is_stop) + break; + tet_vertices[v_id].pos = Point_3(old_pf[0] + vec[0] * a, old_pf[1] + vec[1] * a, old_pf[2] + vec[2] * a); + tet_vertices[v_id].posf = Point_3f(old_pf[0] + vec[0] * a, old_pf[1] + vec[1] * a, old_pf[2] + vec[2] * a); + if (isFlip(new_tets)) { + a /= 2; + continue; + } + //check quality + std::vector tet_qs; + calTetQualities(new_tets, tet_qs); + bool is_valid=true; + for (int i = 0; i < tet_qs.size(); i++) { + if (tet_qs[i].slim_energy > max_slim_evergy) + is_valid=false; + } + if(!is_valid) { + a /= 2; + continue; + } + + int cnt = 0; + for (int t_id:tet_vertices[v_id].conn_tets) { + tet_qualities[t_id] = tet_qs[cnt++]; + } + + is_suc = true; + cnt_suc++; + break; + } + if(!is_suc) { + tet_vertices[v_id].pos = old_p; + tet_vertices[v_id].posf = old_pf; + continue; + } + + std::vector tet_qs; + calTetQualities(new_tets, tet_qs); + int cnt = 0; + for (int t_id:tet_vertices[v_id].conn_tets) { + tet_qualities[t_id] = tet_qs[cnt++]; + } + + // do normal smoothing on neighbor vertices +// logger().debug("n_v_ids.size = {}", n_v_ids.size()); +// logger().debug("n_v_ids2.size = {}", n_v_ids2.size()); + for(int n_v_id:n_v_ids){ + smoothSingleVertex(n_v_id, true); + } +// for(int n_v_id:n_v_ids2){ +// smoothSingleVertex(n_v_id, true); +// } +// for(int n_v_id:n_v_ids){ +// smoothSingleVertex(n_v_id, true); +// } + } + + logger().debug("suc.size = {}", cnt_suc); + return cnt_suc; +} + +void VertexSmoother::outputOneRing(int v_id, std::string s){ + PyMesh::MshSaver mSaver(state.working_dir+state.postfix+"_smooth_"+std::to_string(v_id)+s+".msh", true); + std::vector v_ids; + std::vector new_ids(tet_vertices.size(), -1); + for(int t_id: tet_vertices[v_id].conn_tets){ + for(int j=0;j<4;j++) + v_ids.push_back(tets[t_id][j]); + } + std::sort(v_ids.begin(), v_ids.end()); + v_ids.erase(std::unique(v_ids.begin(), v_ids.end()), v_ids.end()); + int cnt=0; + for(int id:v_ids){ + new_ids[id] = cnt; + cnt++; + } + + 
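+    // flatten the one-ring vertices and tets into the dense arrays expected by PyMesh::MshSaver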
Eigen::VectorXd oV(v_ids.size() * 3); + Eigen::VectorXi oT(tet_vertices[v_id].conn_tets.size() * 4); + for (int i = 0; i < v_ids.size(); i++) { + for (int j = 0; j < 3; j++) + oV(i * 3 + j) = tet_vertices[v_ids[i]].posf[j]; + } + cnt = 0; + for (int t_id: tet_vertices[v_id].conn_tets) { + for (int j = 0; j < 4; j++) { + oT(cnt * 4 + j) = new_ids[tets[t_id][j]]; + } + cnt++; + } + + mSaver.save_mesh(oV, oT, 3, mSaver.TET); + + Eigen::VectorXd cv(v_ids.size()); + for(int i=0;i +// +// This Source Code Form is subject to the terms of the Mozilla Public License +// v. 2.0. If a copy of the MPL was not distributed with this file, You can +// obtain one at http://mozilla.org/MPL/2.0/. +// +// Created by Yixin Hu on 4/11/17. +// + +#ifndef NEW_GTET_VERTEXSMOOTHER_H +#define NEW_GTET_VERTEXSMOOTHER_H + +#include + +namespace tetwild { + +class VertexSmoother:public LocalOperations { +public: + VertexSmoother(LocalOperations lo): LocalOperations(lo){} + + void smooth(); + void smoothSingle(); + bool smoothSingleVertex(int v_id, bool is_cal_energy); + void smoothSurface(); + + bool NewtonsMethod(const std::vector& t_ids, const std::vector>& new_tets, int v_id, Point_3f& p); + bool NewtonsUpdate(const std::vector& t_ids, int v_id, double& energy, Eigen::Vector3d& J, Eigen::Matrix3d& H, Eigen::Vector3d& X0); + double getNewEnergy(const std::vector& t_ids); + + int ts; + std::vector tets_tss; + std::vector tet_vertices_tss; + + void outputOneRing(int v_i, std::string s); + //for postprocessing + int laplacianBoundary(const std::vector& b_v_ids, const std::vector& tmp_is_on_surface, + const std::vector& tmp_t_is_removed); + + int id_value_e=0; + int id_value_j=1; + int id_value_h=2; + int id_solve=3; + int id_aabb=4; + int id_project = 5; + int id_round = 6; + std::array breakdown_timing={{0,0,0,0,0,0,0}}; + std::array breakdown_name={{"Computing E", "Computing J", "Computing H", "Solving linear system", "AABB", "Project", "Rounding"}}; + igl::Timer igl_timer; +}; + +} // namespace tetwild + +#endif //NEW_GTET_VERTEXSMOOTHER_H diff --git a/contrib/NeRF-Editing/TetWild/src/tetwild/geogram/mesh_AABB.cpp b/contrib/NeRF-Editing/TetWild/src/tetwild/geogram/mesh_AABB.cpp new file mode 100644 index 00000000..502e1c73 --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/src/tetwild/geogram/mesh_AABB.cpp @@ -0,0 +1,576 @@ +/* + * Copyright (c) 2012-2014, Bruno Levy + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of the ALICE Project-Team nor the names of its + * contributors may be used to endorse or promote products derived from this + * software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + * If you modify this software, you should include a notice giving the + * name of the person performing the modification, the date of modification, + * and the reason for such modification. + * + * Contact: Bruno Levy + * + * Bruno.Levy@inria.fr + * http://www.loria.fr/~levy + * + * ALICE Project + * LORIA, INRIA Lorraine, + * Campus Scientifique, BP 239 + * 54506 VANDOEUVRE LES NANCY CEDEX + * FRANCE + * + */ + +#include +#include +#include +#include +#include +#include + +namespace { + + using namespace GEO; + + /** + * \brief Computes the axis-aligned bounding box of a mesh facet. + * \param[in] M the mesh + * \param[out] B the bounding box of the facet + * \param[in] f the index of the facet in mesh \p M + */ + void get_facet_bbox( + const Mesh& M, Box& B, index_t f + ) { + index_t c = M.facets.corners_begin(f); + const double* p = M.vertices.point_ptr(M.facet_corners.vertex(c)); + for(coord_index_t coord = 0; coord < 3; ++coord) { + B.xyz_min[coord] = p[coord]; + B.xyz_max[coord] = p[coord]; + } + for(++c; c < M.facets.corners_end(f); ++c) { + p = M.vertices.point_ptr(M.facet_corners.vertex(c)); + for(coord_index_t coord = 0; coord < 3; ++coord) { + B.xyz_min[coord] = std::min(B.xyz_min[coord], p[coord]); + B.xyz_max[coord] = std::max(B.xyz_max[coord], p[coord]); + } + } + } + + /** + * \brief Computes the maximum node index in a subtree + * \param[in] node_index node index of the root of the subtree + * \param[in] b first facet index in the subtree + * \param[in] e one position past the last facet index in the subtree + * \return the maximum node index in the subtree rooted at \p node_index + */ + index_t max_node_index(index_t node_index, index_t b, index_t e) { + geo_debug_assert(e > b); + if(b + 1 == e) { + return node_index; + } + index_t m = b + (e - b) / 2; + index_t childl = 2 * node_index; + index_t childr = 2 * node_index + 1; + return std::max( + max_node_index(childl, b, m), + max_node_index(childr, m, e) + ); + } + + /** + * \brief Computes the hierarchy of bounding boxes recursively. + * \details This function is generic and can be used to compute + * a bbox hierarchy of arbitrary elements. 
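The tree here is stored implicitly: node `n` covers the facet range `[b, e)`, its children are `2n` and `2n+1`, a leaf holds exactly one facet, and the box array is sized `max_node_index(1, 0, nb) + 1`. A small NumPy sketch of the same layout (illustrative only), assuming `facet_bboxes` is an `(F, 2, 3)` array of per-facet (min, max) corners already in Morton order:

```python
import numpy as np

def max_node_index(n, b, e):
    if b + 1 == e:                           # leaf: a single facet
        return n
    m = b + (e - b) // 2
    return max(max_node_index(2 * n, b, m),
               max_node_index(2 * n + 1, m, e))

def build_bboxes(facet_bboxes):
    F = len(facet_bboxes)
    boxes = np.zeros((max_node_index(1, 0, F) + 1, 2, 3))   # size == max index + 1

    def recurse(n, b, e):
        if b + 1 == e:
            boxes[n] = facet_bboxes[b]       # leaf: copy the facet's box
            return
        m = b + (e - b) // 2
        recurse(2 * n, b, m)
        recurse(2 * n + 1, m, e)
        boxes[n, 0] = np.minimum(boxes[2 * n, 0], boxes[2 * n + 1, 0])   # union, min corner
        boxes[n, 1] = np.maximum(boxes[2 * n, 1], boxes[2 * n + 1, 1])   # union, max corner

    recurse(1, 0, F)
    return boxes
```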
+ * \param[in] M the mesh + * \param[in] bboxes the array of bounding boxes + * \param[in] node_index the index of the root of the subtree + * \param[in] b first element index in the subtree + * \param[in] e one position past the last element index in the subtree + * \param[in] get_bbox a function that computes the bbox of an element + * \tparam GET_BBOX a function (or a functor) with the following arguments: + * - mesh: a const reference to the mesh + * - box: a reference where the computed bounding box of the element + * will be stored + * - element: the index of the element + */ + template + void init_bboxes_recursive( + const Mesh& M, vector& bboxes, + index_t node_index, + index_t b, index_t e, + const GET_BBOX& get_bbox + ) { + geo_debug_assert(node_index < bboxes.size()); + geo_debug_assert(b != e); + if(b + 1 == e) { + get_bbox(M, bboxes[node_index], b); + return; + } + index_t m = b + (e - b) / 2; + index_t childl = 2 * node_index; + index_t childr = 2 * node_index + 1; + geo_debug_assert(childl < bboxes.size()); + geo_debug_assert(childr < bboxes.size()); + init_bboxes_recursive(M, bboxes, childl, b, m, get_bbox); + init_bboxes_recursive(M, bboxes, childr, m, e, get_bbox); + geo_debug_assert(childl < bboxes.size()); + geo_debug_assert(childr < bboxes.size()); + bbox_union(bboxes[node_index], bboxes[childl], bboxes[childr]); + } + + /** + * \brief Finds the nearest point in a mesh facet from a query point. + * \param[in] M the mesh + * \param[in] p the query point + * \param[in] f index of the facet in \p M + * \param[out] nearest_p the point of facet \p f nearest to \p p + * \param[out] squared_dist the squared distance between + * \p p and \p nearest_p + * \pre the mesh \p M is triangulated + */ + void get_point_facet_nearest_point( + const Mesh& M, + const vec3& p, + index_t f, + vec3& nearest_p, + double& squared_dist + ) { + geo_debug_assert(M.facets.nb_vertices(f) == 3); + index_t c = M.facets.corners_begin(f); + const vec3& p1 = Geom::mesh_vertex(M, M.facet_corners.vertex(c)); + ++c; + const vec3& p2 = Geom::mesh_vertex(M, M.facet_corners.vertex(c)); + ++c; + const vec3& p3 = Geom::mesh_vertex(M, M.facet_corners.vertex(c)); + double lambda1, lambda2, lambda3; // barycentric coords, not used. + squared_dist = Geom::point_triangle_squared_distance( + p, p1, p2, p3, nearest_p, lambda1, lambda2, lambda3 + ); + } + + /** + * \brief Computes the squared distance between a point and a Box. + * \param[in] p the point + * \param[in] B the box + * \return the squared distance between \p p and \p B + * \pre p is inside B + */ + double inner_point_box_squared_distance( + const vec3& p, + const Box& B + ) { + geo_debug_assert(B.contains(p)); + double result = geo_sqr(p[0] - B.xyz_min[0]); + result = std::min(result, geo_sqr(p[0] - B.xyz_max[0])); + for(coord_index_t c = 1; c < 3; ++c) { + result = std::min(result, geo_sqr(p[c] - B.xyz_min[c])); + result = std::min(result, geo_sqr(p[c] - B.xyz_max[c])); + } + return result; + } + + /** + * \brief Computes the squared distance between a point and a Box + * with negative sign if the point is inside the Box. 
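The box-distance helpers here use a signed convention: a positive squared distance outside the box, and for points inside, the negated squared distance to the nearest wall. An equivalent NumPy sketch, with boxes given as (min_corner, max_corner) arrays:

```python
import numpy as np

def point_box_signed_squared_distance(p, box_min, box_max):
    below = np.minimum(p - box_min, 0.0)     # non-zero where p is below the box
    above = np.maximum(p - box_max, 0.0)     # non-zero where p is above the box
    outside_sq = np.sum(below ** 2 + above ** 2)
    if outside_sq > 0.0:
        return outside_sq                    # outside: ordinary squared distance
    wall = np.minimum(p - box_min, box_max - p).min()   # inside: nearest box wall
    return -(wall ** 2)
```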
+ * \param[in] p the point + * \param[in] B the box + * \return the signed squared distance between \p p and \p B + */ + double point_box_signed_squared_distance( + const vec3& p, + const Box& B + ) { + bool inside = true; + double result = 0.0; + for(coord_index_t c = 0; c < 3; c++) { + if(p[c] < B.xyz_min[c]) { + inside = false; + result += geo_sqr(p[c] - B.xyz_min[c]); + } else if(p[c] > B.xyz_max[c]) { + inside = false; + result += geo_sqr(p[c] - B.xyz_max[c]); + } + } + if(inside) { + result = -inner_point_box_squared_distance(p, B); + } + return result; + } + + /** + * \brief Computes the squared distance between a point and the + * center of a box. + * \param[in] p the point + * \param[in] B the box + * \return the squared distance between \p p and the center of \p B + */ + double point_box_center_squared_distance( + const vec3& p, const Box& B + ) { + double result = 0.0; + for(coord_index_t c = 0; c < 3; ++c) { + double d = p[c] - 0.5 * (B.xyz_min[c] + B.xyz_max[c]); + result += geo_sqr(d); + } + return result; + } + + /** + * \brief Tests whether a segment intersects a triangle. + * \param[in] q1 , q2 the two extremities of the segment. + * \param[in] p1 , p2 , p3 the three vertices of the triangle. + * \retval true if [q1,q2] has an intersection with (p1, p2, p3). + * \retval false otherwise. + */ + bool segment_triangle_intersection( + const vec3& q1, const vec3& q2, + const vec3& p1, const vec3& p2, const vec3& p3 + ) { + + // If the segment does not straddle the supporting plane of the + // triangle, then there is no intersection. + vec3 N = cross(p2-p1, p3-p1); + if(dot(q1-p1,N)*dot(q2-p1,N) > 0.0) { + return false; + } + + // The three tetrahedra formed by the segment and the three edges + // of the triangle should have the same sign, else there is no + // intersection. + int s1 = geo_sgn(Geom::tetra_signed_volume(q1,q2,p1,p2)); + int s2 = geo_sgn(Geom::tetra_signed_volume(q1,q2,p2,p3)); + if(s1 != s2) { + return false; + } + int s3 = geo_sgn(Geom::tetra_signed_volume(q1,q2,p3,p1)); + return (s2 == s3); + } + + /** + * \brief Tests whether there is an intersection between a segment + * and a mesh facet. + * \param[in] q1 , q2 the extremities of the segment + * \param[in] M the mesh + * \param[in] f the facet + */ + bool segment_mesh_facet_intersection( + const vec3& q1, const vec3& q2, + const Mesh& M, + index_t f + ) { + geo_debug_assert(M.facets.nb_vertices(f) == 3); + index_t c = M.facets.corners_begin(f); + const vec3& p1 = Geom::mesh_vertex(M, M.facet_corners.vertex(c)); + ++c; + const vec3& p2 = Geom::mesh_vertex(M, M.facet_corners.vertex(c)); + ++c; + const vec3& p3 = Geom::mesh_vertex(M, M.facet_corners.vertex(c)); + return segment_triangle_intersection(q1, q2, p1, p2, p3); + } + + /** + * \brief Tests whether a segment intersects a box. + * \param[in] q1 , q2 the two extremities of the segment. + * \param[in] box the box. + * \retval true if [q1,q2] intersects the box. + * \retval false otherwise. 
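The segment/triangle predicate above first requires the segment endpoints to straddle the triangle's supporting plane, then checks that the three tetrahedra formed by the segment and each triangle edge have a consistent orientation. A NumPy rendering of the same idea (sign conventions may differ from geogram's exact predicates, so treat it as illustrative only):

```python
import numpy as np

def signed_volume(a, b, c, d):
    return np.dot(np.cross(b - a, c - a), d - a) / 6.0

def segment_triangle_intersection(q1, q2, p1, p2, p3):
    n = np.cross(p2 - p1, p3 - p1)                       # triangle normal
    if np.dot(q1 - p1, n) * np.dot(q2 - p1, n) > 0.0:
        return False                                     # both endpoints on one side of the plane
    s1 = np.sign(signed_volume(q1, q2, p1, p2))
    s2 = np.sign(signed_volume(q1, q2, p2, p3))
    if s1 != s2:
        return False
    s3 = np.sign(signed_volume(q1, q2, p3, p1))
    return s2 == s3
```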
+ */ + bool segment_box_intersection( + const vec3& q1, const vec3& q2, const Box& box + ) { + // Ref: https://www.gamedev.net/forums/topic/338987-aabb---line-segment-intersection-test/ + vec3 d( + 0.5*(q2.x - q1.x), + 0.5*(q2.y - q1.y), + 0.5*(q2.z - q1.z) + ); + + vec3 e( + 0.5*(box.xyz_max[0] - box.xyz_min[0]), + 0.5*(box.xyz_max[1] - box.xyz_min[1]), + 0.5*(box.xyz_max[2] - box.xyz_min[2]) + ); + + vec3 c( + q1.x + d.x - 0.5*(box.xyz_min[0] + box.xyz_max[0]), + q1.y + d.y - 0.5*(box.xyz_min[1] + box.xyz_max[1]), + q1.z + d.z - 0.5*(box.xyz_min[2] + box.xyz_max[2]) + ); + + vec3 ad(fabs(d.x), fabs(d.y), fabs(d.z)); + + if (fabs(c[0]) > e[0] + ad[0]) { + return false; + } + + if (fabs(c[1]) > e[1] + ad[1]) { + return false; + } + + if (fabs(c[2]) > e[2] + ad[2]) { + return false; + } + + if (fabs(d[1] * c[2] - d[2] * c[1]) > e[1] * ad[2] + e[2] * ad[1]) { + return false; + } + + if (fabs(d[2] * c[0] - d[0] * c[2]) > e[2] * ad[0] + e[0] * ad[2]) { + return false; + } + + if (fabs(d[0] * c[1] - d[1] * c[0]) > e[0] * ad[1] + e[1] * ad[0]) { + return false; + } + + return true; + } +} + +/****************************************************************************/ + +namespace GEO { + + MeshFacetsAABBWithEps::MeshFacetsAABBWithEps( + Mesh& M, bool reorder + ) : + mesh_(M) { + if(!M.facets.are_simplices()) { + mesh_repair( + M, + MeshRepairMode( + MESH_REPAIR_TRIANGULATE | MESH_REPAIR_QUIET + ) + ); + } + if(reorder) { + mesh_reorder(mesh_, MESH_ORDER_MORTON); + } + bboxes_.resize( + max_node_index( + 1, 0, mesh_.facets.nb() + ) + 1 // <-- this is because size == max_index + 1 !!! + ); + init_bboxes_recursive( + mesh_, bboxes_, 1, 0, mesh_.facets.nb(), get_facet_bbox + ); + } + + void MeshFacetsAABBWithEps::get_nearest_facet_hint( + const vec3& p, + index_t& nearest_f, vec3& nearest_point, double& sq_dist + ) const { + + // Find a good initial value for nearest_f by traversing + // the boxes and selecting the child such that the center + // of its bounding box is nearer to the query point. + // For a large mesh (20M facets) this gains up to 10% + // performance as compared to picking nearest_f randomly. 
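The descent implemented just below can be summarised compactly: starting at the root, always step into the child whose box center is closer to the query point, and use the facet of the leaf you reach as the initial guess. A Python sketch, where `box_center(n)` is a hypothetical accessor for the precomputed node boxes:

```python
import numpy as np

def nearest_facet_hint(p, n_facets, box_center):
    b, e, n = 0, n_facets, 1
    while e != b + 1:
        m = b + (e - b) // 2
        left, right = 2 * n, 2 * n + 1
        if (np.sum((p - box_center(left)) ** 2) <
                np.sum((p - box_center(right)) ** 2)):
            e, n = m, left                   # descend into the left (nearer) child
        else:
            b, n = m, right                  # descend into the right child
    return b                                 # leaf facet index used to seed the pruned search
```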
+ index_t b = 0; + index_t e = mesh_.facets.nb(); + index_t n = 1; + while(e != b + 1) { + index_t m = b + (e - b) / 2; + index_t childl = 2 * n; + index_t childr = 2 * n + 1; + if( + point_box_center_squared_distance(p, bboxes_[childl]) < + point_box_center_squared_distance(p, bboxes_[childr]) + ) { + e = m; + n = childl; + } else { + b = m; + n = childr; + } + } + nearest_f = b; + + index_t v = mesh_.facet_corners.vertex( + mesh_.facets.corners_begin(nearest_f) + ); + nearest_point = Geom::mesh_vertex(mesh_, v); + sq_dist = Geom::distance2(p, nearest_point); + } + + void MeshFacetsAABBWithEps::nearest_facet_recursive( + const vec3& p, + index_t& nearest_f, vec3& nearest_point, double& sq_dist, + index_t n, index_t b, index_t e + ) const { + geo_debug_assert(e > b); + + // If node is a leaf: compute point-facet distance + // and replace current if nearer + if(b + 1 == e) { + vec3 cur_nearest_point; + double cur_sq_dist; + get_point_facet_nearest_point( + mesh_, p, b, cur_nearest_point, cur_sq_dist + ); + if(cur_sq_dist < sq_dist) { + nearest_f = b; + nearest_point = cur_nearest_point; + sq_dist = cur_sq_dist; + } + return; + } + index_t m = b + (e - b) / 2; + index_t childl = 2 * n; + index_t childr = 2 * n + 1; + + double dl = point_box_signed_squared_distance(p, bboxes_[childl]); + double dr = point_box_signed_squared_distance(p, bboxes_[childr]); + + // Traverse the "nearest" child first, so that it has more chances + // to prune the traversal of the other child. + if(dl < dr) { + if(dl < sq_dist) { + nearest_facet_recursive( + p, + nearest_f, nearest_point, sq_dist, + childl, b, m + ); + } + if(dr < sq_dist) { + nearest_facet_recursive( + p, + nearest_f, nearest_point, sq_dist, + childr, m, e + ); + } + } else { + if(dr < sq_dist) { + nearest_facet_recursive( + p, + nearest_f, nearest_point, sq_dist, + childr, m, e + ); + } + if(dl < sq_dist) { + nearest_facet_recursive( + p, + nearest_f, nearest_point, sq_dist, + childl, b, m + ); + } + } + } + + void MeshFacetsAABBWithEps::facet_in_envelope_recursive( + const vec3& p, double sq_epsilon, + index_t& nearest_f, vec3& nearest_point, double& sq_dist, + index_t n, index_t b, index_t e + ) const { + geo_debug_assert(e > b); + + if (sq_dist <= sq_epsilon) { + return; + } + + // If node is a leaf: compute point-facet distance + // and replace current if nearer + if(b + 1 == e) { + vec3 cur_nearest_point; + double cur_sq_dist; + get_point_facet_nearest_point( + mesh_, p, b, cur_nearest_point, cur_sq_dist + ); + if(cur_sq_dist < sq_dist) { + nearest_f = b; + nearest_point = cur_nearest_point; + sq_dist = cur_sq_dist; + } + return; + } + index_t m = b + (e - b) / 2; + index_t childl = 2 * n; + index_t childr = 2 * n + 1; + + double dl = point_box_signed_squared_distance(p, bboxes_[childl]); + double dr = point_box_signed_squared_distance(p, bboxes_[childr]); + + // Traverse the "nearest" child first, so that it has more chances + // to prune the traversal of the other child. 
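`nearest_facet_recursive` above is a classic pruned best-first traversal: visit the child whose box is nearer first so it can tighten the current best distance, then skip any subtree whose box distance already exceeds it. A compact sketch, with hypothetical callbacks `box_sq_dist(p, node)` and `facet_sq_dist(p, f)` standing in for the box and point-to-facet distance routines:

```python
def nearest_facet(p, n_facets, box_sq_dist, facet_sq_dist):
    best = {"f": -1, "d2": float("inf")}

    def recurse(n, b, e):
        if b + 1 == e:                       # leaf: test the single facet
            d2 = facet_sq_dist(p, b)
            if d2 < best["d2"]:
                best["f"], best["d2"] = b, d2
            return
        m = b + (e - b) // 2
        children = [(2 * n, b, m), (2 * n + 1, m, e)]
        # visit the nearer child first so it can shrink best["d2"] and prune the other
        children.sort(key=lambda c: box_sq_dist(p, c[0]))
        for child, cb, ce in children:
            if box_sq_dist(p, child) < best["d2"]:
                recurse(child, cb, ce)

    recurse(1, 0, n_facets)
    return best["f"], best["d2"]
```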
+ if(dl < dr) { + if(dl < sq_dist && dl <= sq_epsilon) { + facet_in_envelope_recursive( + p, sq_epsilon, + nearest_f, nearest_point, sq_dist, + childl, b, m + ); + } + if(dr < sq_dist && dr <= sq_epsilon) { + facet_in_envelope_recursive( + p, sq_epsilon, + nearest_f, nearest_point, sq_dist, + childr, m, e + ); + } + } else { + if(dr < sq_dist && dr <= sq_epsilon) { + facet_in_envelope_recursive( + p, sq_epsilon, + nearest_f, nearest_point, sq_dist, + childr, m, e + ); + } + if(dl < sq_dist && dl <= sq_epsilon) { + facet_in_envelope_recursive( + p, sq_epsilon, + nearest_f, nearest_point, sq_dist, + childl, b, m + ); + } + } + } + + + bool MeshFacetsAABBWithEps::segment_intersection(const vec3& q1, const vec3& q2) const { + return segment_intersection_recursive(q1, q2, 1, 0, mesh_.facets.nb()); + } + + bool MeshFacetsAABBWithEps::segment_intersection_recursive( + const vec3& q1, const vec3& q2, index_t n, index_t b, index_t e + ) const { + if(!segment_box_intersection(q1, q2, bboxes_[n])) { + return false; + } + if(b + 1 == e) { + return segment_mesh_facet_intersection(q1, q2, mesh_, b); + } + index_t m = b + (e - b) / 2; + index_t childl = 2 * n; + index_t childr = 2 * n + 1; + return ( + segment_intersection_recursive(q1, q2, childl, b, m) || + segment_intersection_recursive(q1, q2, childr, m, e) + ); + } + +/****************************************************************************/ + +} + diff --git a/contrib/NeRF-Editing/TetWild/src/tetwild/geogram/mesh_AABB.h b/contrib/NeRF-Editing/TetWild/src/tetwild/geogram/mesh_AABB.h new file mode 100644 index 00000000..39ec16a2 --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/src/tetwild/geogram/mesh_AABB.h @@ -0,0 +1,423 @@ +/* + * Copyright (c) 2012-2014, Bruno Levy + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of the ALICE Project-Team nor the names of its + * contributors may be used to endorse or promote products derived from this + * software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + * If you modify this software, you should include a notice giving the + * name of the person performing the modification, the date of modification, + * and the reason for such modification. 
+ * + * Contact: Bruno Levy + * + * Bruno.Levy@inria.fr + * http://www.loria.fr/~levy + * + * ALICE Project + * LORIA, INRIA Lorraine, + * Campus Scientifique, BP 239 + * 54506 VANDOEUVRE LES NANCY CEDEX + * FRANCE + * + */ + +#pragma once +/** + * \file mesh_AABB.h + * \brief Axis Aligned Bounding Box trees for accelerating + * geometric queries that operate on a Mesh. + */ + +#include +#include +#include + +namespace GEO { + + /** + * \brief Axis Aligned Bounding Box tree of mesh facets. + * \details Used to quickly compute facet intersection and + * to locate the nearest facet from 3d query points. + */ + class GEOGRAM_API MeshFacetsAABBWithEps { + public: + /** + * \brief Creates the Axis Aligned Bounding Boxes tree. + * \param[in] M the input mesh. It can be modified, + * and will be triangulated (if + * not already a triangular mesh). The facets are + * re-ordered (using Morton's order, see mesh_reorder()). + * \param[in] reorder if not set, Morton re-ordering is + * skipped (but it means that mesh_reorder() was previously + * called else the algorithm will be pretty unefficient). + * \pre M.facets.are_simplices() + */ + MeshFacetsAABBWithEps(Mesh& M, bool reorder = true); + + /** + * \brief Computes all the pairs of intersecting facets. + * \param[in] action ACTION::operator(index_t,index_t) is + * invoked of all pairs of facets that have overlapping + * bounding boxes. triangles_intersection() needs to be + * called to detect the actual intersections. + * \tparam ACTION user action class, that needs to define + * operator(index_t,index_t), where the two indices are + * the indices each pair of triangles that have intersecting + * bounding boxes. + */ + template + void compute_facet_bbox_intersections( + ACTION& action + ) const { + intersect_recursive( + action, + 1, 0, mesh_.facets.nb(), + 1, 0, mesh_.facets.nb() + ); + } + + + /** + * \brief Computes all the intersections between a given + * box and the bounding boxes of all the facets. + * \param[in] action ACTION::operator(index_t) is + * invoked for all facets that have a bounding + * box that intersects \p box_in. + * \tparam ACTION user action class, that needs to define + * operator(index_t), where the parameter is the index + * of the triangle that has its bounding box intersecting + * \p box_in. + */ + template< class ACTION > + void compute_bbox_facet_bbox_intersections( + const Box& box_in, + ACTION& action + ) const { + bbox_intersect_recursive( + action, box_in, 1, 0, mesh_.facets.nb() + ); + } + + /** + * \brief Finds the nearest facet from an arbitrary 3d query point. + * \param[in] p query point + * \param[out] nearest_point nearest point on the surface + * \param[out] sq_dist squared distance between p and the surface. + * \return the index of the facet nearest to point p. + */ + index_t nearest_facet( + const vec3& p, vec3& nearest_point, double& sq_dist + ) const { + index_t nearest_facet; + get_nearest_facet_hint(p, nearest_facet, nearest_point, sq_dist); + nearest_facet_recursive( + p, + nearest_facet, nearest_point, sq_dist, + 1, 0, mesh_.facets.nb() + ); + return nearest_facet; + } + + /** + * \brief Computes the nearest point and nearest facet from + * a query point, using user-specified hint. + * + * \details The hint is specified as reasonable initial values of + * (nearest_facet, nearest_point, sq_dist). If multiple queries + * are done on a set of points that has spatial locality, + * the hint can be the result of the previous call. 
+ * + * \param[in] p query point + * \param[in,out] nearest_facet the nearest facet so far, + * or NO_FACET if not known yet + * \param[in,out] nearest_point a point in nearest_facet + * \param[in,out] sq_dist squared distance between p and + * nearest_point + * \note On entry, \p sq_dist needs to be equal to the squared + * distance between \p p and \p nearest_point (it is easy to + * forget to update it when calling it within a loop). + */ + void nearest_facet_with_hint( + const vec3& p, + index_t& nearest_facet, vec3& nearest_point, double& sq_dist + ) const { + if(nearest_facet == NO_FACET) { + get_nearest_facet_hint( + p, nearest_facet, nearest_point, sq_dist + ); + } + nearest_facet_recursive( + p, + nearest_facet, nearest_point, sq_dist, + 1, 0, mesh_.facets.nb() + ); + } + + /* + * Finds the nearest facet on the surface, but stops early if a + * point within a given distance is found. + */ + index_t facet_in_envelope( + const vec3& p, double sq_epsilon, vec3& nearest_point, double& sq_dist + ) const { + index_t nearest_facet; + get_nearest_facet_hint(p, nearest_facet, nearest_point, sq_dist); + facet_in_envelope_recursive( + p, sq_epsilon, + nearest_facet, nearest_point, sq_dist, + 1, 0, mesh_.facets.nb() + ); + return nearest_facet; + } + + /* + * Same as before, but stops as soon as a point on the surface in + * within a given distance bound from the triangle mesh. + */ + void facet_in_envelope_with_hint( + const vec3& p, double sq_epsilon, + index_t& nearest_facet, vec3& nearest_point, double& sq_dist + ) const { + if(nearest_facet == NO_FACET) { + get_nearest_facet_hint( + p, nearest_facet, nearest_point, sq_dist + ); + } + facet_in_envelope_recursive( + p, sq_epsilon, + nearest_facet, nearest_point, sq_dist, + 1, 0, mesh_.facets.nb() + ); + } + + /** + * \brief Computes the distance between an arbitrary 3d query + * point and the surface. + * \param[in] p query point + * \return the squared distance between \p p and the surface. + */ + double squared_distance(const vec3& p) const { + vec3 nearest_point; + double result; + nearest_facet(p, nearest_point, result); + return result; + } + + /** + * \brief Tests whether this surface mesh has an intersection + * with a segment. + * \param[in] q1 , q2 the two extremities of the segment. + * \retval true if there exists an intersection between [q1 , q2] + * and a facet of the mesh. + * \retval false otherwise. + */ + bool segment_intersection(const vec3& q1, const vec3& q2) const; + + protected: + + + /** + * \brief Computes all the facets that have a bbox that + * intersects a given bbox in a sub-tree of the AABB tree. + * + * Note that the tree structure is completely implicit, + * therefore the bounds of the (continuous) facet indices + * sequences that correspond to the facets contained + * in the two nodes are sent as well as the node indices. + * + * \param[in] action ACTION::operator(index_t) is + * invoked for all facet that has a bounding box that + * overlaps \p box. 
+ * \param[in] node index of the first node of the AABB tree + * \param[in] b index of the first facet in \p node + * \param[in] e one position past the index of the last + * facet in \p node + */ + template + void bbox_intersect_recursive( + ACTION& action, + const Box& box, + index_t node, index_t b, index_t e + ) const { + geo_debug_assert(e != b); + + // Prune sub-tree that does not have intersection + if(!bboxes_overlap(box, bboxes_[node])) { + return; + } + + // Leaf case + if(e == b+1) { + action(b); + return; + } + + // Recursion + index_t m = b + (e - b) / 2; + index_t node_l = 2 * node; + index_t node_r = 2 * node + 1; + + bbox_intersect_recursive(action, box, node_l, b, m); + bbox_intersect_recursive(action, box, node_r, m, e); + } + + /** + * \brief Computes all the pairs of intersecting facets + * for two sub-trees of the AABB tree. + * + * Note that the tree structure is completely implicit, + * therefore the bounds of the (continuous) facet indices + * sequences that correspond to the facets contained + * in the two nodes are sent as well as the node indices. + * + * \param[in] action ACTION::operator(index_t,index_t) is + * invoked of all pairs of facets that have overlapping + * bounding boxes. + * \param[in] node1 index of the first node of the AABB tree + * \param[in] b1 index of the first facet in \p node1 + * \param[in] e1 one position past the index of the last + * facet in \p node1 + * \param[in] node2 index of the second node of the AABB tree + * \param[in] b2 index of the first facet in \p node2 + * \param[in] e2 one position past the index of the second + * facet in \p node2 + */ + template + void intersect_recursive( + ACTION& action, + index_t node1, index_t b1, index_t e1, + index_t node2, index_t b2, index_t e2 + ) const { + geo_debug_assert(e1 != b1); + geo_debug_assert(e2 != b2); + + // Since we are intersecting the AABBTree with *itself*, + // we can prune half of the cases by skipping the test + // whenever node2's facet index interval is greated than + // node1's facet index interval. + if(e2 <= b1) { + return; + } + + // The acceleration is here: + if(!bboxes_overlap(bboxes_[node1], bboxes_[node2])) { + return; + } + + // Simple case: leaf - leaf intersection. + if(b1 + 1 == e1 && b2 + 1 == e2) { + action(b1, b2); + return; + } + + // If node2 has more facets than node1, then + // intersect node2's two children with node1 + // else + // intersect node1's two children with node2 + if(e2 - b2 > e1 - b1) { + index_t m2 = b2 + (e2 - b2) / 2; + index_t node2_l = 2 * node2; + index_t node2_r = 2 * node2 + 1; + intersect_recursive(action, node1, b1, e1, node2_l, b2, m2); + intersect_recursive(action, node1, b1, e1, node2_r, m2, e2); + } else { + index_t m1 = b1 + (e1 - b1) / 2; + index_t node1_l = 2 * node1; + index_t node1_r = 2 * node1 + 1; + intersect_recursive(action, node1_l, b1, m1, node2, b2, e2); + intersect_recursive(action, node1_r, m1, e1, node2, b2, e2); + } + } + + /** + * \brief Computes a reasonable initialization for + * nearest facet search. + * + * \details A good initialization makes the algorithm faster, + * by allowing early pruning of subtrees that provably + * do not contain the nearest neighbor. 
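`intersect_recursive` above enumerates candidate facet pairs of the same tree: the `e2 <= b1` test discards symmetric duplicates, box overlap prunes the recursion, and the larger of the two facet ranges is split. A Python sketch of the same double recursion, with a hypothetical `boxes_overlap(n1, n2)` predicate and an `action(f1, f2)` callback:

```python
def intersect_pairs(n_facets, boxes_overlap, action):
    def recurse(n1, b1, e1, n2, b2, e2):
        if e2 <= b1:                          # skip symmetric duplicates
            return
        if not boxes_overlap(n1, n2):         # the actual acceleration
            return
        if b1 + 1 == e1 and b2 + 1 == e2:     # leaf/leaf: report a candidate pair
            action(b1, b2)
            return
        if e2 - b2 > e1 - b1:                 # split the larger facet range
            m2 = b2 + (e2 - b2) // 2
            recurse(n1, b1, e1, 2 * n2, b2, m2)
            recurse(n1, b1, e1, 2 * n2 + 1, m2, e2)
        else:
            m1 = b1 + (e1 - b1) // 2
            recurse(2 * n1, b1, m1, n2, b2, e2)
            recurse(2 * n1 + 1, m1, e1, n2, b2, e2)

    recurse(1, 0, n_facets, 1, 0, n_facets)
```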
+ * + * \param[in] p query point + * \param[out] nearest_facet a facet reasonably near p + * \param[out] nearest_point a point in nearest_facet + * \param[out] sq_dist squared distance between p and nearest_point + */ + void get_nearest_facet_hint( + const vec3& p, + index_t& nearest_facet, vec3& nearest_point, double& sq_dist + ) const; + + /** + * \brief The recursive function used by the implementation + * of nearest_facet(). + * + * \details The first call may use get_nearest_facet_hint() + * to initialize nearest_facet, nearest_point and sq_dist, + * as done in nearest_facet(). + * + * \param[in] p query point + * \param[in,out] nearest_facet the nearest facet so far, + * \param[in,out] nearest_point a point in nearest_facet + * \param[in,out] sq_dist squared distance between p and nearest_point + * \param[in] n index of the current node in the AABB tree + * \param[in] b index of the first facet in the subtree under node \p n + * \param[in] e one position past the index of the last facet in the + * subtree under node \p n + */ + void nearest_facet_recursive( + const vec3& p, + index_t& nearest_facet, vec3& nearest_point, double& sq_dist, + index_t n, index_t b, index_t e + ) const; + + /* + * Same as before, but stops early if a point within a given distance + * is found. + */ + void facet_in_envelope_recursive( + const vec3& p, double sq_epsilon, + index_t& nearest_facet, vec3& nearest_point, double& sq_dist, + index_t n, index_t b, index_t e + ) const; + + /** + * \brief The recursive function used by the implementation + * of segment_intersection() + * \param[in] q1 , q2 the segment + * \param[in] n index of the current node in the AABB tree + * \param[in] b index of the first facet in the subtree under node \p n + * \param[in] e one position past the index of the last facet in the + * subtree under node \p n + */ + bool segment_intersection_recursive( + const vec3& q1, const vec3& q2, index_t n, index_t b, index_t e + ) const; + + protected: + vector bboxes_; + Mesh& mesh_; + }; + +} diff --git a/contrib/NeRF-Editing/TetWild/src/tetwild/tetwild.cpp b/contrib/NeRF-Editing/TetWild/src/tetwild/tetwild.cpp new file mode 100644 index 00000000..2698891b --- /dev/null +++ b/contrib/NeRF-Editing/TetWild/src/tetwild/tetwild.cpp @@ -0,0 +1,433 @@ +// This file is part of TetWild, a software for generating tetrahedral meshes. +// +// Copyright (C) 2018 Yixin Hu +// +// This Source Code Form is subject to the terms of the Mozilla Public License +// v. 2.0. If a copy of the MPL was not distributed with this file, You can +// obtain one at http://mozilla.org/MPL/2.0/. +// +// Created by Yixin Hu on 5/31/18. 
+// + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace tetwild { + +//////////////////////////////////////////////////////////////////////////////// + +void printFinalQuality(double time, const std::vector& tet_vertices, + const std::vector>& tets, + const std::vector &t_is_removed, + const std::vector& tet_qualities, + const std::vector& v_ids, + const Args &args, const State &state) +{ + logger().debug("final quality:"); + double min = 10, max = 0; + double min_avg = 0, max_avg = 0; + // double max_asp_ratio = 0, avg_asp_ratio = 0; + double max_slim_energy = 0, avg_slim_energy = 0; + std::array cmp_cnt = {{0, 0, 0, 0, 0, 0}}; + std::array cmp_d_angles = {{6 / 180.0 * M_PI, 12 / 180.0 * M_PI, 18 / 180.0 * M_PI, + 162 / 180.0 * M_PI, 168 / 180.0 * M_PI, 174 / 180.0 * M_PI}}; + int cnt = 0; + for (int i = 0; i < tet_qualities.size(); i++) { + if (t_is_removed[i]) + continue; + cnt++; + if (tet_qualities[i].min_d_angle < min) + min = tet_qualities[i].min_d_angle; + if (tet_qualities[i].max_d_angle > max) + max = tet_qualities[i].max_d_angle; + // if (tet_qualities[i].asp_ratio_2 > max_asp_ratio) + // max_asp_ratio = tet_qualities[i].asp_ratio_2; + if (tet_qualities[i].slim_energy > max_slim_energy) + max_slim_energy = tet_qualities[i].slim_energy; + min_avg += tet_qualities[i].min_d_angle; + max_avg += tet_qualities[i].max_d_angle; + // avg_asp_ratio += tet_qualities[i].asp_ratio_2; + avg_slim_energy += tet_qualities[i].slim_energy; + + for (int j = 0; j < 3; j++) { + if (tet_qualities[i].min_d_angle < cmp_d_angles[j]) + cmp_cnt[j]++; + } + for (int j = 0; j < 3; j++) { + if (tet_qualities[i].max_d_angle > cmp_d_angles[j + 3]) + cmp_cnt[j + 3]++; + } + } + logger().debug("min_d_angle = {}, max_d_angle = {}, max_slim_energy = {}", min, max, max_slim_energy); + logger().debug("avg_min_d_angle = {}, avg_max_d_angle = {}, avg_slim_energy = {}", min_avg / cnt, max_avg / cnt, avg_slim_energy / cnt); + logger().debug("min_d_angle: <6 {}; <12 {}; <18 {}", cmp_cnt[0] / cnt, cmp_cnt[1] / cnt, cmp_cnt[2] / cnt); + logger().debug("max_d_angle: >174 {}; >168 {}; >162 {}", cmp_cnt[5] / cnt, cmp_cnt[4] / cnt, cmp_cnt[3] / cnt); + + addRecord(MeshRecord(MeshRecord::OpType::OP_WN, time, v_ids.size(), cnt, + min, min_avg / cnt, max, max_avg / cnt, max_slim_energy, avg_slim_energy / cnt), args, state); + + // output unrounded vertices: + cnt = 0; + for (int v_id: v_ids) { + if (!tet_vertices[v_id].is_rounded) { + cnt++; + } + } + logger().debug("{}/{} vertices are unrounded!!!", cnt, v_ids.size()); + addRecord(MeshRecord(MeshRecord::OpType::OP_UNROUNDED, -1, cnt, -1), args, state); +} + +void extractSurfaceMesh(const Eigen::MatrixXd &V, const Eigen::MatrixXi &T, + Eigen::MatrixXd &VS, Eigen::MatrixXi &FS) +{ + Eigen::VectorXi I; + igl::boundary_facets(T, FS); + igl::remove_unreferenced(V, FS, VS, FS, I); + for(int i=0;i < FS.rows();i++){ + int tmp = FS(i, 0); + FS(i, 0) = FS(i, 2); + FS(i, 2) = tmp; + } +} + +void extractFinalTetmesh(MeshRefinement& MR, + Eigen::MatrixXd &V_out, Eigen::MatrixXi &T_out, Eigen::VectorXd &A_out, + const Args &args, const State &state) +{ + std::vector &tet_vertices = MR.tet_vertices; + std::vector> &tets = MR.tets; + std::vector &v_is_removed = MR.v_is_removed; + std::vector &t_is_removed = MR.t_is_removed; + std::vector &tet_qualities = MR.tet_qualities; + int t_cnt = std::count(t_is_removed.begin(), t_is_removed.end(), false); + double tmp_time = 0; + if (!args.smooth_open_boundary) 
{ + InoutFiltering IOF(tet_vertices, tets, MR.is_surface_fs, v_is_removed, t_is_removed, tet_qualities, state); + igl::Timer igl_timer; + igl_timer.start(); + IOF.filter(); + t_cnt = std::count(t_is_removed.begin(), t_is_removed.end(), false); + tmp_time = igl_timer.getElapsedTime(); + logger().info("time = {}s", tmp_time); + logger().debug("{} tets inside!", t_cnt); + } + + //output result + std::vector v_ids; + for (int i = 0; i < tets.size(); i++) { + if (t_is_removed[i]) + continue; + for (int j = 0; j < 4; j++) + v_ids.push_back(tets[i][j]); + } + std::sort(v_ids.begin(), v_ids.end()); + v_ids.erase(std::unique(v_ids.begin(), v_ids.end()), v_ids.end()); + std::unordered_map map_ids; + for (int i = 0; i < v_ids.size(); i++) + map_ids[v_ids[i]] = i; + + V_out.resize(v_ids.size(), 3); + T_out.resize(t_cnt, 4); + A_out.resize(t_cnt); + for (int i = 0; i < v_ids.size(); i++) { + for (int j = 0; j < 3; j++) { + V_out(i, + j) = tet_vertices[v_ids[i]].posf[j]; + } + } + int cnt = 0; + for (int i = 0; i < tets.size(); i++) { + if (t_is_removed[i]) { + continue; + } + for (int j = 0; j < 4; j++) { + T_out(cnt, j) = map_ids[tets[i][j]]; + } + A_out(cnt) = tet_qualities[i].min_d_angle; + cnt++; + } + logger().debug("#v = {}", V_out.rows()); + logger().debug("#t = {}", T_out.rows()); + + if (args.is_quiet) { + return; + } + printFinalQuality(tmp_time, tet_vertices, tets, t_is_removed, tet_qualities, v_ids, args, state); +} + +//////////////////////////////////////////////////////////////////////////////// + +// Simplify the input surface by swapping and removing edges, while staying within the envelope +double tetwild_stage_one_preprocess( + const Eigen::MatrixXd &VI, + const Eigen::MatrixXi &FI, + const Args &args, + State &state, + GEO::Mesh &geo_sf_mesh, + GEO::Mesh &geo_b_mesh, + std::vector &m_vertices, + std::vector> &m_faces) +{ + igl::Timer igl_timer; + igl_timer.start(); + logger().info("Preprocessing..."); + Preprocess pp(state); + if (!pp.init(VI, FI, geo_b_mesh, geo_sf_mesh, args)) { + //todo: output a empty tetmesh + PyMesh::MshSaver mSaver(state.working_dir + state.postfix + ".msh", true); + Eigen::VectorXd oV; + Eigen::VectorXi oT; + oV.resize(0); + oT.resize(0); + mSaver.save_mesh(oV, oT, 3, mSaver.TET); + log_and_throw("Empty mesh!"); + } + addRecord(MeshRecord(MeshRecord::OpType::OP_INIT, 0, geo_sf_mesh.vertices.nb(), geo_sf_mesh.facets.nb()), args, state); + + m_vertices.clear(); + m_faces.clear(); + pp.process(geo_sf_mesh, m_vertices, m_faces, args); + double tmp_time = igl_timer.getElapsedTime(); + addRecord(MeshRecord(MeshRecord::OpType::OP_PREPROCESSING, tmp_time, m_vertices.size(), m_faces.size()), args, state); + logger().info("time = {}s", tmp_time); + return tmp_time; +} + +// ----------------------------------------------------------------------------- + +// Compute an initial Delaunay triangulation of the input triangle soup +double tetwild_stage_one_delaunay( + const Args &args, + const State &state, + GEO::Mesh &geo_sf_mesh, + const std::vector &m_vertices, + const std::vector> &m_faces, + std::vector &bsp_vertices, + std::vector &bsp_edges, + std::vector &bsp_faces, + std::vector &bsp_nodes, + std::vector &m_f_tags, + std::vector &raw_e_tags, + std::vector> &raw_conn_e4v) +{ + igl::Timer igl_timer; + igl_timer.start(); + logger().info("Delaunay tetrahedralizing..."); + DelaunayTetrahedralization DT; + m_f_tags.clear(); + raw_e_tags.clear(); + raw_conn_e4v.clear(); + DT.init(m_vertices, m_faces, m_f_tags, raw_e_tags, raw_conn_e4v); + bsp_vertices.clear(); + 
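The output step in `extractFinalTetmesh` above boils down to dropping removed tets, gathering the vertices they still reference, and remapping indices to the compacted vertex list. A NumPy sketch of that compaction (an array-based stand-in, not the code in this patch):

```python
import numpy as np

def extract_final_tetmesh(V, T, t_is_removed, min_d_angles):
    keep = ~np.asarray(t_is_removed)
    T_kept = np.asarray(T)[keep]                       # drop removed tets
    v_ids, inv = np.unique(T_kept, return_inverse=True)
    T_out = inv.reshape(T_kept.shape)                  # indices into the compacted vertex list
    V_out = np.asarray(V)[v_ids]                       # compacted vertex positions
    A_out = np.asarray(min_d_angles)[keep]             # per-tet minimum dihedral angle
    return V_out, T_out, A_out
```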
bsp_edges.clear(); + bsp_faces.clear(); + bsp_nodes.clear(); + DT.tetra(m_vertices, geo_sf_mesh, bsp_vertices, bsp_edges, bsp_faces, bsp_nodes, args, state); + logger().debug("# bsp_vertices = {}", bsp_vertices.size()); + logger().debug("# bsp_edges = {}", bsp_edges.size()); + logger().debug("# bsp_faces = {}", bsp_faces.size()); + logger().debug("# bsp_nodes = {}", bsp_nodes.size()); + logger().info("Delaunay tetrahedralization done!"); + double tmp_time = igl_timer.getElapsedTime(); + addRecord(MeshRecord(MeshRecord::OpType::OP_DELAUNEY_TETRA, tmp_time, bsp_vertices.size(), bsp_nodes.size()), args, state); + logger().info("time = {}s", tmp_time); + return tmp_time; +} + +// ----------------------------------------------------------------------------- + +// Match faces of the Delaunay tetrahedralization with faces from the input mesh +double tetwild_stage_one_mc( + const Args &args, + const State &state, + MeshConformer &MC) +{ + igl::Timer igl_timer; + igl_timer.start(); + logger().info("Divfaces matching..."); + MC.match(); + logger().info("Divfaces matching done!"); + double tmp_time = igl_timer.getElapsedTime(); + addRecord(MeshRecord(MeshRecord::OpType::OP_DIVFACE_MATCH, tmp_time, MC.bsp_vertices.size(), MC.bsp_nodes.size()), args, state); + logger().info("time = {}s", tmp_time); + return tmp_time; +} + +// ----------------------------------------------------------------------------- + +// Compute BSP partition of the domain +double tetwild_stage_one_bsp( + const Args &args, + const State &state, + MeshConformer &MC) +{ + igl::Timer igl_timer; + igl_timer.start(); + logger().info("BSP subdivision ..."); + BSPSubdivision BS(MC); + BS.init(); + BS.subdivideBSPNodes(); + logger().debug("Output: "); + logger().debug("# node = {}", MC.bsp_nodes.size()); + logger().debug("# face = {}", MC.bsp_faces.size()); + logger().debug("# edge = {}", MC.bsp_edges.size()); + logger().debug("# vertex = {}", MC.bsp_vertices.size()); + logger().info("BSP subdivision done!"); + double tmp_time = igl_timer.getElapsedTime(); + addRecord(MeshRecord(MeshRecord::OpType::OP_BSP, tmp_time, MC.bsp_vertices.size(), MC.bsp_nodes.size()), args, state); + logger().info("time = {}s", tmp_time); + return tmp_time; +} + +// ----------------------------------------------------------------------------- + +// Compute an initial tetrahedral mesh from the BSP partition +double tetwild_stage_one_tetra( + const Args &args, + const State &state, + MeshConformer &MC, + const std::vector &m_f_tags, + const std::vector &raw_e_tags, + const std::vector> &raw_conn_e4v, + std::vector &tet_vertices, + std::vector> &tet_indices, + std::vector> &is_surface_facet) +{ + igl::Timer igl_timer; + igl_timer.start(); + logger().info("Tetrehedralizing ..."); + SimpleTetrahedralization ST(state, MC); + tet_vertices.clear(); + tet_indices.clear(); + is_surface_facet.clear(); + ST.tetra(tet_vertices, tet_indices); + ST.labelSurface(m_f_tags, raw_e_tags, raw_conn_e4v, tet_vertices, tet_indices, is_surface_facet); + ST.labelBbox(tet_vertices, tet_indices); + if (!state.is_mesh_closed)//if input is an open mesh + ST.labelBoundary(tet_vertices, tet_indices, is_surface_facet); + logger().debug("# tet_vertices = {}", tet_vertices.size()); + logger().debug("# tets = {}", tet_indices.size()); + logger().info("Tetrahedralization done!"); + double tmp_time = igl_timer.getElapsedTime(); + addRecord(MeshRecord(MeshRecord::OpType::OP_SIMPLE_TETRA, tmp_time, tet_vertices.size(), tet_indices.size()), args, state); + logger().info("time = {}s", tmp_time); + return 
tmp_time; +} + +//////////////////////////////////////////////////////////////////////////////// + +void tetwild_stage_one( + const Eigen::MatrixXd &VI, + const Eigen::MatrixXi &FI, + const Args &args, + State &state, + GEO::Mesh &geo_sf_mesh, + GEO::Mesh &geo_b_mesh, + std::vector &tet_vertices, + std::vector> &tet_indices, + std::vector> &is_surface_facet) +{ + igl::Timer igl_timer; + double tmp_time = 0; + double sum_time = 0; + + //preprocess + std::vector m_vertices; + std::vector> m_faces; + sum_time += tetwild_stage_one_preprocess(VI, FI, args, state, geo_sf_mesh, geo_b_mesh, m_vertices, m_faces); + + //delaunay tetrahedralization + std::vector bsp_vertices; + std::vector bsp_edges; + std::vector bsp_faces; + std::vector bsp_nodes; + std::vector m_f_tags; + std::vector raw_e_tags; + std::vector> raw_conn_e4v; + sum_time += tetwild_stage_one_delaunay(args, state, geo_sf_mesh, m_vertices, m_faces, + bsp_vertices, bsp_edges, bsp_faces, bsp_nodes, m_f_tags, raw_e_tags, raw_conn_e4v); + + //mesh conforming + MeshConformer MC(m_vertices, m_faces, bsp_vertices, bsp_edges, bsp_faces, bsp_nodes); + sum_time += tetwild_stage_one_mc(args, state, MC); + + //bsp subdivision + sum_time += tetwild_stage_one_bsp(args, state, MC); + + //simple tetrahedralization + sum_time += tetwild_stage_one_tetra(args, state, MC, m_f_tags, raw_e_tags, raw_conn_e4v, + tet_vertices, tet_indices, is_surface_facet); + + logger().info("Total time for the first stage = {}s", sum_time); +} + +// ----------------------------------------------------------------------------- + +void tetwild_stage_two(const Args &args, State &state, + GEO::Mesh &geo_sf_mesh, + GEO::Mesh &geo_b_mesh, + std::vector &tet_vertices, + std::vector> &tet_indices, + std::vector> &is_surface_facet, + Eigen::MatrixXd &VO, + Eigen::MatrixXi &TO, + Eigen::VectorXd &AO) +{ + //init + logger().info("Refinement initializing..."); + MeshRefinement MR(geo_sf_mesh, geo_b_mesh, args, state); + MR.tet_vertices = std::move(tet_vertices); + MR.tets = std::move(tet_indices); + MR.is_surface_fs = std::move(is_surface_facet); + MR.prepareData(); + logger().info("Refinement initialization done!"); + + //improvement + MR.refine(state.ENERGY_AMIPS); + + extractFinalTetmesh(MR, VO, TO, AO, args, state); //do winding number and output the tetmesh +} + +//////////////////////////////////////////////////////////////////////////////// + +void tetrahedralization(const Eigen::MatrixXd &VI, const Eigen::MatrixXi &FI, + Eigen::MatrixXd &VO, Eigen::MatrixXi &TO, Eigen::VectorXd &AO, + const Args &args) +{ + GEO::initialize(); + + igl::Timer igl_timer; + igl_timer.start(); + + ////pipeline + State state(args, VI); + GEO::Mesh geo_sf_mesh; + GEO::Mesh geo_b_mesh; + std::vector tet_vertices; + std::vector> tet_indices; + std::vector> is_surface_facet; + + /// STAGE 1 + tetwild_stage_one(VI, FI, args, state, geo_sf_mesh, geo_b_mesh, + tet_vertices, tet_indices, is_surface_facet); + + /// STAGE 2 + tetwild_stage_two(args, state, geo_sf_mesh, geo_b_mesh, + tet_vertices, tet_indices, is_surface_facet, VO, TO, AO); + + double total_time = igl_timer.getElapsedTime(); + logger().info("Total time for all stages = {}s", total_time); +} + +} // namespace tetwild diff --git a/contrib/NeRF-Editing/data_process/colmap_read_model.py b/contrib/NeRF-Editing/data_process/colmap_read_model.py new file mode 100644 index 00000000..8331660a --- /dev/null +++ b/contrib/NeRF-Editing/data_process/colmap_read_model.py @@ -0,0 +1,312 @@ +# Copyright (c) 2018, ETH Zurich and UNC Chapel Hill. 
+# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# * Neither the name of ETH Zurich and UNC Chapel Hill nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# Author: Johannes L. Schoenberger (jsch at inf.ethz.ch) + +import os +import sys +import collections +import numpy as np +import struct + + +CameraModel = collections.namedtuple( + "CameraModel", ["model_id", "model_name", "num_params"]) +Camera = collections.namedtuple( + "Camera", ["id", "model", "width", "height", "params"]) +BaseImage = collections.namedtuple( + "Image", ["id", "qvec", "tvec", "camera_id", "name", "xys", "point3D_ids"]) +Point3D = collections.namedtuple( + "Point3D", ["id", "xyz", "rgb", "error", "image_ids", "point2D_idxs"]) + +class Image(BaseImage): + def qvec2rotmat(self): + return qvec2rotmat(self.qvec) + + +CAMERA_MODELS = { + CameraModel(model_id=0, model_name="SIMPLE_PINHOLE", num_params=3), + CameraModel(model_id=1, model_name="PINHOLE", num_params=4), + CameraModel(model_id=2, model_name="SIMPLE_RADIAL", num_params=4), + CameraModel(model_id=3, model_name="RADIAL", num_params=5), + CameraModel(model_id=4, model_name="OPENCV", num_params=8), + CameraModel(model_id=5, model_name="OPENCV_FISHEYE", num_params=8), + CameraModel(model_id=6, model_name="FULL_OPENCV", num_params=12), + CameraModel(model_id=7, model_name="FOV", num_params=5), + CameraModel(model_id=8, model_name="SIMPLE_RADIAL_FISHEYE", num_params=4), + CameraModel(model_id=9, model_name="RADIAL_FISHEYE", num_params=5), + CameraModel(model_id=10, model_name="THIN_PRISM_FISHEYE", num_params=12) +} +CAMERA_MODEL_IDS = dict([(camera_model.model_id, camera_model) \ + for camera_model in CAMERA_MODELS]) + + +def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"): + """Read and unpack the next bytes from a binary file. + :param fid: + :param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, 30, etc. + :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}. + :param endian_character: Any of {@, =, <, >, !} + :return: Tuple of read and unpacked values. 
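As a quick self-contained illustration of `read_next_bytes` (assuming only the `struct` semantics documented here), pack and re-read a 24-byte little-endian record with two int32 and two uint64 fields, the layout the binary camera reader below uses:

```python
import io
import struct

def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"):
    data = fid.read(num_bytes)
    return struct.unpack(endian_character + format_char_sequence, data)

buf = io.BytesIO(struct.pack("<iiQQ", 1, 2, 1920, 1080))
camera_id, model_id, width, height = read_next_bytes(buf, 24, "iiQQ")
print(camera_id, model_id, width, height)   # -> 1 2 1920 1080
```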
+ """ + data = fid.read(num_bytes) + return struct.unpack(endian_character + format_char_sequence, data) + + +def read_cameras_text(path): + """ + see: src/base/reconstruction.cc + void Reconstruction::WriteCamerasText(const std::string& path) + void Reconstruction::ReadCamerasText(const std::string& path) + """ + cameras = {} + with open(path, "r") as fid: + while True: + line = fid.readline() + if not line: + break + line = line.strip() + if len(line) > 0 and line[0] != "#": + elems = line.split() + camera_id = int(elems[0]) + model = elems[1] + width = int(elems[2]) + height = int(elems[3]) + params = np.array(tuple(map(float, elems[4:]))) + cameras[camera_id] = Camera(id=camera_id, model=model, + width=width, height=height, + params=params) + return cameras + + +def read_cameras_binary(path_to_model_file): + """ + see: src/base/reconstruction.cc + void Reconstruction::WriteCamerasBinary(const std::string& path) + void Reconstruction::ReadCamerasBinary(const std::string& path) + """ + cameras = {} + with open(path_to_model_file, "rb") as fid: + num_cameras = read_next_bytes(fid, 8, "Q")[0] + for camera_line_index in range(num_cameras): + camera_properties = read_next_bytes( + fid, num_bytes=24, format_char_sequence="iiQQ") + camera_id = camera_properties[0] + model_id = camera_properties[1] + model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name + width = camera_properties[2] + height = camera_properties[3] + num_params = CAMERA_MODEL_IDS[model_id].num_params + params = read_next_bytes(fid, num_bytes=8*num_params, + format_char_sequence="d"*num_params) + cameras[camera_id] = Camera(id=camera_id, + model=model_name, + width=width, + height=height, + params=np.array(params)) + assert len(cameras) == num_cameras + return cameras + + +def read_images_text(path): + """ + see: src/base/reconstruction.cc + void Reconstruction::ReadImagesText(const std::string& path) + void Reconstruction::WriteImagesText(const std::string& path) + """ + images = {} + with open(path, "r") as fid: + while True: + line = fid.readline() + if not line: + break + line = line.strip() + if len(line) > 0 and line[0] != "#": + elems = line.split() + image_id = int(elems[0]) + qvec = np.array(tuple(map(float, elems[1:5]))) + tvec = np.array(tuple(map(float, elems[5:8]))) + camera_id = int(elems[8]) + image_name = elems[9] + elems = fid.readline().split() + xys = np.column_stack([tuple(map(float, elems[0::3])), + tuple(map(float, elems[1::3]))]) + point3D_ids = np.array(tuple(map(int, elems[2::3]))) + images[image_id] = Image( + id=image_id, qvec=qvec, tvec=tvec, + camera_id=camera_id, name=image_name, + xys=xys, point3D_ids=point3D_ids) + return images + + +def read_images_binary(path_to_model_file): + """ + see: src/base/reconstruction.cc + void Reconstruction::ReadImagesBinary(const std::string& path) + void Reconstruction::WriteImagesBinary(const std::string& path) + """ + images = {} + with open(path_to_model_file, "rb") as fid: + num_reg_images = read_next_bytes(fid, 8, "Q")[0] + for image_index in range(num_reg_images): + binary_image_properties = read_next_bytes( + fid, num_bytes=64, format_char_sequence="idddddddi") + image_id = binary_image_properties[0] + qvec = np.array(binary_image_properties[1:5]) + tvec = np.array(binary_image_properties[5:8]) + camera_id = binary_image_properties[8] + image_name = "" + current_char = read_next_bytes(fid, 1, "c")[0] + while current_char != b"\x00": # look for the ASCII 0 entry + image_name += current_char.decode("utf-8") + current_char = read_next_bytes(fid, 1, 
"c")[0] + num_points2D = read_next_bytes(fid, num_bytes=8, + format_char_sequence="Q")[0] + x_y_id_s = read_next_bytes(fid, num_bytes=24*num_points2D, + format_char_sequence="ddq"*num_points2D) + xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])), + tuple(map(float, x_y_id_s[1::3]))]) + point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3]))) + images[image_id] = Image( + id=image_id, qvec=qvec, tvec=tvec, + camera_id=camera_id, name=image_name, + xys=xys, point3D_ids=point3D_ids) + return images + + +def read_points3D_text(path): + """ + see: src/base/reconstruction.cc + void Reconstruction::ReadPoints3DText(const std::string& path) + void Reconstruction::WritePoints3DText(const std::string& path) + """ + points3D = {} + with open(path, "r") as fid: + while True: + line = fid.readline() + if not line: + break + line = line.strip() + if len(line) > 0 and line[0] != "#": + elems = line.split() + point3D_id = int(elems[0]) + xyz = np.array(tuple(map(float, elems[1:4]))) + rgb = np.array(tuple(map(int, elems[4:7]))) + error = float(elems[7]) + image_ids = np.array(tuple(map(int, elems[8::2]))) + point2D_idxs = np.array(tuple(map(int, elems[9::2]))) + points3D[point3D_id] = Point3D(id=point3D_id, xyz=xyz, rgb=rgb, + error=error, image_ids=image_ids, + point2D_idxs=point2D_idxs) + return points3D + + +def read_points3d_binary(path_to_model_file): + """ + see: src/base/reconstruction.cc + void Reconstruction::ReadPoints3DBinary(const std::string& path) + void Reconstruction::WritePoints3DBinary(const std::string& path) + """ + points3D = {} + with open(path_to_model_file, "rb") as fid: + num_points = read_next_bytes(fid, 8, "Q")[0] + for point_line_index in range(num_points): + binary_point_line_properties = read_next_bytes( + fid, num_bytes=43, format_char_sequence="QdddBBBd") + point3D_id = binary_point_line_properties[0] + xyz = np.array(binary_point_line_properties[1:4]) + rgb = np.array(binary_point_line_properties[4:7]) + error = np.array(binary_point_line_properties[7]) + track_length = read_next_bytes( + fid, num_bytes=8, format_char_sequence="Q")[0] + track_elems = read_next_bytes( + fid, num_bytes=8*track_length, + format_char_sequence="ii"*track_length) + image_ids = np.array(tuple(map(int, track_elems[0::2]))) + point2D_idxs = np.array(tuple(map(int, track_elems[1::2]))) + points3D[point3D_id] = Point3D( + id=point3D_id, xyz=xyz, rgb=rgb, + error=error, image_ids=image_ids, + point2D_idxs=point2D_idxs) + return points3D + + +def read_model(path, ext): + if ext == ".txt": + cameras = read_cameras_text(os.path.join(path, "cameras" + ext)) + images = read_images_text(os.path.join(path, "images" + ext)) + points3D = read_points3D_text(os.path.join(path, "points3D") + ext) + else: + cameras = read_cameras_binary(os.path.join(path, "cameras" + ext)) + images = read_images_binary(os.path.join(path, "images" + ext)) + points3D = read_points3d_binary(os.path.join(path, "points3D") + ext) + return cameras, images, points3D + + +def qvec2rotmat(qvec): + return np.array([ + [1 - 2 * qvec[2]**2 - 2 * qvec[3]**2, + 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3], + 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2]], + [2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3], + 1 - 2 * qvec[1]**2 - 2 * qvec[3]**2, + 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1]], + [2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2], + 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1], + 1 - 2 * qvec[1]**2 - 2 * qvec[2]**2]]) + + +def rotmat2qvec(R): + Rxx, Ryx, Rzx, Rxy, Ryy, Rzy, Rxz, Ryz, Rzz = R.flat + K = np.array([ + [Rxx - Ryy 
- Rzz, 0, 0, 0], + [Ryx + Rxy, Ryy - Rxx - Rzz, 0, 0], + [Rzx + Rxz, Rzy + Ryz, Rzz - Rxx - Ryy, 0], + [Ryz - Rzy, Rzx - Rxz, Rxy - Ryx, Rxx + Ryy + Rzz]]) / 3.0 + eigvals, eigvecs = np.linalg.eigh(K) + qvec = eigvecs[[3, 0, 1, 2], np.argmax(eigvals)] + if qvec[0] < 0: + qvec *= -1 + return qvec + + +def main(): + if len(sys.argv) != 3: + print("Usage: python read_model.py path/to/model/folder [.txt,.bin]") + return + + cameras, images, points3D = read_model(path=sys.argv[1], ext=sys.argv[2]) + + print("num_cameras:", len(cameras)) + print("num_images:", len(images)) + print("num_points3D:", len(points3D)) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/contrib/NeRF-Editing/data_process/process_colmap.py b/contrib/NeRF-Editing/data_process/process_colmap.py new file mode 100644 index 00000000..ccfa3b56 --- /dev/null +++ b/contrib/NeRF-Editing/data_process/process_colmap.py @@ -0,0 +1,79 @@ +import glob, os, sys +from natsort import natsorted +import numpy as np +from PIL import Image + +def gen_data_from_colmap(img_dir, save_dir): + import colmap_read_model as read_model + + # RGB, pose, intrinsic + img_list = glob.glob(os.path.join(img_dir, 'images', '*.png')) + img_list = natsorted(img_list) + if not os.path.exists(os.path.join(save_dir, 'rgb')): + os.makedirs(os.path.join(save_dir, 'rgb'), exist_ok=True) + if not os.path.exists(os.path.join(save_dir, 'pose')): + os.makedirs(os.path.join(save_dir, 'pose'), exist_ok=True) + + imagesfile = os.path.join(img_dir, 'sparse/0/images.bin') + imdata = read_model.read_images_binary(imagesfile) + names = [imdata[k].name for k in imdata] + ids = [imdata[k].id for k in imdata] + + # real_w, real_h = 480, 360 + real_w, real_h = 540, 960 + + test_id = [] + + for i in range(len(img_list)): + img_file = img_list[i] + basename = os.path.basename(img_file) + if basename not in names: + continue + + rgb_img = Image.open(img_file).convert('RGB') + rgb_img = rgb_img.resize((real_w, real_h)) + + if i in test_id: + rgb_img.save(os.path.join(save_dir, 'rgb', '1_test_%04d.png'%(i))) + else: + rgb_img.save(os.path.join(save_dir, 'rgb', '0_train_%04d.png'%(i))) + + camerasfile = os.path.join(img_dir, 'sparse/0/cameras.bin') + camdata = read_model.read_cameras_binary(camerasfile) + + list_of_keys = list(camdata.keys()) + cam = camdata[list_of_keys[0]] + print( 'Cameras', len(cam)) + + h, w, f = cam.height, cam.width, cam.params[0] + + K_savefile = open(os.path.join(save_dir, 'intrinsics.txt'), 'w') + K_savefile.write('%f %f %f 0.\n' % (f/4, cam.params[1]/4, cam.params[2]/4)) + K_savefile.write('0. 0. 
0.\n0.\n1.\n') + K_savefile.write('%d %d\n' % (real_h, real_w)) + K_savefile.close() + + bottom = np.array([0,0,0,1.]).reshape([1,4]) + + print(names) + print( 'Images #', len(names)) + perm = np.argsort(names) + count = 0 + for k in perm: + im = imdata[ids[k]] + R = im.qvec2rotmat() + t = im.tvec.reshape([3,1]) + m = np.concatenate([np.concatenate([R, t], 1), bottom], 0) + pose = np.linalg.inv(m) + + if count in test_id: + pose_savefile = os.path.join(save_dir, 'pose', '1_test_%04d.txt'%(count)) + else: + pose_savefile = os.path.join(save_dir, 'pose', '0_train_%04d.txt'%(count)) + np.savetxt(pose_savefile, pose) + count = count + 1 + +if __name__ == "__main__": + img_dir = sys.argv[1] + save_dir = sys.argv[2] + gen_data_from_colmap(img_dir, save_dir) diff --git a/contrib/NeRF-Editing/img/teaser.gif b/contrib/NeRF-Editing/img/teaser.gif new file mode 100644 index 00000000..3dcde45d Binary files /dev/null and b/contrib/NeRF-Editing/img/teaser.gif differ diff --git a/contrib/NeRF-Editing/src/barycentric_control_pts_jittor.py b/contrib/NeRF-Editing/src/barycentric_control_pts_jittor.py new file mode 100644 index 00000000..9a15a228 --- /dev/null +++ b/contrib/NeRF-Editing/src/barycentric_control_pts_jittor.py @@ -0,0 +1,107 @@ +"""input: mesh file, tetrahedral txt file and deformed mesh file. + Calculate the tet ID of each vertices and its barycentric coordinate. + output: the control points for each vertices. +""" +import trimesh +import jittor as jt +from utils import TetMesh, readTXT +import numpy as np +import glob + +def main(mesh_path, tet_path, deformed_path, check_output=False): + mesh = trimesh.load_mesh(mesh_path, process=False, maintain_order=True) + mesh_verts = jt.array(np.asarray(mesh.vertices)).float() + tet_verts, tet_idx = readTXT(tet_path) + + tet_mesh = TetMesh(tet_verts, tet_idx) + + tet_ids, barys = batchify(mesh_verts, tet_mesh) + + save_path = deformed_path.replace(".obj", "_barycentric_control_simple3.txt") + deformed_verts = trimesh.load_mesh(deformed_path, process=False, maintain_order=True).vertices + deformed_verts = jt.array(np.asarray(deformed_verts)).float() + + ### add check output module + if check_output: + tet_ids = jt.stack(tet_ids, dim=0) + barys = jt.stack(barys, dim=0) # [N,4,1] + assert len(mesh_verts) == len(tet_ids) + values = tet_mesh.verts[tet_ids] # [N,4,3] + verts_new = (values * barys).sum(dim=1) + print(verts_new == mesh_verts) + import ipdb; ipdb.set_trace() + try: + saveControlPts(tet_ids, barys, deformed_verts, save_path) + + except: + import ipdb; ipdb.set_trace() + +def main_seq(mesh_path, tet_path, deformed_paths, check_output=False): + mesh = trimesh.load_mesh(mesh_path, process=False, maintain_order=True) + mesh_verts = jt.array(np.asarray(mesh.vertices)).float() + tet_verts, tet_idx = readTXT(tet_path) + + tet_mesh = TetMesh(tet_verts, tet_idx) + + tet_ids, barys = batchify(mesh_verts, tet_mesh, chunk=100) + + save_path = deformed_paths[0].replace(".obj", "_barycentric_control.txt") + def f(x): + out = trimesh.load_mesh(x, process=False, maintain_order=True).vertices + out = jt.array(np.asarray(out)).float() + return out + deformed_verts = list(map(f, deformed_paths)) + + saveControlPtsSeq(tet_ids, barys, deformed_verts, save_path) + +def batchify(verts, tet_mesh, chunk=100): + """find verts in which tets in batch. 
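    Vertices are looked up with tet_mesh.findTet in chunks of `chunk` (presumably to
    keep memory bounded); for each vertex this yields the four vertex indices of its
    enclosing tetrahedron plus the barycentric coordinates inside it, which the
    saveControlPts helpers write out together with the deformed control-point positions.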
+ """ + tet_ids, barys = [], [] + for i in range(0, verts.shape[0], chunk): + print("quering one chunk ...") + tet_id, barycentric = tet_mesh.findTet(verts[i:i+chunk]) + tet_ids += [tet_mesh.tets[x] for x in tet_id] + barys += [x for x in barycentric] + return tet_ids, barys # list: [[4,]...], [[4,1]...] + + +def saveControlPts(tet_ids, barys, deformed_verts, control_txt_path) -> None: + with open(control_txt_path, 'w') as cf: + cf.write('1\n') + cf.write('%d\n' % len(deformed_verts)) + print("saving %d control points coordinate" % (len(deformed_verts))) + for vert in deformed_verts: + cf.write('%f %f %f\n' % (vert[0], vert[1], vert[2])) + cf.write('%d\n' % len(tet_ids)) + print("saving %d tet verts idx and barycentric coordinate" % (len(tet_ids))) + for tet_id, bary in zip(tet_ids, barys): + cf.write("%d %d %d %d\n" % (tet_id[0], tet_id[1], tet_id[2], tet_id[3])) + cf.write("%f %f %f %f\n" % (bary[0], bary[1], bary[2], bary[3])) + print("write control txt to %s" % control_txt_path) + +def saveControlPtsSeq(tet_ids, barys, deformed_verts, control_txt_path) -> None: + with open(control_txt_path, 'w') as cf: + cf.write('%d\n' % (len(deformed_verts))) # number of seqence + print("saving %d sequence" % (len(deformed_verts))) + for idx, dv in enumerate(deformed_verts): + cf.write('%d\n' % len(dv)) + print("saving %d item, with %d control points coordinate" % (idx, len(dv))) + for vert in dv: + cf.write('%f %f %f\n' % (vert[0], vert[1], vert[2])) + cf.write('%d\n' % len(tet_ids)) + print("saving %d tet verts idx and barycentric coordinate" % (len(tet_ids))) + for tet_id, bary in zip(tet_ids, barys): + tet_id = tet_id[0] + cf.write("%d %d %d %d\n" % (tet_id[0], tet_id[1], tet_id[2], tet_id[3])) + cf.write("%f %f %f %f\n" % (bary[0], bary[1], bary[2], bary[3])) + print("write control txt to %s" % control_txt_path) + + +if __name__ == "__main__": + mesh_path = "./logs/hbychair_wo_mask/mesh_nofloor_simp.obj" + tet_path = "./logs/hbychair_wo_mask/mesh_cage_nofloor_.txt" + deformed_dir = "./logs/hbychair_wo_mask/mesh_seq/*.obj" + + deformed_paths = sorted(glob.glob(deformed_dir))[-1:] + main_seq(mesh_path, tet_path, deformed_paths) \ No newline at end of file diff --git a/contrib/NeRF-Editing/src/confs/wmask_lego.conf b/contrib/NeRF-Editing/src/confs/wmask_lego.conf new file mode 100644 index 00000000..e22228d0 --- /dev/null +++ b/contrib/NeRF-Editing/src/confs/wmask_lego.conf @@ -0,0 +1,83 @@ +general { + base_exp_dir = ./logs/lego_w_mask + recording = [ + ./, + ./models + ] +} + +dataset { + data_dir = ../data/nerf_synthetic/lego/ + type = nerf_synthetic +} + +train { + learning_rate = 5e-4 + learning_rate_alpha = 0.05 + end_iter = 300000 + + batch_size = 512 + validate_resolution_level = 4 + warm_up_end = 5000 + anneal_end = 50000 + use_white_bkgd = True + + save_freq = 10000 + val_freq = 500 + val_mesh_freq = 5000 + report_freq = 100 + + igr_weight = 0.1 + mask_weight = 0.1 +} + +model { + nerf { + D = 8, + d_in = 4, + d_in_view = 3, + W = 256, + multires = 10, + multires_view = 4, + output_ch = 4, + skips=[4], + use_viewdirs=True + } + + sdf_network { + d_out = 257 + d_in = 3 + d_hidden = 256 + n_layers = 8 + skip_in = [4] + multires = 6 + bias = 0.5 + scale = 1.0 + geometric_init = True + weight_norm = True + } + + variance_network { + init_val = 0.3 + } + + rendering_network { + d_feature = 256 + mode = idr + d_in = 9 + d_out = 3 + d_hidden = 256 + n_layers = 4 + weight_norm = True + multires_view = 4 + squeeze_out = True + } + + neus_renderer { + n_samples = 64 + n_importance = 64 + n_outside 
= 32 + up_sample_steps = 4 # 1 for simple coarse-to-fine sampling + perturb = 1.0 + } +} diff --git a/contrib/NeRF-Editing/src/confs/wmask_lego_render.conf b/contrib/NeRF-Editing/src/confs/wmask_lego_render.conf new file mode 100644 index 00000000..6a9c081c --- /dev/null +++ b/contrib/NeRF-Editing/src/confs/wmask_lego_render.conf @@ -0,0 +1,83 @@ +general { + base_exp_dir = ./logs/lego_w_mask + recording = [ + ./, + ./models + ] +} + +dataset { + data_dir = ../data/nerf_synthetic/lego/ + type = nerf_synthetic +} + +train { + learning_rate = 5e-4 + learning_rate_alpha = 0.05 + end_iter = 300000 + + batch_size = 512 + validate_resolution_level = 4 + warm_up_end = 5000 + anneal_end = 50000 + use_white_bkgd = True + + save_freq = 10000 + val_freq = 500 + val_mesh_freq = 5000 + report_freq = 100 + + igr_weight = 0.1 + mask_weight = 0.1 +} + +model { + nerf { + D = 8, + d_in = 4, + d_in_view = 3, + W = 256, + multires = 10, + multires_view = 4, + output_ch = 4, + skips=[4], + use_viewdirs=True + } + + sdf_network { + d_out = 257 + d_in = 3 + d_hidden = 256 + n_layers = 8 + skip_in = [4] + multires = 6 + bias = 0.5 + scale = 1.0 + geometric_init = True + weight_norm = True + } + + variance_network { + init_val = 0.3 + } + + rendering_network { + d_feature = 256 + mode = idr + d_in = 9 + d_out = 3 + d_hidden = 256 + n_layers = 4 + weight_norm = True + multires_view = 4 + squeeze_out = True + } + + neus_renderer { + n_samples = 64 + n_importance = 64 + n_outside = 0 + up_sample_steps = 4 # 1 for simple coarse-to-fine sampling + perturb = 1.0 + } +} diff --git a/contrib/NeRF-Editing/src/confs/wmask_mic.conf b/contrib/NeRF-Editing/src/confs/wmask_mic.conf new file mode 100644 index 00000000..fd5e0c03 --- /dev/null +++ b/contrib/NeRF-Editing/src/confs/wmask_mic.conf @@ -0,0 +1,83 @@ +general { + base_exp_dir = ./logs/mic_w_mask + recording = [ + ./, + ./models + ] +} + +dataset { + data_dir = ../data/nerf_synthetic/mic/ + type = nerf_synthetic +} + +train { + learning_rate = 5e-4 + learning_rate_alpha = 0.05 + end_iter = 300000 + + batch_size = 512 + validate_resolution_level = 4 + warm_up_end = 5000 + anneal_end = 50000 + use_white_bkgd = True + + save_freq = 10000 + val_freq = 500 + val_mesh_freq = 5000 + report_freq = 100 + + igr_weight = 0.1 + mask_weight = 0.1 +} + +model { + nerf { + D = 8, + d_in = 4, + d_in_view = 3, + W = 256, + multires = 10, + multires_view = 4, + output_ch = 4, + skips=[4], + use_viewdirs=True + } + + sdf_network { + d_out = 257 + d_in = 3 + d_hidden = 256 + n_layers = 8 + skip_in = [4] + multires = 6 + bias = 0.5 + scale = 1.0 + geometric_init = True + weight_norm = True + } + + variance_network { + init_val = 0.3 + } + + rendering_network { + d_feature = 256 + mode = idr + d_in = 9 + d_out = 3 + d_hidden = 256 + n_layers = 4 + weight_norm = True + multires_view = 4 + squeeze_out = True + } + + neus_renderer { + n_samples = 64 + n_importance = 64 + n_outside = 32 + up_sample_steps = 4 # 1 for simple coarse-to-fine sampling + perturb = 1.0 + } +} diff --git a/contrib/NeRF-Editing/src/confs/wmask_mic_render.conf b/contrib/NeRF-Editing/src/confs/wmask_mic_render.conf new file mode 100644 index 00000000..c4857022 --- /dev/null +++ b/contrib/NeRF-Editing/src/confs/wmask_mic_render.conf @@ -0,0 +1,83 @@ +general { + base_exp_dir = ./logs/mic_w_mask + recording = [ + ./, + ./models + ] +} + +dataset { + data_dir = ../data/nerf_synthetic/mic/ + type = nerf_synthetic +} + +train { + learning_rate = 5e-4 + learning_rate_alpha = 0.05 + end_iter = 300000 + + batch_size = 512 + 
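    # batch_size is the number of rays sampled per optimisation step; note that the
    # circle-rendering mode in exp_runner.py overrides it at runtime (batch_size = 300).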
validate_resolution_level = 4 + warm_up_end = 5000 + anneal_end = 50000 + use_white_bkgd = True + + save_freq = 10000 + val_freq = 500 + val_mesh_freq = 5000 + report_freq = 100 + + igr_weight = 0.1 + mask_weight = 0.1 +} + +model { + nerf { + D = 8, + d_in = 4, + d_in_view = 3, + W = 256, + multires = 10, + multires_view = 4, + output_ch = 4, + skips=[4], + use_viewdirs=True + } + + sdf_network { + d_out = 257 + d_in = 3 + d_hidden = 256 + n_layers = 8 + skip_in = [4] + multires = 6 + bias = 0.5 + scale = 1.0 + geometric_init = True + weight_norm = True + } + + variance_network { + init_val = 0.3 + } + + rendering_network { + d_feature = 256 + mode = idr + d_in = 9 + d_out = 3 + d_hidden = 256 + n_layers = 4 + weight_norm = True + multires_view = 4 + squeeze_out = True + } + + neus_renderer { + n_samples = 64 + n_importance = 64 + n_outside = 0 + up_sample_steps = 4 # 1 for simple coarse-to-fine sampling + perturb = 1.0 + } +} diff --git a/contrib/NeRF-Editing/src/confs/womask_hbychair.conf b/contrib/NeRF-Editing/src/confs/womask_hbychair.conf new file mode 100644 index 00000000..52ea062b --- /dev/null +++ b/contrib/NeRF-Editing/src/confs/womask_hbychair.conf @@ -0,0 +1,83 @@ +general { + base_exp_dir = ./logs/hbychair_wo_mask + recording = [ + ./, + ./models + ] +} + +dataset { + data_dir = ../data/hbychair_mask/ + type = custom +} + +train { + learning_rate = 5e-4 + learning_rate_alpha = 0.05 + end_iter = 300000 + + batch_size = 512 + validate_resolution_level = 4 + warm_up_end = 5000 + anneal_end = 50000 + use_white_bkgd = True + + save_freq = 10000 + val_freq = 500 + val_mesh_freq = 5000 + report_freq = 100 + + igr_weight = 0.1 + mask_weight = 0.0 +} + +model { + nerf { + D = 8, + d_in = 4, + d_in_view = 3, + W = 256, + multires = 10, + multires_view = 4, + output_ch = 4, + skips=[4], + use_viewdirs=True + } + + sdf_network { + d_out = 257 + d_in = 3 + d_hidden = 256 + n_layers = 8 + skip_in = [4] + multires = 6 + bias = 0.5 + scale = 1.0 + geometric_init = True + weight_norm = True + } + + variance_network { + init_val = 0.3 + } + + rendering_network { + d_feature = 256 + mode = idr + d_in = 9 + d_out = 3 + d_hidden = 256 + n_layers = 4 + weight_norm = True + multires_view = 4 + squeeze_out = True + } + + neus_renderer { + n_samples = 64 + n_importance = 64 + n_outside = 32 + up_sample_steps = 4 # 1 for simple coarse-to-fine sampling + perturb = 1.0 + } +} diff --git a/contrib/NeRF-Editing/src/confs/womask_hbychair_render.conf b/contrib/NeRF-Editing/src/confs/womask_hbychair_render.conf new file mode 100644 index 00000000..6330eb3c --- /dev/null +++ b/contrib/NeRF-Editing/src/confs/womask_hbychair_render.conf @@ -0,0 +1,83 @@ +general { + base_exp_dir = ./logs/hbychair_wo_mask + recording = [ + ./, + ./models + ] +} + +dataset { + data_dir = ../data/hbychair_mask/ + type = custom +} + +train { + learning_rate = 5e-4 + learning_rate_alpha = 0.05 + end_iter = 300000 + + batch_size = 256 + validate_resolution_level = 4 + warm_up_end = 5000 + anneal_end = 50000 + use_white_bkgd = True + + save_freq = 10000 + val_freq = 500 + val_mesh_freq = 5000 + report_freq = 100 + + igr_weight = 0.1 + mask_weight = 0.0 +} + +model { + nerf { + D = 8, + d_in = 4, + d_in_view = 3, + W = 256, + multires = 10, + multires_view = 4, + output_ch = 4, + skips=[4], + use_viewdirs=True + } + + sdf_network { + d_out = 257 + d_in = 3 + d_hidden = 256 + n_layers = 8 + skip_in = [4] + multires = 6 + bias = 0.5 + scale = 1.0 + geometric_init = True + weight_norm = True + } + + variance_network { + init_val = 0.3 + } 
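    # init_val above seeds the single learnable deviation parameter; SingleVarianceNetwork
    # in models/fields.py exposes it to the renderer as exp(10 * variance).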
+ + rendering_network { + d_feature = 256 + mode = idr + d_in = 9 + d_out = 3 + d_hidden = 256 + n_layers = 4 + weight_norm = True + multires_view = 4 + squeeze_out = True + } + + neus_renderer { + n_samples = 64 + n_importance = 64 + n_outside = 0 + up_sample_steps = 4 # 1 for simple coarse-to-fine sampling + perturb = 1.0 + } +} diff --git a/contrib/NeRF-Editing/src/exp_runner.py b/contrib/NeRF-Editing/src/exp_runner.py new file mode 100644 index 00000000..9dd415f1 --- /dev/null +++ b/contrib/NeRF-Editing/src/exp_runner.py @@ -0,0 +1,662 @@ +import os +import time +import logging +import argparse +import numpy as np +import cv2 as cv +import trimesh +import jittor as jt +from shutil import copyfile +from tqdm import tqdm +from pyhocon import ConfigFactory +from models.dataset import Dataset +from models.fields import RenderingNetwork, SDFNetwork, SingleVarianceNetwork, NeRF +from models.renderer import NeuSRenderer +from tensorboardX import SummaryWriter + + +class Runner: + def __init__(self, conf_path, mode='train', case='CASE_NAME', is_continue=False): + + # Configuration + self.conf_path = conf_path + f = open(self.conf_path) + conf_text = f.read() + conf_text = conf_text.replace('CASE_NAME', case) + f.close() + + self.conf = ConfigFactory.parse_string(conf_text) + self.conf['dataset.data_dir'] = self.conf['dataset.data_dir'].replace('CASE_NAME', case) + self.base_exp_dir = self.conf['general.base_exp_dir'] + os.makedirs(self.base_exp_dir, exist_ok=True) + self.dataset = Dataset(self.conf['dataset']) + self.iter_step = 0 + + # Training parameters + self.end_iter = self.conf.get_int('train.end_iter') + self.save_freq = self.conf.get_int('train.save_freq') + self.report_freq = self.conf.get_int('train.report_freq') + self.val_freq = self.conf.get_int('train.val_freq') + self.val_mesh_freq = self.conf.get_int('train.val_mesh_freq') + self.batch_size = self.conf.get_int('train.batch_size') + self.validate_resolution_level = self.conf.get_int('train.validate_resolution_level') + self.learning_rate = self.conf.get_float('train.learning_rate') + self.learning_rate_alpha = self.conf.get_float('train.learning_rate_alpha') + self.use_white_bkgd = self.conf.get_bool('train.use_white_bkgd') + self.warm_up_end = self.conf.get_float('train.warm_up_end', default=0.0) + self.anneal_end = self.conf.get_float('train.anneal_end', default=0.0) + + # Weights + self.igr_weight = self.conf.get_float('train.igr_weight') + self.mask_weight = self.conf.get_float('train.mask_weight') + self.is_continue = is_continue + self.mode = mode + self.model_list = [] + self.writer = None + + # Networks + params_to_train = [] + self.nerf_outside = NeRF(**self.conf['model.nerf']) + self.sdf_network = SDFNetwork(**self.conf['model.sdf_network']) + self.deviation_network = SingleVarianceNetwork(**self.conf['model.variance_network']) + self.color_network = RenderingNetwork(**self.conf['model.rendering_network']) + params_to_train += list(self.nerf_outside.parameters()) + params_to_train += list(self.sdf_network.parameters()) + params_to_train += list(self.deviation_network.parameters()) + params_to_train += list(self.color_network.parameters()) + + self.optimizer = jt.optim.Adam(params_to_train, lr=self.learning_rate) + + self.renderer = NeuSRenderer(self.nerf_outside, + self.sdf_network, + self.deviation_network, + self.color_network, + **self.conf['model.neus_renderer']) + + # Load checkpoint + latest_model_name = None + if is_continue: + model_list_raw = os.listdir(os.path.join(self.base_exp_dir, 'checkpoints')) + 
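            # save_checkpoint() names files 'ckpt_{iteration:06d}.pkl', so the slice
            # model_name[5:-4] below recovers the iteration number and a plain
            # lexicographic sort suffices to pick the newest usable checkpoint.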
model_list = [] + for model_name in model_list_raw: + if model_name[-3:] == 'pkl' and int(model_name[5:-4]) <= self.end_iter: + model_list.append(model_name) + model_list.sort() + latest_model_name = model_list[-1] + + if latest_model_name is not None: + logging.info('Find checkpoint: {}'.format(latest_model_name)) + self.load_checkpoint(latest_model_name) + + # Backup codes and configs for debug + if self.mode[:5] == 'train': + self.file_backup() + + def train(self): + self.writer = SummaryWriter(log_dir=os.path.join(self.base_exp_dir, 'logs')) + self.update_learning_rate() + res_step = self.end_iter - self.iter_step + image_perm = self.get_image_perm() + + for iter_i in tqdm(range(res_step)): + data = self.dataset.gen_random_rays_at(image_perm[self.iter_step % len(image_perm)], self.batch_size) + + rays_o, rays_d, true_rgb, mask = data[:, :3], data[:, 3: 6], data[:, 6: 9], data[:, 9: 10] + near, far = self.dataset.near_far_from_sphere(rays_o, rays_d) + # near, far = 1 * torch.ones_like(near), 100 * torch.ones_like(far) + + background_rgb = None + if self.use_white_bkgd: + background_rgb = jt.ones([1, 3]) + + if self.mask_weight > 0.0: + mask = (mask > 0.5).float() + else: + mask = jt.ones_like(mask) + + mask_sum = mask.sum() + 1e-5 + render_out = self.renderer.render(rays_o, rays_d, near, far, + background_rgb=background_rgb, + cos_anneal_ratio=self.get_cos_anneal_ratio()) + + color_fine = render_out['color_fine'] + s_val = render_out['s_val'] + cdf_fine = render_out['cdf_fine'] + gradient_error = render_out['gradient_error'] + weight_max = render_out['weight_max'] + weight_sum = render_out['weight_sum'] + + # Loss + color_error = (color_fine - true_rgb) * mask + # color_fine_loss = jt.nn.l1_loss(color_error, jt.zeros_like(color_error), reduction='sum') / mask_sum + color_fine_loss = color_error.abs().sum() / mask_sum + psnr = 20.0 * jt.log2(1.0 / (((color_fine - true_rgb)**2 * mask).sum() / (mask_sum * 3.0)).sqrt()) / jt.log2(10) + + eikonal_loss = gradient_error + + mask_loss = jt.nn.binary_cross_entropy_with_logits(weight_sum.safe_clip(1e-3, 1.0 - 1e-3), mask) + # mask_loss = 0 + + loss = color_fine_loss +\ + eikonal_loss * self.igr_weight +\ + mask_loss * self.mask_weight + + if loss.isnan().any(): + # print("***** Nan loss with %d ray *****" % (color_fine.isnan().sum())) + # print("Current image index is %d ." 
% (int(image_perm[self.iter_step % len(image_perm)]))) + continue + + self.optimizer.zero_grad() + self.optimizer.backward(loss) + self.optimizer.step() + + self.iter_step += 1 + + self.writer.add_scalar('Loss/loss', loss.numpy(), self.iter_step) + self.writer.add_scalar('Loss/color_loss', color_fine_loss.numpy(), self.iter_step) + self.writer.add_scalar('Loss/eikonal_loss', eikonal_loss.numpy(), self.iter_step) + self.writer.add_scalar('Statistics/s_val', s_val.mean().numpy(), self.iter_step) + self.writer.add_scalar('Statistics/cdf', ((cdf_fine[:, :1] * mask).sum() / mask_sum).numpy(), self.iter_step) + self.writer.add_scalar('Statistics/weight_max', ((weight_max * mask).sum() / mask_sum).numpy(), self.iter_step) + self.writer.add_scalar('Statistics/psnr', psnr.numpy(), self.iter_step) + + if self.iter_step % self.report_freq == 0: + print(self.base_exp_dir) + print('iter:{:8>d} loss = {} lr={}'.format(self.iter_step, loss, self.optimizer.param_groups[0]['lr'])) + + if self.iter_step % self.save_freq == 0: + self.save_checkpoint() + + if self.iter_step % self.val_freq == 0: + self.validate_image() + # print("Cancle the image validataion due to CUDA OOM") + + if self.iter_step % self.val_mesh_freq == 0: + self.validate_mesh() + # print("Cancle the mesh validataion due to CUDA OOM") + + self.update_learning_rate() + + if self.iter_step % len(image_perm) == 0: + image_perm = self.get_image_perm() + + def get_image_perm(self): + return jt.randperm(self.dataset.n_images) + # logging.debug("Debug by traversing images sequentially") + # return jt.arange(self.dataset.n_images) + + def get_cos_anneal_ratio(self): + if self.anneal_end == 0.0: + return 1.0 + else: + return np.min([1.0, self.iter_step / self.anneal_end]) + + def update_learning_rate(self): + if self.iter_step < self.warm_up_end: + learning_factor = self.iter_step / self.warm_up_end + else: + alpha = self.learning_rate_alpha + progress = (self.iter_step - self.warm_up_end) / (self.end_iter - self.warm_up_end) + learning_factor = (np.cos(np.pi * progress) + 1.0) * 0.5 * (1 - alpha) + alpha + + for g in self.optimizer.param_groups: + g['lr'] = self.learning_rate * learning_factor + + def file_backup(self): + dir_lis = self.conf['general.recording'] + os.makedirs(os.path.join(self.base_exp_dir, 'recording'), exist_ok=True) + for dir_name in dir_lis: + cur_dir = os.path.join(self.base_exp_dir, 'recording', dir_name) + os.makedirs(cur_dir, exist_ok=True) + files = os.listdir(dir_name) + for f_name in files: + if f_name[-3:] == '.py': + copyfile(os.path.join(dir_name, f_name), os.path.join(cur_dir, f_name)) + + copyfile(self.conf_path, os.path.join(self.base_exp_dir, 'recording', 'config.conf')) + + def load_checkpoint(self, checkpoint_name): + checkpoint = jt.load(os.path.join(self.base_exp_dir, 'checkpoints', checkpoint_name)) + self.nerf_outside.load_state_dict(checkpoint['nerf']) + self.sdf_network.load_state_dict(checkpoint['sdf_network_fine']) + self.deviation_network.load_state_dict(checkpoint['variance_network_fine']) + self.color_network.load_state_dict(checkpoint['color_network_fine']) + self.optimizer.load_state_dict(checkpoint['optimizer']) + self.iter_step = checkpoint['iter_step'] + + logging.info('End') + + def save_checkpoint(self): + checkpoint = { + 'nerf': self.nerf_outside.state_dict(), + 'sdf_network_fine': self.sdf_network.state_dict(), + 'variance_network_fine': self.deviation_network.state_dict(), + 'color_network_fine': self.color_network.state_dict(), + 'optimizer': self.optimizer.state_dict(), + 'iter_step': 
self.iter_step, + } + + os.makedirs(os.path.join(self.base_exp_dir, 'checkpoints'), exist_ok=True) + jt.save(checkpoint, os.path.join(self.base_exp_dir, 'checkpoints', 'ckpt_{:0>6d}.pkl'.format(self.iter_step))) + + def validate_image(self, idx=-1, resolution_level=-1): + if idx < 0: + idx = np.random.randint(self.dataset.n_images) + + print('Validate: iter: {}, camera: {}'.format(self.iter_step, idx)) + + if resolution_level < 0: + resolution_level = self.validate_resolution_level + rays_o, rays_d = self.dataset.gen_rays_at(idx, resolution_level=resolution_level) + H, W, _ = rays_o.shape + rays_o = rays_o.reshape(-1, 3).split(self.batch_size) + rays_d = rays_d.reshape(-1, 3).split(self.batch_size) + + out_rgb_fine = [] + out_normal_fine = [] + + for rays_o_batch, rays_d_batch in zip(rays_o, rays_d): + near, far = self.dataset.near_far_from_sphere(rays_o_batch, rays_d_batch) + background_rgb = jt.ones([1, 3]) if self.use_white_bkgd else None + + render_out = self.renderer.render(rays_o_batch, + rays_d_batch, + near, + far, + cos_anneal_ratio=self.get_cos_anneal_ratio(), + background_rgb=background_rgb) + + def feasible(key): return (key in render_out) and (render_out[key] is not None) + + if feasible('color_fine'): + out_rgb_fine.append(render_out['color_fine'].numpy()) + if feasible('gradients') and feasible('weights'): + n_samples = self.renderer.n_samples + self.renderer.n_importance + normals = render_out['gradients'] * render_out['weights'][:, :n_samples, None] + if feasible('inside_sphere'): + normals = normals * render_out['inside_sphere'][..., None] + normals = normals.sum(dim=1).numpy() + out_normal_fine.append(normals) + del render_out + + img_fine = None + if len(out_rgb_fine) > 0: + img_fine = (np.concatenate(out_rgb_fine, axis=0).reshape([H, W, 3, -1]) * 256).clip(0, 255) + + normal_img = None + if len(out_normal_fine) > 0: + normal_img = np.concatenate(out_normal_fine, axis=0) + rot = np.linalg.inv(self.dataset.pose_all[idx, :3, :3].numpy()) + normal_img = (np.matmul(rot[None, :, :], normal_img[:, :, None]) + .reshape([H, W, 3, -1]) * 128 + 128).clip(0, 255) + + os.makedirs(os.path.join(self.base_exp_dir, 'validations_fine'), exist_ok=True) + os.makedirs(os.path.join(self.base_exp_dir, 'normals'), exist_ok=True) + + for i in range(img_fine.shape[-1]): + if len(out_rgb_fine) > 0: + cv.imwrite(os.path.join(self.base_exp_dir, + 'validations_fine', + '{:0>8d}_{}_{}.png'.format(self.iter_step, i, idx)), + np.concatenate([img_fine[..., i], + self.dataset.image_at(idx, resolution_level=resolution_level)])) + if len(out_normal_fine) > 0: + cv.imwrite(os.path.join(self.base_exp_dir, + 'normals', + '{:0>8d}_{}_{}.png'.format(self.iter_step, i, idx)), + normal_img[..., i]) + + def render_novel_image(self, idx_0, idx_1, ratio, resolution_level): + """ + Interpolate view between two cameras. 
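        Rays are generated by self.dataset.gen_rays_between for a camera pose blended
        between idx_0 and idx_1 according to `ratio`, rendered in chunks of
        self.batch_size, and reassembled into a single H x W image.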
+ """ + rays_o, rays_d = self.dataset.gen_rays_between(idx_0, idx_1, ratio, resolution_level=resolution_level) + H, W, _ = rays_o.shape + rays_o = rays_o.reshape(-1, 3).split(self.batch_size) + rays_d = rays_d.reshape(-1, 3).split(self.batch_size) + + out_rgb_fine = [] + for rays_o_batch, rays_d_batch in zip(rays_o, rays_d): + near, far = self.dataset.near_far_from_sphere(rays_o_batch, rays_d_batch) + background_rgb = jt.ones([1, 3]) if self.use_white_bkgd else None + + render_out = self.renderer.render(rays_o_batch, + rays_d_batch, + near, + far, + cos_anneal_ratio=self.get_cos_anneal_ratio(), + background_rgb=background_rgb) + + out_rgb_fine.append(render_out['color_fine'].detach().cpu().numpy()) + + del render_out + + img_fine = (np.concatenate(out_rgb_fine, axis=0).reshape([H, W, 3]) * 256).safe_clip(0, 255).astype(np.uint8) + return img_fine + + + def render_image(self, render_pose, use_deform=False, query_delta=None, + hull=None, deltas=None, mesh=None, c2w_staticcam=None): + out_rgb_fine = [] + if mesh != None: + rays_o, rays_d, depth = self.dataset.gen_rays_at_pose_with_depth(render_pose, mesh, resolution_level=2) + rays_o, rays_d = self.dataset.gen_rays_at_pose(render_pose, resolution_level=2) + H, W = depth.shape[:2] + mask = (depth > 1e-5).reshape(H, W, -1).astype(np.uint8) * 255 + # import imageio + # imageio.imwrite('./test.jpg', depth.cpu().numpy()) + # import ipdb; ipdb.set_trace() + else: + rays_o, rays_d = self.dataset.gen_rays_at_pose(render_pose, resolution_level=2) + + if c2w_staticcam != None: + view_dirs = rays_d.reshape(-1, 3).split(self.batch_size) + rays_o, rays_d, depth = self.dataset.gen_rays_at_pose_with_depth(c2w_staticcam, mesh, resolution_level=2) + else: + Num = len(rays_o.reshape(-1, 3).split(self.batch_size)) + view_dirs = [None] * Num + + H, W, _ = rays_o.shape + rays_o = rays_o.reshape(-1, 3).split(self.batch_size) + rays_d = rays_d.reshape(-1, 3).split(self.batch_size) + if mesh != None: + depth = jt.reshape(depth, [-1,1]).float().split(self.batch_size) # [H*W, 1] + epsilon = 0.2 + else: + depth = [None] * Num + + VIS_RAY = False + if VIS_RAY: + w_coord, h_coord = 180, 150 # fox pixel 1 + vis_coord_ind = h_coord * W + w_coord + else: + vis_coord_ind = -1 + + t1 = time.time() + for rays_o_batch, rays_d_batch, depth_batch, view_dir in zip(rays_o, rays_d, depth, view_dirs): + if mesh != None: + near, far = depth_batch - epsilon, depth_batch + epsilon + else: + near, far = self.dataset.near_far_from_sphere(rays_o_batch, rays_d_batch) + + background_rgb = jt.ones([1, 3]) if self.use_white_bkgd else None + + render_out = self.renderer.render(rays_o_batch, + rays_d_batch, + near, + far, + view_dir, + cos_anneal_ratio=self.get_cos_anneal_ratio(), + background_rgb=background_rgb, + use_deform=use_deform, + query_delta=query_delta, + hull=hull, + deltas=deltas, + vis_coord_ind=vis_coord_ind) + vis_coord_ind -= self.batch_size + + ttt = time.time() + # out_rgb_fine.append(render_out['color_fine'].numpy()) + # out_rgb_fine[-1] = np.concatenate([out_rgb_fine[-1], \ + # render_out['weights'].sum(dim=-1, keepdims=True).numpy()], axis=-1) + out_rgb_fine.append(np.concatenate([render_out['color_fine'].numpy(), render_out['weights'].sum(dim=-1, keepdims=True).numpy()], axis=-1)) + del render_out + # print("post process cost %s" % (time.time() - ttt)) + # print("..............") + + print("rendering cost time:", time.time()-t1) + # img_fine = (np.concatenate(out_rgb_fine, axis=0).reshape([H, W, 3]) * 256).safe_clip(0, 255).astype(np.uint8) + img_fine = 
(np.concatenate(out_rgb_fine, axis=0).reshape([H, W, 4]) * 256).clip(0, 255).astype(np.uint8) + # if mesh != None: + # img_fine = np.concatenate([img_fine, mask], axis=-1) + return img_fine + + def validate_mesh(self, world_space=False, resolution=64, threshold=0.0, with_color=False, do_dilation=False): + bound_min = jt.float32(self.dataset.object_bbox_min) + bound_max = jt.float32(self.dataset.object_bbox_max) + + vertices, triangles =\ + self.renderer.extract_geometry(bound_min, bound_max, resolution=resolution, threshold=threshold, do_dilation=do_dilation) + os.makedirs(os.path.join(self.base_exp_dir, 'meshes'), exist_ok=True) + + # if world_space: + # vertices = vertices * self.dataset.scale_mats_np[0][0, 0] + self.dataset.scale_mats_np[0][:3, 3][None] + + mesh = trimesh.Trimesh(vertices, triangles, process=False, maintain_order=True) + no_view_dependence = True + + if with_color: + normals = jt.array(mesh.vertex_normals.copy()).float() + normals = -1 * normals / jt.norm(normals, dim=-1, keepdim=True) + normals = normals.split(self.batch_size) + pts = jt.array(vertices).float().split(self.batch_size) + verts_color = [] + if no_view_dependence: + print("sample according to the vertex position") + for pts_batch, dir_batch in zip(pts, normals): + sdf_nn_output = self.renderer.sdf_network(pts_batch) + # sdf = sdf_nn_output[:, :1] + feature_vector = sdf_nn_output[:, 1:] # [bs, 256] + gradients = self.renderer.sdf_network.gradient(pts_batch) + sampled_color = self.renderer.color_network(pts_batch, gradients, dir_batch, feature_vector) + verts_color.append(sampled_color.numpy()) + # del sdf_nn_output, feature_vector, gradients, sampled_color + verts_color = (np.concatenate(verts_color, axis=0) * 255).clip(0, 255).astype(np.uint8) + else: + print("sample along the normal direction") + rays_o, rays_d = pts, normals + from tqdm import tqdm + epsilon = 0.1 + for rays_o_batch, rays_d_batch in tqdm(zip(rays_o, rays_d)): + near, far = -1 * epsilon * jt.ones_like(rays_o_batch[...,:1]), epsilon * jt.ones_like(rays_o_batch[...,:1]) + render_out = self.renderer.render(rays_o_batch, rays_d_batch, near, far, + cos_anneal_ratio=self.get_cos_anneal_ratio()) + verts_color.append(render_out['color_fine'].numpy()) + # verts_color = (np.concatenate(verts_color, axis=0) * 255).safe_clip(0, 255).astype(np.uint8) + verts_color = np.concatenate(verts_color, axis=0) + # mesh.export(os.path.join(self.base_exp_dir, 'meshes', '{:0>8d}.ply'.format(self.iter_step))) + mesh.visual.vertex_colors = verts_color[:,[2,1,0]] # modify BGR to RGB + save_path = os.path.join(self.base_exp_dir, 'meshes', '{:0>8d}.obj'.format(self.iter_step)) + if do_dilation: + save_path = save_path.replace('.obj', '_dilation.obj') + trimesh.exchange.export.export_mesh(mesh, save_path, 'obj') + + logging.info('End') + + def interpolate_view(self, img_idx_0, img_idx_1): + images = [] + n_frames = 60 + video_dir = os.path.join(self.base_exp_dir, 'render') + os.makedirs(video_dir, exist_ok=True) + for i in range(n_frames): + print(i) + images.append(self.render_novel_image(img_idx_0, + img_idx_1, + np.sin(((i / n_frames) - 0.5) * np.pi) * 0.5 + 0.5, + resolution_level=4)) + cv.imwrite(os.path.join(video_dir, + '{:0>8d}.png'.format(i)), images[-1]) + + for i in range(n_frames): + images.append(images[n_frames - i - 1]) + + fourcc = cv.VideoWriter_fourcc(*'mp4v') + h, w, _ = images[0].shape + writer = cv.VideoWriter(os.path.join(video_dir, + '{:0>8d}_{}_{}.mp4'.format(self.iter_step, img_idx_0, img_idx_1)), + fourcc, 30, (w, h)) + + for image in images: 
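            # The frame list was mirrored above, so the video plays the interpolation
            # forward and then in reverse; frames are uint8 arrays in OpenCV's BGR
            # channel order, which is what cv.VideoWriter expects.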
+ writer.write(image) + + writer.release() + + + def render_circle_image(self, recon_file=None, deform_file=None, use_deform=False, obj_path=None, \ + fix_camera=False, is_view_dependent=False, save_dir="", is_val=False, add_alpha=False): + if is_val: + render_poses = self.dataset.gen_validation_pose() + else: + render_poses = self.dataset.gen_circle_poses() + if not save_dir: + save_dir = os.path.join(self.base_exp_dir, 'render_circle') + else: + save_dir = os.path.join(self.base_exp_dir, save_dir) + if use_deform: + from utils import genConvexhullVolume, queryDelta + # from utils import genKNN as genConvexhullVolume + # from utils import queryDelta_KNN as queryDelta + hull, deltas = genConvexhullVolume(recon_file, deform_file, fix_camera) + else: + hull = deltas = queryDelta = None + + # from pytorch3d.io import load_objs_as_meshes + from utils import load_objs_as_meshes + if fix_camera: + print("FIX CAMERA") + render_poses = render_poses[0:1].expand(len(deltas), *render_poses.shape[1:]) + ### for laptop + # render_poses = render_poses[6:7].expand(len(deltas), *render_poses.shape[1:]) + ### for hbychair + # render_poses = render_poses[26:27].expand(len(deltas), *render_poses.shape[1:]) + ### for dinosaur + # render_poses = render_poses[22:23].expand(len(deltas), *render_poses.shape[1:]) + import glob + mesh_files = sorted(glob.glob(os.path.join(args.obj_path, '*.obj'))) + mesh = load_objs_as_meshes(mesh_files) + else: + ### load obj file for sampling + if args.obj_path: + mesh = load_objs_as_meshes(args.obj_path) + else: + mesh = None + + ### copy tets, deltas and meshes + import copy + if fix_camera: + deltas_copy = copy.deepcopy(deltas) + hull_copy = copy.deepcopy(hull) + mesh_copy = copy.deepcopy(mesh) + + if is_view_dependent: + c2w_staticcam = render_poses[0] + else: + c2w_staticcam = None + os.makedirs(save_dir, exist_ok=True) + images = [] + for idx, render_pose in enumerate(render_poses): + print("render the %d / %d image" % (idx, len(render_poses))) + # if idx < 20: + # continue + if fix_camera: + hull, deltas, mesh = hull_copy[idx], deltas_copy[idx], mesh_copy[idx] + images.append(self.render_image(render_pose, use_deform, queryDelta, hull, deltas, mesh, c2w_staticcam)) + if add_alpha: + cv.imwrite(os.path.join(save_dir, '{:0>8d}.png'.format(idx)), images[-1]) + else: + cv.imwrite(os.path.join(save_dir, '{:0>8d}.png'.format(idx)), images[-1][:,:,:3]) + + if images[0].shape[-1] == 4: + # images = [img[:,:,:3] + (255 - img[:,:,3:]) for img in images] + images = [img[:,:,:3] for img in images] + + fourcc = cv.VideoWriter_fourcc(*'mp4v') + h, w, _ = images[0].shape + writer = cv.VideoWriter(os.path.join(save_dir, 'video.mp4'), + fourcc, 10, (w, h)) + + for image in images: + writer.write(image) + + writer.release() + + +if __name__ == '__main__': + print('Hello Wooden') + + FORMAT = "[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s" + logging.basicConfig(level=logging.DEBUG, format=FORMAT) + logging.getLogger('PIL').setLevel(logging.WARNING) + # logging.basicConfig(level=logging.INFO, format=FORMAT) + + parser = argparse.ArgumentParser() + parser.add_argument('--conf', type=str, default='./confs/base.conf') + parser.add_argument('--mode', type=str, default='train') + parser.add_argument('--mcube_threshold', type=float, default=0.0) + parser.add_argument('--is_continue', default=False, action="store_true") + parser.add_argument('--gpu', type=int, default=0) + parser.add_argument('--case', type=str, default='') + + parser.add_argument("--use_deform", action='store_true', + 
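                        # when --use_deform is set, --reconstructed_mesh_file and
                        # --deformed_mesh_file are also expected: render_circle_image
                        # builds the tet hull and per-vertex deltas from them via
                        # genConvexhullVolume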
help='use mesh to guide deformation') + parser.add_argument("--reconstructed_mesh_file", type=str, default=None, + help='reconstructed mesh path') + parser.add_argument("--deformed_mesh_file", type=str, default=None, + help='deformed mesh path') + + parser.add_argument("--fix_camera", action='store_true', + help='fix the camera for sequence generation') + parser.add_argument("--is_view_dependent", action='store_true', + help='fix the camera while change the ray direction') + + # use mesh to for better sampling + parser.add_argument("--obj_path", type=str, default=None, + help='mesh path') + + # for cage extraction + parser.add_argument("--do_dilation", action='store_true', + help='Optional. Extract cage from current NeRF') + + # for image save + parser.add_argument("--savedir", type=str, default="", + help='save data directory') + + # use LLFF + parser.add_argument("--use_llff", action='store_true', + help='use llff !') + + # add alpha + parser.add_argument("--add_alpha", action='store_true', + help='add alpha channel') + + args = parser.parse_args() + + if args.mode == 'train' or args.mode == 'default': + from models.render_train import NeuSRenderer + print("use train NeuS Renderer: total sampling ...") + else: + from models.renderer import NeuSRenderer + print("use test render NeuS Renderer: sparse sampling ...") + + # if args.use_llff == True: + # print("Use colmap extimated poses ~!") + # from models.dataset_llff import Dataset + # else: + from models.dataset import Dataset + + runner = Runner(args.conf, args.mode, args.case, args.is_continue) + + if args.mode == 'train': + runner.train() + elif args.mode == 'validate_mesh': + if args.do_dilation: + resol = 256 + else: + resol = 256 + print("use resoluation %d for marching cube" % resol) + runner.validate_mesh(world_space=True, resolution=resol, threshold=args.mcube_threshold, \ + with_color=True, do_dilation=args.do_dilation) + elif args.mode.startswith('interpolate'): # Interpolate views given two image indices + _, img_idx_0, img_idx_1 = args.mode.split('_') + img_idx_0 = int(img_idx_0) + img_idx_1 = int(img_idx_1) + runner.interpolate_view(img_idx_0, img_idx_1) + elif args.mode.startswith('circle'): # circle views + runner.batch_size = 300 + runner.render_circle_image(args.reconstructed_mesh_file, args.deformed_mesh_file,\ + args.use_deform, args.obj_path, args.fix_camera, args.is_view_dependent, args.savedir, add_alpha=args.add_alpha) + # elif args.mode.startswith('evaluate'): + # print("use evaluation poses !!!") + # runner.batch_size = 400 + # runner.render_circle_image(args.reconstructed_mesh_file, args.deformed_mesh_file,\ + # args.use_deform, args.obj_path, args.fix_camera, args.is_view_dependent, args.savedir, is_val=True) diff --git a/contrib/NeRF-Editing/src/models/dataset.py b/contrib/NeRF-Editing/src/models/dataset.py new file mode 100644 index 00000000..9aa8c3e4 --- /dev/null +++ b/contrib/NeRF-Editing/src/models/dataset.py @@ -0,0 +1,350 @@ +import logging +import jittor as jt +import cv2 as cv +import numpy as np +import os, json +from glob import glob +from scipy.spatial.transform import Rotation as Rot +from scipy.spatial.transform import Slerp +import imageio +from utils import render_depth + + +# This function is borrowed from IDR: https://github.com/lioryariv/idr +def load_K_Rt_from_P(filename, P=None): + if P is None: + lines = open(filename).read().splitlines() + if len(lines) == 4: + lines = lines[1:] + lines = [[x[0], x[1], x[2], x[3]] for x in (x.split(" ") for x in lines)] + P = 
np.asarray(lines).astype(np.float32).squeeze() + + out = cv.decomposeProjectionMatrix(P) + K = out[0] + R = out[1] + t = out[2] + + K = K / K[2, 2] + intrinsics = np.eye(4) + intrinsics[:3, :3] = K + + pose = np.eye(4, dtype=np.float32) + pose[:3, :3] = R.transpose() + pose[:3, 3] = (t[:3] / t[3])[:, 0] + + return intrinsics, pose + +def calCenter(cameras:np.array): + """Given cameras, calculate the center by interset the rays + + Args: + cameras (np.array): [N,5,3]. The rays_o and 4 corners. + """ + ray_o = cameras[:,0,:] # [N,3] + ray_d = cameras[:,1:,:].mean(axis=1) - ray_o # [N,3] + ray_d = ray_d / np.linalg.norm(ray_d, axis=1, keepdims=True) + pts = [] + + np.random.seed(1) # set random seed, to avoid different every time + for _ in range(100): + id1, id2 = np.random.choice(np.arange(ray_o.shape[0]), 2, replace=False) + ### calculate A * t = b + A, b = np.zeros((2,2)), np.zeros(2,) + A[0,0] = np.dot(ray_d[id1], ray_d[id1]) + A[0,1] = -1 * np.dot(ray_d[id1], ray_d[id2]) + A[1,0] = np.dot(ray_d[id1], ray_d[id2]) + A[1,1] = -1 * np.dot(ray_d[id2], ray_d[id2]) + b[0] = np.dot(ray_o[id2]-ray_o[id1], ray_d[id1]) + b[1] = np.dot(ray_o[id2]-ray_o[id1], ray_d[id2]) + try: + t = np.linalg.solve(A, b) + pts.append(ray_o[[id1,id2]] + ray_d[[id1,id2]] * t[:,np.newaxis]) + except: + print("unable to solve ...") + continue + pts = np.concatenate(pts, axis=0) # [20, 3] + # print("center", pts.mean(axis=0)) + return pts.mean(axis=0) + +def normalizeCamera(c2w, H, W, F, rad=3): + ### construct camera: [N,5,3] + isTensor = False + if jt.is_var(c2w): + c2w_np = c2w.cpu().numpy() + isTensor = True + else: + c2w_np = c2w + + rays_o = c2w_np[:,:3,3] # [N,3] + + left_top = [-1*W/2, H/2] + right_top = [W/2, H/2] + left_bottom = [-1*W/2, -1*H/2] + right_bottom = [W/2, -1*H/2] + corners = np.stack([left_bottom,right_bottom,right_top,left_top], axis=0) # [NumPt,2] + corners /= F + corners = np.concatenate([corners, -1*np.ones_like(corners[:,:1])], axis=-1) # [NumPt,3] + # change the z direction to negative + corners = np.broadcast_to(corners, [c2w_np.shape[0]]+list(corners.shape)) # [N,NumPt,3] + rays_d = np.matmul(c2w_np[:,:3,:3], corners.transpose(0,2,1)).transpose(0,2,1) # [N,NumPt,3] + rays_d = rays_d / np.linalg.norm(rays_d, axis=-1, keepdims=True) # [N,NumPt,3] + cameras = np.concatenate([rays_o[:,np.newaxis,:], rays_o[:,np.newaxis,:]+rays_d], axis=1) + center = calCenter(cameras) # the center of cameras + cameras -= center # move to origin + radius = np.linalg.norm(cameras[:,0,:], axis=-1).mean() + scale = rad / radius # change different scale of inside ball + + c2w_np[:,:3,3] -= center + c2w_np[:,:3,3] *= scale + + if isTensor: + c2w_np = jt.array(c2w_np) + + return c2w_np + + +class Dataset: + def __init__(self, conf): + super(Dataset, self).__init__() + print('Load data: Begin') + jt.flags.use_cuda = 1 + self.conf = conf + + self.data_dir = conf.get_string('data_dir') + dataset_type = conf.get_string('type') + + USE_CUSTOM_DATASET = False + if dataset_type == 'nerf_synthetic': + print("Use the nerf synthetic dataset") + splits = ['train', 'val', 'test'] + metas = {} + for s in splits: + with open(os.path.join(self.data_dir, 'transforms_{}.json'.format(s)), 'r') as fp: + metas[s] = json.load(fp) + + train_meta = metas['train'] + img_files = [] + all_imgs = [] + all_poses = [] + all_val_poses = [] + for frame in train_meta['frames']: + fname = os.path.join(self.data_dir, frame['file_path'] + '.png') + img_files.append(fname) + all_imgs.append(imageio.imread(fname)) + 
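                # 'transform_matrix' is the 4x4 camera-to-world pose stored per frame in
                # the Blender-style transforms_*.json of the NeRF synthetic set; the focal
                # length is recovered from 'camera_angle_x' just below.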
all_poses.append(np.array(frame['transform_matrix'])) + all_imgs = (np.array(all_imgs) / 255.).astype(np.float32) # keep all 4 channels (RGBA) + all_poses = np.array(all_poses).astype(np.float32) + all_val_poses = all_poses[-5:] + + H, W = all_imgs[0].shape[:2] + camera_angle_x = float(train_meta['camera_angle_x']) + focal = .5 * W / np.tan(.5 * camera_angle_x) + _intrinsics = np.array([ + [focal, 0, 0.5*W], + [0, focal, 0.5*H], + [0, 0, 1] + ]) + + elif dataset_type == 'custom': + print("Use the custom dataset") + USE_CUSTOM_DATASET = True + NEED_INVERT = False + pose_dir = os.path.join(self.data_dir, "pose") + if not os.path.exists(pose_dir): + pose_dir = os.path.join(self.data_dir, "extrinsic") + NEED_INVERT = True + img_dir = os.path.join(self.data_dir, "rgb") + + pose_files = sorted([os.path.join(pose_dir, x) for idx,x in enumerate(os.listdir(pose_dir))]) + img_files = sorted([os.path.join(img_dir, x) for idx,x in enumerate(os.listdir(img_dir))]) + val_pose_files = pose_files[-5:] + + all_poses = [np.loadtxt(x) for x in sorted(pose_files)] # list of 4x4 array + all_val_poses = [np.loadtxt(x) for x in sorted(val_pose_files)] # list of 4x4 array + + if NEED_INVERT: + all_poses = [np.linalg.inv(x) for x in all_poses] + all_val_poses = [np.linalg.inv(x) for x in all_val_poses] + + all_poses = np.stack(all_poses, axis=0).astype(np.float32) + all_val_poses = np.stack(all_val_poses, axis=0).astype(np.float32) + + all_poses[:,:,1:3] = -all_poses[:,:,1:3] # I don't know why ... + all_val_poses[:,:,1:3] = -all_val_poses[:,:,1:3] # I don't know why ... + + all_imgs = [imageio.imread(x) for x in sorted(img_files)] # list of images + all_imgs = np.stack(all_imgs, axis=0).astype(np.float32) / 255. # keep all 3 channels (RGBA):3 + + intrinsic_path = os.path.join(self.data_dir, "intrinsics.txt") # + with open(intrinsic_path) as f: + lines = f.readlines() + focal = np.fromstring(lines[0], sep=' ', dtype=np.float32)[0] + H, W = np.fromstring(lines[-1], sep=' ', dtype=np.int) + _intrinsics = np.array([ + [focal, 0, 0.5*W], + [0, focal, 0.5*H], + [0, 0, 1] + ]) + + print("normalize the camera") + all_poses = normalizeCamera(all_poses, H, W, focal) + + else: + raise NotImplementedError + + ### load data + self.n_images = len(img_files) + self.images_np = all_imgs[..., [2,1,0]] + self.masks_np = all_imgs[..., 3:] + if USE_CUSTOM_DATASET: + self.masks_np = np.ones_like(all_imgs[..., 0:1]) + self.images_lis = img_files + + self.intrinsics_all = [jt.array(_intrinsics).float()] * self.n_images + self.pose_all = [jt.array(pose).float() for pose in all_poses] + self.val_pose_all = [jt.array(pose).float() for pose in all_val_poses] + + self.images = jt.array(self.images_np.astype(np.float32)) # [n_images, H, W, 3] + self.masks = jt.array(self.masks_np.astype(np.float32)) # [n_images, H, W, 3] + self.intrinsics_all = jt.stack(self.intrinsics_all) # [n_images, 4, 4] + self.intrinsics_all_inv = jt.linalg.inv(self.intrinsics_all) # [n_images, 4, 4] + self.focal = self.intrinsics_all[0][0, 0] + self.pose_all = jt.stack(self.pose_all) # [n_images, 4, 4] + self.val_pose_all = jt.stack(self.val_pose_all) # [n_images, 4, 4] + self.H, self.W = self.images.shape[1], self.images.shape[2] + self.image_pixels = self.H * self.W + + object_bbox_min = np.array([-1.3, -1.3, -1.3, 1.0]) + object_bbox_max = np.array([ 1.3, 1.3, 1.3, 1.0]) + self.object_bbox_min = object_bbox_min[:3] + self.object_bbox_max = object_bbox_max[:3] + + print('Load data: End') + + + def gen_rays_at(self, img_idx, resolution_level=1): + """ + Generate rays 
at world space from one camera. + """ + img_idx = int(img_idx) + l = resolution_level + tx = jt.linspace(0, self.W - 1, self.W // l) + ty = jt.linspace(0, self.H - 1, self.H // l) + pixels_x, pixels_y = jt.meshgrid(tx, ty) + K = self.intrinsics_all[img_idx] + rays_v = jt.stack([(pixels_x-K[0][2])/K[0][0], -(pixels_y-K[1][2])/K[1][1], -jt.ones_like(pixels_x)], -1) # [W,H,3] + rays_v = jt.matmul(self.pose_all[img_idx, None, None, :3, :3].expand(self.W//l, self.H//l, 3,3), rays_v[:, :, :, None]).squeeze(-1) # W, H, 3 + rays_o = self.pose_all[img_idx, None, None, :3, 3].expand(rays_v.shape) # W, H, 3 + return rays_o.transpose(0, 1), rays_v.transpose(0, 1) + + def gen_random_rays_at(self, img_idx, batch_size): + """ + Generate random rays at world space from one camera. + """ + img_idx = int(img_idx) + pixels_x = jt.randint(low=0, high=self.W, shape=(batch_size,)) + pixels_y = jt.randint(low=0, high=self.H, shape=(batch_size,)) + color = self.images[img_idx][(pixels_y, pixels_x)] # batch_size, 3 + mask = self.masks[img_idx][(pixels_y, pixels_x)] # batch_size, 3 + K = self.intrinsics_all[img_idx] + rays_v = jt.stack([(pixels_x-K[0][2])/K[0][0], -(pixels_y-K[1][2])/K[1][1], -jt.ones_like(pixels_x)], -1) # batch_size, 3 + rays_v = jt.matmul(self.pose_all[img_idx, None, :3, :3].expand(batch_size,3,3), rays_v[:, :, None]).squeeze(-1) # batch_size, 3 + rays_o = self.pose_all[img_idx, None, :3, 3].expand(rays_v.shape) # batch_size, 3 + return jt.concat([rays_o, rays_v, color, mask[:, :1]], dim=-1) # batch_size, 10 + + + def near_far_from_sphere(self, rays_o, rays_d): + a = jt.sum(rays_d**2, dim=-1, keepdims=True) + b = 2.0 * jt.sum(rays_o * rays_d, dim=-1, keepdims=True) + mid = 0.5 * (-b) / a + near = mid - 1.0 + far = mid + 1.0 + return near, far + + def image_at(self, idx, resolution_level): + img = cv.imread(self.images_lis[idx]) + return (cv.resize(img, (self.W // resolution_level, self.H // resolution_level))).clip(0, 255) + +### ---------------------------------------------------------- + + def gen_circle_poses(self): + trans_t = lambda t : jt.array([ + [1,0,0,0], + [0,1,0,0], + [0,0,1,t], + [0,0,0,1]]).float() + + rot_phi = lambda phi : jt.array([ + [1,0,0,0], + [0,np.cos(phi),-np.sin(phi),0], + [0,np.sin(phi), np.cos(phi),0], + [0,0,0,1]]).float() + + rot_theta = lambda th : jt.array([ + [np.cos(th),0,-np.sin(th),0], + [0,1,0,0], + [np.sin(th),0, np.cos(th),0], + [0,0,0,1]]).float() + + def pose_spherical(theta, phi, radius): + c2w = trans_t(radius) + c2w = rot_phi(phi/180.*np.pi) @ c2w + c2w = rot_theta(theta/180.*np.pi) @ c2w + c2w = jt.array(np.array([[-1,0,0,0],[0,0,1,0],[0,1,0,0],[0,0,0,1]])) @ c2w + return c2w + + Nframes = 40 + + if "hbychair" in self.data_dir: + print("set render raids to 3.5 ! 
Add aditional rotation.") + render_poses = [] + for elev in [-30]: + render_pose = jt.stack([pose_spherical(angle, elev, 3.5) for angle in np.linspace(-180,180,Nframes+1)[:-1]], 0) + rotation_mat = jt.array([[1,0,0,0],[0,-0.35,0.94,0],[0,-0.94,-0.35,-0.15],[0,0,0,1]]).float() + rotation_mat = jt.linalg.inv(rotation_mat) + render_poses += [rotation_mat @ x for x in render_pose] + render_poses = jt.stack(render_poses) + else: + print("set render raids to 3.5 !") + for elev in [-30]: + render_poses = jt.stack([pose_spherical(angle, elev, 3.5) for angle in np.linspace(-180,180,Nframes+1)[:-1]], 0) + + return render_poses + + def gen_validation_pose(self): + return self.val_pose_all[::5] + + def gen_rays_at_pose(self, render_pose, resolution_level=1): + l = resolution_level + tx = jt.linspace(0, self.W - 1, self.W // l) + ty = jt.linspace(0, self.H - 1, self.H // l) + pixels_x, pixels_y = jt.meshgrid(tx, ty) # [W,H] + K = self.intrinsics_all[0] + rays_v = jt.stack([(pixels_x-K[0][2])/K[0][0], -(pixels_y-K[1][2])/K[1][1], -jt.ones_like(pixels_x)], -1) # [W,H,3] + w, h = rays_v.shape[:2] + # rays_v = p / torch.linalg.norm(p, ord=2, dim=-1, keepdim=True) # W, H, 3 + rays_v = jt.matmul(render_pose[None, None, :3, :3].expand(w,h,3,3), rays_v[:, :, :, None]).squeeze(-1) # W, H, 3 + rays_o = render_pose[None, None, :3, 3].expand(rays_v.shape) # W, H, 3 + return rays_o.transpose(0, 1), rays_v.transpose(0, 1) + + + + def gen_rays_at_pose_with_depth(self, render_pose, mesh, resolution_level=1): + l = resolution_level + tx = jt.linspace(0, self.W - 1, self.W // l) + ty = jt.linspace(0, self.H - 1, self.H // l) + pixels_x, pixels_y = jt.meshgrid(tx, ty) # [W,H] + K = self.intrinsics_all[0] + + rays_v = jt.stack([(pixels_x-K[0][2])/K[0][0], -(pixels_y-K[1][2])/K[1][1], -jt.ones_like(pixels_x)], -1) # [W,H,3] + w, h = rays_v.shape[:2] + # rays_v = p / torch.linalg.norm(p, ord=2, dim=-1, keepdim=True) # W, H, 3 + rays_v = jt.matmul(render_pose[None, None, :3, :3].expand(w,h,3,3), rays_v[:, :, :, None]).squeeze(-1) # W, H, 3 + rays_o = render_pose[None, None, :3, 3].expand(rays_v.shape) # W, H, 3 + + # dep_norm_render = DepthNormalRenderer(render_pose.unsqueeze(0), IMGSIZE=self.H//l, \ + # FOCAL=K.cpu()[0][0]/l, aspect_ratio=float(self.W)/float(self.H)) + # dep_im, norm_im = dep_norm_render(mesh) # [bs,H,W,1], [bs,H,W,3] + dep_im = render_depth(render_pose.numpy(), mesh, IMGSIZE=self.H//l, FOCAL=K.numpy()[0][0]/l, aspect_ratio=float(self.W)/float(self.H)) + return rays_o.transpose(0, 1), rays_v.transpose(0, 1), dep_im + diff --git a/contrib/NeRF-Editing/src/models/embedder.py b/contrib/NeRF-Editing/src/models/embedder.py new file mode 100644 index 00000000..9b81cd9f --- /dev/null +++ b/contrib/NeRF-Editing/src/models/embedder.py @@ -0,0 +1,52 @@ +import jittor as jt +from jittor import nn + + +# Positional encoding embedding. Code was taken from https://github.com/bmild/nerf. +class Embedder: + def __init__(self, **kwargs): + self.kwargs = kwargs + self.create_embedding_fn() + + def create_embedding_fn(self): + embed_fns = [] + d = self.kwargs['input_dims'] + out_dim = 0 + if self.kwargs['include_input']: + embed_fns.append(lambda x: x) + out_dim += d + + max_freq = self.kwargs['max_freq_log2'] + N_freqs = self.kwargs['num_freqs'] + + if self.kwargs['log_sampling']: + freq_bands = 2. 
** jt.linspace(0., max_freq, N_freqs) + else: + freq_bands = jt.linspace(2.**0., 2.**max_freq, N_freqs) + + for freq in freq_bands: + for p_fn in self.kwargs['periodic_fns']: + embed_fns.append(lambda x, p_fn=p_fn, freq=freq: p_fn(x * freq)) + out_dim += d + + self.embed_fns = embed_fns + self.out_dim = out_dim + + def embed(self, inputs): + return jt.concat([fn(inputs) for fn in self.embed_fns], -1) + + +def get_embedder(multires, input_dims=3): + embed_kwargs = { + 'include_input': True, + 'input_dims': input_dims, + 'max_freq_log2': multires-1, + 'num_freqs': multires, + 'log_sampling': True, + 'periodic_fns': [jt.sin, jt.cos], + } + + embedder_obj = Embedder(**embed_kwargs) + def embed(x, eo=embedder_obj): return eo.embed(x) + return embed, embedder_obj.out_dim + diff --git a/contrib/NeRF-Editing/src/models/fields.py b/contrib/NeRF-Editing/src/models/fields.py new file mode 100644 index 00000000..ffcf72ec --- /dev/null +++ b/contrib/NeRF-Editing/src/models/fields.py @@ -0,0 +1,256 @@ +import jittor as jt +from jittor import nn +# import torch.nn.functional as F +import numpy as np +from models.embedder import get_embedder + + +# This implementation is borrowed from IDR: https://github.com/lioryariv/idr +class SDFNetwork(nn.Module): + def __init__(self, + d_in, + d_out, + d_hidden, + n_layers, + skip_in=(4,), + multires=0, + bias=0.5, + scale=1, + geometric_init=True, + weight_norm=True, + inside_outside=False): + super(SDFNetwork, self).__init__() + + dims = [d_in] + [d_hidden for _ in range(n_layers)] + [d_out] + + self.embed_fn_fine = None + + if multires > 0: + embed_fn, input_ch = get_embedder(multires, input_dims=d_in) + self.embed_fn_fine = embed_fn + dims[0] = input_ch + + self.num_layers = len(dims) + self.skip_in = skip_in + self.scale = scale + + for l in range(0, self.num_layers - 1): + if l + 1 in self.skip_in: + out_dim = dims[l + 1] - dims[0] + else: + out_dim = dims[l + 1] + + lin = nn.Linear(dims[l], out_dim) + + if geometric_init: + if l == self.num_layers - 2: + if not inside_outside: + jt.init.gauss_(lin.weight, mean=np.sqrt(np.pi) / np.sqrt(dims[l]), std=0.0001) + jt.init.constant_(lin.bias, -bias) + else: + jt.init.gauss_(lin.weight, mean=-np.sqrt(np.pi) / np.sqrt(dims[l]), std=0.0001) + jt.init.constant_(lin.bias, bias) + elif multires > 0 and l == 0: + jt.init.constant_(lin.bias, 0.0) + jt.init.constant_(lin.weight[:, 3:], 0.0) + jt.init.gauss_(lin.weight[:, :3], 0.0, np.sqrt(2) / np.sqrt(out_dim)) + elif multires > 0 and l in self.skip_in: + jt.init.constant_(lin.bias, 0.0) + jt.init.gauss_(lin.weight, 0.0, np.sqrt(2) / np.sqrt(out_dim)) + jt.init.constant_(lin.weight[:, -(dims[0] - 3):], 0.0) + else: + jt.init.constant_(lin.bias, 0.0) + jt.init.gauss_(lin.weight, 0.0, np.sqrt(2) / np.sqrt(out_dim)) + + # if weight_norm: + # lin = nn.utils.weight_norm(lin) + + setattr(self, "lin" + str(l), lin) + + self.activation = nn.Softplus(beta=100) + + def execute(self, inputs): + inputs = inputs * self.scale + if self.embed_fn_fine is not None: + inputs = self.embed_fn_fine(inputs) + + x = inputs + for l in range(0, self.num_layers - 1): + lin = getattr(self, "lin" + str(l)) + + if l in self.skip_in: + x = jt.concat([x, inputs], 1) / np.sqrt(2) + + x = lin(x) + + if l < self.num_layers - 2: + x = self.activation(x) + return jt.concat([x[:, :1] / self.scale, x[:, 1:]], dim=-1) + + def sdf(self, x): + return self.execute(x)[:, :1] + + def sdf_hidden_appearance(self, x): + return self.execute(x) + + def gradient(self, x): + y = self.sdf(x) + gradients = 
jt.grad(y,x,retain_graph=True) # same shape with x + # gradients = jt.grad(y,x) # same shape with x + return gradients + + +# This implementation is borrowed from IDR: https://github.com/lioryariv/idr +class RenderingNetwork(nn.Module): + def __init__(self, + d_feature, + mode, + d_in, + d_out, + d_hidden, + n_layers, + weight_norm=True, + multires_view=0, + squeeze_out=True): + super().__init__() + + self.mode = mode + self.squeeze_out = squeeze_out + dims = [d_in + d_feature] + [d_hidden for _ in range(n_layers)] + [d_out] + + self.embedview_fn = None + if multires_view > 0: + embedview_fn, input_ch = get_embedder(multires_view) + self.embedview_fn = embedview_fn + dims[0] += (input_ch - 3) + + self.num_layers = len(dims) + + for l in range(0, self.num_layers - 1): + out_dim = dims[l + 1] + lin = nn.Linear(dims[l], out_dim) + + # if weight_norm: + # lin = nn.utils.weight_norm(lin) + + setattr(self, "lin" + str(l), lin) + + self.relu = nn.ReLU() + + def execute(self, points, normals, view_dirs, feature_vectors): + if self.embedview_fn is not None: + view_dirs = self.embedview_fn(view_dirs) + + rendering_input = None + + if self.mode == 'idr': + rendering_input = jt.concat([points, view_dirs, normals, feature_vectors], dim=-1) + elif self.mode == 'no_view_dir': + rendering_input = jt.concat([points, normals, feature_vectors], dim=-1) + elif self.mode == 'no_normal': + rendering_input = jt.concat([points, view_dirs, feature_vectors], dim=-1) + + x = rendering_input + + for l in range(0, self.num_layers - 1): + lin = getattr(self, "lin" + str(l)) + + x = lin(x) + + if l < self.num_layers - 2: + x = self.relu(x) + + if self.squeeze_out: + x = jt.sigmoid(x) + return x + + +# This implementation is borrowed from nerf-pytorch: https://github.com/yenchenlin/nerf-pytorch +class NeRF(nn.Module): + def __init__(self, + D=8, + W=256, + d_in=3, + d_in_view=3, + multires=0, + multires_view=0, + output_ch=4, + skips=[4], + use_viewdirs=False): + super(NeRF, self).__init__() + self.D = D + self.W = W + self.d_in = d_in + self.d_in_view = d_in_view + self.input_ch = 3 + self.input_ch_view = 3 + self.embed_fn = None + self.embed_fn_view = None + + if multires > 0: + embed_fn, input_ch = get_embedder(multires, input_dims=d_in) + self.embed_fn = embed_fn + self.input_ch = input_ch + + if multires_view > 0: + embed_fn_view, input_ch_view = get_embedder(multires_view, input_dims=d_in_view) + self.embed_fn_view = embed_fn_view + self.input_ch_view = input_ch_view + + self.skips = skips + self.use_viewdirs = use_viewdirs + + self.pts_linears = nn.ModuleList( + [nn.Linear(self.input_ch, W)] + + [nn.Linear(W, W) if i not in self.skips else nn.Linear(W + self.input_ch, W) for i in range(D - 1)]) + + ### Implementation according to the official code release + ### (https://github.com/bmild/nerf/blob/master/run_nerf_helpers.py#L104-L105) + self.views_linears = nn.ModuleList([nn.Linear(self.input_ch_view + W, W // 2)]) + + ### Implementation according to the paper + # self.views_linears = nn.ModuleList( + # [nn.Linear(input_ch_views + W, W//2)] + [nn.Linear(W//2, W//2) for i in range(D//2)]) + + if use_viewdirs: + self.feature_linear = nn.Linear(W, W) + self.alpha_linear = nn.Linear(W, 1) + self.rgb_linear = nn.Linear(W // 2, 3) + else: + self.output_linear = nn.Linear(W, output_ch) + + def execute(self, input_pts, input_views): + if self.embed_fn is not None: + input_pts = self.embed_fn(input_pts) + if self.embed_fn_view is not None: + input_views = self.embed_fn_view(input_views) + + h = input_pts + for i, l in 
enumerate(self.pts_linears): + h = self.pts_linears[i](h) + h = nn.relu(h) + if i in self.skips: + h = jt.concat([input_pts, h], -1) + + if self.use_viewdirs: + alpha = self.alpha_linear(h) + feature = self.feature_linear(h) + h = jt.concat([feature, input_views], -1) + + for i, l in enumerate(self.views_linears): + h = self.views_linears[i](h) + h = nn.relu(h) + + rgb = self.rgb_linear(h) + return alpha, rgb + else: + assert False + + +class SingleVarianceNetwork(nn.Module): + def __init__(self, init_val): + super(SingleVarianceNetwork, self).__init__() + # self.register_parameter('variance', nn.Parameter(jt.array(init_val))) + self.variance = jt.array(init_val) + def execute(self, x): + return jt.ones([len(x), 1]) * jt.exp(self.variance * 10.0) diff --git a/contrib/NeRF-Editing/src/models/render_train.py b/contrib/NeRF-Editing/src/models/render_train.py new file mode 100644 index 00000000..af69a6b3 --- /dev/null +++ b/contrib/NeRF-Editing/src/models/render_train.py @@ -0,0 +1,442 @@ +import jittor as jt +import numpy as np +import logging +import mcubes + + +def extract_fields(bound_min, bound_max, resolution, query_func): + N = 64 + X = jt.linspace(bound_min[0], bound_max[0], resolution).split(N) + Y = jt.linspace(bound_min[1], bound_max[1], resolution).split(N) + Z = jt.linspace(bound_min[2], bound_max[2], resolution).split(N) + + u = np.zeros([resolution, resolution, resolution], dtype=np.float32) + with jt.no_grad(): + for xi, xs in enumerate(X): + for yi, ys in enumerate(Y): + for zi, zs in enumerate(Z): + xx, yy, zz = jt.meshgrid(xs, ys, zs) + pts = jt.concat([xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1) + val = query_func(pts).reshape(len(xs), len(ys), len(zs)).numpy() + u[xi * N: xi * N + len(xs), yi * N: yi * N + len(ys), zi * N: zi * N + len(zs)] = val + return u + + +def extract_geometry(bound_min, bound_max, resolution, threshold, query_func): + print('threshold: {}'.format(threshold)) + u = extract_fields(bound_min, bound_max, resolution, query_func) + vertices, triangles = mcubes.marching_cubes(u, threshold) + b_max_np = bound_max.numpy() + b_min_np = bound_min.numpy() + + vertices = vertices / (resolution - 1.0) * (b_max_np - b_min_np)[None, :] + b_min_np[None, :] + return vertices, triangles + + +def sample_pdf(bins, weights, n_samples, det=False): + # This implementation is from NeRF + # Get pdf + weights = weights + 1e-5 # prevent nans + pdf = weights / jt.sum(weights, -1, keepdims=True) + cdf = jt.cumsum(pdf, -1) + cdf = jt.concat([jt.zeros_like(cdf[..., :1]), cdf], -1) + # Take uniform samples + if det: + u = jt.linspace(0. + 0.5 / n_samples, 1. 
- 0.5 / n_samples, steps=n_samples) + u = u.expand(list(cdf.shape[:-1]) + [n_samples]) + else: + u = jt.rand(list(cdf.shape[:-1]) + [n_samples]) + + # Invert CDF + # u = u.contiguous() + inds = jt.searchsorted(cdf, u, right=True) + below = jt.maximum(jt.zeros_like(inds - 1), inds - 1) + above = jt.minimum((cdf.shape[-1] - 1) * jt.ones_like(inds), inds) + inds_g = jt.stack([below, above], -1) # (batch, N_samples, 2) + + matched_shape = [inds_g.shape[0], inds_g.shape[1], cdf.shape[-1]] + cdf_g = jt.gather(cdf.unsqueeze(1).expand(matched_shape), 2, inds_g) + bins_g = jt.gather(bins.unsqueeze(1).expand(matched_shape), 2, inds_g) + + denom = (cdf_g[..., 1] - cdf_g[..., 0]) + denom = jt.where(denom < 1e-5, jt.ones_like(denom), denom) + t = (u - cdf_g[..., 0]) / denom + samples = bins_g[..., 0] + t * (bins_g[..., 1] - bins_g[..., 0]) + + return samples + + +class NeuSRenderer: + def __init__(self, + nerf, + sdf_network, + deviation_network, + color_network, + n_samples, + n_importance, + n_outside, + up_sample_steps, + perturb): + self.nerf = nerf + self.sdf_network = sdf_network + self.deviation_network = deviation_network + self.color_network = color_network + self.n_samples = n_samples + self.n_importance = n_importance + self.n_outside = n_outside + self.up_sample_steps = up_sample_steps + self.perturb = perturb + + def render_core_outside(self, rays_o, rays_d, z_vals, sample_dist, nerf, background_rgb=None): + """ + Render background + """ + batch_size, n_samples = z_vals.shape + + # Section length + dists = z_vals[..., 1:] - z_vals[..., :-1] + dists = jt.concat([dists, jt.array([sample_dist]).expand(dists[..., :1].shape)], -1) + mid_z_vals = z_vals + dists * 0.5 + + # Section midpoints + pts = rays_o[:, None, :] + rays_d[:, None, :] * mid_z_vals[..., :, None] # batch_size, n_samples, 3 + + dis_to_center = jt.norm(pts, p=2, dim=-1, keepdim=True).safe_clip(1.0, 1e10) + pts = jt.concat([pts / dis_to_center, 1.0 / dis_to_center], dim=-1) # batch_size, n_samples, 4 + + dirs = rays_d[:, None, :].expand(batch_size, n_samples, 3) + + pts = pts.reshape(-1, 3 + int(self.n_outside > 0)) + dirs = dirs.reshape(-1, 3) + + density, sampled_color = nerf(pts, dirs) + alpha = 1.0 - jt.exp(-jt.nn.softplus(density.reshape(batch_size, n_samples)) * dists) + alpha = alpha.reshape(batch_size, n_samples) + weights = alpha * jt.cumprod(jt.concat([jt.ones([batch_size, 1]), 1. 
- alpha + 1e-7], -1), -1)[:, :-1] + sampled_color = sampled_color.reshape(batch_size, n_samples, 3) + color = (weights[:, :, None] * sampled_color).sum(dim=1) + if background_rgb is not None: + color = color + background_rgb * (1.0 - weights.sum(dim=-1, keepdims=True)) + + return { + 'color': color, + 'sampled_color': sampled_color, + 'alpha': alpha, + 'weights': weights, + } + + def up_sample(self, rays_o, rays_d, z_vals, sdf, n_importance, inv_s, pts): + """ + Up sampling give a fixed inv_s + """ + batch_size, n_samples = z_vals.shape + # pts = rays_o[:, None, :] + rays_d[:, None, :] * z_vals[..., :, None] # n_rays, n_samples, 3 + radius = jt.norm(pts, p=2, dim=-1, keepdim=False) + # inside_sphere = (radius[:, :-1] < 1.0) | (radius[:, 1:] < 1.0) + inside_sphere = (radius[:, :-1] < 1.5) | (radius[:, 1:] < 1.5) + sdf = sdf.reshape(batch_size, n_samples) + prev_sdf, next_sdf = sdf[:, :-1], sdf[:, 1:] + prev_z_vals, next_z_vals = z_vals[:, :-1], z_vals[:, 1:] + mid_sdf = (prev_sdf + next_sdf) * 0.5 + cos_val = (next_sdf - prev_sdf) / (next_z_vals - prev_z_vals + 1e-5) + + # ---------------------------------------------------------------------------------------------------------- + # Use min value of [ cos, prev_cos ] + # Though it makes the sampling (not rendering) a little bit biased, this strategy can make the sampling more + # robust when meeting situations like below: + # + # SDF + # ^ + # |\ -----x----... + # | \ / + # | x x + # |---\----/-------------> 0 level + # | \ / + # | \/ + # | + # ---------------------------------------------------------------------------------------------------------- + prev_cos_val = jt.concat([jt.zeros([batch_size, 1]), cos_val[:, :-1]], dim=-1) + cos_val = jt.stack([prev_cos_val, cos_val], dim=-1) + cos_val = jt.min(cos_val, dim=-1, keepdims=False) + cos_val = cos_val.safe_clip(-1e3, 0.0) * inside_sphere + + dist = (next_z_vals - prev_z_vals) + prev_esti_sdf = mid_sdf - cos_val * dist * 0.5 + next_esti_sdf = mid_sdf + cos_val * dist * 0.5 + prev_cdf = jt.sigmoid(prev_esti_sdf * inv_s) + next_cdf = jt.sigmoid(next_esti_sdf * inv_s) + alpha = (prev_cdf - next_cdf + 1e-5) / (prev_cdf + 1e-5) + weights = alpha * jt.cumprod( + jt.concat([jt.ones([batch_size, 1]), 1. 
- alpha + 1e-7], -1), -1)[:, :-1] + + z_samples = sample_pdf(z_vals, weights, n_importance, det=True).detach() + return z_samples + + def cat_z_vals(self, rays_o, rays_d, z_vals, new_z_vals, sdf, pts, last=False): + batch_size, n_samples = z_vals.shape + _, n_importance = new_z_vals.shape + # pts = rays_o[:, None, :] + rays_d[:, None, :] * new_z_vals[..., :, None] + z_vals = jt.concat([z_vals, new_z_vals], dim=-1) + # z_vals, index = torch.sort(z_vals, dim=-1) + index, z_vals = jt.argsort(z_vals, dim=-1) + + if not last: + new_sdf = self.sdf_network.sdf(pts.reshape(-1, 3)).reshape(batch_size, n_importance) + sdf = jt.concat([sdf, new_sdf], dim=-1) + xx = jt.arange(batch_size)[:, None].expand(batch_size, n_samples + n_importance).reshape(-1) + index = index.reshape(-1) + sdf = sdf[(xx, index)].reshape(batch_size, n_samples + n_importance) + + return z_vals, sdf + + def render_core(self, + rays_o, + rays_d, + z_vals, + sample_dist, + sdf_network, + deviation_network, + color_network, + pts, + setZero=None, + background_alpha=None, + background_sampled_color=None, + background_rgb=None, + cos_anneal_ratio=0.0): + batch_size, n_samples = z_vals.shape + + # Section length + dists = z_vals[..., 1:] - z_vals[..., :-1] + dists = jt.concat([dists, jt.array([sample_dist]).expand(dists[..., :1].shape)], -1) + mid_z_vals = z_vals + dists * 0.5 + + # Section midpoints + # pts = rays_o[:, None, :] + rays_d[:, None, :] * mid_z_vals[..., :, None] # n_rays, n_samples, 3 + dirs = rays_d[:, None, :].expand(pts.shape) + + ### only consider the valid points + pts = pts.reshape(-1, 3) + dirs = dirs.reshape(-1, 3) + + sdf_nn_output = sdf_network(pts) + sdf = sdf_nn_output[:, :1] + feature_vector = sdf_nn_output[:, 1:] + + gradients = sdf_network.gradient(pts) + + # feature_vector[setZero.reshape(-1)] = 0 + # gradients[setZero.reshape(-1)] = 0 + sampled_color = color_network(pts, gradients, dirs, feature_vector).reshape(batch_size, n_samples, 3) + + inv_s = deviation_network(jt.zeros([1, 3]))[:, :1].safe_clip(1e-6, 1e6) # Single parameter + inv_s = inv_s.expand(batch_size * n_samples, 1) + + true_cos = (dirs * gradients).sum(-1, keepdims=True) + + # "cos_anneal_ratio" grows from 0 to 1 in the beginning training iterations. The anneal strategy below makes + # the cos value "not dead" at the beginning training iterations, for better convergence. 
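+ # In short (assuming the standard NeuS formulation this block follows): with
+ # Phi_s(x) = sigmoid(inv_s * x), each ray section [p_i, p_{i+1}] receives
+ #     alpha_i = clip((Phi_s(sdf_prev) - Phi_s(sdf_next)) / Phi_s(sdf_prev), 0, 1),
+ # where sdf_prev / sdf_next are estimated from the mid-point sdf and the
+ # annealed view/normal cosine computed just below.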
+ iter_cos = -(jt.nn.relu(-true_cos * 0.5 + 0.5) * (1.0 - cos_anneal_ratio) + + jt.nn.relu(-true_cos) * cos_anneal_ratio) # always non-positive + + # Estimate signed distances at section points + estimated_next_sdf = sdf + iter_cos * dists.reshape(-1, 1) * 0.5 + estimated_prev_sdf = sdf - iter_cos * dists.reshape(-1, 1) * 0.5 + + prev_cdf = jt.sigmoid(estimated_prev_sdf * inv_s) + next_cdf = jt.sigmoid(estimated_next_sdf * inv_s) + + p = prev_cdf - next_cdf + c = prev_cdf + + alpha = ((p + 1e-5) / (c + 1e-5)).reshape(batch_size, n_samples).safe_clip(0.0, 1.0) + + pts_norm = jt.norm(pts, p=2, dim=-1, keepdim=True).reshape(batch_size, n_samples) + # inside_sphere = (pts_norm < 1.0).float().detach() + inside_sphere = (pts_norm < 1.5).float().detach() + + relax_inside_sphere = (pts_norm < 2).float().detach() + + # Render with background + if background_alpha is not None: + alpha = alpha * inside_sphere + background_alpha[:, :n_samples] * (1.0 - inside_sphere) + alpha = jt.concat([alpha, background_alpha[:, n_samples:]], dim=-1) + sampled_color = sampled_color * inside_sphere[:, :, None] +\ + background_sampled_color[:, :n_samples] * (1.0 - inside_sphere)[:, :, None] + sampled_color = jt.concat([sampled_color, background_sampled_color[:, n_samples:]], dim=1) + + weights = alpha * jt.cumprod(jt.concat([jt.ones([batch_size, 1]), 1. - alpha + 1e-7], -1), -1)[:, :-1] + # weights[weights.isnan()] = 0 ### strange !!! + weights_sum = weights.sum(dim=-1, keepdims=True) + color = (sampled_color * weights[:, :, None]).sum(dim=1) + if background_rgb is not None: # Fixed background, usually black + color = color + background_rgb * (1.0 - weights_sum) + + # color[color.isnan()] = 0 ### strange !!! + + # Eikonal loss + gradient_error = (jt.norm(gradients.reshape(batch_size, n_samples, 3), p=2, + dim=-1) - 1.0) ** 2 + + # gradient_error[gradient_error.isnan()] = 0 ### strange !!! 
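+ # Aggregate the per-sample Eikonal residual (||grad sdf|| - 1)^2 over the relaxed
+ # inside-sphere region only; the 1e-5 keeps the mean finite when the mask is empty.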
+ + gradient_error = (relax_inside_sphere * gradient_error).sum() / (relax_inside_sphere.sum() + 1e-5) + + return { + 'color': color, + 'sdf': sdf, + 'dists': dists, + 'gradients': gradients.reshape(batch_size, n_samples, 3), + 's_val': 1.0 / inv_s, + 'mid_z_vals': mid_z_vals, + 'weights': weights, + 'cdf': c.reshape(batch_size, n_samples), + 'gradient_error': gradient_error, + 'inside_sphere': inside_sphere + } + + def render(self, rays_o, rays_d, near, far, perturb_overwrite=-1, background_rgb=None, cos_anneal_ratio=0.0, + use_deform=False, query_delta=None, hull=None, deltas=None, vis_coord_ind=-1): + batch_size = len(rays_o) + sample_dist = 2.0 / self.n_samples # Assuming the region of interest is a unit sphere + z_vals = jt.linspace(0.0, 1.0, self.n_samples) + z_vals = near + (far - near) * z_vals[None, :] + + z_vals_outside = None + if self.n_outside > 0: + z_vals_outside = jt.linspace(1e-3, 1.0 - 1.0 / (self.n_outside + 1.0), self.n_outside) + + n_samples = self.n_samples + perturb = self.perturb + + if perturb_overwrite >= 0: + perturb = perturb_overwrite + if perturb > 0: + t_rand = (jt.rand([batch_size, 1]) - 0.5) + z_vals = z_vals + t_rand * 2.0 / self.n_samples + + if self.n_outside > 0: + mids = .5 * (z_vals_outside[..., 1:] + z_vals_outside[..., :-1]) + upper = jt.concat([mids, z_vals_outside[..., -1:]], -1) + lower = jt.concat([z_vals_outside[..., :1], mids], -1) + t_rand = jt.rand([batch_size, z_vals_outside.shape[-1]]) + z_vals_outside = lower[None, :] + (upper - lower)[None, :] * t_rand + + if self.n_outside > 0: + z_vals_outside = far / jt.flip(z_vals_outside, dim=-1) + 1.0 / self.n_samples + + background_alpha = None + background_sampled_color = None + + # Up sample + if self.n_importance > 0: + with jt.no_grad(): + pts = rays_o[:, None, :] + rays_d[:, None, :] * z_vals[..., :, None] + if use_deform: + det, _ = query_delta(hull, deltas, pts) + pts += det + sdf = self.sdf_network.sdf(pts.reshape(-1, 3)).reshape(batch_size, self.n_samples) + + for i in range(self.up_sample_steps): + pts = rays_o[:, None, :] + rays_d[:, None, :] * z_vals[..., :, None] # n_rays, n_samples, 3 + if use_deform: + det, _ = query_delta(hull, deltas, pts) + pts += det + new_z_vals = self.up_sample(rays_o, + rays_d, + z_vals, + sdf, + self.n_importance // self.up_sample_steps, + 64 * 2**i, + pts) + pts = rays_o[:, None, :] + rays_d[:, None, :] * new_z_vals[..., :, None] + if use_deform: + det, _ = query_delta(hull, deltas, pts) + pts += det + z_vals, sdf = self.cat_z_vals(rays_o, + rays_d, + z_vals, + new_z_vals, + sdf, + pts, + last=(i + 1 == self.up_sample_steps)) + + n_samples = self.n_samples + self.n_importance + + # Background model + if self.n_outside > 0: + z_vals_feed = jt.concat([z_vals, z_vals_outside], dim=-1) + _, z_vals_feed = jt.argsort(z_vals_feed, dim=-1) + ret_outside = self.render_core_outside(rays_o, rays_d, z_vals_feed, sample_dist, self.nerf) + + background_sampled_color = ret_outside['sampled_color'] + background_alpha = ret_outside['alpha'] + + dists = z_vals[..., 1:] - z_vals[..., :-1] + dists = jt.concat([dists, jt.array([sample_dist]).expand(dists[..., :1].shape)], -1) + mid_z_vals = z_vals + dists * 0.5 + # Section midpoints + pts = rays_o[:, None, :] + rays_d[:, None, :] * mid_z_vals[..., :, None] # n_rays, n_samples, 3 + + if vis_coord_ind >= 0 and vis_coord_ind < pts.shape[0]: # visualize ray + pickedRays = pts[vis_coord_ind] # [numSample 3] + save_path = './vis_ray_ori.obj' + with open(save_path, 'w') as f: + for pt in pickedRays.cpu().numpy(): + f.write("v %s %s 
%s\n" % (pt[0], pt[1], pt[2])) + print("ray has visualized") + + if use_deform: + for hull_x, delta_x in zip(hull, deltas): + det, tri_verts = query_delta(hull_x, delta_x, pts) + pts += det + setZero = tri_verts[-1] + else: + setZero = None + + if vis_coord_ind >= 0 and vis_coord_ind < pts.shape[0]: # visualize ray + pickedRays = pts[vis_coord_ind] # [numSample 3] + save_path = './vis_ray.obj' + with open(save_path, 'w') as f: + for pt in pickedRays.cpu().numpy(): + f.write("v %s %s %s\n" % (pt[0], pt[1], pt[2])) + print("ray has visualized") + + # Render core + ret_fine = self.render_core(rays_o, + rays_d, + z_vals, + sample_dist, + self.sdf_network, + self.deviation_network, + self.color_network, + pts, + setZero, + background_rgb=background_rgb, + background_alpha=background_alpha, + background_sampled_color=background_sampled_color, + cos_anneal_ratio=cos_anneal_ratio) + + color_fine = ret_fine['color'] + weights = ret_fine['weights'] + weights_sum = weights.sum(dim=-1, keepdims=True) + gradients = ret_fine['gradients'] + s_val = ret_fine['s_val'].reshape(batch_size, n_samples).mean(dim=-1, keepdims=True) + + return { + 'color_fine': color_fine, + 's_val': s_val, + 'cdf_fine': ret_fine['cdf'], + 'weight_sum': weights_sum, + 'weight_max': jt.max(weights, dim=-1, keepdims=True), + 'gradients': gradients, + 'weights': weights, + 'gradient_error': ret_fine['gradient_error'], + 'inside_sphere': ret_fine['inside_sphere'] + } + + def extract_geometry(self, bound_min, bound_max, resolution, threshold=0.0, do_dilation=False): + return extract_geometry(bound_min, + bound_max, + resolution=resolution, + threshold=threshold, + query_func=lambda pts: -self.sdf_network.sdf(pts)) diff --git a/contrib/NeRF-Editing/src/models/renderer.py b/contrib/NeRF-Editing/src/models/renderer.py new file mode 100644 index 00000000..a045707f --- /dev/null +++ b/contrib/NeRF-Editing/src/models/renderer.py @@ -0,0 +1,364 @@ +import jittor as jt +import numpy as np +import logging +import mcubes + +import time, functools + +def metric(fn): + @functools.wraps(fn) + def wrapper(*args, **kwargs): + t1 = time.time() + res = fn(*args, **kwargs) + print('%s executed in %s s' % (fn.__name__, (time.time() - t1))) + return res + return wrapper + + +def extract_fields(bound_min, bound_max, resolution, query_func): + N = 64 + X = jt.linspace(bound_min[0], bound_max[0], resolution).split(N) + Y = jt.linspace(bound_min[1], bound_max[1], resolution).split(N) + Z = jt.linspace(bound_min[2], bound_max[2], resolution).split(N) + + u = np.zeros([resolution, resolution, resolution], dtype=np.float32) + with jt.no_grad(): + for xi, xs in enumerate(X): + for yi, ys in enumerate(Y): + for zi, zs in enumerate(Z): + xx, yy, zz = jt.meshgrid(xs, ys, zs) + pts = jt.concat([xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1) + val = query_func(pts).reshape(len(xs), len(ys), len(zs)).numpy() + u[xi * N: xi * N + len(xs), yi * N: yi * N + len(ys), zi * N: zi * N + len(zs)] = val + return u + + +def extract_geometry(bound_min, bound_max, resolution, threshold, query_func, do_dilation=False): + print('threshold: {}'.format(threshold)) + u = extract_fields(bound_min, bound_max, resolution, query_func) + if do_dilation: + import scipy + kernel_size = 5 + print("do the density dilation with kernel size %d ! 
" % (kernel_size)) + # erosion since the sdf outside is negative + u = scipy.ndimage.morphology.grey_dilation(u, size=(kernel_size,kernel_size,kernel_size)) + vertices, triangles = mcubes.marching_cubes(u, threshold) + b_max_np = bound_max.numpy() + b_min_np = bound_min.numpy() + + vertices = vertices / (resolution - 1.0) * (b_max_np - b_min_np)[None, :] + b_min_np[None, :] + return vertices, triangles + + +def sample_pdf(bins, weights, n_samples, det=False): + # This implementation is from NeRF + # Get pdf + weights = weights + 1e-5 # prevent nans + pdf = weights / jt.sum(weights, -1, keepdims=True) + cdf = jt.cumsum(pdf, -1) + cdf = jt.concat([jt.zeros_like(cdf[..., :1]), cdf], -1) + # Take uniform samples + if det: + u = jt.linspace(0. + 0.5 / n_samples, 1. - 0.5 / n_samples, steps=n_samples) + u = u.expand(list(cdf.shape[:-1]) + [n_samples]) + else: + u = jt.rand(list(cdf.shape[:-1]) + [n_samples]) + + # Invert CDF + u = u.contiguous() + inds = jt.searchsorted(cdf, u, right=True) + below = jt.maximum(jt.zeros_like(inds - 1), inds - 1) + above = jt.minimum((cdf.shape[-1] - 1) * jt.ones_like(inds), inds) + inds_g = jt.stack([below, above], -1) # (batch, N_samples, 2) + + matched_shape = [inds_g.shape[0], inds_g.shape[1], cdf.shape[-1]] + cdf_g = jt.gather(cdf.unsqueeze(1).expand(matched_shape), 2, inds_g) + bins_g = jt.gather(bins.unsqueeze(1).expand(matched_shape), 2, inds_g) + + denom = (cdf_g[..., 1] - cdf_g[..., 0]) + denom = jt.where(denom < 1e-5, jt.ones_like(denom), denom) + t = (u - cdf_g[..., 0]) / denom + samples = bins_g[..., 0] + t * (bins_g[..., 1] - bins_g[..., 0]) + + return samples + + +class NeuSRenderer: + def __init__(self, + nerf, + sdf_network, + deviation_network, + color_network, + n_samples, + n_importance, + n_outside, + up_sample_steps, + perturb): + self.nerf = nerf + self.sdf_network = sdf_network + self.deviation_network = deviation_network + self.color_network = color_network + self.n_samples = n_samples + self.n_importance = n_importance + self.n_outside = n_outside + self.up_sample_steps = up_sample_steps + self.perturb = perturb + + # @metric + def render_core_outside(self, rays_o, rays_d, z_vals, sample_dist, nerf, background_rgb=None): + """ + Render background + """ + batch_size, n_samples = z_vals.shape + + # Section length + dists = z_vals[..., 1:] - z_vals[..., :-1] + dists = jt.concat([dists, jt.array([sample_dist]).expand(dists[..., :1].shape)], -1) + mid_z_vals = z_vals + dists * 0.5 + + # Section midpoints + pts = rays_o[:, None, :] + rays_d[:, None, :] * mid_z_vals[..., :, None] # batch_size, n_samples, 3 + + dis_to_center = jt.linalg.norm(pts, ord=2, dim=-1, keepdim=True).safe_clip(1.0, 1e10) + pts = jt.concat([pts / dis_to_center, 1.0 / dis_to_center], dim=-1) # batch_size, n_samples, 4 + + dirs = rays_d[:, None, :].expand(batch_size, n_samples, 3) + + pts = pts.reshape(-1, 3 + int(self.n_outside > 0)) + dirs = dirs.reshape(-1, 3) + + density, sampled_color = nerf(pts, dirs) + alpha = 1.0 - jt.exp(-jt.nn.softplus(density.reshape(batch_size, n_samples)) * dists) + alpha = alpha.reshape(batch_size, n_samples) + weights = alpha * jt.cumprod(jt.concat([jt.ones([batch_size, 1]), 1. 
- alpha + 1e-7], -1), -1)[:, :-1] + sampled_color = sampled_color.reshape(batch_size, n_samples, 3) + color = (weights[:, :, None] * sampled_color).sum(dim=1) + if background_rgb is not None: + color = color + background_rgb * (1.0 - weights.sum(dim=-1, keepdim=True)) + + return { + 'color': color, + 'sampled_color': sampled_color, + 'alpha': alpha, + 'weights': weights, + } + + def up_sample(self, rays_o, rays_d, z_vals, sdf, n_importance, inv_s, pts): + pass + + def cat_z_vals(self, rays_o, rays_d, z_vals, new_z_vals, sdf, pts, last=False): + pass + + # @metric + def render_core(self, + rays_o, + rays_d, + z_vals, + sample_dist, + sdf_network, + deviation_network, + color_network, + pts, + view_dir, + setZero=None, + background_alpha=None, + background_sampled_color=None, + background_rgb=None, + cos_anneal_ratio=0.0): + batch_size, n_samples = z_vals.shape + + # Section length + dists = z_vals[..., 1:] - z_vals[..., :-1] + dists = jt.concat([dists, jt.array([sample_dist]).expand(dists[..., :1].shape)], -1) + mid_z_vals = z_vals + dists * 0.5 + + # Section midpoints + # pts = rays_o[:, None, :] + rays_d[:, None, :] * mid_z_vals[..., :, None] # n_rays, n_samples, 3 + if view_dir != None: + dirs = view_dir[:, None, :].expand(pts.shape) + else: + dirs = rays_d[:, None, :].expand(pts.shape) + + ### only consider the valid points + pts = pts.reshape(-1, 3) + dirs = dirs.reshape(-1, 3) + + sdf_nn_output = sdf_network(pts) + sdf = sdf_nn_output[:, :1] + feature_vector = sdf_nn_output[:, 1:] + + gradients = sdf_network.gradient(pts) + + with jt.no_grad(): # if setZero != None: + if isinstance(setZero, jt.Var): + # pass + sdf[setZero.reshape(-1)] = 10000 + feature_vector[setZero.reshape(-1)] = 0 + gradients[setZero.reshape(-1)] = 0 + sampled_color = color_network(pts, gradients, dirs, feature_vector).reshape(batch_size, n_samples, 3) + + inv_s = deviation_network(jt.zeros([1, 3]))[:, :1].safe_clip(1e-6, 1e6) # Single parameter + inv_s = inv_s.expand(batch_size * n_samples, 1) + + true_cos = (dirs * gradients).sum(-1, keepdims=True) + + # "cos_anneal_ratio" grows from 0 to 1 in the beginning training iterations. The anneal strategy below makes + # the cos value "not dead" at the beginning training iterations, for better convergence. 
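+ # iter_cos blends relu(0.5 - 0.5 * cos) with relu(-cos) according to
+ # cos_anneal_ratio; the leading minus keeps it non-positive, so
+ # estimated_prev_sdf >= estimated_next_sdf along the ray below.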
+ iter_cos = -(jt.nn.relu(-true_cos * 0.5 + 0.5) * (1.0 - cos_anneal_ratio) + + jt.nn.relu(-true_cos) * cos_anneal_ratio) # always non-positive + + # Estimate signed distances at section points + estimated_next_sdf = sdf + iter_cos * dists.reshape(-1, 1) * 0.5 + estimated_prev_sdf = sdf - iter_cos * dists.reshape(-1, 1) * 0.5 + + prev_cdf = jt.sigmoid(estimated_prev_sdf * inv_s) + next_cdf = jt.sigmoid(estimated_next_sdf * inv_s) + + p = prev_cdf - next_cdf + c = prev_cdf + + alpha = ((p + 1e-5) / (c + 1e-5)).reshape(batch_size, n_samples).safe_clip(0.0, 1.0) + + pts_norm = jt.norm(pts, p=2, dim=-1, keepdim=True).reshape(batch_size, n_samples) + # inside_sphere = (pts_norm < 1.0).float().detach() + inside_sphere = (pts_norm < 1.5).float().detach() + + relax_inside_sphere = (pts_norm < 1.2).float().detach() + + # Render with background + if background_alpha is not None: + alpha = alpha * inside_sphere + background_alpha[:, :n_samples] * (1.0 - inside_sphere) + alpha = jt.concat([alpha, background_alpha[:, n_samples:]], dim=-1) + sampled_color = sampled_color * inside_sphere[:, :, None] +\ + background_sampled_color[:, :n_samples] * (1.0 - inside_sphere)[:, :, None] + sampled_color = jt.concat([sampled_color, background_sampled_color[:, n_samples:]], dim=1) + + weights = alpha * jt.cumprod(jt.concat([jt.ones([batch_size, 1]), 1. - alpha + 1e-7], -1), -1)[:, :-1] + + # set zero + # if setZero != None: + if isinstance(setZero, jt.Var): + weights[setZero.squeeze(-1)==True] = 0 + + weights_sum = weights.sum(dim=-1, keepdims=True) + + color = (sampled_color * weights[:, :, None]).sum(dim=1) + if background_rgb is not None: # Fixed background, usually black + color = color + background_rgb * (1.0 - weights_sum) + + # Eikonal loss + gradient_error = (jt.norm(gradients.reshape(batch_size, n_samples, 3), p=2, + dim=-1) - 1.0) ** 2 + gradient_error = (relax_inside_sphere * gradient_error).sum() / (relax_inside_sphere.sum() + 1e-5) + + return { + 'color': color, + 'sdf': sdf, + 'dists': dists, + 'gradients': gradients.reshape(batch_size, n_samples, 3), + 's_val': 1.0 / inv_s, + 'mid_z_vals': mid_z_vals, + 'weights': weights, + 'cdf': c.reshape(batch_size, n_samples), + 'gradient_error': gradient_error, + 'inside_sphere': inside_sphere + } + + # @metric + def render(self, rays_o, rays_d, near, far, view_dir, perturb_overwrite=-1, background_rgb=None, cos_anneal_ratio=0.0, + use_deform=False, query_delta=None, hull=None, deltas=None, vis_coord_ind=-1): + batch_size = len(rays_o) + sample_dist = 2.0 / self.n_samples # Assuming the region of interest is a unit sphere + z_vals = jt.linspace(0.0, 1.0, self.n_samples) + z_vals = near + (far - near) * z_vals[None, :] + + z_vals_outside = None + if self.n_outside > 0: + z_vals_outside = jt.linspace(1e-3, 1.0 - 1.0 / (self.n_outside + 1.0), self.n_outside) + + n_samples = self.n_samples + perturb = self.perturb + + background_alpha = None + background_sampled_color = None + + # Background model + if self.n_outside > 0: + z_vals_feed = jt.concat([z_vals, z_vals_outside], dim=-1) + _, z_vals_feed = jt.argsort(z_vals_feed, dim=-1) + ret_outside = self.render_core_outside(rays_o, rays_d, z_vals_feed, sample_dist, self.nerf) + + background_sampled_color = ret_outside['sampled_color'] + background_alpha = ret_outside['alpha'] + + dists = z_vals[..., 1:] - z_vals[..., :-1] + dists = jt.concat([dists, jt.array([sample_dist]).expand(dists[..., :1].shape)], -1) + mid_z_vals = z_vals + dists * 0.5 + # Section midpoints + pts = rays_o[:, None, :] + rays_d[:, None, :] * 
mid_z_vals[..., :, None] # n_rays, n_samples, 3 + + if vis_coord_ind >= 0 and vis_coord_ind < pts.shape[0]: # visualize ray + pickedRays = pts[vis_coord_ind] # [numSample 3] + save_path = './vis_ray_ori.obj' + with open(save_path, 'w') as f: + for pt in pickedRays.cpu().numpy(): + f.write("v %s %s %s\n" % (pt[0], pt[1], pt[2])) + print("ray has visualized") + + if use_deform: + det, tri_verts = query_delta(hull, deltas, pts) + pts += det + setZero = tri_verts[-1] + else: + setZero = None + + if vis_coord_ind >= 0 and vis_coord_ind < pts.shape[0]: # visualize ray + pickedRays = pts[vis_coord_ind] # [numSample 3] + save_path = './vis_ray.obj' + with open(save_path, 'w') as f: + for pt in pickedRays.cpu().numpy(): + f.write("v %s %s %s\n" % (pt[0], pt[1], pt[2])) + print("ray has visualized") + + # Render core + # import time + # t1 = time.time() + ret_fine = self.render_core(rays_o, + rays_d, + z_vals, + sample_dist, + self.sdf_network, + self.deviation_network, + self.color_network, + pts, + view_dir, + setZero, + background_rgb=background_rgb, + background_alpha=background_alpha, + background_sampled_color=background_sampled_color, + cos_anneal_ratio=cos_anneal_ratio) + # print("rendering batch core cost time %s" % (time.time()-t1)) + color_fine = ret_fine['color'] + weights = ret_fine['weights'] + weights_sum = weights.sum(dim=-1, keepdims=True) + gradients = ret_fine['gradients'] + s_val = ret_fine['s_val'].reshape(batch_size, n_samples).mean(dim=-1, keepdims=True) + + return { + 'color_fine': color_fine, + 's_val': s_val, + 'cdf_fine': ret_fine['cdf'], + 'weight_sum': weights_sum, + 'weight_max': jt.max(weights, dim=-1, keepdims=True), + 'gradients': gradients, + 'weights': weights, + 'gradient_error': ret_fine['gradient_error'], + 'inside_sphere': ret_fine['inside_sphere'] + } + + def extract_geometry(self, bound_min, bound_max, resolution, threshold=0.0, do_dilation=False): + return extract_geometry(bound_min, + bound_max, + resolution=resolution, + threshold=threshold, + query_func=lambda pts: -self.sdf_network.sdf(pts), do_dilation=do_dilation) diff --git a/contrib/NeRF-Editing/src/utils.py b/contrib/NeRF-Editing/src/utils.py new file mode 100644 index 00000000..c3104db8 --- /dev/null +++ b/contrib/NeRF-Editing/src/utils.py @@ -0,0 +1,491 @@ +import os +os.environ['PYOPENGL_PLATFORM'] = 'osmesa' +from concurrent.futures import process +import numpy as np +import trimesh +import pyrender +import cv2 +import jittor as jt +jt.flags.use_cuda = 1 +import time, functools + +def metric(fn): + @functools.wraps(fn) + def wrapper(*args, **kwargs): + t1 = time.time() + res = fn(*args, **kwargs) + print('%s executed in %s s' % (fn.__name__, (time.time() - t1))) + return res + return wrapper + + +def render_depth(c2w, mesh, FOCAL=555.555, IMGSIZE=400, aspect_ratio=1.0) -> np.array: + + scene = pyrender.Scene() + + mesh = pyrender.Mesh.from_trimesh(mesh) + scene.add(mesh) + + camera_pose = c2w + camera = pyrender.PerspectiveCamera(yfov=2*np.arctan(IMGSIZE/FOCAL/2), aspectRatio=aspect_ratio) + + scene.add(camera, pose=camera_pose) + + r = pyrender.OffscreenRenderer(int(IMGSIZE*aspect_ratio), IMGSIZE) + _, depth = r.render(scene) + return depth + + +def load_objs_as_meshes(mesh_files): + if isinstance(mesh_files, list): + res = [load_objs_as_meshes(x) for x in mesh_files] + else: + res = trimesh.load_mesh(mesh_files, process=False, maintain_order=True) + return res + + +import re +def natural_sort(l): + convert = lambda text: int(text) if text.isdigit() else text.lower() + alphanum_key = lambda 
key: [convert(c) for c in re.split('([0-9]+)', key)] + return sorted(l, key=alphanum_key) + + +def readTXT(txt_file:str) -> tuple: + with open(txt_file, 'r') as f: + NumV = int(f.readline()) + verts, tets = [],[] + for _ in range(NumV): + verts.append(np.fromstring(f.readline(), sep = " ")) + NumT = int(f.readline()) + for _ in range(NumT): + tets.append(np.fromstring(f.readline(), sep=" ", dtype=np.int32)) + verts = np.stack(verts) + tets = np.stack(tets) + verts = jt.array(verts, dtype=jt.float32) + tets = jt.array(tets, dtype=jt.int32) + return verts, tets + + +def readVertsofOVM(ovm_path:str): + verts = [] + with open(ovm_path, 'r') as vmf: + lines = vmf.readlines() + NumV = int(lines[2]) + for idx in range(3, 3+NumV): + verts.append(np.fromstring(lines[idx], sep=" ", dtype=np.float)) + verts = np.stack(verts, axis=0) + return jt.array(verts, dtype=jt.float32) + + +class jt_KNN(jt.nn.Module): + def __init__(self, k): + self.k = k + self.cuda_inc= """ + #undef out + #include "helper_cuda.h" + __global__ void compute_distances(float * ref, + int ref_width, + int ref_pitch, + float * query, + int query_width, + int query_pitch, + int height, + float * dist) { + // Declaration of the shared memory arrays As and Bs used to store the sub-matrix of A and B + const int BLOCK_DIM = 16; + __shared__ float shared_A[BLOCK_DIM][BLOCK_DIM]; + __shared__ float shared_B[BLOCK_DIM][BLOCK_DIM]; + // Sub-matrix of A (begin, step, end) and Sub-matrix of B (begin, step) + __shared__ int begin_A; + __shared__ int begin_B; + __shared__ int step_A; + __shared__ int step_B; + __shared__ int end_A; + // Thread index + int tx = threadIdx.x; + int ty = threadIdx.y; + int batch_id = blockIdx.z; + // Initializarion of the SSD for the current thread + float ssd = 0.f; + // Loop parameters + begin_A = BLOCK_DIM * blockIdx.y; + begin_B = BLOCK_DIM * blockIdx.x; + step_A = BLOCK_DIM * ref_pitch; + step_B = BLOCK_DIM * query_pitch; + end_A = begin_A + (height-1) * ref_pitch; + // Conditions + int cond0 = (begin_A + tx < ref_width); // used to write in shared memory + int cond1 = (begin_B + tx < query_width); // used to write in shared memory & to computations and to write in output array + int cond2 = (begin_A + ty < ref_width); // used to computations and to write in output matrix + // Loop over all the sub-matrices of A and B required to compute the block sub-matrix + for (int a = begin_A, b = begin_B; a <= end_A; a += step_A, b += step_B) { + // Load the matrices from device memory to shared memory; each thread loads one element of each matrix + if (a/ref_pitch + ty < height) { + shared_A[ty][tx] = (cond0)? ref[a + ref_pitch * ty + tx + batch_id * height * ref_pitch] : 0; + shared_B[ty][tx] = (cond1)? 
query[b + query_pitch * ty + tx + batch_id * height * query_pitch] : 0; + } + else { + shared_A[ty][tx] = 0; + shared_B[ty][tx] = 0; + } + // Synchronize to make sure the matrices are loaded + __syncthreads(); + // Compute the difference between the two matrixes; each thread computes one element of the block sub-matrix + if (cond2 && cond1) { + for (int k = 0; k < BLOCK_DIM; ++k){ + float tmp = shared_A[k][ty] - shared_B[k][tx]; + ssd += tmp*tmp; + } + } + // Synchronize to make sure that the preceeding computation is done before loading two new sub-matrices of A and B in the next iteration + __syncthreads(); + } + // Write the block sub-matrix to device memory; each thread writes one element + if (cond2 && cond1) { + dist[ (begin_A + ty) * query_pitch + begin_B + tx + batch_id * ref_pitch * query_pitch ] = ssd; + } + } + __global__ void modified_insertion_sort(float * dist, + int ref_pitch, + int * index, + int index_pitch, + int width, + int height, + int k){ + // Column position + unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x; + int batch_id = blockIdx.z ; + + // Do nothing if we are out of bounds + if (xIndex < width) { + // Pointer shift + float * p_dist = dist + xIndex + batch_id * ref_pitch * index_pitch; + int * p_index = index + xIndex + batch_id * index_pitch * k; + // Initialise the first index + p_index[0] = 0; + // Go through all points + for (int i=1; i= k and if it's higher the k-th slready sorted mallest value + if (i >= k && curr_dist >= p_dist[(k-1)*index_pitch]) { + continue; + } + // Shift values (and indexes) higher that the current distance to the right + int j = min(i, k-1); + while (j > 0 && p_dist[(j-1)*index_pitch] > curr_dist) { + p_dist[j*index_pitch] = p_dist[(j-1)*index_pitch]; + p_index[j*index_pitch] = p_index[(j-1)*index_pitch]; + --j; + } + // Write the current distance and index at their position + p_dist[j*index_pitch] = curr_dist; + p_index[j*index_pitch] = curr_index; + } + } + } + __global__ void compute_sqrt(float * dist, int width, int pitch, int k){ + unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x; + unsigned int yIndex = blockIdx.y * blockDim.y + threadIdx.y; + int batch_id = blockIdx.z; + if (xIndex>>(ref_dev, ref_nb, ref_pitch, query_dev, query_nb, query_pitch, dim, dist_dev); + // checkCudaErrors(cudaDeviceSynchronize()); + // printf("%d", cudaDeviceSynchronize()); + // printf(" after compute_distances \\n"); + // Sort the distances with their respective indexes + dim3 block1(256, 1, 1); + dim3 grid1(query_nb / 256, 1, batch_size); + if (query_nb % 256 != 0) grid1.x += 1; + // printf("%d", cudaDeviceSynchronize()); + // printf(" before modified_insertion_sort \\n"); + // checkCudaErrors(cudaDeviceSynchronize()); + modified_insertion_sort<<>>(dist_dev, ref_pitch, index_dev, index_pitch, query_nb, ref_nb, k); + // checkCudaErrors(cudaDeviceSynchronize()); + // printf("%d", cudaDeviceSynchronize()); + // printf(" after modified_insertion_sort \\n"); + + // Compute the square root of the k smallest distances + //dim3 block2(16, 16, 1); + //dim3 grid2(query_nb / 16, k / 16, batch_size); + //if (query_nb % 16 != 0) grid2.x += 1; + //if (k % 16 != 0) grid2.y += 1; + //compute_sqrt<<>>(dist_dev, query_nb, query_pitch, k); + // Copy k smallest distances / indexes from the device to the host + // TODO: batch 2d copy dist + // cudaMemcpy2DAsync(knn_dist, query_nb * size_of_float, dist_dev, dist_pitch*size_of_float, query_nb * size_of_float, k, cudaMemcpyDefault); + return true; + } + """ + self.cuda_src = ''' + const int k = 
out0_shape1; + const int query_nb = in1_shape2; + const int ref_nb = in0_shape2; + const int dim = in0_shape1; + const int batch_size = in0_shape0; + knn_cuda_global(batch_size, in0_p, ref_nb, in1_p, query_nb, dim, k, out0_p, in2_p); + ''' + + def execute(self, x_q, x_r): # n_points, c_dim + x_q, x_r = x_q.transpose().unsqueeze(0), x_r.transpose().unsqueeze(0) + batch_size, c_dim, q_points = x_q.shape + batch_size, c_dim, r_points = x_r.shape + out_idx_shapes = [batch_size, self.k, q_points] + tmp_dist = jt.empty((batch_size, r_points, q_points), "float32") + idxs, = jt.code( + [out_idx_shapes], + ['int32'], + [x_r, x_q, tmp_dist], # in0 r point in1 q point + cuda_src=self.cuda_src, + cuda_header=self.cuda_inc, + ) + return idxs[0].transpose() + + + +from sklearn.neighbors import NearestNeighbors +class TetMesh(): + """ + A class to represent a Tetradral mesh. + + ... + + Attributes + ---------- + verts : vertices of the mesh, [NV,3] + tets : tethedrals of the mesh, [NT,4] + NV : number of verts + NT: number of tets + deltas : deformation deltas of each vertices + indexes: use for calculate det, check which tet the sample point is in + + Methods + ------- + readTXT(txt_file): + read verts and tets from .txt file + findTet(sample_pts): + query the delta of sample points + """ + + def __init__(self, verts, tets) -> None: + self.verts, self.tets = verts, tets + self.tet_verts = self.verts[self.tets] # [NT,4,3] + self.NV = self.verts.shape[0] + self.NT = self.tets.shape[0] + self.genIndex() + self.vert2tet = self.calVert2Tet() # [NV, max_degree] + verts = self.verts.reshape(-1,3).numpy() # [N,3] + self.nbrs = NearestNeighbors(n_neighbors=4, algorithm='ball_tree').fit(verts) + self.knn_fun = jt_KNN(4) + + + def calVert2Tet(self): + """calculate the tet idx for each verts. + Padding -1 if tets number is less than max_degree + """ + vert2tet = [[] for _ in range(self.NV)] + max_degree = 0 + for idx, tet in enumerate(self.tets): + for vID in tet: + vID = int(vID) + vert2tet[vID].append(idx) + max_degree = max(max_degree, len(vert2tet[vID])) + for x in vert2tet: + while (len(x) < max_degree): + x.append(-1) + vert2tet = jt.array(vert2tet).long() + self.max_degree = max_degree + print("the max degree of vertices in tets is %d" % self.max_degree) + return vert2tet + + def genIndex(self) -> None: + indexes = [] + index = jt.array([[1,2,3,4]]).long().transpose().expand(4,3) + indexes.append(index.unsqueeze(0)) + index = jt.array([[0,2,3,4]]).long().transpose().expand(4,3) + indexes.append(index.unsqueeze(0)) + index = jt.array([[1,0,3,4]]).long().transpose().expand(4,3) + indexes.append(index.unsqueeze(0)) + index = jt.array([[1,2,0,4]]).long().transpose().expand(4,3) + indexes.append(index.unsqueeze(0)) + index = jt.array([[1,2,3,0]]).long().transpose().expand(4,3) + indexes.append(index.unsqueeze(0)) + self.indexes = jt.concat(indexes, dim=0) # [5,4,3] + + def findTet(self, sample_pts) -> tuple: + """[calculate the barycentric coord for each pt and each tet] + """ + bs, NT = sample_pts.shape[0], self.NT + tet_verts = self.tet_verts.unsqueeze(0).expand(bs,NT,4,3) + sample_pts = sample_pts.unsqueeze(1).expand(bs,NT,3) + + return self.findTetCore(sample_pts, tet_verts) + + def findTetCore(self, sample_pts, tet_verts) -> tuple: + ### need to be accelerated! 
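+ # Point-in-tetrahedron test via signed 4x4 determinants of homogeneous vertex
+ # matrices: D0 uses the tet's own four vertices, D1..D4 each replace one vertex
+ # by the query point. The barycentric coordinates are u, v, w, z = D1/D0 .. D4/D0,
+ # and the point lies inside the tet iff all four are positive (the 1e-9 guards
+ # against degenerate tets with near-zero volume).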
+ with jt.no_grad(): + bs, NT = sample_pts.shape[:2] + all_verts = jt.concat([sample_pts.unsqueeze(2),tet_verts], dim=2) # [bs,NT,5,3] + indexes = self.indexes.unsqueeze(0).expand(bs*NT,5,4,3).reshape(bs,NT,5,4,3) + all_verts = all_verts.unsqueeze(2).expand(bs,NT,5,5,3) # [bs,NT,5,5,3] + + all_dets = jt.gather(all_verts, dim=3, index=indexes) # [bs,NT,5,4,3] + all_dets = jt.concat([all_dets, jt.ones(list(all_dets.shape[:-1])+[1])], dim=-1) # [bs,NT,5,4,4] + + all_dets = jt.linalg.det(all_dets) # [bs,NT,5] + + uvwz = all_dets[...,1:] / (all_dets[...,0:1] + 1e-9) # [bs,NT,4] + u, v, w, z = uvwz[...,0], uvwz[...,1], uvwz[...,2], uvwz[...,3] # [bs,NT] + + in_tet_mask = jt.logical_and(u > 0, v > 0) + in_tet_mask = jt.logical_and(in_tet_mask, w > 0) + in_tet_mask = jt.logical_and(in_tet_mask, z > 0) # [bs,NT] + + in_tet_mask = jt.concat([in_tet_mask.int(), jt.ones((bs,1),dtype=jt.int32)], dim=-1) # [bs,NT+1] + tet_idx = jt.argmax(in_tet_mask, dim=1)[0] # [bs,] + + uvwz = jt.concat([uvwz, jt.zeros([bs,1,4])], dim=1) # [bs,NT+1,4] + bary_index = tet_idx.unsqueeze(-1).unsqueeze(-1).expand(bs,1,4) + barycentric = jt.gather(uvwz, dim=1, index=bary_index).permute(0,2,1) # [bs,4,1] + + tet_idx[tet_idx==NT] = -1 + + return tet_idx, barycentric + + def findTetKNN(self, sample_pts): + """find the Tet using KNN. Only find in K tets indicated by the nearest k points + """ + with jt.no_grad(): + bs, NT = sample_pts.shape[0], self.NT + # + + sample_pts = sample_pts.reshape(1,-1,3) # [1,N,3] + verts = self.verts.reshape(1,-1,3) # [1,N,3] + K = 4 # nearnest points + # res = knn_points(sample_pts, verts, K=K) + t1 = time.time() + # _, verts_id = self.nbrs.kneighbors(sample_pts[0].numpy()) # + # verts_id = jt.array(verts_id).long() + verts_id = self.knn_fun(sample_pts[0], verts[0]) + # print("knn cost time %s" % (time.time() - t1)) + ### construct tets [bs,K*max_degree,4,3] for calculation + # verts_id = res.idx[0] # [bs, K] + tets_id = self.vert2tet[verts_id] # [bs, K, max_degree] + # expand the dimension of tet_verts, to padding the tets + pad_tet_verts = jt.concat([self.tet_verts, jt.rand((1,4,3),dtype=jt.float32)+999], dim=0) # [NT+1,4,3] + tets_id[tets_id==-1] = NT + cur_tet_verts = pad_tet_verts[tets_id.reshape(bs,-1)] # [bs,K*max_degree,4,3]. gather cannot use -1 as index, while slice can! + cur_NT = K * self.max_degree + + sample_pts = sample_pts.permute(1,0,2).expand(bs,cur_NT,3) + + tets_local_id, barycentric = self.findTetCore(sample_pts, cur_tet_verts) # [bs,], [bs,4,1] + # tets_local_id[tets_local_id!=-1] = tets_id.reshape(bs,-1)[tets_local_id[tets_local_id!=-1]] # change local to global, too complex ... + tets_id_ = jt.concat([tets_id.reshape(bs,-1),-1*jt.ones((bs,1), dtype=jt.int32)], dim=1) # [bs,K*max_degree+1] + tets_local_id[tets_local_id==-1] = cur_NT + result = jt.gather(tets_id_, dim=1, index=tets_local_id.unsqueeze(-1)) + + # return jt.zeros_like(result.squeeze(-1)), jt.zeros_like(barycentric) + return result.squeeze(-1), barycentric + + +def genConvexhullVolume(reconstructed_mesh_file:str, deformed_mesh_file:str, fix_camera = False) -> tuple: + """[summary] + + Args: + reconstructed_mesh_file (str): [reconstructed file ,txt format] + deformed_mesh_file (str): [deformaed file, ovm format] + fix_camera (bool, optional): [description]. Defaults to False. 
+ + Returns: + tuple: [tri, deltas] + """ + ori_verts, tets = readTXT(reconstructed_mesh_file) + + if fix_camera: + import glob, os + deformed_mesh_files = sorted(glob.glob(os.path.join(deformed_mesh_file, '*.ovm'))) + deformed_verts = [readVertsofOVM(x) for x in deformed_mesh_files] + + tri = [TetMesh(x, tets) for x in deformed_verts] + deltas = [ori_verts-x for x in deformed_verts] + print("finish constructing Cage hulls !") + return tri, deltas + else: + deformed_verts = readVertsofOVM(deformed_mesh_file) + Num = 40 + deltas = ori_verts - deformed_verts + + tri = TetMesh(deformed_verts, tets) + print("finish constructing Cage hulls !") + return tri, deltas + +def queryDelta(hull, deltas, query_pts): + ''' + hull: the tet mesh + deltas: [NV,3] + query_pts: [bs, N, 3] + ''' + bs, N, _ = query_pts.shape + query_pts = query_pts.reshape(-1,3) + + t1 = time.time() + with jt.no_grad(): + # simplexID, barycentric = hull.findTet(query_pts) # [N,], [N,4,1] + simplexID, barycentric = hull.findTetKNN(query_pts) # [N,], [N,4,1] + # simplexID_ = simplexID.numpy() + # barycentric_ = barycentric.numpy() + # print("find TNN cost time %s" % (time.time()-t1)) + + zero_mask = (simplexID == -1).reshape(bs, N, -1) + simplexID[simplexID==-1] = hull.NT-1 + values = deltas[hull.tets[simplexID]] # [N,4,3] + + result = (barycentric * values).sum(dim=1) + # print("the hull has %d simplicies, query %d points with time [%f]" % (len(hull.tets), len(result), time.time()-t1)) + + result = result.reshape(bs, N, -1) + tri_verts = hull.verts[hull.tets[simplexID]].reshape(bs, N, 4, 3) + tri_deltas = values.reshape(bs, N, 4, 3) + + + return result, (tri_verts,tri_deltas,zero_mask) + diff --git a/contrib/NeRF-Editing/volumeARAP_batch/CMakeLists.txt b/contrib/NeRF-Editing/volumeARAP_batch/CMakeLists.txt new file mode 100644 index 00000000..5903be4f --- /dev/null +++ b/contrib/NeRF-Editing/volumeARAP_batch/CMakeLists.txt @@ -0,0 +1,34 @@ +cmake_minimum_required(VERSION 3.0) +project(volumeARAP) +set(CMAKE_CXX_STANDARD 11) + +IF(NOT CMAKE_BUILD_TYPE) + SET(CMAKE_BUILD_TYPE Release) +ENDIF() + +SET(CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH};${PROJECT_SOURCE_DIR}/Eigen/cmake") + +#Eigen3 +find_package(Eigen3 REQUIRED) +INCLUDE_DIRECTORIES(${PROJECT_SOURCE_DIR}/Eigen) + +#OpenVolumeMesh +INCLUDE_DIRECTORIES(${PROJECT_SOURCE_DIR}/../OpenVolumeMesh/src) +link_directories(${PROJECT_SOURCE_DIR}/../OpenVolumeMesh/build/Build/lib) + +INCLUDE_DIRECTORIES(${PROJECT_SOURCE_DIR}/src) + +#aux_source_directory(${CMAKE_CURRENT_LIST_DIR}/src ${hello_src}) + +add_definitions( + -D_USE_MATH_DEFINES -DNOMINMAX -DDUSE_OPENMP -D_SCL_SECURE_NO_DEPRECATE + ) + +#BUILD +SET(HEADERS +./src/ARAPDeform.h ./src/MyUtils.h +) +add_executable(${PROJECT_NAME} ./src/main.cpp ./src/MyUtils.cpp ./src/yyjARAPDeform.cpp ${HEADERS}) +#add_executable(${PROJECT_NAME} ${hello_src}) +target_link_libraries(${PROJECT_NAME} OpenVolumeMesh) +set(CMAKE_CXX_FLAGS_RELEASE "-O2 -DNDEBUG " ) \ No newline at end of file diff --git a/contrib/NeRF-Editing/volumeARAP_batch/src/ARAPDeform.h b/contrib/NeRF-Editing/volumeARAP_batch/src/ARAPDeform.h new file mode 100644 index 00000000..c7f7845a --- /dev/null +++ b/contrib/NeRF-Editing/volumeARAP_batch/src/ARAPDeform.h @@ -0,0 +1,61 @@ +#pragma once + +//#include "MatEngine.h" +#include "MyUtils.h" +#include +#include +#include + +typedef Eigen::Triplet Tri; + +class ARAPDeform +{ +public: + TetrahedralMesh * mesh; + int half_edge_num; + std::vector degree; + std::vector edgeijs; // edgeijs vector + std::vector edge_weights; + std::vector 
edge_index; // first neighbour edge index + std::vector> edge_pairs; + std::vector isConst; + std::vector isConst_i; + std::vector constPoint; + std::vector> seq_constPoint; + std::vector bary_vert_index; // save index of tet verts for each control points + std::vector barycentric; // save barycentric coordinate + std::vector> control_index; + std::vector> control_weight; + std::vector> controlpoint_number; + //Utility::MatEngine matEngine; + + int * AcsrRowIndPtr; + int * AcsrColPtr; + double * AcsrValPtr; + int Annz; // number of non-zero values in matrix A + int AcsrRowNum; + int AcsrColNum; + double *resultX; + long vectorBSize; + double * vectorBPtr; + + // for eigen solve + std::vector tripletList; + Eigen::VectorXd vectorB; + + int maxIterTime; + bool hardConstrain; + + ARAPDeform() {}; + ARAPDeform(TetrahedralMesh& mesh, bool hardConstrain = true); + ~ARAPDeform(); + void loadConstPoint(std::istream& cin); + void setConstPoint(int i, Eigen::Vector3d v); + void global_step_pre(TetrahedralMesh& deformed_mesh); + void eigen_global_step_pre(TetrahedralMesh& deformed_mesh); + void local_step(std::vector& R, TetrahedralMesh& deformed_mesh); + void yyj_ARAPDeform(std::string &handlefile, std::string outputFolder); + //bool yyj_LeastSquareSolve(Utility::MatEngine &matEngine, int rowNum, int colNum, int Annz, int *rowPtr, int *colPtr, double *valPtr, const double *b, double *x); + //bool yyj_CholeskyPre(Utility::MatEngine &matEngine, int rowNum, int colNum, int Annz, int *rowPtr, int *colPtr, double *valPtr); + //bool yyj_CholeskySolve(Utility::MatEngine &matEngine, int rowNum, int colNum, const double *b, double *x); +}; \ No newline at end of file diff --git a/contrib/NeRF-Editing/volumeARAP_batch/src/MyUtils.cpp b/contrib/NeRF-Editing/volumeARAP_batch/src/MyUtils.cpp new file mode 100644 index 00000000..8adf0f36 --- /dev/null +++ b/contrib/NeRF-Editing/volumeARAP_batch/src/MyUtils.cpp @@ -0,0 +1,401 @@ +#include "MyUtils.h" + +#include +#include +#include + +using namespace OpenVolumeMesh; + +Tet_vec3d EtoOV(const Eigen::Vector3d &v) { + return Tet_vec3d(v(0), v(1), v(2)); +} + +Eigen::Vector3d OVtoE(const Tet_vec3d &v) { + return Eigen::Vector3d(v[0], v[1], v[2]); +} + +Eigen::Matrix3d exp(Eigen::Matrix3d x) { + //return x.exp(); + double theta = sqrt(x(0, 1)*x(0, 1) + x(0, 2)*x(0, 2) + x(1, 2)*x(1, 2)); + if (abs(theta) == 0) return Eigen::Matrix3d::Identity(); + x /= theta; + return Eigen::Matrix3d::Identity() + + x * sin(theta) + + x * x * (1 - cos(theta)); +} + +Eigen::Matrix3d log(Eigen::Matrix3d x) { + //return x.log(); + double theta = (x.trace() - 1) / 2; + theta = std::acos(std::max(-1.0, std::min(1.0, theta))); + if (abs(theta) == 0) return Eigen::Matrix3d::Zero(); + return (theta / (2 * sin(theta))) * (x - x.transpose()); +} + +void trimString(std::string& _string) { + + // Trim Both leading and trailing spaces + size_t start = _string.find_first_not_of(" \t\r\n"); + size_t end = _string.find_last_not_of(" \t\r\n"); + + if ((std::string::npos == start) || (std::string::npos == end)) { + _string = ""; + } + else { + _string = _string.substr(start, end - start + 1); + } +} + +bool getCleanLine(std::istream& _ifs, std::string& _string, bool _skipEmptyLines) { + + // While we are not at the end of the file + while (true) { + + // Get the current line: + std::getline(_ifs, _string); + + // Remove whitespace at beginning and end + trimString(_string); + + // Check if string is not empty ( otherwise we continue + if (_string.size() != 0) { + + // Check if string is a comment ( 
starting with # ) + if (_string[0] != '#') { + return true; + } + + } + else { + if (!_skipEmptyLines) + return true; + } + + if (_ifs.eof()) { + std::cerr << "End of file reached while searching for input!" << std::endl; + return false; + } + } + + return false; +} + +bool myReadFile(const std::string& _filename, TetrahedralMesh& _mesh, + bool _topologyCheck, bool _computeBottomUpIncidences) +{ + + std::ifstream iff(_filename.c_str(), std::ios::in); + + if (!iff.good()) { + std::cerr << "Error: Could not open file " << _filename << " for reading!" << std::endl; + iff.close(); + return false; + } + /*return readStream(iff, _mesh, _topologyCheck, _computeBottomUpIncidences);*/ + + std::stringstream sstr; + std::string line; + std::string s_tmp; + uint64_t c = 0u; + typedef typename TetrahedralMesh::PointT Point; + Point v = Point(0.0, 0.0, 0.0); + + _mesh.clear(false); + // Temporarily disable bottom-up incidences + // since it's way faster to first add all the + // geometry and compute them in one pass afterwards + _mesh.enable_bottom_up_incidences(false); + + /* + * Header + */ + + bool header_found = true; + + // Get first line + getCleanLine(iff, line); + sstr.str(line); + + // Check header + sstr >> s_tmp; + std::transform(s_tmp.begin(), s_tmp.end(), s_tmp.begin(), ::toupper); + if (s_tmp != "OVM") { + //iff.close(); + header_found = false; + std::cerr << "The specified file might not be in OpenVolumeMesh format!" << std::endl; + //return false; + } + + // Get ASCII/BINARY string + sstr >> s_tmp; + std::transform(s_tmp.begin(), s_tmp.end(), s_tmp.begin(), ::toupper); + if (s_tmp == "BINARY") { + std::cerr << "Binary files are not supported at the moment!" << std::endl; + return false; + } + + /* + * Vertices + */ + if (!header_found) { + sstr.clear(); + sstr.str(line); + } + else { + getCleanLine(iff, line); + sstr.clear(); + sstr.str(line); + } + + sstr >> s_tmp; + std::transform(s_tmp.begin(), s_tmp.end(), s_tmp.begin(), ::toupper); + if (s_tmp != "VERTICES") { + std::cerr << "No vertex section defined!" << std::endl; + return false; + } + else { + + // Read in number of vertices + getCleanLine(iff, line); + sstr.clear(); + sstr.str(line); + sstr >> c; + + // Read in vertices + for (uint64_t i = 0u; i < c; ++i) { + + getCleanLine(iff, line); + sstr.clear(); + sstr.str(line); + sstr >> v[0]; + sstr >> v[1]; + sstr >> v[2]; + _mesh.add_vertex(v); + } + } + + /* + * Edges + */ + getCleanLine(iff, line); + sstr.clear(); + sstr.str(line); + sstr >> s_tmp; + std::transform(s_tmp.begin(), s_tmp.end(), s_tmp.begin(), ::toupper); + if (s_tmp != "EDGES") { + std::cerr << "No edge section defined!" << std::endl; + return false; + } + else { + + // Read in number of edges + getCleanLine(iff, line); + sstr.clear(); + sstr.str(line); + sstr >> c; + + // Read in edges + for (uint64_t i = 0u; i < c; ++i) { + + unsigned int v1 = 0; + unsigned int v2 = 0; + getCleanLine(iff, line); + sstr.clear(); + sstr.str(line); + sstr >> v1; + sstr >> v2; + _mesh.add_edge(VertexHandle(v1), VertexHandle(v2), true); + } + } + + /* + * Faces + */ + getCleanLine(iff, line); + sstr.clear(); + sstr.str(line); + sstr >> s_tmp; + std::transform(s_tmp.begin(), s_tmp.end(), s_tmp.begin(), ::toupper); + if (s_tmp != "FACES") { + std::cerr << "No face section defined!" 
<< std::endl; + return false; + } + else { + + // Read in number of faces + getCleanLine(iff, line); + sstr.clear(); + sstr.str(line); + sstr >> c; + + // Read in faces + for (uint64_t i = 0u; i < c; ++i) { + + getCleanLine(iff, line); + sstr.clear(); + sstr.str(line); + + std::vector hes; + + // Get face valence + uint64_t val = 0u; + sstr >> val; + + // Read half-edge indices + for (unsigned int e = 0; e < val; ++e) { + + unsigned int v1 = 0; + sstr >> v1; + hes.push_back(HalfEdgeHandle(v1)); + } + + _mesh.add_face(hes, _topologyCheck); + } + } + + /* + * Cells + */ + getCleanLine(iff, line); + sstr.clear(); + sstr.str(line); + sstr >> s_tmp; + std::transform(s_tmp.begin(), s_tmp.end(), s_tmp.begin(), ::toupper); + if (s_tmp != "POLYHEDRA") { + std::cerr << "No polyhedra section defined!" << std::endl; + return false; + } + else { + + // Read in number of cells + getCleanLine(iff, line); + sstr.clear(); + sstr.str(line); + sstr >> c; + + // Read in cells + for (uint64_t i = 0u; i < c; ++i) { + + getCleanLine(iff, line); + sstr.clear(); + sstr.str(line); + + std::vector hfs; + + // Get cell valence + uint64_t val = 0u; + sstr >> val; + + // Read half-face indices + for (unsigned int f = 0; f < val; ++f) { + + unsigned int v1 = 0; + sstr >> v1; + hfs.push_back(HalfFaceHandle(v1)); + } + + _mesh.add_cell(hfs, _topologyCheck); + } + } + + //while (!iff.eof()) { + // // "End of file reached while searching for input!" + // // is thrown here. \TODO Fix it! + + // // Read property + // readProperty(iff, _mesh); + //} + + if (_computeBottomUpIncidences) { + // Compute bottom-up incidences + _mesh.enable_bottom_up_incidences(true); + } + + std::cerr << "######## openvolumemesh info #########" << std::endl; + std::cerr << "#vertices: " << _mesh.n_vertices() << std::endl; + std::cerr << "#edges: " << _mesh.n_edges() << std::endl; + std::cerr << "#faces: " << _mesh.n_faces() << std::endl; + std::cerr << "#cells: " << _mesh.n_cells() << std::endl; + std::cerr << "######################################" << std::endl; + + return true; +} + +//template +void myWriteFile(const std::string& _filename, TetrahedralMesh &_mesh) +{ + std::ofstream off(_filename.c_str(), std::ios::out); + // Write header + off << "OVM ASCII" << std::endl; + + uint64_t n_vertices(_mesh.n_vertices()); + off << "Vertices" << std::endl; + off << n_vertices << std::endl; + + typedef typename TetrahedralMesh::PointT Point; + + // write vertices + for (VertexIter v_it = _mesh.v_iter(); v_it; ++v_it) { + + Point v = _mesh.vertex(*v_it); + off << v[0] << " " << v[1] << " " << v[2] << std::endl; + } + + uint64_t n_edges(_mesh.n_edges()); + off << "Edges" << std::endl; + off << n_edges << std::endl; + + // write edges + for (EdgeIter e_it = _mesh.e_iter(); e_it; ++e_it) { + + VertexHandle from_vertex = _mesh.edge(*e_it).from_vertex(); + VertexHandle to_vertex = _mesh.edge(*e_it).to_vertex(); + off << from_vertex << " " << to_vertex << std::endl; + } + + uint64_t n_faces(_mesh.n_faces()); + off << "Faces" << std::endl; + off << n_faces << std::endl; + + // write faces + for (FaceIter f_it = _mesh.f_iter(); f_it; ++f_it) { + + off << static_cast(_mesh.face(*f_it).halfedges().size()) << " "; + + std::vector halfedges = _mesh.face(*f_it).halfedges(); + + for (typename std::vector::const_iterator it = halfedges.begin(); it + != halfedges.end(); ++it) { + + off << it->idx(); + + if ((it + 1) != halfedges.end()) + off << " "; + } + + off << std::endl; + } + + uint64_t n_cells(_mesh.n_cells()); + off << "Polyhedra" << std::endl; + off << n_cells 
<< std::endl; + + for (CellIter c_it = _mesh.c_iter(); c_it; ++c_it) { + + off << static_cast(_mesh.cell(*c_it).halffaces().size()) << " "; + + std::vector halffaces = _mesh.cell(*c_it).halffaces(); + + for (typename std::vector::const_iterator it = halffaces.begin(); it + != halffaces.end(); ++it) { + + off << it->idx(); + + if ((it + 1) != halffaces.end()) + off << " "; + } + + off << std::endl; + } + +} \ No newline at end of file diff --git a/contrib/NeRF-Editing/volumeARAP_batch/src/MyUtils.h b/contrib/NeRF-Editing/volumeARAP_batch/src/MyUtils.h new file mode 100644 index 00000000..7ef6a458 --- /dev/null +++ b/contrib/NeRF-Editing/volumeARAP_batch/src/MyUtils.h @@ -0,0 +1,46 @@ +#ifndef MYUTILS_H +#define MYUTILS_H + +#include +#include +#include +#include +#include +#include +#include +// Include the polyhedral mesh header +#include +#include + +#define Eps 1e-10 + +typedef OpenVolumeMesh::Geometry::Vec3d Tet_vec3d; +//typedef OpenVolumeMesh::TetrahedralGeometryKernel TetrahedralMesh; +//typedef OpenVolumeMesh::GeometryKernel PolyhedralMesh; +typedef OpenVolumeMesh::GeometricPolyhedralMeshV3d TetrahedralMesh; + +using VertexHandle = OpenVolumeMesh::VertexHandle; +using Parameter = Tet_vec3d; +using Position = Tet_vec3d; +using VertexVertexIter = OpenVolumeMesh::VertexVertexIter; + +Tet_vec3d EtoOV(const Eigen::Vector3d &v); +Eigen::Vector3d OVtoE(const Tet_vec3d &v); + +Eigen::Matrix3d exp(Eigen::Matrix3d); +Eigen::Matrix3d log(Eigen::Matrix3d); + +void trimString(std::string& _string); + +bool getCleanLine(std::istream& ifs, std::string& _string, bool _skipEmptyLines = true); + +bool myReadFile(const std::string& _filename, TetrahedralMesh& _mesh, + bool _topologyCheck = true, + bool _computeBottomUpIncidences = true); + +void myWriteFile(const std::string& _filename, TetrahedralMesh& _mesh); + +//template +//void myWriteFile(const std::string& _filename, MeshT& _mesh); + +#endif diff --git a/contrib/NeRF-Editing/volumeARAP_batch/src/main.cpp b/contrib/NeRF-Editing/volumeARAP_batch/src/main.cpp new file mode 100644 index 00000000..c0344855 --- /dev/null +++ b/contrib/NeRF-Editing/volumeARAP_batch/src/main.cpp @@ -0,0 +1,28 @@ +#include +//#include +#include "ARAPDeform.h" +//#include "fileSystemUtility.h" +//#include "MatEngine.h" + +int main(int argc, char *argv[]) +{ + if (argc == 5) + { + std::string inputObj = argv[1]; + std::string handleFile = argv[2]; + std::string outputFolder = argv[3]; + bool hardConstrain = atoi(argv[4]); + TetrahedralMesh meshOri; + myReadFile(inputObj.c_str(), meshOri); + std::string outputName = "test_output.ovm"; + myWriteFile(outputName, meshOri); + ARAPDeform *arapDeform = new ARAPDeform(meshOri, hardConstrain); + arapDeform->yyj_ARAPDeform(handleFile, outputFolder); + } + else + { + std::cout << "exe inputObj handleFile outputFolder hardConstrain" << std::endl; + } + + return 0; +} \ No newline at end of file diff --git a/contrib/NeRF-Editing/volumeARAP_batch/src/yyjARAPDeform.cpp b/contrib/NeRF-Editing/volumeARAP_batch/src/yyjARAPDeform.cpp new file mode 100644 index 00000000..092c17c8 --- /dev/null +++ b/contrib/NeRF-Editing/volumeARAP_batch/src/yyjARAPDeform.cpp @@ -0,0 +1,784 @@ +#include +#include +#include +#include "ARAPDeform.h" + +using namespace std; +#define DUSE_OPENMP + +// #ifdef DUSE_OPENMP +// #define OMP_open __pragma(omp parallel num_threads(omp_get_num_procs()*2)) \ +// { \ +// __pragma(omp for) +// #define OMP_end \ +// } +// #else +// #define OMP_open ; +// #define OMP_end ; +// #endif + +double yyj_cotan(Eigen::Vector3d a, 
Eigen::Vector3d b) { + double na = a.norm(), nb = b.norm(); + if (na < Eps || nb < Eps) return 0; + double cos = a.dot(b) / (na*nb); + if (cos == 1) return 1; + return cos / sqrt(1 - cos * cos); +} + +Eigen::Matrix3d vec2mat(Eigen::Vector3d a, Eigen::Vector3d b) { + Eigen::Matrix3d resultMat; + for (int i = 0; i < 3; i++) + { + for (int j = 0; j < 3; j++) + { + resultMat(i, j) = a[i] * b[j]; + } + } + return resultMat; +} + +double calDihedralAngle(std::vector dihedral) { + // v1, v2 constructs the common edge. v3 and v4 expand the dihedral angle + OpenVolumeMesh::Geometry::Vec3d v1 = dihedral[0]; + OpenVolumeMesh::Geometry::Vec3d v2 = dihedral[1]; + OpenVolumeMesh::Geometry::Vec3d v3 = dihedral[2]; + OpenVolumeMesh::Geometry::Vec3d v4 = dihedral[3]; + + // assume the order: v3 -> v1 -> v2 -> v4 + OpenVolumeMesh::Geometry::Vec3d d1 = v1 - v3; + OpenVolumeMesh::Geometry::Vec3d d2 = v2 - v1; + OpenVolumeMesh::Geometry::Vec3d d3 = v4 - v2; + + OpenVolumeMesh::Geometry::Vec3d n1 = d1.cross(d2); + OpenVolumeMesh::Geometry::Vec3d n2 = d2.cross(d3); + + double cos_angle = n1.dot(n2) / n1.norm() / n2.norm(); + + return cos_angle / sqrt(1 - cos_angle*cos_angle); +} + +ARAPDeform::ARAPDeform(TetrahedralMesh& input_mesh, bool hardConstrain) :mesh(&input_mesh) +{ + degree.resize(input_mesh.n_vertices(), 0); + edge_index.resize(input_mesh.n_vertices(), 0); + half_edge_num = 0; + for (int i = 0; i < input_mesh.n_vertices(); i++) + { + VertexHandle vi(i); + std::vector neighborPoints; + Tet_vec3d pi = input_mesh.vertex(vi); + + edge_index[i] = half_edge_num; + for (OpenVolumeMesh::VertexVertexIter vv_it = input_mesh.vv_iter(vi); vv_it.valid(); ++vv_it) + { + int j = vv_it->idx(); + neighborPoints.push_back(input_mesh.vertex(VertexHandle(j))); + half_edge_num++; + degree[i]++; + } + + + int neighborsNum = (int)neighborPoints.size(); + + std::vector weights(neighborsNum, 0); + + // calculate e dihedral angles + int cnt = 0; + for (OpenVolumeMesh::VertexOHalfEdgeIter voheit=input_mesh.voh_iter(vi); voheit.valid(); voheit++) { + OpenVolumeMesh::VertexHandle vi2 = input_mesh.to_vertex_handle(*voheit); + // std::cout << "****** the edge between ****** " << vi.idx() << " and " << vi2.idx() << std::endl; + int cell_num = 0; + OpenVolumeMesh::EdgeHandle eh = input_mesh.edge_handle(*voheit); + for (OpenVolumeMesh::EdgeCellIter ecit=input_mesh.ec_iter(eh); ecit.valid(); ecit++) { + // std::cout << "iterate cells ..." << std::endl; + std::vector dihedralAngle; + // vector size is 4. 
The first and second is the common edge + for (OpenVolumeMesh::CellVertexIter cvit=input_mesh.cv_iter(*ecit); cvit.valid(); cvit++) { + if (*cvit != vi && *cvit != vi2) { + dihedralAngle.push_back(input_mesh.vertex(*cvit)); + // std::cout << "verts add " << (*cvit).idx() << std::endl; + } + } + dihedralAngle.push_back(input_mesh.vertex(vi)); + dihedralAngle.push_back(input_mesh.vertex(vi2)); + // std::cout << "specific verts are: " << vi.idx() << ", " << vi2.idx() << std::endl; + cell_num++; + weights[cnt] += calDihedralAngle(dihedralAngle); + // std::cout << "the cot of dihedralAngle is: " << calDihedralAngle(dihedralAngle) << std::endl; + } + weights[cnt] /= cell_num; cnt++; + } + assert(cnt == neighborsNum); + + for (int neighborCounts = 0; neighborCounts < neighborsNum; neighborCounts++) + { + Tet_vec3d edgeij_tet = pi - neighborPoints[neighborCounts]; + Eigen::Vector3d edgeij = OVtoE(edgeij_tet); + this->edgeijs.push_back(edgeij); + + /*double w1 = yyj_cotan(OVtoE(pi - neighborPoints[(neighborCounts + neighborsNum - 1) % neighborsNum]), OVtoE(neighborPoints[neighborCounts] - neighborPoints[(neighborCounts + neighborsNum - 1) % neighborsNum])); + double w2 = yyj_cotan(OVtoE(pi - neighborPoints[(neighborCounts + 1) % neighborsNum]), OVtoE(neighborPoints[neighborCounts] - neighborPoints[(neighborCounts + 1) % neighborsNum])); + weights[neighborCounts] = 0.5*(w1 + w2);*/ + // weights[neighborCounts] = 1; + /*if ((weights[neighborCounts] != weights[neighborCounts]) || weights[neighborCounts] > 100000 || abs(weights[neighborCounts]) < Eps) + { + + }*/ + edge_weights.push_back(weights[neighborCounts]); + } + } + + isConst.resize(input_mesh.n_vertices(), false); + isConst_i.resize(input_mesh.n_vertices(), 0); + //constPoint.resize(input_mesh.n_vertices(), Eigen::Vector3d(0, 0, 0)); + control_index.resize(input_mesh.n_vertices()); + control_weight.resize(input_mesh.n_vertices()); + //this->matEngine = Utility::MatEngine(); + //this->matEngine.OpenEngine(); + + this->AcsrRowIndPtr = NULL; + this->AcsrColPtr = NULL; + this->AcsrValPtr = NULL; + this->AcsrColNum = 0; + this->AcsrRowNum = 0; + this->Annz = 0; + this->vectorBPtr = NULL; + this->vectorBSize = 0; + this->resultX = NULL; + + maxIterTime = 10; + this->hardConstrain = hardConstrain; + +} + +void ARAPDeform::setConstPoint(int i, Eigen::Vector3d v) { + //isConst[i] = true; + //isConst_i[i] = 1; + // constPoint.push_back(v); + seq_constPoint[i].push_back(v); +} + +void ARAPDeform::loadConstPoint(std::istream& cin) { + int n; + cin >> n; // sequence³¤¶È + seq_constPoint.resize(n); + for (int i = 0; i < n; i++) { + int m; // ÿ¸ö¿ØÖƵã¸öÊý + cin >> m; + std::cout << "loading " << m << " control points\n"; + std::vector ids(m); + for (int j = 0; j < m; j++) { + double x, y, z; + cin >> x >> y >> z; + this->setConstPoint(i, Eigen::Vector3d(x, y, z)); + } + } + // add barycentric as constrain + cin >> n; double u, v, w, z; int v1, v2, v3, v4; + std::cout << "loading " << n << " barycentric coordinates and tetrahedral vertex index\n"; + for (int i = 0; i < n; i++) { + cin >> v1 >> v2 >> v3 >> v4; + bary_vert_index.push_back(Eigen::Vector4i(v1, v2, v3, v4)); + cin >> u >> v >> w >> z; + barycentric.push_back(Eigen::Vector4d(u, v, w, z)); + control_index[v1].push_back(i); + control_index[v2].push_back(i); + control_index[v3].push_back(i); + control_index[v4].push_back(i); + control_weight[v1].push_back(u); + control_weight[v2].push_back(v); + control_weight[v3].push_back(w); + control_weight[v4].push_back(z); + } +} + +void 
ARAPDeform::global_step_pre(TetrahedralMesh& deformedMesh) +{ + /*int vvpairs = 0; + for (int i = 0; i < mesh->n_vertices(); i++) + { + VertexHandle vi(i); + for (OpenVolumeMesh::VertexVertexIter vvi = mesh->vv_iter(vi); vvi.valid(); vvi++) + { + Eigen::Vector3d q = OVtoE(mesh->vertex(VertexHandle(vvi->idx())) - mesh->vertex(vi)); + } + }*/ + int m = 0; + for (int i = 0; i < mesh->n_vertices(); i++) + { + m += 3 * this->degree[i]; + } + //cout << mesh->n_edges() << " " << 3 * half_edge_num; + //m += this->controlpoint_number.size() * 3; + int row_num = m + this->controlpoint_number.size() * 3; + if (this->AcsrRowIndPtr == NULL) + { + this->AcsrRowIndPtr = (int *)malloc(sizeof(int)*(row_num + 1)); + this->AcsrRowIndPtr[0] = 0; + } + + int ele_num; + if (!hardConstrain) + { + ele_num = m * 2 + this->controlpoint_number.size() * 3 * 4; + } + else + { + int n = 0; + for (int i = 0; i < control_index.size(); i++) + { + n += 3 * this->degree[i] * control_index[i].size(); + } + ele_num = m * 2 + this->controlpoint_number.size() * 3 * 4 + n; + } + if (this->AcsrColPtr == NULL) + { + this->AcsrColPtr = (int *)malloc(sizeof(int) * ele_num); + } + if (this->AcsrValPtr == NULL) + { + this->AcsrValPtr = (double *)malloc(sizeof(double) * ele_num); + memset(this->AcsrValPtr, 0, sizeof(double) * ele_num); + } + this->vectorBSize = row_num + 1; + this->vectorBPtr = (double *)malloc(vectorBSize * sizeof(double)); + + if (!hardConstrain) + { + this->resultX = (double *)malloc(mesh->n_vertices() * 3 * sizeof(double)); + memset(this->resultX, 0, sizeof(double)*mesh->n_vertices() * 3); + } + else + { + this->resultX = (double *)malloc((mesh->n_vertices() * 3 + this->controlpoint_number.size()) * sizeof(double)); + memset(this->resultX, 0, sizeof(double)*(mesh->n_vertices() * 3 + this->controlpoint_number.size())); + } + + int csrRowIndPtrCounter = 1; + int csrColPtrCounter = 0; + int csrValAssignCounter = 0; + int maxCol = 0; + + // Deformation Term //set rowIndPtr and colPtr for Matrix A in Ax=b + for (int i = 0; i < mesh->n_vertices(); i++) //base energy item + { + VertexHandle vi(i); + for (OpenVolumeMesh::VertexVertexIter vj = deformedMesh.vv_iter(vi); vj; vj++) + { + int j = vj->idx(); + for (int index = 0; index < 3; index++)//x,y,z,3 axis + { + int handlePointCounter = 0; + if (!this->isConst[i] && this->isConst[j])//handle Point + { + this->AcsrColPtr[csrColPtrCounter++] = i * 3 + index; + handlePointCounter = 1; + } + else if (this->isConst[i] && !this->isConst[j]) + { + this->AcsrColPtr[csrColPtrCounter++] = j * 3 + index; + handlePointCounter = 1; + } + else if (!this->isConst[i] && !this->isConst[j]) + { + if (i > j)//point coefficient + { + this->AcsrColPtr[csrColPtrCounter++] = j * 3 + index; + this->AcsrColPtr[csrColPtrCounter++] = i * 3 + index; + } + else { + this->AcsrColPtr[csrColPtrCounter++] = i * 3 + index; + this->AcsrColPtr[csrColPtrCounter++] = j * 3 + index; + } + } + else if (this->isConst[i] && this->isConst[j]) + { + handlePointCounter = 2; + } + if (hardConstrain) + { + for (int k = 0; k < control_index[i].size(); k++) + { + this->AcsrColPtr[csrColPtrCounter++] = mesh->n_vertices() * 3 + control_index[i][k]; + } + //set rowIndex + this->AcsrRowIndPtr[csrRowIndPtrCounter++] = 2 - handlePointCounter + this->AcsrRowIndPtr[csrRowIndPtrCounter - 1] + control_index[i].size(); + } + else + { + this->AcsrRowIndPtr[csrRowIndPtrCounter++] = 2 - handlePointCounter + this->AcsrRowIndPtr[csrRowIndPtrCounter - 1]; + } + } + } + } + std::cout << "the lapalce part has " << csrRowIndPtrCounter - 1 
<< " rows" << std::endl; + std::cout << "the lapalce part has " << csrColPtrCounter << " non-zero elements" << std::endl; + //Handle point,set rowIndPtr and colPtr + for (int i = 0; i < this->controlpoint_number.size(); i++) + { + //std::cout << "i: " << i << " csrColPtrCounter: " << csrColPtrCounter << ", value:" << bary_vert_index[this->controlpoint_number[i].first] << std::endl; + this->AcsrColPtr[csrColPtrCounter++] = bary_vert_index[this->controlpoint_number[i].first][0] * 3 + 0; + this->AcsrColPtr[csrColPtrCounter++] = bary_vert_index[this->controlpoint_number[i].first][1] * 3 + 0; + this->AcsrColPtr[csrColPtrCounter++] = bary_vert_index[this->controlpoint_number[i].first][2] * 3 + 0; + this->AcsrColPtr[csrColPtrCounter++] = bary_vert_index[this->controlpoint_number[i].first][3] * 3 + 0; + + this->AcsrRowIndPtr[csrRowIndPtrCounter++] = 4 + this->AcsrRowIndPtr[csrRowIndPtrCounter - 1]; + + this->AcsrColPtr[csrColPtrCounter++] = bary_vert_index[this->controlpoint_number[i].first][0] * 3 + 1; + this->AcsrColPtr[csrColPtrCounter++] = bary_vert_index[this->controlpoint_number[i].first][1] * 3 + 1; + this->AcsrColPtr[csrColPtrCounter++] = bary_vert_index[this->controlpoint_number[i].first][2] * 3 + 1; + this->AcsrColPtr[csrColPtrCounter++] = bary_vert_index[this->controlpoint_number[i].first][3] * 3 + 1; + + this->AcsrRowIndPtr[csrRowIndPtrCounter++] = 4 + this->AcsrRowIndPtr[csrRowIndPtrCounter - 1]; + + this->AcsrColPtr[csrColPtrCounter++] = bary_vert_index[this->controlpoint_number[i].first][0] * 3 + 2; + this->AcsrColPtr[csrColPtrCounter++] = bary_vert_index[this->controlpoint_number[i].first][1] * 3 + 2; + this->AcsrColPtr[csrColPtrCounter++] = bary_vert_index[this->controlpoint_number[i].first][2] * 3 + 2; + this->AcsrColPtr[csrColPtrCounter++] = bary_vert_index[this->controlpoint_number[i].first][3] * 3 + 2; + + this->AcsrRowIndPtr[csrRowIndPtrCounter++] = 4 + this->AcsrRowIndPtr[csrRowIndPtrCounter - 1]; + + } + + for (int i = 0; i < mesh->n_vertices(); i++) + { + VertexHandle vi(i); + //modify ±È½ÏÀ÷º¦µÄr + //Eigen::Matrix3d &ri = this->featurevector_result.rots[i].r; + //Eigen::Matrix3d &ri = R[i]; + int edgeijIndex = this->edge_index[i];//first neighbour edge index + //iterate point j (i adjacent points) + for (OpenVolumeMesh::VertexVertexIter vj = deformedMesh.vv_iter(vi); vj; vj++) + { + int j = vj->idx(); + double lambdaDeformWeightCiCij = 1.0 * (this->edge_weights[edgeijIndex]); + for (int axis = 0; axis < 3; axis++)//x,y,z + { + //Point parameter + if (!this->isConst[i] && !this->isConst[j]) + { + if (i < j)//point coefficient + { + this->AcsrValPtr[csrValAssignCounter++] = lambdaDeformWeightCiCij; + this->AcsrValPtr[csrValAssignCounter++] = -lambdaDeformWeightCiCij; + } + else + { + this->AcsrValPtr[csrValAssignCounter++] = -lambdaDeformWeightCiCij; + this->AcsrValPtr[csrValAssignCounter++] = lambdaDeformWeightCiCij; + } + } + else if (!this->isConst[i] && this->isConst[j]) + { + this->AcsrValPtr[csrValAssignCounter++] = lambdaDeformWeightCiCij; + } + else if (this->isConst[i] && !this->isConst[j]) + { + this->AcsrValPtr[csrValAssignCounter++] = -lambdaDeformWeightCiCij; + } + if (hardConstrain) + { + for (int k = 0; k < control_weight[i].size(); k++) + { + this->AcsrValPtr[csrValAssignCounter++] = control_weight[i][k] / 4; + } + } + }//end of xyz + edgeijIndex++; + }//end of j + }//end of i + + //handle point as hard constrain + for (int i = 0; i < this->controlpoint_number.size(); i++) + { + for (int j = 0; j < 3; j++) { + this->AcsrValPtr[csrValAssignCounter++] = 
barycentric[i][0]; + this->AcsrValPtr[csrValAssignCounter++] = barycentric[i][1]; + this->AcsrValPtr[csrValAssignCounter++] = barycentric[i][2]; + this->AcsrValPtr[csrValAssignCounter++] = barycentric[i][3]; + } + } + std::cout << "the all maxtrix has " << csrRowIndPtrCounter - 1 << " rows" << std::endl; + std::cout << "the all maxtrix has " << csrValAssignCounter << " non-zero elems" << std::endl; +} + +void ARAPDeform::eigen_global_step_pre(TetrahedralMesh& deformedMesh) +{ + int row_num = (half_edge_num + this->controlpoint_number.size()) * 3; + + int ele_num; + if (!hardConstrain) + { + ele_num = half_edge_num * 3 * 2 + this->controlpoint_number.size() * 3 * 4; + } + else + { + int n = 0; + for (int i = 0; i < control_index.size(); i++) + { + n += 3 * this->degree[i] * control_index[i].size(); + } + ele_num = half_edge_num * 3 * 2 + this->controlpoint_number.size() * 3 * 4 + n; + } + this->tripletList.reserve(ele_num); + this->vectorBSize = row_num; + this->vectorBPtr = (double *)malloc(vectorBSize * sizeof(double)); + + int rowCounter = 0; + int axisNum = 3; + + // Deformation Term //set rowIndPtr and colPtr for Matrix A in Ax=b + for (int i = 0; i < mesh->n_vertices(); i++) //base energy item + { + VertexHandle vi(i); + int edgeijIndex = this->edge_index[i];//first neighbour edge index + for (OpenVolumeMesh::VertexVertexIter vj = deformedMesh.vv_iter(vi); vj; vj++) + { + int j = vj->idx(); + double lambdaDeformWeightCiCij = 1.0 * (this->edge_weights[edgeijIndex]); + for (int index = 0; index < axisNum; index++)//x,y,z,3 axis + { + int handlePointCounter = 0; + if (!this->isConst[i] && this->isConst[j])//handle Point + { + tripletList.push_back(Tri(rowCounter + index, i * 3 + index, lambdaDeformWeightCiCij)); + } + else if (this->isConst[i] && !this->isConst[j]) + { + tripletList.push_back(Tri(rowCounter + index, j * 3 + index, -lambdaDeformWeightCiCij)); + } + else if (!this->isConst[i] && !this->isConst[j]) + { + if (i > j)//point coefficient + { + tripletList.push_back(Tri(rowCounter + index, j * 3 + index, -lambdaDeformWeightCiCij)); + tripletList.push_back(Tri(rowCounter + index, i * 3 + index, lambdaDeformWeightCiCij)); + } + else { + tripletList.push_back(Tri(rowCounter + index, i * 3 + index, lambdaDeformWeightCiCij)); + tripletList.push_back(Tri(rowCounter + index, j * 3 + index, -lambdaDeformWeightCiCij)); + } + } + if (hardConstrain) + { + for (int k = 0; k < control_index[i].size(); k++) + { + tripletList.push_back(Tri(rowCounter + index, mesh->n_vertices() * 3 + control_index[i][k], control_weight[i][k] / 4)); + } + } + }//end of xyz + edgeijIndex++; + rowCounter += axisNum; + }//end of vj + }//end of vi + + //Handle point,set rowIndPtr and colPtr + for (int i = 0; i < this->controlpoint_number.size(); i++) + { + for (int index = 0; index < axisNum; index++) + { + tripletList.push_back(Tri(rowCounter, bary_vert_index[this->controlpoint_number[i].first][0] * 3 + index, barycentric[i][0])); + tripletList.push_back(Tri(rowCounter, bary_vert_index[this->controlpoint_number[i].first][1] * 3 + index, barycentric[i][1])); + tripletList.push_back(Tri(rowCounter, bary_vert_index[this->controlpoint_number[i].first][2] * 3 + index, barycentric[i][2])); + tripletList.push_back(Tri(rowCounter, bary_vert_index[this->controlpoint_number[i].first][3] * 3 + index, barycentric[i][3])); + rowCounter++; + } + + } +} + +void ARAPDeform::local_step(std::vector& R, TetrahedralMesh& deformedMesh) +{ + std::cout << "Local Step" << endl; + //OMP_open + for (int i = 0, edgeCounter = 0; i < 
mesh->n_vertices(); i++) + { + Eigen::Matrix3d edgeMatrixSum = Eigen::Matrix3d::Zero(); + VertexHandle vi(i); + for (OpenVolumeMesh::VertexVertexIter vj = deformedMesh.vv_iter(vi); vj; vj++) + { + int j = vj->idx(); + Eigen::Vector3d deformedEdgeij = OVtoE(deformedMesh.vertex(vi) - deformedMesh.vertex(VertexHandle(j))); + //edgeMatrixSum += edge_weights[edgeCounter] * vec2mat(edgeijs[edgeCounter], deformedEdgeij); + edgeMatrixSum += vec2mat(edgeijs[edgeCounter], deformedEdgeij); + edgeCounter++; + } + Eigen::JacobiSVD svd(edgeMatrixSum, Eigen::ComputeThinU | Eigen::ComputeThinV); + Eigen::MatrixXd U = svd.matrixU(); + Eigen::MatrixXd V = svd.matrixV(); + Eigen::Matrix3d tmpR = V * U.transpose(); + if (tmpR.determinant() < 0) + { + Eigen::VectorXd sigularDiagonal = svd.singularValues().diagonal(); + double minSigularValue = sigularDiagonal.minCoeff(); + int id = 0; + for (int m = 0; m < sigularDiagonal.size(); m++) + { + if (minSigularValue == sigularDiagonal[m]) + id = m; + } + for (int m = 0; m < 3; m++) + { + U(m, id) = 0 - U(m, id); + } + tmpR = V * U.transpose(); + } + R[i] = tmpR; + } + //OMP_end +} + +void ARAPDeform::yyj_ARAPDeform(std::string &handlefile, std::string outputFolder) +{ + std::ifstream iff(handlefile.c_str()); + this->loadConstPoint(iff); + //for (int i = 0; i < mesh->n_vertices(); i++) { + for (int i = 0; i < barycentric.size(); i++) { + this->controlpoint_number.push_back(make_pair(i, Eigen::Vector3d(0, 0, 0))); // ¿ØÖƵã(ÖØÐÄ×ø±ê)ÊýÄ¿ + } + TetrahedralMesh *deformed_mesh; + deformed_mesh = new TetrahedralMesh(*this->mesh); + std::vector Rots; + + for (int i = 0; i < mesh->n_vertices(); i++) + { + Rots.push_back(Eigen::Matrix3d::Identity()); + } + + //this->global_step_pre(*deformed_mesh); + this->eigen_global_step_pre(*deformed_mesh); + + int columnNumber; + if (!hardConstrain) + { + columnNumber = mesh->n_vertices() * 3; + } + else + { + columnNumber = mesh->n_vertices() * 3 + this->controlpoint_number.size(); + } + int rowNumber = (edgeijs.size() + this->controlpoint_number.size()) * 3; + //this->vectorBSize = rowNumber + 1; + + //this->yyj_CholeskyPre(this->matEngine, rowNumber, columnNumber, AcsrRowIndPtr[rowNumber], AcsrRowIndPtr, AcsrColPtr, AcsrValPtr); + std::cout << "Construct sparse A" << std::endl; + //Eigen::Map > sparseA(rowNumber, columnNumber, AcsrRowIndPtr[rowNumber], AcsrRowIndPtr, AcsrColPtr, AcsrValPtr); + Eigen::SparseMatrix sparseA(rowNumber, columnNumber); + sparseA.setFromTriplets(this->tripletList.begin(), this->tripletList.end()); + std::cout << "Construct sparse AT" << std::endl; + Eigen::SparseMatrix sparseAT = sparseA.transpose(); + std::cout << "cholesky begin" << std::endl; + Eigen::SimplicialCholesky> chol(sparseAT*sparseA); + + // modify to sequence deformation. 
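+    // Per-frame ARAP solve: for every target frame in seq_constPoint (filled by
+    // loadConstPoint from the handle file), the loop below alternates the two
+    // classic ARAP steps maxIterTime times:
+    //   Global step - with the per-vertex rotations Rots held fixed, rebuild the
+    //   right-hand side b (0.5*(R_i+R_j)*w_ij*e_ij for every directed edge,
+    //   followed by the barycentric handle targets) and solve the normal
+    //   equations (A^T A) x = A^T b with the Cholesky factorization chol above.
+    //   Local step - with the vertex positions held fixed, local_step() re-fits
+    //   each R_i from the SVD of its one-ring edge covariance, flipping the
+    //   column of U for the smallest singular value whenever det(R_i) < 0.
+    // The deformed mesh of each frame is then written to outputFolder as .ovm.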
+ // ÔÚload_data´¦¶¨Òåseq_constPoint + for (int seq_id = 0; seq_id < seq_constPoint.size(); seq_id++) { + constPoint = seq_constPoint[seq_id]; + std::cout << "processing the " << seq_id << " deformation" << std::endl; + for (int iterationCounter = 0; iterationCounter < this->maxIterTime; iterationCounter++) + { + memset(this->vectorBPtr, 0, this->vectorBSize * sizeof(double)); + int rowCounter = 0; //used in assign value to Vertex Bs + + double lambdaDeformWeightCiCij = 0; + Eigen::Vector3d edgeij_weight; + Eigen::Matrix3d Ri, Rj; + Eigen::Vector3d RiRjEdgeij; + std::vector VecRiRjEdgeij; + /*VecRiRjEdgeij.resize(edgeijs.size(), Eigen::Vector3d::Zero());*/ + + for (int i = 0, edgeCounter = 0; i < mesh->n_vertices(); i++) + { + //iterate point j (i adjacent points) + VertexHandle vi(i); + //int edgeijIndex = this->edge_index[i];//first neighbor edge index + Ri = Rots[i]; + + //iterate point j (i adjacent points) + for (OpenVolumeMesh::VertexVertexIter vj = deformed_mesh->vv_iter(vi); vj; vj++) + { + int j = vj->idx(); + //lambdaDeformWeightCiCij = this->edge_weights[edgeijIndex]; + lambdaDeformWeightCiCij = this->edge_weights[edgeCounter]; + edgeij_weight = this->edge_weights[edgeCounter] * this->edgeijs[edgeCounter];//edgejk = pj -pk + //assert(edgeij_weight == edgeij_weight); + Rj = Rots[j]; + RiRjEdgeij = 0.5 * (Ri + Rj) * edgeij_weight; + VecRiRjEdgeij.push_back(RiRjEdgeij); + //VecRiRjEdgeij[_edgetick] = RiRjEdgeij; + //_edgetick++; + //Point parameter + //this->vectorBPtr[rowCounter + 0] = lambdaDeformWeightCiCij * (this->constPoint[j][0] * this->isConst_i[j] - this->constPoint[i][0] * this->isConst_i[i]); + //this->vectorBPtr[rowCounter + 1] = lambdaDeformWeightCiCij * (this->constPoint[j][1] * this->isConst_i[j] - this->constPoint[i][1] * this->isConst_i[i]); + //this->vectorBPtr[rowCounter + 2] = lambdaDeformWeightCiCij * (this->constPoint[j][2] * this->isConst_i[j] - this->constPoint[i][2] * this->isConst_i[i]); + edgeCounter++; + rowCounter += 3; + } + } + + for (int j = 0; j < half_edge_num; j++) + { + this->vectorBPtr[j * 3 + 0] += VecRiRjEdgeij[j][0]; + this->vectorBPtr[j * 3 + 1] += VecRiRjEdgeij[j][1]; + this->vectorBPtr[j * 3 + 2] += VecRiRjEdgeij[j][2]; + } + + //handle point as hard constrain + for (int i = 0; i < this->controlpoint_number.size(); i++) + { + int constrolpointid = this->controlpoint_number[i].first; + this->vectorBPtr[rowCounter + 0] = constPoint[constrolpointid][0]; + this->vectorBPtr[rowCounter + 1] = constPoint[constrolpointid][1]; + this->vectorBPtr[rowCounter + 2] = constPoint[constrolpointid][2]; + rowCounter += 3; + } + + //ȱÉÙÒ»¸öº¯ÊýÀûÓÃÕâЩ±äÁ¿½øÐÐÇó½â + long t1 = clock(); + //this->yyj_LeastSquareSolve(this->matEngine, rowNumber, columnNumber, AcsrRowIndPtr[rowNumber], AcsrRowIndPtr, AcsrColPtr, AcsrValPtr, vectorBPtr, resultX); + //this->yyj_CholeskySolve(this->matEngine, rowNumber, columnNumber, vectorBPtr, resultX); + vectorB.resize(this->vectorBSize); + for (int i = 0; i < this->vectorBSize; i++) + { + vectorB[i] = vectorBPtr[i]; + } + Eigen::VectorXd x = chol.solve(sparseAT*vectorB); + std::cout << "Global Time:" << clock() - t1 << std::endl; + + for (int i = 0; i < mesh->n_vertices(); i++) + { + VertexHandle vi(i); + //Tet_vec3d tmp_vertex(this->resultX[i * 3 + 0], this->resultX[i * 3 + 1], this->resultX[i * 3 + 2]); + Tet_vec3d tmp_vertex(x[i * 3 + 0], x[i * 3 + 1], x[i * 3 + 2]); + deformed_mesh->set_vertex(vi, tmp_vertex); + } + + long t2 = clock(); + local_step(Rots, *deformed_mesh); + std::cout << "Local Time:" << clock() - t2 << 
std::endl; + } // end of iteration + + string file_id = std::to_string(seq_id); + while (file_id.size() < 4) file_id = "0" + file_id; + string outputName = outputFolder + "/arap_result_" + file_id +"_.ovm"; + myWriteFile(outputName, *deformed_mesh); + //this->matEngine.EvalString("close"); + } + //this->matEngine.EvalString("exit"); +} + +//bool ARAPDeform::yyj_LeastSquareSolve(Utility::MatEngine &matEngine, int rowNum, int colNum, int Annz, int *rowPtr, int *colPtr, double *valPtr, const double *b, double *x) +//{ +// mxArray *sparseMatrixA = mxCreateSparse(colNum, rowNum, Annz, mxREAL); +// mwIndex *mxIrA = mxGetIr(sparseMatrixA); +// mwIndex *mxJcA = mxGetJc(sparseMatrixA); +// double *mxPrA = mxGetPr(sparseMatrixA); +// for (int i = 0; i < Annz; i++) +// { +// mxIrA[i] = (mwIndex)colPtr[i]; +// mxPrA[i] = (double)valPtr[i]; +// } +// for (int i = 0; i <= rowNum; i++) +// { +// mxJcA[i] = (mwIndex)rowPtr[i]; +// } +// //long t3 = clock(); +// matEngine.PutVariable("AT", sparseMatrixA); +// //std::cout << "Put AT:" << clock() - t3 << endl; +// +// mxArray* bb = mxCreateDoubleMatrix(rowNum, 1, mxREAL); +// double* bbPtr = mxGetPr(bb); +// for (int i = 0; i < vectorBSize; i++) +// { +// bbPtr[i] = b[i]; +// } +// //t3 = clock(); +// matEngine.PutVariable("b", bb); +// //std::cout << "Put b:" << clock() - t3 << endl; +// //t3 = clock(); +// matEngine.EvalString("A = AT';"); +// //matEngine.EvalString("x = AT*A\\(AT*b);"); +// matEngine.EvalString("x = A\\b;"); +// //std::cout << "Solve:" << clock() - t3 << endl; +// //t3 = clock(); +// mxArray* x1 = matEngine.GetVariable("x"); +// double* x1Ptr = mxGetPr(x1); +// for (int i = 0; i < colNum; i++) +// { +// x[i] = x1Ptr[i]; +// } +// //std::cout << "Ger result:" << clock() - t3 << endl; +// +// return true; +//} + +//bool ARAPDeform::yyj_CholeskyPre(Utility::MatEngine &matEngine, int rowNum, int colNum, int Annz, int *rowPtr, int *colPtr, double *valPtr) +//{ +// mxArray *sparseMatrixA = mxCreateSparse(colNum, rowNum, Annz, mxREAL); +// mwIndex *mxIrA = mxGetIr(sparseMatrixA); +// mwIndex *mxJcA = mxGetJc(sparseMatrixA); +// double *mxPrA = mxGetPr(sparseMatrixA); +// for (int i = 0; i < Annz; i++) +// { +// mxIrA[i] = (mwIndex)colPtr[i]; +// mxPrA[i] = (double)valPtr[i]; +// } +// for (int i = 0; i <= rowNum; i++) +// { +// mxJcA[i] = (mwIndex)rowPtr[i]; +// } +// matEngine.PutVariable("AT", sparseMatrixA); +// +// long t3 = clock(); +// matEngine.EvalString("A = AT';"); +// matEngine.EvalString("[UA, p] = chol(AT*A);"); +// std::cout << "Cholesky:" << clock() - t3 << endl; +// mxArray* UAp = matEngine.GetVariable("p"); +// double* UApPtr = mxGetPr(UAp); +// if (UApPtr[0]) +// { +// matEngine.EvalString("[LA,UA] = lu(AT*A);"); +// std::cout << "LU" << endl; +// } +// else +// { +// matEngine.EvalString("LA = UA';"); +// } +// +// return true; +//} + +//bool ARAPDeform::yyj_CholeskySolve(Utility::MatEngine &matEngine, int rowNum, int colNum, const double *b, double *x) +//{ +// mxArray* bb = mxCreateDoubleMatrix(rowNum, 1, mxREAL); +// double* bbPtr = mxGetPr(bb); +// for (int i = 0; i < vectorBSize; i++) +// { +// bbPtr[i] = b[i]; +// } +// matEngine.PutVariable("b", bb); +// +// long t3 = clock(); +// matEngine.EvalString("x_tmp = LA\\(AT*b);"); +// std::cout << "Solve1:" << clock() - t3 << endl; +// t3 = clock(); +// matEngine.EvalString("x = UA\\x_tmp;"); +// std::cout << "Solve2:" << clock() - t3 << endl; +// mxArray* x1 = matEngine.GetVariable("x"); +// double* x1Ptr = mxGetPr(x1); +// for (int i = 0; i < colNum; i++) +// { +// x[i] = 
x1Ptr[i]; +// } +// +// return true; +//} + +ARAPDeform::~ARAPDeform() +{ + //this->matEngine.CloseEngine(); + free(this->AcsrRowIndPtr); + free(this->AcsrColPtr); + free(this->AcsrValPtr); + free(this->vectorBPtr); +} \ No newline at end of file diff --git a/contrib/StylizedNeRF/LICENSE b/contrib/StylizedNeRF/LICENSE new file mode 100644 index 00000000..c81455a4 --- /dev/null +++ b/contrib/StylizedNeRF/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 IGLICT + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/contrib/StylizedNeRF/README.md b/contrib/StylizedNeRF/README.md new file mode 100644 index 00000000..f280e4a9 --- /dev/null +++ b/contrib/StylizedNeRF/README.md @@ -0,0 +1,57 @@ +# StylizedNeRF (Jittor): Consistent 3D Scene Stylization as Stylized NeRF via 2D-3D mutual learning + +## Introduction +This repository is code release for StylizedNeRF: Consistent 3D Scene Stylization as Stylized NeRF via 2D-3D mutual learning. + + + +3D scene stylization aims at generating stylized images of the scene from arbitrary novel views following a given set of style examples, while ensuring consistency when rendered from different views. Directly applying methods for image or video stylization to 3D scenes cannot achieve such consistency. Thanks to recently proposed neural radiance fields (NeRF), we are able to represent a 3D scene in a consistent way. Consistent 3D scene stylization can be effectively achieved by stylizing the corresponding NeRF. However, there is a significant domain gap between style examples which are 2D images and NeRF which is an implicit volumetric representation. To address this problem, we propose a novel mutual learning framework for 3D scene stylization that combines a 2D image stylization network and NeRF to fuse the stylization ability of 2D stylization network with the 3D consistency of NeRF. We first pre-train a standard NeRF of the 3D scene to be stylized and replace its color prediction module with a style network to obtain a stylized NeRF. It is followed by distilling the prior knowledge of spatial consistency from NeRF to the 2D stylization network through an introduced consistency loss. We also introduce a mimic loss to supervise the mutual learning of the NeRF style module and fine-tune the 2D stylization decoder. In order to further make our model handle ambiguities of 2D stylization results, we introduce learnable latent codes that obey the probability distributions conditioned on the style. 
They are attached to training samples as conditional inputs to better learn the style module in our novel stylized NeRF. Experimental results demonstrate that our method is superior to existing approaches in both visual quality and long-range consistency. + +## Installation + +The code is tested with Ubuntu 18.04, Python 3.8, Jittor 1.2.2.58, CUDA 10.0 and cuDNN v7.5. + +Set Up Environment + + [1] Run 'virtualenv stylenerf -p python3.7' to build a environment and 'source ./stylenerf/bin/activate' to activate it + [2] Run 'pip install -r requirements.txt' to install libraries (Notice that pytorch3d should be of version 0.4.0 !!!) + +For the jittor installation, please refer to [this link](https://cg.cs.tsinghua.edu.cn/jittor/download). + + +## Data preprocessing + + [1] Download the llff example data from official website http://cseweb.ucsd.edu/~viscomp/projects/LF/papers/ECCV20/nerf/nerf_example_data.zip + [2] Prepare style images in ./style for stylized NeRF training and ./all_styles for VAE + +## Pre-trained Model Preparation + + [1] Download the checkpoints of the VGG to ./pretrained + [2] Train the decoder of AdaIN from scratch by running 'python train_style_modules.py --task finetune_decoder' or put the existing checkpoints of the decoder of AdaIN to ./pretrained + [3] Run 'python train_style_modules.py --task vae' to pre-train the VAE + +## Train and Evaluate a Stylized NeRF + [1] Run 'python run_stylenerf.py --config ./configs/fern.txt' to train our model + [2] Run 'python run_stylenerf.py --config ./configs/fern.txt --render_train_style --chunk 512' to check the outputs of the traning views + [3] Run 'python run_stylenerf.py --config ./configs/fern.txt --render_valid_style --chunk 512' to check the outputs of the novel views + +## Citation + +If you find our work useful in your research, please consider citing: + + @inproceedings{Huang22StylizedNeRF, + author = {Huang, Yi-Hua and He, Yue and Yuan, Yu-Jie and Lai, Yu-Kun and Gao, Lin}, + title = {StylizedNeRF: Consistent 3D Scene Stylization as Stylized NeRF via 2D-3D Mutual Learning }, + booktitle={Computer Vision and Pattern Recognition (CVPR)}, + year = {2022}, + } + + @article{hu2020jittor, + title={Jittor: a novel deep learning framework with meta-operators and unified graph execution}, + author={Hu, Shi-Min and Liang, Dun and Yang, Guo-Ye and Yang, Guo-Wei and Zhou, Wen-Yang}, + journal={Science China Information Sciences}, + volume={63}, + number={222103}, + pages={1--21}, + year={2020} + } diff --git a/contrib/StylizedNeRF/Style_function.py b/contrib/StylizedNeRF/Style_function.py new file mode 100644 index 00000000..0c75df11 --- /dev/null +++ b/contrib/StylizedNeRF/Style_function.py @@ -0,0 +1,92 @@ +import torch +import torch.nn as nn + + +def calc_mean_std(feat, eps=1e-5): + # eps is a small value added to the variance to avoid divide-by-zero. 
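+    # calc_mean_std expects an (N, C, H, W) feature map and returns the per-sample,
+    # per-channel mean and standard deviation over the spatial dimensions, both
+    # reshaped to (N, C, 1, 1) so they broadcast against the input.
+    # adaptive_instance_normalization below uses these statistics to compute
+    #     AdaIN(x, y) = sigma(y) * (x - mu(x)) / sigma(x) + mu(y),
+    # i.e. content features are whitened with their own statistics and then
+    # re-colored with the statistics of the style features.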
+ size = feat.size() + assert (len(size) == 4) + N, C = size[:2] + feat_var = feat.view(N, C, -1).var(dim=2) + eps + feat_std = feat_var.sqrt().view(N, C, 1, 1) + feat_mean = feat.view(N, C, -1).mean(dim=2).view(N, C, 1, 1) + return feat_mean, feat_std + + +def adaptive_instance_normalization(content_feat, style_feat): + assert (content_feat.size()[:2] == style_feat.size()[:2]) + size = content_feat.size() + style_mean, style_std = calc_mean_std(style_feat) + content_mean, content_std = calc_mean_std(content_feat) + + normalized_feat = (content_feat - content_mean.expand( + size)) / content_std.expand(size) + return normalized_feat * style_std.expand(size) + style_mean.expand(size) + + +def _calc_feat_flatten_mean_std(feat): + # takes 3D feat (C, H, W), return mean and std of array within channels + assert (feat.size()[0] == 3) + assert (isinstance(feat, torch.FloatTensor)) + feat_flatten = feat.view(3, -1) + mean = feat_flatten.mean(dim=-1, keepdim=True) + std = feat_flatten.std(dim=-1, keepdim=True) + return feat_flatten, mean, std + + +def _mat_sqrt(x): + U, D, V = torch.svd(x) + return torch.mm(torch.mm(U, D.pow(0.5).diag()), V.t()) + + +def coral(source, target): + # assume both source and target are 3D array (C, H, W) + # Note: flatten -> f + + source_f, source_f_mean, source_f_std = _calc_feat_flatten_mean_std(source) + source_f_norm = (source_f - source_f_mean.expand_as( + source_f)) / source_f_std.expand_as(source_f) + source_f_cov_eye = \ + torch.mm(source_f_norm, source_f_norm.t()) + torch.eye(3) + + target_f, target_f_mean, target_f_std = _calc_feat_flatten_mean_std(target) + target_f_norm = (target_f - target_f_mean.expand_as( + target_f)) / target_f_std.expand_as(target_f) + target_f_cov_eye = \ + torch.mm(target_f_norm, target_f_norm.t()) + torch.eye(3) + + source_f_norm_transfer = torch.mm( + _mat_sqrt(target_f_cov_eye), + torch.mm(torch.inverse(_mat_sqrt(source_f_cov_eye)), + source_f_norm) + ) + + source_f_transfer = source_f_norm_transfer * \ + target_f_std.expand_as(source_f_norm) + \ + target_f_mean.expand_as(source_f_norm) + + return source_f_transfer.view(source.size()) + + +def styleLoss(input, target): + ib, ic, ih, iw = input.size() + iF = input.view(ib, ic, -1) + iMean = torch.mean(iF, dim=2) + iCov = GramMatrix(input) + + tb, tc, th, tw = target.size() + tF = target.view(tb, tc, -1) + tMean = torch.mean(tF, dim=2) + tCov = GramMatrix(target) + + loss = nn.MSELoss(size_average=False)(iMean,tMean) + nn.MSELoss(size_average=False)(iCov, tCov) + return loss/tb + + +def GramMatrix(input): + b, c, h, w = input.size() + f = input.view(b, c, h*w) # bxcx(hxw) + # torch.bmm(batch1, batch2, out=None) # + # batch1: bxmxp, batch2: bxpxn -> bxmxn # + G = torch.bmm(f, f.transpose(1, 2)) # f: bxcx(hxw), f.transpose: bx(hxw)xc -> bxcxc + return G.div_(c*h*w) diff --git a/contrib/StylizedNeRF/VGGNet.py b/contrib/StylizedNeRF/VGGNet.py new file mode 100644 index 00000000..fa1477f4 --- /dev/null +++ b/contrib/StylizedNeRF/VGGNet.py @@ -0,0 +1,172 @@ +import torch.nn as nn + +from Style_function import adaptive_instance_normalization as adain +from Style_function import calc_mean_std, styleLoss, GramMatrix + +decoder = nn.Sequential( + nn.ReflectionPad2d((1, 1, 1, 1)), + nn.Conv2d(512, 256, (3, 3)), + nn.ReLU(), + nn.Upsample(scale_factor=2, mode='nearest'), + nn.ReflectionPad2d((1, 1, 1, 1)), + nn.Conv2d(256, 256, (3, 3)), + nn.ReLU(), + nn.ReflectionPad2d((1, 1, 1, 1)), + nn.Conv2d(256, 256, (3, 3)), + nn.ReLU(), + nn.ReflectionPad2d((1, 1, 1, 1)), + nn.Conv2d(256, 256, (3, 3)), + 
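+    # still at 1/4 of the output resolution here: one of the three nearest-neighbour
+    # upsamplings that mirror the encoder's max-pool stages has been applied; the
+    # remaining two follow below, before the final 64->3 conv that produces RGB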
nn.ReLU(), + nn.ReflectionPad2d((1, 1, 1, 1)), + nn.Conv2d(256, 128, (3, 3)), + nn.ReLU(), + nn.Upsample(scale_factor=2, mode='nearest'), + nn.ReflectionPad2d((1, 1, 1, 1)), + nn.Conv2d(128, 128, (3, 3)), + nn.ReLU(), + nn.ReflectionPad2d((1, 1, 1, 1)), + nn.Conv2d(128, 64, (3, 3)), + nn.ReLU(), + nn.Upsample(scale_factor=2, mode='nearest'), + nn.ReflectionPad2d((1, 1, 1, 1)), + nn.Conv2d(64, 64, (3, 3)), + nn.ReLU(), + nn.ReflectionPad2d((1, 1, 1, 1)), + nn.Conv2d(64, 3, (3, 3)), +) + +vgg = nn.Sequential( + nn.Conv2d(3, 3, (1, 1)), + nn.ReflectionPad2d((1, 1, 1, 1)), + nn.Conv2d(3, 64, (3, 3)), + nn.ReLU(), # relu1-1 + nn.ReflectionPad2d((1, 1, 1, 1)), + nn.Conv2d(64, 64, (3, 3)), + nn.ReLU(), # relu1-2 + nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True), + nn.ReflectionPad2d((1, 1, 1, 1)), + nn.Conv2d(64, 128, (3, 3)), + nn.ReLU(), # relu2-1 + nn.ReflectionPad2d((1, 1, 1, 1)), + nn.Conv2d(128, 128, (3, 3)), + nn.ReLU(), # relu2-2 + nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True), + nn.ReflectionPad2d((1, 1, 1, 1)), + nn.Conv2d(128, 256, (3, 3)), + nn.ReLU(), # relu3-1 + nn.ReflectionPad2d((1, 1, 1, 1)), + nn.Conv2d(256, 256, (3, 3)), + nn.ReLU(), # relu3-2 + nn.ReflectionPad2d((1, 1, 1, 1)), + nn.Conv2d(256, 256, (3, 3)), + nn.ReLU(), # relu3-3 + nn.ReflectionPad2d((1, 1, 1, 1)), + nn.Conv2d(256, 256, (3, 3)), + nn.ReLU(), # relu3-4 + nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True), + nn.ReflectionPad2d((1, 1, 1, 1)), + nn.Conv2d(256, 512, (3, 3)), + nn.ReLU(), # relu4-1, this is the last layer used + nn.ReflectionPad2d((1, 1, 1, 1)), + nn.Conv2d(512, 512, (3, 3)), + nn.ReLU(), # relu4-2 + nn.ReflectionPad2d((1, 1, 1, 1)), + nn.Conv2d(512, 512, (3, 3)), + nn.ReLU(), # relu4-3 + nn.ReflectionPad2d((1, 1, 1, 1)), + nn.Conv2d(512, 512, (3, 3)), + nn.ReLU(), # relu4-4 + nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True), + nn.ReflectionPad2d((1, 1, 1, 1)), + nn.Conv2d(512, 512, (3, 3)), + nn.ReLU(), # relu5-1 + nn.ReflectionPad2d((1, 1, 1, 1)), + nn.Conv2d(512, 512, (3, 3)), + nn.ReLU(), # relu5-2 + nn.ReflectionPad2d((1, 1, 1, 1)), + nn.Conv2d(512, 512, (3, 3)), + nn.ReLU(), # relu5-3 + nn.ReflectionPad2d((1, 1, 1, 1)), + nn.Conv2d(512, 512, (3, 3)), + nn.ReLU() # relu5-4 +) + + +class Net(nn.Module): + def __init__(self, encoder, decoder): + super(Net, self).__init__() + enc_layers = list(encoder.children()) + self.enc_1 = nn.Sequential(*enc_layers[:4]) # input -> relu1_1 + self.enc_2 = nn.Sequential(*enc_layers[4:11]) # relu1_1 -> relu2_1 + self.enc_3 = nn.Sequential(*enc_layers[11:18]) # relu2_1 -> relu3_1 + self.enc_4 = nn.Sequential(*enc_layers[18:31]) # relu3_1 -> relu4_1 + self.decoder = decoder + self.mse_loss = nn.MSELoss() + + # fix the encoder + for name in ['enc_1', 'enc_2', 'enc_3', 'enc_4']: + for param in getattr(self, name).parameters(): + param.requires_grad = False + + # extract relu1_1, relu2_1, relu3_1, relu4_1 from input image + def encode_with_intermediate(self, input): + results = [input] + for i in range(4): + func = getattr(self, 'enc_{:d}'.format(i + 1)) + results.append(func(results[-1])) + return results[1:] + + # extract relu4_1 from input image + def encode(self, input): + for i in range(4): + input = getattr(self, 'enc_{:d}'.format(i + 1))(input) + return input + + def calc_content_loss(self, input, target): + assert (input.size() == target.size()) + assert (target.requires_grad is False) + return self.mse_loss(input, target) + + def calc_style_loss(self, input, target): + # assert (input.size() == target.size()) + assert (target.requires_grad 
is False) + input_mean, input_std = calc_mean_std(input) + target_mean, target_std = calc_mean_std(target) + return self.mse_loss(input_mean, target_mean) + \ + self.mse_loss(input_std, target_std) + + def calc_style_loss_gram(self, inputs, targets): + style_loss = 0 + for i in range(len(inputs)): + input, target = inputs[i], targets[i] + style_loss += styleLoss(input, target) + return style_loss + + def calc_nerf_loss(self, x, content_gt, style_gt): + fea_x = self.encode_with_intermediate(x) + fea_style_gt = self.encode_with_intermediate(style_gt) + fea_content_gt = self.encode_with_intermediate(content_gt) + # loss_s = self.calc_style_loss(fea_x[0], fea_style_gt[0]) + loss_s = self.calc_style_loss_gram(fea_x, fea_style_gt) + loss_c = self.calc_content_loss(fea_x[-1], fea_content_gt[-1]) + return loss_c, loss_s + + def forward(self, content, style, alpha=1.0, return_stylized_content=False): + assert 0 <= alpha <= 1 + style_feats = self.encode_with_intermediate(style) + content_feat = self.encode(content) + stylized_content_feat = adain(content_feat, style_feats[-1]) + stylized_content_feat = alpha * stylized_content_feat + (1 - alpha) * content_feat + + stylized_content = self.decoder(stylized_content_feat) + stylized_content_feat_encoded = self.encode_with_intermediate(stylized_content) + + loss_c = self.calc_content_loss(stylized_content_feat_encoded[-1], stylized_content_feat) + loss_s = self.calc_style_loss(stylized_content_feat_encoded[0], style_feats[0]) + for i in range(1, 4): + loss_s += self.calc_style_loss(stylized_content_feat_encoded[i], style_feats[i]) + if return_stylized_content: + return loss_c, loss_s, stylized_content + else: + return loss_c, loss_s + diff --git a/contrib/StylizedNeRF/config.py b/contrib/StylizedNeRF/config.py new file mode 100644 index 00000000..2a101fa0 --- /dev/null +++ b/contrib/StylizedNeRF/config.py @@ -0,0 +1,149 @@ +import configargparse +parser = configargparse.ArgumentParser() + + +def config_parser(): + parser.add_argument('--config', is_config_file=True, + help='config file path') + parser.add_argument("--expname", type=str, + help='experiment name') + parser.add_argument("--basedir", type=str, default='./logs/', + help='where to store ckpts and logs') + + # data set options + parser.add_argument("--datadir", type=str, default='../../scan/seven_floor/', + help='input data directory') + parser.add_argument("--styledir", type=str, default='./style/') + parser.add_argument("--decoder_pth_path", type=str, default='./pretrained/decoder.pth') + parser.add_argument("--vgg_pth_path", type=str, default='./pretrained/vgg_normalised.pth') + parser.add_argument("--vae_pth_path", type=str, default='./pretrained/vae.pth') + parser.add_argument("--dataset_type", type=str, default='llff') + parser.add_argument("--factor", type=float, default=1., + help='factor to downsample images') + parser.add_argument("--gen_factor", type=float, default=0.2, # 5, + help='factor for interpolate trace when style training') + parser.add_argument("--valid_factor", type=float, default=0.05, + help='factor for interpolate trace when validating') + parser.add_argument("--no_ndc", action='store_true', help='No NDC for llff dataset.') + parser.add_argument("--white_bkgd", action='store_true', help='White Background for blender dataset.') + parser.add_argument("--half_res", action='store_true', help='Half resolution for linemod dataset.') + parser.add_argument("--num_workers", type=int, default=0, help='Number of workers for torch dataloader.') + parser.add_argument("--spherify", 
action='store_true', help='Spherify camera poses or not') + parser.add_argument("--store_rays", type=int, default=1, + help='factor to downsample images') + + # training options + parser.add_argument("--use_viewdir", action='store_true', + help='use view direction as input.') + parser.add_argument("--sample_type", type=str, default='uniform', + help='Types of sampling: [uniform]') + parser.add_argument("--act_type", type=str, default='relu', + help='Types of activation: [relu, tanh, elu]') + parser.add_argument("--nerf_type", type=str, default='nerf', + help='Types of nerf: [nerf]') + parser.add_argument("--style_type", type=str, default='mlp', + help='Types of style module: [mlp]') + parser.add_argument("--latent_type", type=str, default='variational', + help='Types of latent module: [variational latent]') + parser.add_argument("--nerf_type_fine", type=str, default='nerf', + help='Types of fine nerf: [nerf]') + parser.add_argument("--sigma_noise_std", type=float, default=1e0, + help='std dev of noise added to regularize sigma output, 1e0 recommended') + parser.add_argument("--siren_sigma_mul", type=float, default=20., + help='amplify positive sigma for siren') + + parser.add_argument("--rgb_loss_lambda", type=float, default=1., + help='Coefficient for style loss') + parser.add_argument("--rgb_loss_lambda_2d", type=float, default=10., + help='Coefficient for style loss') + parser.add_argument("--style_loss_lambda", type=float, default=1., + help='Coefficient for style loss') + parser.add_argument("--content_loss_lambda", type=float, default=1., + help='Coefficient for style loss') + parser.add_argument("--logp_loss_lambda", type=float, default=0.1, + help='Coefficient for logp loss') + parser.add_argument("--logp_loss_decay", type=float, default=1., + help='Decay rate for logp loss per 1000 steps') + parser.add_argument("--lambda_u", type=float, default=0.01, + help='Nerf in the wild lambda u hyper parameter') + + # Network + parser.add_argument("--netdepth", type=int, default=8, + help='layers in network') + parser.add_argument("--netwidth", type=int, default=256, + help='channels per layer') + parser.add_argument("--netdepth_fine", type=int, default=8, + help='layers in network') + parser.add_argument("--netwidth_fine", type=int, default=256, + help='channels per layer') + parser.add_argument("--style_D", type=int, default=8, + help='style layers in network') + parser.add_argument("--style_feature_dim", type=int, default=1024, + help='style feature dimension') + + # VAE + parser.add_argument('--vae_d', type=int, default=4) + parser.add_argument('--vae_w', type=int, default=512) + parser.add_argument('--vae_latent', type=int, default=32) + parser.add_argument('--vae_kl_lambda', type=float, default=0.1) + + parser.add_argument("--embed_freq_coor", type=int, default=10, + help='frequency of coordinate embedding') + parser.add_argument("--embed_freq_dir", type=int, default=4, + help='frequency of direction embedding') + parser.add_argument("--batch_size", type=int, default=2048, + help='batch size (number of random rays per gradient step)') + parser.add_argument("--batch_size_style", type=int, default=1024, + help='batch size (number of random rays per gradient step)') + parser.add_argument("--lrate", type=float, default=5e-4, + help='learning rate') + parser.add_argument("--lrate_decay", type=int, default=100000, + help='exponential learning rate decay (in 1000 steps)') + parser.add_argument("--chunk", type=int, default=1024*32, + help='number of rays processed in parallel, decrease if 
running out of memory') + parser.add_argument("--no_reload", action='store_true', + help='do not reload weights from saved ckpt') + parser.add_argument("--total_step", type=int, default=50000001, + help='total training step') + parser.add_argument("--origin_step", type=int, default=250000, + help='total training step') + parser.add_argument("--decoder_step", type=int, default=3500000, + help='total training step') + parser.add_argument("--steps_per_opt", type=int, default=1, + help='Steps for gradient accumulation') + parser.add_argument("--steps_patch", type=int, default=-1, + help='Steps interval for patch sampling') + + parser.add_argument("--N_samples", type=int, default=64, + help='The number of sampling points per ray') + parser.add_argument("--N_samples_fine", type=int, default=64, + help='The number of sampling points per ray for fine network') + + # logging/saving options + parser.add_argument("--i_print", type=int, default=100, + help='frequency of console printout and metric loggin') + parser.add_argument("--i_weights", type=int, default=5000, + help='frequency of weight ckpt saving') + parser.add_argument("--i_video", type=int, default=50000*100, + help='frequency of render_poses video saving') + parser.add_argument("--ckp_num", type=int, default=3, + help='Max number of saved ckpts.') + + parser.add_argument("--render_valid", action='store_true', + help='render valid') + parser.add_argument("--render_train", action='store_true', + help='render train') + parser.add_argument("--render_valid_style", action='store_true', + help='render valid style') + parser.add_argument("--render_train_style", action='store_true', + help='render train style') + parser.add_argument("--sigma_scale", type=float, default=1.) + + # Pixel Alignment + parser.add_argument("--pixel_alignment", action='store_true', + help='Pixel Alignment with half a pixel.') + + parser.add_argument("--TT_far", type=float, default=8., help='Far value of TT dataset NeRF') + + args = parser.parse_args() + return args diff --git a/contrib/StylizedNeRF/configs/fern.txt b/contrib/StylizedNeRF/configs/fern.txt new file mode 100644 index 00000000..d942f7b3 --- /dev/null +++ b/contrib/StylizedNeRF/configs/fern.txt @@ -0,0 +1,27 @@ +expname = fern_style +basedir = ./logs +datadir = ./data/fern +styledir = ./small_style/ +dataset_type = llff + +factor = 4 + +nerf_type = style_nerf +nerf_type_fine = style_nerf +style_type = mlp +latent_type = variational + +batch_size = 2048 +batch_size_style = 1024 +N_samples = 64 +N_samples_fine = 64 + +use_viewdir +sigma_noise_std = 1e0 + +origin_step = 120001 +total_step = 5000001 + +style_D = 8 +valid_factor = 3 +gen_factor = 1 diff --git a/contrib/StylizedNeRF/configs/flower.txt b/contrib/StylizedNeRF/configs/flower.txt new file mode 100644 index 00000000..6f046f4c --- /dev/null +++ b/contrib/StylizedNeRF/configs/flower.txt @@ -0,0 +1,26 @@ +expname = flower_style +basedir = ./logs +datadir = ./data/flower +styledir = ./style/ +dataset_type = llff + +factor = 4 + +nerf_type = style_nerf +nerf_type_fine = style_nerf +style_type = mlp +latent_type = variational + +batch_size = 2048 +batch_size_style = 1024 +N_samples = 64 +N_samples_fine = 64 + +use_viewdir +sigma_noise_std = 1e0 + +origin_step = 120001 +total_step = 5000001 + +style_D = 8 +valid_factor = 0.05 diff --git a/contrib/StylizedNeRF/dataset.py b/contrib/StylizedNeRF/dataset.py new file mode 100644 index 00000000..54330921 --- /dev/null +++ b/contrib/StylizedNeRF/dataset.py @@ -0,0 +1,796 @@ +import os +import cv2 +import glob +import 
torch +import VGGNet +import numpy as np +from tqdm import tqdm +from PIL import Image +import torch.nn as nn +from pathlib import Path +from torchvision import transforms +from torch.utils.data import Dataset +from Style_function import adaptive_instance_normalization + +from load_llff import load_llff_data + + +def view_synthesis(cps, factor=10): + frame_num = cps.shape[0] + cps = np.array(cps) + from scipy.spatial.transform import Slerp + from scipy.spatial.transform import Rotation as R + from scipy import interpolate as intp + rots = R.from_matrix(cps[:, :3, :3]) + slerp = Slerp(np.arange(frame_num), rots) + tran = cps[:, :3, -1] + f_tran = intp.interp1d(np.arange(frame_num), tran.T) + + new_num = int(frame_num * factor) + + new_rots = slerp(np.linspace(0, frame_num - 1, new_num)).as_matrix() + new_trans = f_tran(np.linspace(0, frame_num - 1, new_num)).T + + new_cps = np.zeros([new_num, 4, 4], np.float) + new_cps[:, :3, :3] = new_rots + new_cps[:, :3, -1] = new_trans + new_cps[:, 3, 3] = 1 + return new_cps + + +def image_transform(size, crop=False): + transform_list = [] + if size != 0: + transform_list.append(transforms.Resize(size)) + if crop: + transform_list.append(transforms.CenterCrop(size)) + transform_list.append(transforms.ToTensor()) + transform = transforms.Compose(transform_list) + return transform + + +def style_transfer(vgg, decoder, content, style, alpha=1.0, + interpolation_weights=None, return_feature=False): + assert (0.0 <= alpha <= 1.0) + content_f = vgg(content) + style_f = vgg(style) + if interpolation_weights: + _, C, H, W = content_f.size() + feat = torch.FloatTensor(1, C, H, W).zero_().to(content.device) + base_feat = adaptive_instance_normalization(content_f, style_f) + for i, w in enumerate(interpolation_weights): + feat = feat + w * base_feat[i:i + 1] + content_f = content_f[0:1] + else: + feat = adaptive_instance_normalization(content_f, style_f) + feat = feat * alpha + content_f * (1 - alpha) + if not return_feature: + return decoder(feat) + else: + return torch.clamp(decoder(feat), 0., 1.), feat + + +def style_data_prepare(style_path, content_images, size=512, chunk=64, sv_path=None, decode_path='./pretrained/decoder.pth', save_geo=True): + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + """VGG and Decoder""" + decoder = VGGNet.decoder + vgg = VGGNet.vgg + decoder.eval() + vgg.eval() + print('Load decoder from ', decode_path) + decoder_data = torch.load(decode_path) + if 'decoder' in decoder_data.keys(): + decoder.load_state_dict(decoder_data['decoder']) + else: + decoder.load_state_dict(decoder_data) + vgg.load_state_dict(torch.load('./pretrained/vgg_normalised.pth')) + vgg = nn.Sequential(*list(vgg.children())[:31]) + vgg.to(device) + decoder.to(device) + + images_path = glob.glob(style_path + '/*.png') + glob.glob(style_path + '/*.jpg') + glob.glob(style_path + '/*.jpeg') + glob.glob(style_path + '/*.JPG') + glob.glob(style_path + '/*.PNG') + print(style_path, images_path) + style_images, style_paths, style_names = [], [], {} + style_features = np.zeros([len(images_path), 1024], dtype=np.float32) + img_trans = image_transform(size) + for i in tqdm(range(len(images_path))): + images_path[i] = images_path[i].replace('\\', '/') + print("Style Image: " + images_path[i]) + + """Read Style Images""" + style = img_trans(Image.open(images_path[i])) + style_images.append(cv2.resize(np.moveaxis(style.numpy(), 0, -1), (512, 512))) + + """Stylization""" + stylized_images = np.zeros_like(content_images) + style_feature = np.zeros([1024], 
dtype=np.float32) + style = style.float().to(device).unsqueeze(0).expand([chunk, *style.shape]) + start = 0 + while start < content_images.shape[0]: + end = min(start + chunk, content_images.shape[0]) + tmp_imgs = torch.movedim(torch.from_numpy(content_images[start: end]).float().to(device), -1, 1) + with torch.no_grad(): + tmp_stylized_imgs, tmp_style_features = style_transfer(vgg=vgg, decoder=decoder, content=tmp_imgs, style=style[:tmp_imgs.shape[0]], alpha=1., return_feature=True) + tmp_stylized_imgs = np.moveaxis(tmp_stylized_imgs.cpu().numpy(), 1, -1) + for j in range(end-start): + stylized_images[start+j] = cv2.resize(tmp_stylized_imgs[j], (stylized_images.shape[2], stylized_images.shape[1])) + style_feature = np.concatenate([tmp_style_features[0].reshape(-1, 512).mean(dim=0).cpu().numpy(), tmp_style_features[0].reshape([-1, 512]).var(dim=0).cpu().numpy()]) + start = end + + """Stylized Images Saving""" + style_name = images_path[i].split('/')[-1].split('.')[0] + style_names[style_name] = i + if sv_path is not None: + if not os.path.exists(sv_path + '/' + style_name): + os.makedirs(sv_path + '/' + style_name) + for j in range(stylized_images.shape[0]): + Image.fromarray(np.array(stylized_images[j] * 255, np.uint8)).save(sv_path + '/' + style_name + '/%03d.png' % j) + if save_geo: + np.savez(sv_path + '/' + style_name + '/%03d' % j, stylized_image=stylized_images[j]) + style_paths.append(sv_path + '/' + style_name) + style_features[i] = style_feature + style_images = np.stack(style_images) + + return style_names, style_paths, style_images, style_features + + +def get_rays(H, W, K, c2w, pixel_alignment=True): + i, j = torch.meshgrid(torch.linspace(0, W-1, W), torch.linspace(0, H-1, H)) # pytorch's meshgrid has indexing='ij' + i = i.t() + j = j.t() + if pixel_alignment: + i, j = i + .5, j + .5 + dirs = torch.stack([(i-K[0][2])/K[0][0], -(j-K[1][2])/K[1][1], -torch.ones_like(i)], -1) + # Rotate ray directions from camera frame to the world frame + rays_d = torch.sum(dirs[..., np.newaxis, :] * c2w[:3, :3], -1) # dot product, equals to: [c2w.dot(dir) for dir in dirs] + # Translate camera frame's origin to the world frame. It is the origin of all rays. + rays_o = c2w[:3, -1].expand(rays_d.shape) + return rays_o, rays_d + + +def get_rays_np(H, W, K, c2w, pixel_alignment=True): + i, j = np.meshgrid(np.arange(W, dtype=np.float32), np.arange(H, dtype=np.float32), indexing='xy') + if pixel_alignment: + i, j = i + .5, j + .5 + dirs = np.stack([(i-K[0][2])/K[0][0], -(j-K[1][2])/K[1][1], -np.ones_like(i)], -1) + # Rotate ray directions from camera frame to the world frame + rays_d = np.sum(dirs[..., np.newaxis, :] * c2w[:3, :3], -1) # dot product, equals to: [c2w.dot(dir) for dir in dirs] + # Translate camera frame's origin to the world frame. It is the origin of all rays. + rays_o = np.broadcast_to(c2w[:3, -1], np.shape(rays_d)) + return rays_o, rays_d + + +def ndc_rays(H, W, focal, near, rays_o, rays_d): + # Shift ray origins to near plane + t = -(near + rays_o[..., 2]) / rays_d[..., 2] + rays_o = rays_o + t[..., None] * rays_d + + # Projection + o0 = -1./(W/(2.*focal)) * rays_o[..., 0] / rays_o[..., 2] + o1 = -1./(H/(2.*focal)) * rays_o[..., 1] / rays_o[..., 2] + o2 = 1. + 2. * near / rays_o[..., 2] + + d0 = -1./(W/(2.*focal)) * (rays_d[..., 0]/rays_d[..., 2] - rays_o[..., 0]/rays_o[..., 2]) + d1 = -1./(H/(2.*focal)) * (rays_d[..., 1]/rays_d[..., 2] - rays_o[..., 1]/rays_o[..., 2]) + d2 = -2. 
* near / rays_o[..., 2] + + rays_o = torch.stack([o0, o1, o2], -1) + rays_d = torch.stack([d0, d1, d2], -1) + + return rays_o, rays_d + + +def ndc_rays_np(H, W, focal, near, rays_o, rays_d): + # Shift ray origins to near plane + t = -(near + rays_o[..., 2]) / rays_d[..., 2] + rays_o = rays_o + t[..., None] * rays_d + + # Projection + o0 = -1./(W/(2.*focal)) * rays_o[..., 0] / rays_o[..., 2] + o1 = -1./(H/(2.*focal)) * rays_o[..., 1] / rays_o[..., 2] + o2 = 1. + 2. * near / rays_o[..., 2] + + d0 = -1./(W/(2.*focal)) * (rays_d[..., 0]/rays_d[..., 2] - rays_o[..., 0]/rays_o[..., 2]) + d1 = -1./(H/(2.*focal)) * (rays_d[..., 1]/rays_d[..., 2] - rays_o[..., 1]/rays_o[..., 2]) + d2 = -2. * near / rays_o[..., 2] + + rays_o = np.stack([o0, o1, o2], -1) + rays_d = np.stack([d0, d1, d2], -1) + + return rays_o, rays_d + + +class RaySampler(Dataset): + def __init__(self, data_path, factor=2., mode='train', valid_factor=3, dataset_type='llff', white_bkgd=False, half_res=True, no_ndc=False, pixel_alignment=False, spherify=False, TT_far=4.): + super().__init__() + + K = None + if dataset_type == 'llff': + images, poses, bds, render_poses, i_test = load_llff_data(data_path, factor, recenter=True, bd_factor=.75, spherify=spherify) + hwf = poses[0, :3, -1] + poses = poses[:, :3, :4] + print('Loaded llff', images.shape, render_poses.shape, hwf, data_path) + print('DEFINING BOUNDS') + if no_ndc: + near = np.ndarray.min(bds) * .9 + far = np.ndarray.max(bds) * 1. + else: + near = 0. + far = 1. + print('NEAR FAR', near, far) + else: + images = poses = hwf = K = near = far = None + print('Unknown dataset type', dataset_type, 'exiting') + exit(0) + + H, W, focal = hwf + H, W = int(H), int(W) + hwf = [H, W, focal] + + if K is None: + K = np.array([ + [focal, 0, 0.5*W], + [0, focal, 0.5*H], + [0, 0, 1] + ]) + + """Validation Rays""" + cps = np.concatenate([poses[:, :3, :4], np.zeros_like(poses[:, :1, :])], axis=1) + cps[:, 3, 3] = 1. + cps_valid = view_synthesis(cps, valid_factor) + print('get rays of training and validation') + rays_o, rays_d = np.zeros([cps.shape[0], H, W, 3]), np.zeros([cps.shape[0], H, W, 3]) + for i in tqdm(range(cps.shape[0])): + tmp_rays_o, tmp_rays_d = get_rays_np(H, W, K, cps[i, :3, :4], pixel_alignment) + rays_o[i] = tmp_rays_o + rays_d[i] = tmp_rays_d + rays_o_valid, rays_d_valid = np.zeros([cps_valid.shape[0], H, W, 3]), np.zeros([cps_valid.shape[0], H, W, 3]) + for i in tqdm(range(cps_valid.shape[0])): + tmp_rays_o, tmp_rays_d = get_rays_np(H, W, K, cps_valid[i, :3, :4], pixel_alignment) + rays_o_valid[i] = tmp_rays_o + rays_d_valid[i] = tmp_rays_d + + if dataset_type == 'llff' and not no_ndc: + rays_o, rays_d = ndc_rays_np(H, W, K[0][0], 1., rays_o, rays_d) + rays_o_valid, rays_d_valid = ndc_rays_np(H, W, K[0][0], 1., rays_o_valid, rays_d_valid) + + print('K:', K) + print('Camera Pose: ', cps.shape) + + """Setting Attributes""" + self.set_mode(mode) + self.frame_num = cps.shape[0] + self.h, self.w, self.f = H, W, focal + self.hwf = hwf + self.K = K + self.cx, self.cy = W / 2., H / 2. 
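+        # the principal point is taken to be the image centre, i.e. (cx, cy) = (W/2, H/2)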
+ self.cps, self.intr, self.images = cps, K, images + self.cps_valid = cps_valid + self.rays_num = self.frame_num * self.h * self.w + self.near, self.far = near, far + self.rays_o, self.rays_d = rays_o, rays_d + self.rays_o_valid, self.rays_d_valid = rays_o_valid, rays_d_valid + + def get_item_train(self, idx): + frame_id = idx // (self.h * self.w) + pixel_id = idx % (self.h * self.w) + hid, wid = pixel_id // self.w, pixel_id % self.w + rgb = self.images[frame_id, hid, wid] + ray_o = self.rays_o[frame_id, hid, wid] + ray_d = self.rays_d[frame_id, hid, wid] + return {'rgb_gt': rgb, 'rays_o': ray_o, 'rays_d': ray_d} + + def get_item_valid(self, idx): + frame_id = idx // (self.h * self.w) + pixel_id = idx % (self.h * self.w) + hid, wid = pixel_id // self.w, pixel_id % self.w + ray_o = self.rays_o[frame_id, hid, wid] + ray_d = self.rays_d[frame_id, hid, wid] + return {'rays_o': ray_o, 'rays_d': ray_d} + + def get_patch_train(self, fid, hid, wid, patch_size=32): + min_hid, min_wid = int(min(max(hid - patch_size / 2, 0), self.h - patch_size)), int(min(max(wid - patch_size / 2, 0), self.w - patch_size)) + max_hid, max_wid = min_hid + patch_size, min_wid + patch_size + hids, wids = np.meshgrid(np.arange(min_hid, max_hid), np.arange(min_wid, max_wid)) + hids, wids = hids.reshape([-1]), wids.reshape([-1]) + rgbs = torch.from_numpy(np.stack([self.images[fid, hids[i], wids[i]] for i in range(hids.shape[0])], axis=0)).float() + rays_o = torch.from_numpy(np.stack([self.rays_o[fid, hids[i], wids[i]] for i in range(hids.shape[0])], axis=0)).float() + rays_d = torch.from_numpy(np.stack([self.rays_d[fid, hids[i], wids[i]] for i in range(hids.shape[0])], axis=0)).float() + return {'rgb_gt': rgbs, 'ray_o': rays_o, 'rays_d': rays_d} + + def set_mode(self, mode='train'): + modes = ['train', 'valid', 'train_style', 'valid_style'] + if mode not in modes: + print('Unknown mode: ', mode, ' Only supports: ', modes) + exit(-1) + self.mode = mode + + def __getitem__(self, item): + func_dict = {'train': self.get_item_train, 'valid': self.get_item_valid} + return func_dict[self.mode](item) + + def __len__(self): + if self.mode == 'train': + return self.frame_num * self.w * self.h + else: + return self.cps_valid.shape[0] * self.w * self.h + + +class StyleRaySampler(Dataset): + def __init__(self, data_path, style_path, factor=2., mode='train', valid_factor=3, dataset_type='llff', white_bkgd=False, half_res=True, no_ndc=False, pixel_alignment=False, spherify=False, TT_far=4.): + super().__init__() + + K = None + if dataset_type == 'llff': + images, poses, bds, render_poses, i_test = load_llff_data(data_path, factor, recenter=True, bd_factor=.75, spherify=spherify) + hwf = poses[0, :3, -1] + poses = poses[:, :3, :4] + print('Loaded llff', images.shape, render_poses.shape, hwf, data_path) + print('DEFINING BOUNDS') + if no_ndc: + near = np.ndarray.min(bds) * .9 + far = np.ndarray.max(bds) * 1. + else: + near = 0. + far = 1. + print('NEAR FAR', near, far) + else: + images = poses = hwf = K = near = far = None + print('Unknown dataset type', dataset_type, 'exiting') + exit(0) + + H, W, focal = hwf + H, W = int(H), int(W) + hwf = [H, W, focal] + + if K is None: + K = np.array([ + [focal, 0, 0.5*W], + [0, focal, 0.5*H], + [0, 0, 1] + ]) + + """Validation Rays""" + cps = np.concatenate([poses[:, :3, :4], np.zeros_like(poses[:, :1, :])], axis=1) + cps[:, 3, 3] = 1. 
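+        # interpolate additional validation camera poses (Slerp for rotations, linear for translations) between the training views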
+ cps_valid = view_synthesis(cps, valid_factor) + print('get rays of training and validation') + rays_o, rays_d = np.zeros([cps.shape[0], H, W, 3]), np.zeros([cps.shape[0], H, W, 3]) + for i in tqdm(range(cps.shape[0])): + tmp_rays_o, tmp_rays_d = get_rays_np(H, W, K, cps[i, :3, :4], pixel_alignment) + rays_o[i] = tmp_rays_o + rays_d[i] = tmp_rays_d + rays_o_valid, rays_d_valid = np.zeros([cps_valid.shape[0], H, W, 3]), np.zeros([cps_valid.shape[0], H, W, 3]) + for i in tqdm(range(cps_valid.shape[0])): + tmp_rays_o, tmp_rays_d = get_rays_np(H, W, K, cps_valid[i, :3, :4], pixel_alignment) + rays_o_valid[i] = tmp_rays_o + rays_d_valid[i] = tmp_rays_d + + if dataset_type == 'llff' and not no_ndc: + rays_o, rays_d = ndc_rays_np(H, W, K[0][0], 1., rays_o, rays_d) + rays_o_valid, rays_d_valid = ndc_rays_np(H, W, K[0][0], 1., rays_o_valid, rays_d_valid) + + """Style Data""" + if not os.path.exists(data_path + '/stylized_' + str(factor) + '/' + '/stylized_data.npz'): + print("Stylizing training data ...") + style_names, style_paths, style_images, style_features = style_data_prepare(style_path, images, size=512, chunk=8, sv_path=data_path + '/stylized_' + str(factor) + '/', decode_path='./pretrained/decoder.pth') + np.savez(data_path + '/stylized_' + str(factor) + '/' + '/stylized_data', style_names=style_names, style_paths=style_paths, style_images=style_images, style_features=style_features) + else: + print("Stylized data from " + data_path + '/stylized_' + str(factor) + '/' + '/stylized_data.npz') + stylized_data = np.load(data_path + '/stylized_' + str(factor) + '/' + '/stylized_data.npz', allow_pickle=True) + style_names, style_paths, style_images, style_features = stylized_data['style_names'], stylized_data['style_paths'], stylized_data['style_images'], stylized_data['style_features'] + print("Dataset Creation Done !") + + """Setting Attributes""" + self.set_mode(mode) + self.frame_num = cps.shape[0] + self.h, self.w, self.f = H, W, focal + self.hwf = hwf + self.K = K + self.cx, self.cy = K[0, 2], K[1, 2] + self.cps, self.intr, self.images = cps, K, images + self.cps_valid = cps_valid + self.rays_num = self.frame_num * self.h * self.w + self.near, self.far = near, far + + self.style_names = style_names + self.style_images = style_images + self.style_features = style_features + self.style_paths = style_paths + + self.style_num = self.style_images.shape[0] + self.rays_o, self.rays_d = rays_o, rays_d + self.rays_o_valid, self.rays_d_valid = rays_o_valid, rays_d_valid + + def get_item_train(self, idx): + frame_id = idx // (self.h * self.w) + pixel_id = idx % (self.h * self.w) + hid, wid = pixel_id // self.w, pixel_id % self.w + rgb = self.images[frame_id, hid, wid] + ray_o = self.rays_o[frame_id, hid, wid] + ray_d = self.rays_d[frame_id, hid, wid] + return {'rgb_gt': rgb, 'rays_o': ray_o, 'rays_d': ray_d} + + def get_item_train_style(self, idx): + style_id = idx // (self.frame_num * self.h * self.w) + frame_id = (idx % (self.frame_num * self.h * self.w)) // (self.h * self.w) + hid = (idx % (self.h * self.w)) // self.w + wid = idx % self.w + stylized_contents = np.load(self.style_paths[style_id] + '/%03d.npz' % frame_id)['stylized_image'] + rgb = stylized_contents[hid, wid] + rgb_origin = self.images[frame_id, hid, wid] + style_feature = self.style_features[style_id] + ray_o = self.rays_o[frame_id, hid, wid] + ray_d = self.rays_d[frame_id, hid, wid] + return {'rgb_gt': rgb, 'rays_o': ray_o, 'rays_d': ray_d, 'style_feature': style_feature, 'rgb_origin': rgb_origin, 'style_id': style_id, 'frame_id': 
frame_id} + + def get_item_valid(self, idx): + frame_id = idx // (self.h * self.w) + pixel_id = idx % (self.h * self.w) + hid, wid = pixel_id // self.w, pixel_id % self.w + ray_o = self.rays_o_valid[frame_id, hid, wid] + ray_d = self.rays_d_valid[frame_id, hid, wid] + return {'rays_o': ray_o, 'rays_d': ray_d} + + def get_item_valid_style(self, idx): + style_id = idx // (self.cps_valid.shape[0] * self.h * self.w) + frame_id = (idx % (self.cps_valid.shape[0] * self.h * self.w)) // (self.h * self.w) + hid = (idx % (self.h * self.w)) // self.w + wid = idx % self.w + ray_o = self.rays_o_valid[frame_id, hid, wid] + ray_d = self.rays_d_valid[frame_id, hid, wid] + style_image = torch.from_numpy(self.style_images[style_id]).float() + return {'rays_o': ray_o, 'rays_d': ray_d, 'style_image': style_image, 'style_id': style_id, 'frame_id': frame_id} + + def get_patch_train(self, fid, hid, wid, patch_size=32): + min_hid, min_wid = int(min(max(hid - patch_size / 2, 0), self.h - patch_size)), int(min(max(wid - patch_size / 2, 0), self.w - patch_size)) + max_hid, max_wid = min_hid + patch_size, min_wid + patch_size + hids, wids = np.meshgrid(np.arange(min_hid, max_hid), np.arange(min_wid, max_wid)) + hids, wids = hids.reshape([-1]), wids.reshape([-1]) + rgbs = torch.from_numpy(np.stack([self.images[fid, hids[i], wids[i]] for i in range(hids.shape[0])], axis=0)).float() + rays_o = torch.from_numpy(np.stack([self.rays_o[fid, hids[i], wids[i]] for i in range(hids.shape[0])], axis=0)).float() + rays_d = torch.from_numpy(np.stack([self.rays_d[fid, hids[i], wids[i]] for i in range(hids.shape[0])], axis=0)).float() + return {'rgb_gt': rgbs, 'ray_o': rays_o, 'rays_d': rays_d} + + def get_patch_train_style(self, style_id, fid, min_hid, min_wid, patch_size=32): + max_hid, max_wid = min_hid + patch_size, min_wid + patch_size + rgbs_origin = torch.from_numpy(self.images[fid, min_hid: max_hid, min_wid: max_wid]).float() + style_image = torch.from_numpy(self.style_images[style_id]).float() + rays_o = torch.from_numpy(self.rays_o[fid, min_hid: max_hid, min_wid: max_wid]).float() + rays_d = torch.from_numpy(self.rays_d[fid, min_hid: max_hid, min_wid: max_wid]).float() + style_id = torch.tensor(style_id).expand([patch_size**2]).long() + frame_id = torch.tensor(fid).expand([patch_size**2]).long() + content_image = torch.from_numpy(self.images[frame_id]).float() + return {'style_image': style_image, 'content_image': content_image, 'rays_o': rays_o, 'rays_d': rays_d, 'rgb_origin': rgbs_origin, 'style_id': style_id, 'frame_id': frame_id} + + def set_mode(self, mode='train'): + modes = ['train', 'valid', 'train_style', 'valid_style'] + if mode not in modes: + print('Unknown mode: ', mode, ' Only supports: ', modes) + exit(-1) + self.mode = mode + + def __getitem__(self, item): + func_dict = {'train': self.get_item_train, 'valid': self.get_item_valid, 'train_style': self.get_item_train_style, 'valid_style': self.get_item_valid_style} + return func_dict[self.mode](item) + + def __len__(self): + if self.mode == 'train': + return self.frame_num * self.w * self.h + elif self.mode == 'valid': + return self.cps_valid.shape[0] * self.w * self.h + elif self.mode == 'train_style': + return self.style_num * self.frame_num * self.w * self.h + else: + return self.style_num * self.cps_valid.shape[0] * self.w * self.h + + +def get_rays_from_id(hid, wid, focal, c2w, cx=None, cy=None): + dir = np.stack([(wid - cx) / focal, - (hid - cy) / focal, -np.ones_like(wid)], axis=-1) + ray_d = np.einsum('wc,c->w', c2w[:3, :3], dir) + ray_d = ray_d / 
np.linalg.norm(ray_d) + ray_o = c2w[:3, -1] + ray_o, ray_d = np.array(ray_o, dtype=np.float32), np.array(ray_d, dtype=np.float32) + return ray_o, ray_d + + +class StyleRaySampler_gen(Dataset): + def __init__(self, data_path, style_path, gen_path, factor=2., mode='train', valid_factor=0.05, dataset_type='llff', white_bkgd=False, half_res=True, no_ndc=False, pixel_alignment=False, spherify=False, decode_path='./pretrained/decoder.pth', store_rays=True, TT_far=4., collect_stylized_images=True): + super().__init__() + + K = None + if dataset_type == 'llff': + images, poses, bds, render_poses, i_test = load_llff_data(data_path, factor, recenter=True, bd_factor=.75, spherify=spherify) + hwf = poses[0, :3, -1] + poses = poses[:, :3, :4] + print('Loaded llff', images.shape, render_poses.shape, hwf, data_path) + print('DEFINING BOUNDS') + if no_ndc: + near = np.ndarray.min(bds) * .9 + far = np.ndarray.max(bds) * 1. + else: + near = 0. + far = 1. + print('NEAR FAR', near, far) + else: + poses = hwf = K = near = far = None + print('Unknown dataset type', dataset_type, 'exiting') + exit(0) + + H, W, focal = hwf + H, W = int(H), int(W) + hwf = [H, W, focal] + + if K is None: + K = np.array([ + [focal, 0, 0.5*W], + [0, focal, 0.5*H], + [0, 0, 1] + ]) + + self.gen_path = gen_path + self.image_paths = sorted(list(Path(self.gen_path).glob('rgb_*.png'))) + self.geo_paths = sorted(list(Path(self.gen_path).glob('geometry_*.npz'))) + data = np.load(str(self.geo_paths[0])) + self.hwf = data['hwf'] + frame_num = len(self.image_paths) + images = np.zeros([frame_num, H, W, 3], np.float32) + cps = np.zeros([frame_num, 4, 4], np.float32) + for i in range(frame_num): + images[i] = np.array(Image.open(str(self.image_paths[i])).convert('RGB'), dtype=np.float32) / 255. + cps[i] = np.load(str(self.geo_paths[i]))['cps'] + + """Validation Rays""" + cps_valid = view_synthesis(cps, valid_factor) + if store_rays: + print('get rays of training and validation') + rays_o, rays_d = np.zeros([cps.shape[0], H, W, 3]), np.zeros([cps.shape[0], H, W, 3]) + for i in tqdm(range(cps.shape[0])): + tmp_rays_o, tmp_rays_d = get_rays_np(H, W, K, cps[i, :3, :4], pixel_alignment) + rays_o[i] = tmp_rays_o + rays_d[i] = tmp_rays_d + rays_o_valid, rays_d_valid = np.zeros([cps_valid.shape[0], H, W, 3]), np.zeros([cps_valid.shape[0], H, W, 3]) + for i in tqdm(range(cps_valid.shape[0])): + tmp_rays_o, tmp_rays_d = get_rays_np(H, W, K, cps_valid[i, :3, :4], pixel_alignment) + rays_o_valid[i] = tmp_rays_o + rays_d_valid[i] = tmp_rays_d + + if dataset_type == 'llff' and not no_ndc: + rays_o, rays_d = ndc_rays_np(H, W, K[0][0], 1., rays_o, rays_d) + rays_o_valid, rays_d_valid = ndc_rays_np(H, W, K[0][0], 1., rays_o_valid, rays_d_valid) + else: + rays_o, rays_d, rays_o_valid, rays_d_valid = None, None, None, None + + """Style Data""" + if not os.path.exists(data_path + '/stylized_gen_' + str(factor) + '/' + '/stylized_data.npz'): + print("Stylizing training data ...") + style_names, style_paths, style_images, style_features = style_data_prepare(style_path, images, size=512, chunk=8, sv_path=data_path + '/stylized_gen_' + str(factor) + '/', decode_path=decode_path) + np.savez(data_path + '/stylized_gen_' + str(factor) + '/' + '/stylized_data', style_names=style_names, style_paths=style_paths, style_images=style_images, style_features=style_features) + else: + print("Stylized data from " + data_path + '/stylized_gen_' + str(factor) + '/' + '/stylized_data.npz') + stylized_data = np.load(data_path + '/stylized_gen_' + str(factor) + '/' + 
'/stylized_data.npz', allow_pickle=True) + style_names, style_paths, style_images, style_features = stylized_data['style_names'][()], stylized_data['style_paths'], stylized_data['style_images'], stylized_data['style_features'] + + """Setting Attributes""" + self.set_mode(mode) + self.frame_num = cps.shape[0] + self.h, self.w, self.f = H, W, focal + self.hwf = hwf + self.K = K + self.cx, self.cy = K[0, 2], K[1, 2] + self.cps, self.intr, self.images = cps, K, images + self.cps_valid = cps_valid + self.rays_num = self.frame_num * self.h * self.w + self.near, self.far = near, far + + self.style_names = style_names + self.style_names_t = {y: x for x, y in self.style_names.items()} + self.style_images = style_images + self.style_paths = style_paths + self.style_features = style_features + self.style_num = self.style_images.shape[0] + + self.store_rays = store_rays + self.is_ndc = (dataset_type == 'llff' and not no_ndc) + self.rays_o, self.rays_d = rays_o, rays_d + self.rays_o_valid, self.rays_d_valid = rays_o_valid, rays_d_valid + self.stylized_images_uint8 = None + if collect_stylized_images: + self.collect_all_stylized_images() + print("Dataset Creation Done !") + + def collect_all_stylized_images(self): + print(self.style_names.keys()) + if self.stylized_images_uint8 is not None: + return + self.stylized_images_uint8 = np.zeros([self.style_num, self.frame_num, self.h, self.w, 3], dtype=np.uint8) + for i in range(self.style_num): + print('Collecting style: ' + self.style_names_t[i]) + for j in tqdm(range(self.frame_num)): + img = np.array(Image.open(self.style_paths[i] + '/%03d.png' % j).convert('RGB'), np.uint8) + self.stylized_images_uint8[i, j] = img + + def get_item_train(self, idx): + frame_id = idx // (self.h * self.w) + pixel_id = idx % (self.h * self.w) + hid, wid = pixel_id // self.w, pixel_id % self.w + rgb = self.images[frame_id, hid, wid] + if self.store_rays: + ray_o = self.rays_o[frame_id, hid, wid] + ray_d = self.rays_d[frame_id, hid, wid] + else: + ray_o, ray_d = get_rays_from_id(hid, wid, self.f, self.cps[frame_id], self.cx, self.cy) + if self.is_ndc: + ray_o, ray_d = ndc_rays_np(self.h, self.w, self.f, 1., ray_o[np.newaxis], ray_d[np.newaxis]) + ray_o, ray_d = ray_o[0], ray_d[0] + return {'rgb_gt': rgb, 'rays_o': ray_o, 'rays_d': ray_d} + + def get_item_train_style(self, idx): + style_id = idx // (self.frame_num * self.h * self.w) + frame_id = (idx % (self.frame_num * self.h * self.w)) // (self.h * self.w) + hid = (idx % (self.h * self.w)) // self.w + wid = idx % self.w + if self.stylized_images_uint8 is None: + stylized_contents = np.load(self.style_paths[style_id] + '/%03d.npz' % frame_id)['stylized_image'] + rgb = stylized_contents[hid, wid] + else: + rgb = np.float32(self.stylized_images_uint8[style_id, frame_id, hid, wid]) / 255 + rgb_origin = self.images[frame_id, hid, wid] + style_feature = self.style_features[style_id] + if self.store_rays: + ray_o = self.rays_o[frame_id, hid, wid] + ray_d = self.rays_d[frame_id, hid, wid] + else: + ray_o, ray_d = get_rays_from_id(hid, wid, self.f, self.cps[frame_id], self.cx, self.cy) + if self.is_ndc: + ray_o, ray_d = ndc_rays_np(self.h, self.w, self.f, 1., ray_o[np.newaxis], ray_d[np.newaxis]) + ray_o, ray_d = ray_o[0], ray_d[0] + return {'rgb_gt': rgb, 'rays_o': ray_o, 'rays_d': ray_d, 'style_feature': style_feature, 'rgb_origin': rgb_origin, 'style_id': style_id, 'frame_id': frame_id} + + def get_item_valid(self, idx): + frame_id = idx // (self.h * self.w) + pixel_id = idx % (self.h * self.w) + hid, wid = pixel_id // self.w, 
pixel_id % self.w + if self.store_rays: + ray_o = self.rays_o_valid[frame_id, hid, wid] + ray_d = self.rays_d_valid[frame_id, hid, wid] + else: + ray_o, ray_d = get_rays_from_id(hid, wid, self.f, self.cps_valid[frame_id], self.cx, self.cy) + if self.is_ndc: + ray_o, ray_d = ndc_rays_np(self.h, self.w, self.f, 1., ray_o[np.newaxis], ray_d[np.newaxis]) + ray_o, ray_d = ray_o[0], ray_d[0] + return {'rays_o': ray_o, 'rays_d': ray_d} + + def get_item_valid_style(self, idx): + style_id = idx // (self.cps_valid.shape[0] * self.h * self.w) + frame_id = (idx % (self.cps_valid.shape[0] * self.h * self.w)) // (self.h * self.w) + hid = (idx % (self.h * self.w)) // self.w + wid = idx % self.w + style_feature = self.style_features[style_id] + if self.store_rays: + ray_o = self.rays_o_valid[frame_id, hid, wid] + ray_d = self.rays_d_valid[frame_id, hid, wid] + else: + ray_o, ray_d = get_rays_from_id(hid, wid, self.f, self.cps_valid[frame_id], self.cx, self.cy) + if self.is_ndc: + ray_o, ray_d = ndc_rays_np(self.h, self.w, self.f, 1., ray_o[np.newaxis], ray_d[np.newaxis]) + ray_o, ray_d = ray_o[0], ray_d[0] + return {'rays_o': ray_o, 'rays_d': ray_d, 'style_feature': style_feature, 'style_id': style_id, 'frame_id': frame_id} + + def get_patch_train(self, fid, hid, wid, patch_size=32): + min_hid, min_wid = int(min(max(hid - patch_size / 2, 0), self.h - patch_size)), int(min(max(wid - patch_size / 2, 0), self.w - patch_size)) + max_hid, max_wid = min_hid + patch_size, min_wid + patch_size + hids, wids = np.meshgrid(np.arange(min_hid, max_hid), np.arange(min_wid, max_wid)) + hids, wids = hids.reshape([-1]), wids.reshape([-1]) + rgbs = torch.from_numpy(np.stack([self.images[fid, hids[i], wids[i]] for i in range(hids.shape[0])], axis=0)).float() + if self.store_rays: + rays_o = np.stack([self.rays_o[fid, hids[i], wids[i]] for i in range(hids.shape[0])], axis=0) + rays_d = np.stack([self.rays_d[fid, hids[i], wids[i]] for i in range(hids.shape[0])], axis=0) + else: + rays_od = np.stack([get_rays_from_id(hids[i], wids[i], self.f, self.cps[fid], self.cx, self.cy) for i in range(hids.shape[0])]) + rays_o, rays_d = rays_od[:, 0], rays_od[:, 1] + if self.is_ndc: + rays_o, rays_d = ndc_rays_np(self.h, self.w, self.f, 1., rays_o, rays_d) + rays_o = torch.from_numpy(rays_o).float() + rays_d = torch.from_numpy(rays_d).float() + return {'rgb_gt': rgbs, 'ray_o': rays_o, 'rays_d': rays_d} + + def get_patch_train_style(self, style_id, fid, hid, wid, patch_size=32): + min_hid, min_wid = int(min(max(hid - patch_size / 2, 0), self.h - patch_size)), int(min(max(wid - patch_size / 2, 0), self.w - patch_size)) + max_hid, max_wid = min_hid + patch_size, min_wid + patch_size + hids, wids = np.meshgrid(np.arange(min_hid, max_hid), np.arange(min_wid, max_wid)) + hids, wids = hids.T.reshape([-1]), wids.T.reshape([-1]) # .T to keep the orientation of the image + if self.stylized_images_uint8 is None: + stylized_contents = np.load(self.style_paths[style_id] + '/%03d.npz' % fid)['stylized_image'] + rgbs = torch.from_numpy(np.stack([stylized_contents[hids[i], wids[i]] for i in range(patch_size ** 2)], axis=0)).float() + else: + rgbs = torch.from_numpy(np.stack([np.float32(self.stylized_images_uint8[style_id, fid, hids[i], wids[i]]) / 255 for i in range(patch_size ** 2)], axis=0)).float() + rgbs_origin = torch.from_numpy(np.stack([self.images[fid, hids[i], wids[i]] for i in range(hids.shape[0])], axis=0)).float() + if self.store_rays: + rays_o = np.stack([self.rays_o[fid, hids[i], wids[i]] for i in range(hids.shape[0])], axis=0) + rays_d = 
np.stack([self.rays_d[fid, hids[i], wids[i]] for i in range(hids.shape[0])], axis=0) + else: + rays_od = np.stack([get_rays_from_id(hids[i], wids[i], self.f, self.cps[fid], self.cx, self.cy) for i in range(hids.shape[0])]) + rays_o, rays_d = rays_od[:, 0], rays_od[:, 1] + if self.is_ndc: + rays_o, rays_d = ndc_rays_np(self.h, self.w, self.f, 1., rays_o, rays_d) + rays_o = torch.from_numpy(rays_o).float() + rays_d = torch.from_numpy(rays_d).float() + style_image = torch.from_numpy(self.style_images[style_id:style_id+1]).float() + style_id = torch.tensor(style_id).expand([patch_size**2]).long() + frame_id = torch.tensor(fid).expand([patch_size**2]).long() + return {'style_image': style_image, 'rgb_gt': rgbs, 'rays_o': rays_o, 'rays_d': rays_d, 'rgb_origin': rgbs_origin, 'style_id': style_id, 'frame_id': frame_id} + + def set_mode(self, mode='train'): + modes = ['train', 'valid', 'train_style', 'valid_style'] + if mode not in modes: + print('Unknown mode: ', mode, ' Only supports: ', modes) + exit(-1) + self.mode = mode + + def __getitem__(self, item): + func_dict = {'train': self.get_item_train, 'valid': self.get_item_valid, 'train_style': self.get_item_train_style, 'valid_style': self.get_item_valid_style} + return func_dict[self.mode](item) + + def __len__(self): + if self.mode == 'train': + return self.frame_num * self.w * self.h + elif self.mode == 'valid': + return self.cps_valid.shape[0] * self.w * self.h + elif self.mode == 'train_style': + return self.style_num * self.frame_num * self.w * self.h + else: + return self.style_num * self.cps_valid.shape[0] * self.w * self.h + + +class LightDataLoader: + def __init__(self, dataset, batch_size, shuffle=True, **kwargs): + self.dataset = dataset + self.batch_size = batch_size + self.shuffle = shuffle + self.data_num = len(dataset) + self.data_idx = np.arange(self.data_num) + if self.shuffle: + np.random.shuffle(self.data_idx) + self.start = 0 + data0 = self.dataset.__getitem__(0) + self.keys = data0.keys() + + def get_batch(self): + if self.batch_size >= self.data_num: + idx = np.random.choice(self.data_idx, self.batch_size, replace=True) + # Initialize + batch_data = {} + for key in self.keys: + batch_data[key] = [] + # Append data + for i in range(self.batch_size): + data = self.dataset.__getitem__(idx[i]) + for key in data.keys(): + batch_data[key].append(data[key]) + self.start += self.batch_size + # To tensor + for key in self.keys: + batch_data[key] = torch.from_numpy(np.stack(batch_data[key])) + return batch_data + + # Check if shuffle again + if self.start + self.batch_size >= self.data_num: + self.start = 0 + np.random.shuffle(self.data_idx) + # Initialize + batch_data = {} + for key in self.keys: + batch_data[key] = [] + # Append data + for i in range(self.batch_size): + data = self.dataset.__getitem__(self.data_idx[self.start + i]) + for key in data.keys(): + batch_data[key].append(data[key]) + self.start += self.batch_size + # To tensor + for key in self.keys: + batch_data[key] = torch.from_numpy(np.stack(batch_data[key])).float() + return batch_data + diff --git a/contrib/StylizedNeRF/load_llff.py b/contrib/StylizedNeRF/load_llff.py new file mode 100644 index 00000000..f533f2ac --- /dev/null +++ b/contrib/StylizedNeRF/load_llff.py @@ -0,0 +1,313 @@ +import numpy as np +import os, imageio + + +########## Slightly modified version of LLFF data loading code +########## see https://github.com/Fyusion/LLFF for original + +def _minify(basedir, factors=[], resolutions=[]): + needtoload = False + for r in factors: + imgdir = 
os.path.join(basedir, 'images_{}'.format(r)) + if not os.path.exists(imgdir): + needtoload = True + for r in resolutions: + imgdir = os.path.join(basedir, 'images_{}x{}'.format(r[1], r[0])) + if not os.path.exists(imgdir): + needtoload = True + if not needtoload: + return + + from shutil import copy + from subprocess import check_output + + imgdir = os.path.join(basedir, 'images') + imgs = [os.path.join(imgdir, f) for f in sorted(os.listdir(imgdir))] + imgs = [f for f in imgs if any([f.endswith(ex) for ex in ['JPG', 'jpg', 'png', 'jpeg', 'PNG']])] + imgdir_orig = imgdir + + wd = os.getcwd() + + for r in factors + resolutions: + if isinstance(r, int) or isinstance(r, float): + name = 'images_{}'.format(r) + resizearg = '{}%'.format(100./r) + else: + name = 'images_{}x{}'.format(r[1], r[0]) + resizearg = '{}x{}'.format(r[1], r[0]) + imgdir = os.path.join(basedir, name) + if os.path.exists(imgdir): + continue + + print('Minifying', r, basedir) + + os.makedirs(imgdir) + check_output('cp {}/* {}'.format(imgdir_orig, imgdir), shell=True) + + ext = imgs[0].split('.')[-1] + args = ' '.join(['mogrify', '-resize', resizearg, '-format', 'png', '*.{}'.format(ext)]) + print(args) + os.chdir(imgdir) + check_output(args, shell=True) + os.chdir(wd) + + if ext != 'png': + check_output('rm {}/*.{}'.format(imgdir, ext), shell=True) + print('Removed duplicates') + print('Done') + + +def _load_data(basedir, factor=None, width=None, height=None, load_imgs=True): + + poses_arr = np.load(os.path.join(basedir, 'poses_bounds.npy')) + poses = poses_arr[:, :-2].reshape([-1, 3, 5]).transpose([1,2,0]) + bds = poses_arr[:, -2:].transpose([1,0]) + + img0 = [os.path.join(basedir, 'images', f) for f in sorted(os.listdir(os.path.join(basedir, 'images'))) \ + if f.endswith('JPG') or f.endswith('jpg') or f.endswith('png')][0] + sh = imageio.imread(img0).shape + + sfx = '' + + if factor is not None: + sfx = '_{}'.format(factor) + _minify(basedir, factors=[factor]) + factor = factor + elif height is not None: + factor = sh[0] / float(height) + width = int(sh[1] / factor) + _minify(basedir, resolutions=[[height, width]]) + sfx = '_{}x{}'.format(width, height) + elif width is not None: + factor = sh[1] / float(width) + height = int(sh[0] / factor) + _minify(basedir, resolutions=[[height, width]]) + sfx = '_{}x{}'.format(width, height) + else: + factor = 1 + + imgdir = os.path.join(basedir, 'images' + sfx) + if not os.path.exists(imgdir): + print( imgdir, 'does not exist, returning' ) + return + + imgfiles = [os.path.join(imgdir, f) for f in sorted(os.listdir(imgdir)) if f.endswith('JPG') or f.endswith('jpg') or f.endswith('png')] + if poses.shape[-1] != len(imgfiles): + print( 'Mismatch between imgs {} and poses {} !!!!'.format(len(imgfiles), poses.shape[-1]) ) + return + + sh = imageio.imread(imgfiles[0]).shape + poses[:2, 4, :] = np.array(sh[:2]).reshape([2, 1]) + poses[2, 4, :] = poses[2, 4, :] * 1./factor + + if not load_imgs: + return poses, bds + + def imread(f): + if f.endswith('png'): + return imageio.imread(f, ignoregamma=True) + else: + return imageio.imread(f) + + imgs = [imread(f)[...,:3]/255. 
for f in imgfiles] + imgs = np.stack(imgs, -1) + + print('Loaded image data', imgs.shape, poses[:,-1,0]) + return poses, bds, imgs + + + + + + +def normalize(x): + return x / np.linalg.norm(x) + +def viewmatrix(z, up, pos): + vec2 = normalize(z) + vec1_avg = up + vec0 = normalize(np.cross(vec1_avg, vec2)) + vec1 = normalize(np.cross(vec2, vec0)) + m = np.stack([vec0, vec1, vec2, pos], 1) + return m + +def ptstocam(pts, c2w): + tt = np.matmul(c2w[:3,:3].T, (pts-c2w[:3,3])[...,np.newaxis])[...,0] + return tt + +def poses_avg(poses): + + hwf = poses[0, :3, -1:] + + center = poses[:, :3, 3].mean(0) + vec2 = normalize(poses[:, :3, 2].sum(0)) + up = poses[:, :3, 1].sum(0) + c2w = np.concatenate([viewmatrix(vec2, up, center), hwf], 1) + + return c2w + + +def render_path_spiral(c2w, up, rads, focal, zdelta, zrate, rots, N): + render_poses = [] + rads = np.array(list(rads) + [1.]) + hwf = c2w[:,4:5] + + for theta in np.linspace(0., 2. * np.pi * rots, N+1)[:-1]: + c = np.dot(c2w[:3,:4], np.array([np.cos(theta), -np.sin(theta), -np.sin(theta*zrate), 1.]) * rads) + z = normalize(c - np.dot(c2w[:3,:4], np.array([0,0,-focal, 1.]))) + render_poses.append(np.concatenate([viewmatrix(z, up, c), hwf], 1)) + return render_poses + + + +def recenter_poses(poses): + + poses_ = poses+0 + bottom = np.reshape([0,0,0,1.], [1,4]) + c2w = poses_avg(poses) + c2w = np.concatenate([c2w[:3,:4], bottom], -2) + bottom = np.tile(np.reshape(bottom, [1,1,4]), [poses.shape[0],1,1]) + poses = np.concatenate([poses[:,:3,:4], bottom], -2) + + poses = np.linalg.inv(c2w) @ poses + poses_[:,:3,:4] = poses[:,:3,:4] + poses = poses_ + return poses + + +##################### + + +def spherify_poses(poses, bds): + + p34_to_44 = lambda p : np.concatenate([p, np.tile(np.reshape(np.eye(4)[-1,:], [1,1,4]), [p.shape[0], 1,1])], 1) + + rays_d = poses[:,:3,2:3] + rays_o = poses[:,:3,3:4] + + def min_line_dist(rays_o, rays_d): + A_i = np.eye(3) - rays_d * np.transpose(rays_d, [0,2,1]) + b_i = -A_i @ rays_o + pt_mindist = np.squeeze(-np.linalg.inv((np.transpose(A_i, [0,2,1]) @ A_i).mean(0)) @ (b_i).mean(0)) + return pt_mindist + + pt_mindist = min_line_dist(rays_o, rays_d) + + center = pt_mindist + up = (poses[:,:3,3] - center).mean(0) + + vec0 = normalize(up) + vec1 = normalize(np.cross([.1,.2,.3], vec0)) + vec2 = normalize(np.cross(vec0, vec1)) + pos = center + c2w = np.stack([vec1, vec2, vec0, pos], 1) + + poses_reset = np.linalg.inv(p34_to_44(c2w[None])) @ p34_to_44(poses[:,:3,:4]) + + rad = np.sqrt(np.mean(np.sum(np.square(poses_reset[:,:3,3]), -1))) + + sc = 1./rad + poses_reset[:,:3,3] *= sc + bds *= sc + rad *= sc + + centroid = np.mean(poses_reset[:,:3,3], 0) + zh = centroid[2] + radcircle = np.sqrt(rad**2-zh**2) + new_poses = [] + + for th in np.linspace(0.,2.*np.pi, 120): + + camorigin = np.array([radcircle * np.cos(th), radcircle * np.sin(th), zh]) + up = np.array([0,0,-1.]) + + vec2 = normalize(camorigin) + vec0 = normalize(np.cross(vec2, up)) + vec1 = normalize(np.cross(vec2, vec0)) + pos = camorigin + p = np.stack([vec0, vec1, vec2, pos], 1) + + new_poses.append(p) + + new_poses = np.stack(new_poses, 0) + + new_poses = np.concatenate([new_poses, np.broadcast_to(poses[0,:3,-1:], new_poses[:,:3,-1:].shape)], -1) + poses_reset = np.concatenate([poses_reset[:,:3,:4], np.broadcast_to(poses[0,:3,-1:], poses_reset[:,:3,-1:].shape)], -1) + + return poses_reset, new_poses, bds + + +def load_llff_data(basedir, factor=8, recenter=True, bd_factor=.75, spherify=False, path_zflat=False): + + poses, bds, imgs = _load_data(basedir, factor=factor) 
# factor=8 downsamples original imgs by 8x
+    print('Loaded', basedir, bds.min(), bds.max())
+
+    # Correct rotation matrix ordering and move variable dim to axis 0
+    poses = np.concatenate([poses[:, 1:2, :], -poses[:, 0:1, :], poses[:, 2:, :]], 1)
+    poses = np.moveaxis(poses, -1, 0).astype(np.float32)
+    imgs = np.moveaxis(imgs, -1, 0).astype(np.float32)
+    images = imgs
+    bds = np.moveaxis(bds, -1, 0).astype(np.float32)
+
+    # Rescale if bd_factor is provided
+    sc = 1. if bd_factor is None else 1./(bds.min() * bd_factor)
+    poses[:,:3,3] *= sc
+    bds *= sc
+
+    if recenter:
+        poses = recenter_poses(poses)
+
+    if spherify:
+        poses, render_poses, bds = spherify_poses(poses, bds)
+
+    else:
+
+        c2w = poses_avg(poses)
+        print('recentered', c2w.shape)
+        print(c2w[:3,:4])
+
+        ## Get spiral
+        # Get average pose
+        up = normalize(poses[:, :3, 1].sum(0))
+
+        # Find a reasonable "focus depth" for this dataset
+        close_depth, inf_depth = bds.min()*.9, bds.max()*5.
+        dt = .75
+        mean_dz = 1./((1.-dt)/close_depth + dt/inf_depth)
+        focal = mean_dz
+
+        # Get radii for spiral path
+        shrink_factor = .8
+        zdelta = close_depth * .2
+        tt = poses[:, :3, 3] # ptstocam(poses[:3,3,:].T, c2w).T
+        rads = np.percentile(np.abs(tt), 90, 0)
+        c2w_path = c2w
+        N_views = 120
+        N_rots = 2
+        if path_zflat:
+            zloc = -close_depth * .1
+            c2w_path[:3, 3] = c2w_path[:3, 3] + zloc * c2w_path[:3, 2]
+            rads[2] = 0.
+            N_rots = 1
+            N_views = N_views // 2  # keep N_views an integer for np.linspace
+
+        # Generate poses for spiral path
+        render_poses = render_path_spiral(c2w_path, up, rads, focal, zdelta, zrate=.5, rots=N_rots, N=N_views)
+
+    render_poses = np.array(render_poses).astype(np.float32)
+
+    c2w = poses_avg(poses)
+    print('Data:')
+    print(poses.shape, images.shape, bds.shape)
+
+    dists = np.sum(np.square(c2w[:3,3] - poses[:,:3,3]), -1)
+    i_test = np.argmin(dists)
+    print('HOLDOUT view is', i_test)
+
+    images = images.astype(np.float32)
+    poses = poses.astype(np.float32)
+
+    return images, poses, bds, render_poses, i_test
+
+
+
diff --git a/contrib/StylizedNeRF/models_jt.py b/contrib/StylizedNeRF/models_jt.py
new file mode 100644
index 00000000..b7dda911
--- /dev/null
+++ b/contrib/StylizedNeRF/models_jt.py
@@ -0,0 +1,749 @@
+import torch
+import numpy as np
+
+import torch.nn as torch_nn
+from torch.nn import Parameter
+import torch.nn.functional as F
+from collections import OrderedDict
+from scipy.spatial.transform import Rotation as R
+
+import platform
+
+# torch.set_default_tensor_type('torch.cuda.FloatTensor')
+
+import jittor as jt
+from jittor import Module
+from jittor import nn
+
+
+class Sine(Module):
+    def __init__(self, w0=30.):
+        super().__init__()
+        self.w0 = w0
+
+    def execute(self, input):
+        return jt.sin(self.w0 * input)
+
+
+act_dict = {'relu': nn.ReLU, 'sigmoid': nn.Sigmoid, 'elu': nn.ELU, 'tanh': nn.Tanh, 'sine': Sine}
+
+
+class Embedder(Module):
+    def __init__(self, input_dim, max_freq_log2, N_freqs,
+                 log_sampling=True, include_input=True,
+                 periodic_fns=(jt.sin, jt.cos)):
+        '''
+        :param input_dim: dimension of input to be embedded
+        :param max_freq_log2: log2 of max freq; min freq is 1 by default
+        :param N_freqs: number of frequency bands
+        :param log_sampling: if True, frequency bands are linearly sampled in log-space
+        :param include_input: if True, raw input is included in the embedding
+        :param periodic_fns: periodic functions used to embed input
+        '''
+        super().__init__()
+
+        self.input_dim = input_dim
+        self.include_input = include_input
+        self.periodic_fns = periodic_fns
+
+        self.out_dim = 0
+        if self.include_input:
+            self.out_dim += self.input_dim
+
+        self.out_dim += self.input_dim * N_freqs * len(self.periodic_fns)
+
+        if log_sampling:
+            self.freq_bands = 2. ** np.linspace(0., max_freq_log2, N_freqs)
+        else:
+            self.freq_bands = np.linspace(2. ** 0., 2. ** max_freq_log2, N_freqs)
+
+        self.freq_bands = self.freq_bands.tolist()
+
+    def execute(self, x):
+        '''
+        :param x: tensor of shape [..., self.input_dim]
+        :return: tensor of shape [..., self.out_dim]
+        '''
+        assert (x.shape[-1] == self.input_dim)
+
+        out = []
+        if self.include_input:
+            out.append(x)
+
+        for i in range(len(self.freq_bands)):
+            freq = self.freq_bands[i]
+            for p_fn in self.periodic_fns:
+                out.append(p_fn(x * freq))
+        out = jt.concat(out, dim=-1)
+
+        assert (out.shape[-1] == self.out_dim)
+        return out
+
+
+class MLP(Module):
+    def __init__(self, D=8, W=256, input_ch=3, input_ch_viewdirs=3, skips=[4], act_func=nn.ReLU, use_viewdir=True,
+                 sigma_mul=0.):
+        '''
+        :param D: network depth
+        :param W: network width
+        :param input_ch: input channels for encodings of (x, y, z)
+        :param input_ch_viewdirs: input channels for encodings of view directions
+        :param skips: skip connection in network
+        '''
+        super().__init__()
+        self.input_ch = input_ch
+        self.input_ch_viewdirs = input_ch_viewdirs
+        self.skips = skips
+        self.use_viewdir = use_viewdir
+        self.sigma_mul = sigma_mul
+
+        # base
+        self.base_layers = []
+        dim = self.input_ch
+        for i in range(D):
+            self.base_layers.append(nn.Linear(in_features=dim, out_features=W, bias=True))
+            dim = W
+            if i in self.skips and i != (D - 1):  # skip connection after i^th layer
+                dim += input_ch
+        self.base_layers = nn.ModuleList(self.base_layers)
+        self.act = act_func()
+
+        # sigma
+        sigma_layer = nn.Linear(dim, 1)  # sigma must be positive
+        self.sigma_layer = sigma_layer
+
+        # remap
+        base_remap_layer = nn.Linear(dim, 256)
+        self.base_remap_layer = base_remap_layer
+
+        # rgb
+        self.rgb_layers = []
+        dim = 256 + self.input_ch_viewdirs if self.use_viewdir else 256
+        self.rgb_layers.append(nn.Linear(dim, W // 2))
+        self.rgb_layers.append(nn.Linear(W // 2, 3))
+        self.rgb_layers = nn.ModuleList(self.rgb_layers)
+
+        self.layers = [*self.base_layers, self.sigma_layer, self.base_remap_layer, *self.rgb_layers]
+
+    def execute(self, pts, dirs):
+        '''
+        :param pts: [..., input_ch] encoded point coordinates
+        :param dirs: [..., input_ch_viewdirs] encoded view directions
+        :return: OrderedDict with 'rgb' [..., 3] and 'sigma' [...]
+        '''
+        base = self.base_layers[0](pts)
+        for i in range(len(self.base_layers) - 1):
+            if i in self.skips:
+                base = jt.concat((pts, base), dim=-1)
+            base = self.act(self.base_layers[i + 1](base))
+
+        sigma = self.sigma_layer(base)
+        sigma = sigma + nn.relu(sigma) * self.sigma_mul
+
+        base_remap = self.act(self.base_remap_layer(base))
+        if self.use_viewdir:
+            rgb_fea = self.act(self.rgb_layers[0](jt.concat((base_remap, dirs), dim=-1)))
+        else:
+            rgb_fea = self.act(self.rgb_layers[0](base_remap))
+        rgb = jt.sigmoid(self.rgb_layers[1](rgb_fea))
+
+        ret = OrderedDict([('rgb', rgb),
+                           ('sigma', sigma.squeeze(-1))])
+        return ret
+
+    def get_grads(self, only_last=False):
+        if only_last:
+            layers = [self.layers[-1], self.layers[-4]]
+        else:
+            layers = self.layers
+        grads = None
+        for layer in layers:
+            grad = layer.get_grads()
+            grads = grad if grads is None else np.concatenate([grads, grad], axis=-1)
+        return grads
+
+
+class MLP_style(Module):
+    def __init__(self, D=8, W=256, input_ch=3, input_ch_viewdirs=3, skips=[4], act_func=nn.ReLU, use_viewdir=True,
+                 sigma_mul=0., enable_style=False):
+        '''
+        :param D: network depth
+        :param W: network width
+        :param input_ch: input channels for encodings of (x, y, z)
+        :param input_ch_viewdirs: input
channels for encodings of view directions + :param skips: skip connection in network + ''' + super().__init__() + self.input_ch = input_ch + self.input_ch_viewdirs = input_ch_viewdirs + self.skips = skips + self.use_viewdir = use_viewdir + self.sigma_mul = sigma_mul + self.enable_style = enable_style + self.act = act_func() + + # base + self.base_layers = [] + dim = self.input_ch + for i in range(D): + self.base_layers.append(nn.Linear(in_features=dim, out_features=W)) + dim = W + if i in self.skips and i != (D - 1): # skip connection after i^th layer + dim += input_ch + self.base_layers = nn.ModuleList(self.base_layers) + + # sigma + sigma_layer = nn.Linear(dim, 1) # sigma must be positive + self.sigma_layer = sigma_layer + + # remap + base_remap_layer = nn.Linear(dim, 256) + self.base_remap_layer = base_remap_layer + + # rgb + self.rgb_layers = [] + dim = 256 + self.input_ch_viewdirs if self.use_viewdir else 256 + self.rgb_layers.append(nn.Linear(dim, W // 2)) + self.rgb_layers.append(nn.Linear(W // 2, 3)) + self.rgb_layers = nn.ModuleList(self.rgb_layers) + + self.layers = [*self.base_layers, self.sigma_layer, self.base_remap_layer, *self.rgb_layers] + + def execute(self, **kwargs): + pts, dirs = kwargs['pts'], kwargs['dirs'] + base = self.act(self.base_layers[0](pts)) + for i in range(len(self.base_layers) - 1): + if i in self.skips: + base = jt.concat((pts, base), dim=-1) + base = self.act(self.base_layers[i + 1](base)) + + sigma = self.sigma_layer(base) + sigma = sigma + jt.nn.relu(sigma) * self.sigma_mul + + base_remap = self.act(self.base_remap_layer(base)) + if self.use_viewdir: + rgb_fea = self.act(self.rgb_layers[0](jt.concat((base_remap, dirs), dim=-1))) + else: + rgb_fea = self.act(self.rgb_layers[0](base_remap)) + rgb = jt.sigmoid(self.rgb_layers[1](rgb_fea)) + + if self.enable_style: + ret = OrderedDict([('rgb', rgb), + # ('base', base), # for base input style nerf + ('pts', pts), + ('sigma', sigma.squeeze(-1))]) + return ret + else: + ret = OrderedDict([('rgb', rgb), + ('sigma', sigma.squeeze(-1))]) + return ret + + +class Nerf(Module): + def __init__(self, args, mode='coarse'): + super().__init__() + self.use_viewdir = args.use_viewdir + """Activation Function""" + act_func = act_dict[args.act_type] + self.is_siren = (args.act_type == 'sine') + + """Embedding""" + if not self.is_siren: + self.embedder_coor = Embedder(input_dim=3, max_freq_log2=args.embed_freq_coor - 1, + N_freqs=args.embed_freq_coor) + self.embedder_dir = Embedder(input_dim=3, max_freq_log2=args.embed_freq_dir - 1, + N_freqs=args.embed_freq_dir) + input_ch, input_ch_viewdirs = self.embedder_coor.out_dim, self.embedder_dir.out_dim + skips = [4] + self.sigma_mul = 0. 
+ else: + input_ch, input_ch_viewdirs = 3, 3 + skips = [] + self.sigma_mul = args.siren_sigma_mul + + """Neural Network""" + if mode == 'coarse': + net_depth, net_width = args.netdepth, args.netwidth + else: + net_depth, net_width = args.netdepth_fine, args.netwidth_fine + + self.net = MLP(D=net_depth, W=net_width, input_ch=input_ch, input_ch_viewdirs=input_ch_viewdirs, + skips=skips, use_viewdir=self.use_viewdir, act_func=act_func, sigma_mul=self.sigma_mul) + + def execute(self, pts, dirs): + if not self.is_siren: + pts = self.embedder_coor(pts) + dirs = self.embedder_dir(dirs) + ret = self.net(pts, dirs) + return ret + + +class StyleMLP(Module): + def __init__(self, args): + super().__init__() + self.D = args.style_D + self.input_ch = args.embed_freq_coor * 3 * 2 + 3 + args.vae_latent + self.layers = [] + self.skips = [4] + dim = self.input_ch + for i in range(self.D-1): + if i in self.skips: + dim += self.input_ch + self.layers.append(nn.Linear(dim, args.netwidth)) + dim = args.netwidth + self.layers.append(nn.Linear(args.netwidth, 3)) + self.layers = nn.ModuleList(self.layers) + + def execute(self, **kwargs): + x = kwargs['x'] + h = x + for i in range(len(self.layers)-1): + if i in self.skips: + h = jt.concat([h, x], dim=-1) + h = self.layers[i](h) + h = nn.relu(h) + h = self.layers[-1](h) + h = jt.sigmoid(h) + return {'rgb': h} + + +class StyleMLP_Wild_multilayers(Module): + def __init__(self, args): + super().__init__() + self.D = args.style_D + self.input_ch = args.embed_freq_coor * 3 * 2 + 3 + args.vae_latent + self.layers = [] + self.skips = [4] + dim = self.input_ch + for i in range(self.D-1): + if i in self.skips: + dim += (args.embed_freq_coor * 3 * 2 + 3) + self.layers.append(nn.Linear(dim, args.netwidth)) + dim = args.netwidth + args.vae_latent + self.layers.append(nn.Linear(args.netwidth + args.vae_latent, 3)) + self.layers = nn.ModuleList(self.layers) + + def execute(self, **kwargs): + x = kwargs['x'] + l = kwargs['latent'] + h = x + for i in range(len(self.layers)-1): + h = jt.concat([h, l], dim=-1) + if i in self.skips: + h = jt.concat([h, x], dim=-1) + h = self.layers[i](h) + h = nn.relu(h) + h = jt.concat([h, l], dim=-1) + h = self.layers[-1](h) + h = jt.sigmoid(h) + return {'rgb': h} + + +class StyleNerf(Module): + def __init__(self, args, mode='coarse', enable_style=False): + super().__init__() + self.use_viewdir = args.use_viewdir + """Activation Function""" + act_func = act_dict[args.act_type] + self.is_siren = (args.act_type == 'sine') + + """Embedding""" + if not self.is_siren: + self.embedder_coor = Embedder(input_dim=3, max_freq_log2=args.embed_freq_coor - 1, + N_freqs=args.embed_freq_coor) + self.embedder_dir = Embedder(input_dim=3, max_freq_log2=args.embed_freq_dir - 1, + N_freqs=args.embed_freq_dir) + input_ch, input_ch_viewdirs = self.embedder_coor.out_dim, self.embedder_dir.out_dim + skips = [4] + self.sigma_mul = 0. 
+ else: + input_ch, input_ch_viewdirs = 3, 3 + skips = [] + self.sigma_mul = args.siren_sigma_mul + + """Neural Network""" + if mode == 'coarse': + net_depth, net_width = args.netdepth, args.netwidth + else: + net_depth, net_width = args.netdepth_fine, args.netwidth_fine + + self.net = MLP_style(D=net_depth, W=net_width, input_ch=input_ch, input_ch_viewdirs=input_ch_viewdirs, + skips=skips, use_viewdir=self.use_viewdir, act_func=act_func, sigma_mul=self.sigma_mul, enable_style=enable_style) + self.enable_style = enable_style + + def set_enable_style(self, enable_style=False): + self.enable_style = enable_style + self.net.enable_style = enable_style + + def execute(self, **kwargs): + # mode consistency + self.net.enable_style = self.enable_style + if not self.is_siren: + kwargs['pts'] = self.embedder_coor(kwargs['pts']) + kwargs['dirs'] = self.embedder_dir(kwargs['dirs']) + ret = self.net(**kwargs) + ret['dirs'] = kwargs['dirs'] + return ret + + +def vec2skew(v): + """ + :param v: (N, 3, ) torch tensor + :return: (N, 3, 3) + """ + zero = jt.zeros([v.shape[0], 1], dtype=jt.float32, device=v.device) + skew_v0 = jt.concat([zero, -v[:, 2:3], v[:, 1:2]], dim=-1) # (N, 3) + skew_v1 = jt.concat([v[:, 2:3], zero, -v[:, 0:1]], dim=-1) + skew_v2 = jt.concat([-v[:, 1:2], v[:, 0:1], zero], dim=-1) + skew_v = jt.stack([skew_v0, skew_v1, skew_v2], dim=-1) # (N, 3, 3) + return skew_v + + +def Exp(r): + """so(3) vector to SO(3) matrix + :param r: (N, 3) axis-angle, torch tensor + :return: (N, 3, 3) + """ + skew_r = vec2skew(r) # (N, 3, 3) + norm_r = r.norm(dim=1, keepdim=True).unsqueeze(-1) + 1e-15 # (N, 1, 1) + eye = jt.init.eye(3).unsqueeze(0) # (1, 3, 3) + R = eye + (jt.sin(norm_r) / norm_r) * skew_r + ((1 - jt.cos(norm_r)) / norm_r ** 2) * jt.matmul(skew_r, skew_r) + return R + + +def make_c2w(r, t): + """ + :param r: (N, 3, ) axis-angle torch tensor + :param t: (N, 3, ) translation vector torch tensor + :return: (N, 4, 4) + """ + R = Exp(r) # (N, 3, 3) + c2w = jt.concat([R, t.unsqueeze(-1)], dim=-1) # (N, 3, 4) + c2w = jt.concat([c2w, jt.zeros_like(c2w[:, :1])], dim=1) # (N, 4, 4) + c2w[:, 3, 3] = 1. 
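+    # the bottom row is now [0, 0, 0, 1], so c2w is a valid homogeneous camera-to-world matrix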
+ return c2w + + +def idx2img(idx, fea, pad=0): + batch_size, h, w, z = idx.shape + batch_size_p, point_num, dim = fea.shape + assert batch_size == batch_size_p, 'Batch Size Do Not Match' + idx_img = idx.reshape([batch_size, h*w*z, 1]).expand([batch_size, h*w*z, dim]).long() + idx_lst = point_num * torch.ones_like(idx_img) + idx_img = torch.where(idx_img >= 0, idx_img, idx_lst) + fea_pad = fea.reshape([1, batch_size*point_num, dim]).expand([batch_size, batch_size*point_num, dim]) + fea_pad = torch.cat([fea_pad, pad * torch.ones([batch_size, 1, dim]).to(idx.device)], dim=1) + fea_img = torch.gather(fea_pad, 1, idx_img).reshape([batch_size, h, w, z, dim]) + return fea_img + + +class Camera: + def __init__(self, projectionMatrix=None, cameraPose=None, device=torch.device("cpu")): + super().__init__() + self.device = device + self.tensor_list = ['projectionMatrix', 'cameraPose', 'w2c_matrix'] + for attr in self.tensor_list: + setattr(self, attr, None) + self.set(projectionMatrix=projectionMatrix, cameraPose=cameraPose) + + def set(self, **kwargs): + keys = kwargs.keys() + func_map = {'projectionMatrix': self.set_project, 'cameraPose': self.set_pose} + for name in keys: + try: + if name in func_map.keys(): + func_map[name](kwargs[name]) + else: + raise ValueError(name + f'is not in{keys}') + except ValueError as e: + print(repr(e)) + + def set_pose(self, cameraPose): + if cameraPose is None: + self.cameraPose = self.w2c_matrix = None + return + elif type(cameraPose) is np.ndarray: + cameraPose = torch.from_numpy(cameraPose) + self.cameraPose = cameraPose.float() + self.w2c_matrix = torch.inverse(self.cameraPose).float() + self.to(self.device) + + def set_project(self, projectionMatrix): + if projectionMatrix is None: + self.projectionMatrix = None + return + elif type(projectionMatrix) is np.ndarray: + projectionMatrix = torch.from_numpy(projectionMatrix) + self.projectionMatrix = projectionMatrix.float() + self.to(self.device) + + def to(self, device): + if type(device) is str: + device = torch.device(device) + self.device = device + for tensor in self.tensor_list: + if getattr(self, tensor) is not None: + setattr(self, tensor, getattr(self, tensor).to(self.device)) + return self + + def WorldtoCamera(self, coor_world): + coor_world = coor_world.clone() + if len(coor_world.shape) == 2: + coor_world = torch.cat([coor_world, torch.ones([coor_world.shape[0], 1]).to(self.device)], -1) + coor_camera = torch.einsum('bcw,nw->bnc', self.w2c_matrix, coor_world) + else: + coor_world = self.homogeneous(coor_world) + coor_camera = torch.einsum('bcw,bnw->bnc', self.w2c_matrix, coor_world) + return coor_camera + + def CameratoWorld(self, coor_camera): + coor_camera = coor_camera.clone() + coor_camera = self.homogeneous(coor_camera) + coor_world = torch.einsum('bwc,bnc->bnw', self.cameraPose, coor_camera)[:, :, :3] + return coor_world + + def WorldtoCVV(self, coor_world): + coor_camera = self.WorldtoCamera(coor_world) + coor_cvv = torch.einsum('vc,bnc->bnv', self.projectionMatrix, coor_camera) + coor_cvv = coor_cvv[..., :-1] / coor_cvv[..., -1:] + return coor_cvv + + def homogeneous(self, coor3d, force=False): + if coor3d.shape[-1] == 3 or force: + coor3d = torch.cat([coor3d, torch.ones_like(coor3d[..., :1]).to(self.device)], -1) + return coor3d + + def rasterize(self, coor_world, rgb, h=192, w=256, k=1.5, z=1): + from pytorch3d.structures import Pointclouds + from pytorch3d.renderer import compositing + from pytorch3d.renderer.points import rasterize_points + + def PixeltoCvv(h, w, hid=0, wid=0): + cvv = 
torch.tensor([[[1., 0., 0.], [-1., 0., 0.], [0., 1., 0.]]]).float() + pts = Pointclouds(points=cvv, features=cvv) + idx, _, dist2 = rasterize_points(pts, [h, w], 1e10, 3) + a2, b2, c2 = (dist2.cpu().numpy())[0, hid, wid] + x2 = (a2 + b2) / 2 - 1 + cosa = (x2 + 1 - a2) / (2 * x2**0.5) + sina_abs = (1 - cosa**2)**0.5 + u = (x2 ** 0.5) * cosa + v = (x2 ** 0.5) * sina_abs + if np.abs((u**2 + (v-1)**2)**0.5 - c2**0.5) > 1e-5: + v = - (x2 ** 0.5) * sina_abs + if(np.abs((u**2 + (v-1)**2)**0.5 - c2**0.5) > 1e-5): + print(np.abs((u**2 + (v-1)**2)**0.5 - c2**0.5), ' is too large...') + print(f"Found pixel {[hid, wid]} has uv: {(u, v)} But something wrong !!!") + print(f"a: {a2**0.5}, b: {b2**0.5}, c: {c2**0.5}, idx: {idx[0, 0, 0]}, dist2: {dist2[0, 0, 0]}") + os.exit(-1) + return u, v + + batch_size = self.cameraPose.shape[0] + point_num = rgb.shape[-2] + coor_cvv = self.WorldtoCVV(coor_world).reshape([batch_size, point_num, 3]) # (batch_size, point, 3) + umax, vmax = PixeltoCvv(h=h, w=w, hid=0, wid=0) + umin, vmin = PixeltoCvv(h=h, w=w, hid=h-1, wid=w-1) + cvv_backup = coor_cvv.clone() + coor_cvv[..., 0] = (coor_cvv[..., 0] + 1) / 2 * (umax - umin) + umin + coor_cvv[..., 1] = (coor_cvv[..., 1] + 1) / 2 * (vmax - vmin) + vmin + + rgb = rgb.reshape([1, point_num, rgb.shape[-1]]) # (1, point, 3) + rgb_coor = torch.cat([rgb, coor_world.unsqueeze(0)], dim=-1).expand([batch_size, point_num, 6]) # (1, point, 6) + + if platform.system() == 'Windows': + # Bug of pytorch3D on windows + hw = np.array([h, w]) + mindim, maxdim = np.argmin(hw), np.argmax(hw) + aspect_ration = hw[maxdim] / hw[mindim] + coor_cvv[:, :, mindim] *= aspect_ration + + pts3D = Pointclouds(points=coor_cvv, features=rgb_coor) + radius = float(2. / max(w, h) * k) + idx, _, _ = rasterize_points(pts3D, [h, w], radius, z) + alphas = torch.ones_like(idx.float()) + img = compositing.alpha_composite( + idx.permute(0, 3, 1, 2).long(), + alphas.permute(0, 3, 1, 2), + pts3D.features_packed().permute(1, 0), + ) + img = img.permute([0, 2, 3, 1]).contiguous() # (batch, h, w, 6) + rgb_map, coor_map = img[..., :3], img[..., 3:] # (batch, h, w, 3) + msk = (idx[:, :, :, :1] != -1).float() # (batch, h, w, 1) + + return rgb_map, coor_map, msk + + def rasterize_pyramid(self, coor_world, rgb, density=None, h=192, w=256, k=np.array([0.7, 1.2, 1.7, 2.2])): + if density is None: + density = torch.ones(coor_world.shape[0], 1).to(coor_world.device) + mask = None + image = None + for ksize in k: + img, _, msk = self.rasterize(coor_world, rgb, h, w, ksize, 10) + mask = msk if mask is None else mask * msk + image = img if image is None else image + img * mask.unsqueeze(-1).expand(img.shape) + return image, mask + + +class VAE_encoder(torch_nn.Module): + def __init__(self, data_dim, latent_dim, W=512, D=4): + super().__init__() + self.data_dim = data_dim + self.latent_dim = latent_dim + self.W = W + self.D = D + + """Fully Connected Layers""" + self.fc_layers = [] + current_dim = self.data_dim + for i in range(self.D - 1): + self.fc_layers.append(torch_nn.Linear(current_dim, self.W)) + current_dim = self.W + self.fc_layers = torch_nn.ModuleList(self.fc_layers) + + self.fc_layer_mu = torch_nn.Linear(current_dim, self.latent_dim) + self.fc_layer_log_var = torch_nn.Linear(current_dim, self.latent_dim) + + def forward(self, x): + for layer in self.fc_layers: + x = torch.relu(layer(x)) + mu = self.fc_layer_mu(x) + log_var = self.fc_layer_log_var(x) + return mu, log_var + + +class VAE_decoder(torch_nn.Module): + def __init__(self, data_dim, latent_dim, W=512, D=4): + 
super().__init__() + self.data_dim = data_dim + self.latent_dim = latent_dim + self.W = W + self.D = D + + """Fully Connected Layers""" + self.fc_layers = [] + current_dim = self.latent_dim + for i in range(self.D - 1): + self.fc_layers.append(torch_nn.Linear(current_dim, self.W)) + current_dim = self.W + self.fc_layers = torch_nn.ModuleList(self.fc_layers) + self.output_layer = torch_nn.Linear(current_dim, self.data_dim) + + def forward(self, x): + for layer in self.fc_layers: + x = torch.relu(layer(x)) + x = self.output_layer(x) + return x + + +def reparameterize(mu, log_var, factor=1.): + std = torch.exp(0.5 * log_var) * factor + eps = torch.randn_like(std) + return eps * std + mu + + +def reparameterize_jt(mu, log_var, factor=1.): + std = jt.exp(0.5 * log_var) * factor + eps = jt.randn_like(std) + return eps * std + mu + + +class VAE(torch_nn.Module): + def __init__(self, data_dim, latent_dim, W=512, D=4, kl_lambda=0.1): + super().__init__() + self.data_dim = data_dim + self.latent_dim = latent_dim + self.W = W + self.D = D + self.kl_lambda = kl_lambda + self.encoder = VAE_encoder(data_dim=data_dim, latent_dim=latent_dim, W=W, D=D) + self.decoder = VAE_decoder(data_dim=data_dim, latent_dim=latent_dim, W=W, D=D) + + def forward(self, x, various=True): + """Forward Function""" + z, mu, log_var = self.encode(x, various) + y = self.decode(z) + return y, z, mu, log_var + + def recon(self, x, various=False): + """Reconstruction shapes""" + z, _, _ = self.encode(x, various) + y = self.decode(z) + return y + + def encode(self, x, various=True): + mu, log_var = self.encoder(x) + z = reparameterize(mu, log_var) if various else mu + return z, mu, log_var + + def decode(self, z): + y = self.decoder(z) + return y + + def loss(self, x, y, mu, log_var, return_losses=False): + kl_loss = torch.mean(-0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(), dim=1), dim=0) + recon_loss = torch.sum(torch.mean(torch.square(x - y), dim=0)) + loss = recon_loss + self.kl_lambda * kl_loss + if return_losses: + return loss, recon_loss, self.kl_lambda * kl_loss + else: + return loss + + def sample(self, num_samples, current_device): + z = torch.randn(num_samples, + self.latent_dim) + z = z.to(current_device) + samples = self.decode(z) + return samples + + +class StyleLatents_variational(Module): + def __init__(self, **kwargs): + super().__init__() + style_num, frame_num, latent_dim = kwargs['style_num'], kwargs['frame_num'], kwargs['latent_dim'] + self.style_num = style_num + self.frame_num = frame_num + self.latent_dim = latent_dim + self.latents = jt.Var(jt.randn(self.style_num, self.frame_num, self.latent_dim)) + self.style_latents_mu = jt.Var(jt.randn(self.style_num, self.latent_dim)) + self.style_latents_logvar = jt.Var(jt.randn(self.style_num, self.latent_dim)) + self.sigma_scale = 1. 
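`reparameterize` / `reparameterize_jt` and `VAE.loss` above implement the usual VAE reparameterization trick and the closed-form KL divergence against a unit Gaussian. A small NumPy sketch of both, assuming nothing beyond the formulas already present in the code:

```python
# Illustrative NumPy sketch of z = mu + sigma * eps and of the KL term that
# VAE.loss computes; standalone, not part of the patch.
import numpy as np

rng = np.random.default_rng(0)
mu = np.array([0.5, -1.0])
log_var = np.array([0.0, -2.0])          # log(sigma^2)

std = np.exp(0.5 * log_var)              # sigma
eps = rng.standard_normal(mu.shape)
z = mu + std * eps                       # differentiable w.r.t. mu and log_var

# KL( N(mu, sigma^2) || N(0, 1) ), summed over latent dimensions:
kl = -0.5 * np.sum(1.0 + log_var - mu ** 2 - np.exp(log_var))
print(z, kl)
```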
+ self.set_requires_grad() + self.latent_optimizer = None + + def set_requires_grad(self): + self.latents.requires_grad = True + self.style_latents_mu.requires_grad = False + self.style_latents_logvar.requires_grad = False + + def rescale_sigma(self, sigma_scale=1.): + self.sigma_scale = sigma_scale + + def execute(self, **kwargs): + # style_ids, frame_ids of shape [batch] + style_ids, frame_ids = kwargs['style_ids'], kwargs['frame_ids'] + flat_ids = style_ids * self.frame_num + frame_ids # [batch] + latents = self.latents.reshape([-1, self.latent_dim])[flat_ids] # [batch, latent_dim] + mu = self.style_latents_mu[style_ids] + latents = mu + self.sigma_scale * (latents - mu) + return latents + + def minus_logp(self, **kwargs): + epsilon = 1e-3 + style_ids, frame_ids = kwargs['style_ids'], kwargs['frame_ids'] + latents = self(style_ids=style_ids, frame_ids=frame_ids) + mu = self.style_latents_mu[style_ids] + logvar = self.style_latents_logvar[style_ids] + loss_logp = jt.sum((latents - mu.detach()) ** 2 / (jt.exp(0.5 * logvar.detach()) + epsilon), -1).mean() + return loss_logp + + def set_latents(self): + all_style_latents_mu, all_style_latents_logvar = self.style_latents_mu.unsqueeze(1).expand(list(self.latents.shape)),\ + self.style_latents_logvar.unsqueeze(1).expand(list(self.latents.shape)) + latents = reparameterize_jt(all_style_latents_mu, all_style_latents_logvar, factor=1.) + self.latents = jt.Var(latents) + self.set_requires_grad() + + def set_optimizer(self): + self.latent_optimizer = jt.nn.Adam([self.latents], lr=1e-3) + + def optimize(self, loss): + if self.latent_optimizer is not None: + self.latent_optimizer.step(loss) diff --git a/contrib/StylizedNeRF/rendering.py b/contrib/StylizedNeRF/rendering.py new file mode 100644 index 00000000..44e81dd7 --- /dev/null +++ b/contrib/StylizedNeRF/rendering.py @@ -0,0 +1,476 @@ +from utils import * + + +def render(model_forward, samp_func, dataloader, args, device, sv_path=None, model_forward_fine=None, samp_func_fine=None): + """Render Scene into Images""" + save_makedir(sv_path) + dataset = dataloader.dataset + frame_num, h, w = dataset.frame_num, dataset.h, dataset.w + resolution = h * w + img_id = 0 + rgb_map, t_map = None, None + rgb_map_fine, t_map_fine = None, None + for batch_idx, batch_data in enumerate(tqdm(dataloader)): + # To Device as Tensor + for key in batch_data: + batch_data[key] = jt.array(batch_data[key].numpy()) + + # Get data and forward + rays_o, rays_d = batch_data['rays_o'], batch_data['rays_d'] + pts, ts = samp_func(rays_o=rays_o, rays_d=rays_d, N_samples=args.N_samples, near=dataset.near, far=dataset.far) + ray_num, pts_num = rays_o.shape[0], args.N_samples + rays_d_forward = rays_d.unsqueeze(1).expand([ray_num, pts_num, 3]) + ret = model_forward(pts=pts, dirs=rays_d_forward) + pts_rgb, pts_sigma = ret['rgb'], ret['sigma'] + rgb_exp, t_exp, weights = alpha_composition(pts_rgb, pts_sigma, ts, 0) + # Gather outputs + rgb_exp, t_exp = rgb_exp.detach().numpy(), t_exp.detach().numpy() + rgb_map = rgb_exp if rgb_map is None else np.concatenate([rgb_map, rgb_exp], axis=0) + t_map = t_exp if t_map is None else np.concatenate([t_map, t_exp], axis=0) + + if args.N_samples_fine > 0: + pts_fine, ts_fine = samp_func_fine(rays_o, rays_d, ts, weights, args.N_samples_fine) + pts_num = args.N_samples + args.N_samples_fine + rays_d_forward = rays_d.unsqueeze(1).expand([ray_num, pts_num, 3]) + ret = model_forward_fine(pts=pts_fine, dirs=rays_d_forward) + pts_rgb_fine, pts_sigma_fine = ret['rgb'], ret['sigma'] + rgb_exp_fine, 
t_exp_fine, _ = alpha_composition(pts_rgb_fine, pts_sigma_fine, ts_fine, 0) + # Gather outputs + rgb_exp_fine, t_exp_fine = rgb_exp_fine.detach().numpy(), t_exp_fine.detach().numpy() + rgb_map_fine = rgb_exp_fine if rgb_map_fine is None else np.concatenate([rgb_map_fine, rgb_exp_fine], axis=0) + t_map_fine = t_exp_fine if t_map_fine is None else np.concatenate([t_map_fine, t_exp_fine], axis=0) + + # Write to svpath + img_num_gathered = (rgb_map.shape[0] // resolution) - img_id + if img_num_gathered > 0 and sv_path is not None: + sv_rgb = np.array(rgb_map[img_id * resolution: (img_id + img_num_gathered) * resolution], np.float32).reshape([img_num_gathered, h, w, 3]) + sv_t = np.array(t_map[img_id * resolution: (img_id + img_num_gathered) * resolution], np.float32).reshape([img_num_gathered, -1]) + sv_t = (sv_t - np.min(sv_t, axis=1, keepdims=True)) / (np.max(sv_t, axis=1, keepdims=True) - np.min(sv_t, axis=1, keepdims=True) + 1e-7) + sv_t = sv_t.reshape([img_num_gathered, h, w]) + sv_rgb, sv_t = np.array(sv_rgb * 255, np.int32), np.array(sv_t * 255, np.int32) + for i in range(img_num_gathered): + imageio.imwrite(sv_path + '/coarse_%05d.png' % (i + img_id), to8b(sv_rgb[i])) + imageio.imwrite(sv_path + '/coarse_depth_%05d.png' % (i + img_id), to8b(sv_t[i])) + + if args.N_samples_fine > 0: + sv_rgb = np.array(rgb_map_fine[img_id * resolution: (img_id + img_num_gathered) * resolution], + np.float32).reshape([img_num_gathered, h, w, 3]) + sv_t = np.array(t_map_fine[img_id * resolution: (img_id + img_num_gathered) * resolution], + np.float32).reshape([img_num_gathered, -1]) + sv_t = (sv_t - np.min(sv_t, axis=1, keepdims=True)) / ( + np.max(sv_t, axis=1, keepdims=True) - np.min(sv_t, axis=1, keepdims=True) + 1e-7) + sv_t = sv_t.reshape([img_num_gathered, h, w]) + sv_rgb, sv_t = np.array(sv_rgb * 255, np.int32), np.array(sv_t * 255, np.int32) + for i in range(img_num_gathered): + imageio.imwrite(sv_path + '/fine_%05d.png' % (i + img_id), to8b(sv_rgb[i])) + imageio.imwrite(sv_path + '/fine_depth_%05d.png' % (i + img_id), to8b(sv_t[i])) + + img_id += img_num_gathered + + rgb_map, t_map = np.array(rgb_map).reshape([-1, h, w, 3]), np.array(t_map).reshape([-1, h, w, 1]) + t_map_show = np.broadcast_to(t_map, [t_map.shape[0], h, w, 3]) + t_map_show = (t_map_show - t_map_show.min()) / (t_map_show.max() - t_map_show.min() + 1e-10) + if sv_path is not None: + imageio.mimwrite(sv_path + '/coarse_rgb.mp4', to8b(rgb_map), fps=30, quality=8) + imageio.mimwrite(sv_path + '/coarse_depth.mp4', to8b(t_map_show), fps=30, quality=8) + + if args.N_samples_fine > 0: + rgb_map_fine, t_map_fine = np.array(rgb_map_fine).reshape([-1, h, w, 3]), np.array(t_map_fine).reshape([-1, h, w, 1]) + t_map_show = np.broadcast_to(t_map_fine, [t_map_fine.shape[0], h, w, 3]) + t_map_show = (t_map_show - t_map_show.min()) / (t_map_show.max() - t_map_show.min() + 1e-10) + if sv_path is not None: + imageio.mimwrite(sv_path + '/fine_rgb.mp4', to8b(rgb_map), fps=30, quality=8) + imageio.mimwrite(sv_path + '/fine_depth.mp4', to8b(t_map_show), fps=30, quality=8) + + return rgb_map, t_map, rgb_map_fine, t_map_fine + + +def render_train(samp_func, model_forward, dataset, args, device, sv_path=None, model_forward_fine=None, samp_func_fine=None): + save_makedir(sv_path) + frame_num, h, w = dataset.frame_num, dataset.h, dataset.w + + # The largest factor of h*w closest to chunk. 
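The comment above refers to the batch-size search on the next few lines: step down from `args.chunk` until the batch size divides `h*w` exactly, so every image is rendered in a whole number of batches (`iter_img`). A standalone sketch with hypothetical values for `h`, `w` and `chunk`:

```python
# Sketch of the batch-size selection used in render_train (and again in
# render_train_style below): the largest divisor of h*w not exceeding `chunk`.
def pick_batch_size(h, w, chunk):
    batch_size = chunk
    while (h * w) % batch_size != 0:
        batch_size -= 1
    return batch_size

h, w, chunk = 192, 256, 32768            # hypothetical sizes
bs = pick_batch_size(h, w, chunk)
assert (h * w) % bs == 0 and bs <= chunk
print(bs, (h * w) // bs)                 # batch size and batches per image
```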
+ batch_size = args.chunk + while int(h * w) % batch_size != 0: + batch_size -= 1 + # Iteration times of each image and current iteration + iter_img = int(h * w / batch_size) + iter = 0 + img_count = 0 + print('Pick batch size: ', batch_size) + + dataloader = DataLoader(dataset, shuffle=False, batch_size=batch_size, num_workers=args.num_workers, pin_memory=(args.num_workers > 0)) + + pred_rgb, gt_rgb, pred_t = [], [], [] + pred_rgb_fine, pred_t_fine = [], [] + # Iteration + for batch_idx, batch_data in enumerate(tqdm(dataloader)): + # To Device as Tensor + for key in batch_data: + batch_data[key] = jt.array(batch_data[key].numpy()) + + # Get batch data + rgb_gt, rays_o, rays_d = batch_data['rgb_gt'], batch_data['rays_o'], batch_data['rays_d'] + pts, ts = samp_func(rays_o=rays_o, rays_d=rays_d, N_samples=args.N_samples, near=dataset.near, far=dataset.far) + ray_num, pts_num = rays_o.shape[0], args.N_samples + rays_d_forward = rays_d.unsqueeze(1).expand([ray_num, pts_num, 3]) + + # Forward and Composition + ret = model_forward(pts=pts, dirs=rays_d_forward) + pts_rgb, pts_sigma = ret['rgb'], ret['sigma'] + rgb_exp, t_exp, weights = alpha_composition(pts_rgb, pts_sigma, ts, 0) + + pred_rgb.append(rgb_exp.detach().numpy()) + pred_t.append(t_exp.detach().numpy()) + gt_rgb.append(rgb_gt.detach().numpy()) + + if args.N_samples_fine > 0: + pts_fine, ts_fine = samp_func_fine(rays_o, rays_d, ts, weights, args.N_samples_fine) + pts_num = args.N_samples + args.N_samples_fine + rays_d_forward = rays_d.unsqueeze(1).expand([ray_num, pts_num, 3]) + ret = model_forward_fine(pts=pts_fine, dirs=rays_d_forward) + pts_rgb_fine, pts_sigma_fine = ret['rgb'], ret['sigma'] + rgb_exp_fine, t_exp_fine, _ = alpha_composition(pts_rgb_fine, pts_sigma_fine, ts_fine, 0) + pred_rgb_fine.append(rgb_exp_fine.detach().numpy()) + pred_t_fine.append(t_exp_fine.detach().numpy()) + + iter += 1 + + # Complete per image + if iter == iter_img: + # Reshape + pred_rgb = np.concatenate(pred_rgb, axis=0).reshape([h, w, 3]) + pred_t = np.concatenate(pred_t, axis=0).reshape([h, w]) + gt_rgb = np.concatenate(gt_rgb, axis=0).reshape([h, w, 3]) + + # Broadcast + pred_t = np.broadcast_to(pred_t[..., np.newaxis], [h, w, 3]) + # Normalize + pred_t = (pred_t - np.min(pred_t)) / (np.max(pred_t) - np.min(pred_t)) + + # To 255 + pred_rgb, pred_t = np.array(pred_rgb * 255, np.int32), np.array(pred_t * 255, np.int32) + gt_rgb = np.array(gt_rgb * 255, np.int32) + + # Saving images + imageio.imwrite(sv_path + '/coarse_%05d.png' % img_count, to8b(pred_rgb)) + imageio.imwrite(sv_path + '/coarse_depth_%05d.png' % img_count, to8b(pred_t)) + imageio.imwrite(sv_path + '/gt_%05d.png' % img_count, to8b(gt_rgb)) + + if args.N_samples_fine > 0: + pred_rgb_fine = np.concatenate(pred_rgb_fine, axis=0).reshape([h, w, 3]) + pred_t_fine = np.concatenate(pred_t_fine, axis=0).reshape([h, w]) + pred_t_fine = np.broadcast_to(pred_t_fine[..., np.newaxis], [h, w, 3]) + pred_t_fine = (pred_t_fine - pred_t_fine.min()) / (pred_t_fine.max() - pred_t_fine.min()) + pred_rgb_fine, pred_t_fine = np.array(pred_rgb_fine * 255, np.int32), np.array(pred_t_fine * 255, np.int32) + imageio.imwrite(sv_path + '/fine_%05d.png' % img_count, to8b(pred_rgb_fine)) + imageio.imwrite(sv_path + '/fine_depth_%05d.png' % img_count, to8b(pred_t_fine)) + + img_count += 1 + print("Finish %d Image ..." 
% img_count) + iter = 0 + pred_rgb, gt_rgb, pred_t, gt_t, depth_masks = [], [], [], [], [] + pred_rgb_fine, pred_t_fine = [], [] + + +def cal_geometry(model_forward, samp_func, dataloader, args, device, sv_path=None, model_forward_fine=None, samp_func_fine=None): + """Render Scene into Images""" + save_makedir(sv_path) + dataset = dataloader.dataset + cps = dataset.cps if 'train' in dataset.mode else dataset.cps_valid + hwf = dataset.hwf + near, far = dataset.near, dataset.far + frame_num, h, w = dataset.frame_num if 'train' in dataset.mode else dataset.cps_valid.shape[0], dataset.h, dataset.w + resolution = h * w + img_id, pixel_id = 0, 0 + rgb_map, t_map = np.zeros([frame_num*h*w, 3], dtype=np.float32), np.zeros([frame_num*h*w], dtype=np.float32) + coor_map = np.zeros([frame_num*h*w, 3], dtype=np.float32) + for batch_idx, batch_data in enumerate(tqdm(dataloader)): + # To Device as Tensor + for key in batch_data: + batch_data[key] = jt.array(batch_data[key].numpy()) + + # Get data and forward + rays_o, rays_d = batch_data['rays_o'], batch_data['rays_d'] + pts, ts = samp_func(rays_o=rays_o, rays_d=rays_d, N_samples=args.N_samples, near=dataset.near, far=dataset.far) + ray_num, pts_num = rays_o.shape[0], args.N_samples + rays_d_forward = rays_d.unsqueeze(1).expand([ray_num, pts_num, 3]) + ret = model_forward(pts=pts, dirs=rays_d_forward) + pts_rgb, pts_sigma = ret['rgb'], ret['sigma'] + rgb_exp, t_exp, weights = alpha_composition(pts_rgb, pts_sigma, ts, 0) + # Gather outputs + if not args.N_samples > 0: + rgb_exp_tmp, t_exp_tmp = rgb_exp.detach().numpy(), t_exp.detach().numpy() + coor_tmp = t_exp_tmp[..., np.newaxis] * rays_d.numpy() + rays_o.numpy() + else: + pts_fine, ts_fine = samp_func_fine(rays_o, rays_d, ts, weights, args.N_samples_fine) + pts_num = args.N_samples + args.N_samples_fine + rays_d_forward = rays_d.unsqueeze(1).expand([ray_num, pts_num, 3]) + ret = model_forward_fine(pts=pts_fine, dirs=rays_d_forward) + pts_rgb_fine, pts_sigma_fine = ret['rgb'], ret['sigma'] + rgb_exp_fine, t_exp_fine, _ = alpha_composition(pts_rgb_fine, pts_sigma_fine, ts_fine, 0) + # Gather outputs + rgb_exp_tmp, t_exp_tmp = rgb_exp_fine.detach().numpy(), t_exp_fine.detach().numpy() + coor_tmp = t_exp_tmp[..., np.newaxis] * rays_d.numpy() + rays_o.numpy() + + batch_size = coor_tmp.shape[0] + rgb_map[pixel_id: pixel_id+batch_size] = rgb_exp_tmp + t_map[pixel_id: pixel_id+batch_size] = t_exp_tmp + coor_map[pixel_id: pixel_id+batch_size] = coor_tmp + pixel_id += batch_size + + # Write to svpath + img_num_gathered = (pixel_id // resolution) - img_id + if img_num_gathered > 0 and sv_path is not None: + sv_rgb = np.array(rgb_map[img_id * resolution: (img_id + img_num_gathered) * resolution], np.float32).reshape([img_num_gathered, h, w, 3]) + sv_t = np.array(t_map[img_id * resolution: (img_id + img_num_gathered) * resolution], np.float32).reshape([img_num_gathered, -1]) + sv_coor_map = np.array(coor_map[img_id * resolution: (img_id + img_num_gathered) * resolution], np.float32).reshape([img_num_gathered, h, w, 3]) + sv_t = (sv_t - np.min(sv_t, axis=1, keepdims=True)) / (np.max(sv_t, axis=1, keepdims=True) - np.min(sv_t, axis=1, keepdims=True) + 1e-7) + sv_t = sv_t.reshape([img_num_gathered, h, w]) + sv_rgb, sv_t = np.array(sv_rgb * 255, np.int32), np.array(sv_t * 255, np.int32) + for i in range(img_num_gathered): + imageio.imwrite(sv_path + '/rgb_%05d.png' % (i + img_id), to8b(sv_rgb[i])) + imageio.imwrite(sv_path + '/depth_%05d.png' % (i + img_id), to8b(sv_t[i])) + np.savez(sv_path + '/geometry_%05d' % (i + 
img_id), coor_map=sv_coor_map[i], cps=cps[i + img_id], hwf=hwf, near=near, far=far) + img_id += img_num_gathered + + rgb_map, t_map = np.array(rgb_map).reshape([-1, h, w, 3]), np.array(t_map).reshape([-1, h, w, 1]) + coor_map = np.array(coor_map).reshape([-1, h, w, 3]) + np.savez(sv_path + '/geometry', coor_map=coor_map, cps=cps, hwf=hwf, near=near, far=far) + + t_map_show = np.broadcast_to(t_map, [t_map.shape[0], h, w, 3]) + t_map_show = (t_map_show - t_map_show.min()) / (t_map_show.max() - t_map_show.min() + 1e-10) + if sv_path is not None: + imageio.mimwrite(sv_path + '/rgb.mp4', to8b(rgb_map), fps=30, quality=8) + imageio.mimwrite(sv_path + '/depth.mp4', to8b(t_map_show), fps=30, quality=8) + + return rgb_map, t_map + + +def render_style(model_forward, samp_func, style_forward, latents_model, dataloader, args, device, sv_path=None, model_forward_fine=None, samp_func_fine=None, sigma_scale=0.): + """Render Scene into Images""" + latents_model.rescale_sigma(sigma_scale=sigma_scale) + save_makedir(sv_path) + dataset = dataloader.dataset + dataset.mode = 'valid_style' + frame_num, h, w = dataset.cps_valid.shape[0], dataset.h, dataset.w + resolution = h * w + img_id = 0 + rgb_map, t_map = None, None + rgb_map_fine, t_map_fine = None, None + for batch_idx, batch_data in enumerate(tqdm(dataloader)): + # To Device as Tensor + for key in batch_data: + batch_data[key] = jt.array(batch_data[key].numpy()) + + # Get data and forward + rays_o, rays_d, style_feature = batch_data['rays_o'], batch_data['rays_d'], batch_data['style_feature'] + style_id, frame_id = batch_data['style_id'].long(), batch_data['frame_id'].long() + # Sample + pts, ts = samp_func(rays_o=rays_o, rays_d=rays_d, N_samples=args.N_samples, near=dataset.near, far=dataset.far, perturb=True) + ray_num, pts_num = rays_o.shape[0], args.N_samples + rays_d_forward = rays_d.unsqueeze(1).expand([ray_num, pts_num, 3]) + # Forward + ret = model_forward(pts=pts, dirs=rays_d_forward) + pts_sigma, pts_embed = ret['sigma'], ret['pts'] + # Stylize + style_latents = latents_model(style_ids=style_id, frame_ids=frame_id) + style_latents_forward = style_latents.unsqueeze(1).expand([ray_num, pts_num, style_latents.shape[-1]]) + ret_style = style_forward(x=pts_embed, latent=style_latents_forward) + pts_rgb_style = ret_style['rgb'] + # Composition + rgb_exp_style, t_exp, weights = alpha_composition(pts_rgb_style, pts_sigma, ts, 0) + + # Gather outputs + rgb_exp, t_exp = rgb_exp_style.detach().numpy(), t_exp.detach().numpy() + rgb_map = rgb_exp if rgb_map is None else np.concatenate([rgb_map, rgb_exp], axis=0) + t_map = t_exp if t_map is None else np.concatenate([t_map, t_exp], axis=0) + + if args.N_samples_fine > 0: + # Sample + pts_fine, ts_fine = samp_func_fine(rays_o, rays_d, ts, weights, args.N_samples_fine) + pts_num = args.N_samples + args.N_samples_fine + rays_d_forward = rays_d.unsqueeze(1).expand([ray_num, pts_num, 3]) + # Forward + ret = model_forward_fine(pts=pts_fine, dirs=rays_d_forward) + pts_sigma_fine, pts_embed_fine = ret['sigma'], ret['pts'] + # Stylize + style_latents_forward = style_latents.unsqueeze(1).expand([ray_num, pts_num, style_latents.shape[-1]]) + ret_style = style_forward(x=pts_embed_fine, latent=style_latents_forward) + pts_rgb_style_fine = ret_style['rgb'] + # Composition + rgb_exp_style_fine, t_exp_fine, _ = alpha_composition(pts_rgb_style_fine, pts_sigma_fine, ts_fine, 0) + # Gather outputs + rgb_exp_fine, t_exp_fine = rgb_exp_style_fine.detach().numpy(), t_exp_fine.detach().numpy() + rgb_map_fine = rgb_exp_fine if 
rgb_map_fine is None else np.concatenate([rgb_map_fine, rgb_exp_fine], axis=0) + t_map_fine = t_exp_fine if t_map_fine is None else np.concatenate([t_map_fine, t_exp_fine], axis=0) + + # Write to svpath + img_num_gathered = (rgb_map.shape[0] // resolution) - img_id + if img_num_gathered > 0 and sv_path is not None: + sv_rgb = np.array(rgb_map[img_id * resolution: (img_id + img_num_gathered) * resolution], np.float32).reshape([img_num_gathered, h, w, 3]) + sv_t = np.array(t_map[img_id * resolution: (img_id + img_num_gathered) * resolution], np.float32).reshape([img_num_gathered, -1]) + sv_t = (sv_t - np.min(sv_t, axis=1, keepdims=True)) / (np.max(sv_t, axis=1, keepdims=True) - np.min(sv_t, axis=1, keepdims=True) + 1e-7) + sv_t = sv_t.reshape([img_num_gathered, h, w]) + sv_rgb, sv_t = np.array(sv_rgb * 255, np.int32), np.array(sv_t * 255, np.int32) + for i in range(img_num_gathered): + style_id = (i + img_id) // frame_num + image_id = (i + img_id) % frame_num + imageio.imwrite(sv_path + '/style_%05d_coarse_%05d.png' % (style_id, image_id), to8b(sv_rgb[i])) + imageio.imwrite(sv_path + '/style_%05d_coarse_depth_%05d.png' % (style_id, image_id), to8b(sv_t[i])) + + if args.N_samples_fine > 0: + sv_rgb = np.array(rgb_map_fine[img_id * resolution: (img_id + img_num_gathered) * resolution], + np.float32).reshape([img_num_gathered, h, w, 3]) + sv_t = np.array(t_map_fine[img_id * resolution: (img_id + img_num_gathered) * resolution], + np.float32).reshape([img_num_gathered, -1]) + sv_t = (sv_t - np.min(sv_t, axis=1, keepdims=True)) / ( + np.max(sv_t, axis=1, keepdims=True) - np.min(sv_t, axis=1, keepdims=True) + 1e-7) + sv_t = sv_t.reshape([img_num_gathered, h, w]) + sv_rgb, sv_t = np.array(sv_rgb * 255, np.int32), np.array(sv_t * 255, np.int32) + for i in range(img_num_gathered): + style_id = (i + img_id) // frame_num + image_id = (i + img_id) % frame_num + imageio.imwrite(sv_path + '/style_%05d_fine_%05d.png' % (style_id, image_id), to8b(sv_rgb[i])) + imageio.imwrite(sv_path + '/style_%05d_fine_depth_%05d.png' % (style_id, image_id), to8b(sv_t[i])) + + img_id += img_num_gathered + + rgb_map, t_map = np.array(rgb_map).reshape([-1, h, w, 3]), np.array(t_map).reshape([-1, h, w, 1]) + t_map_show = np.broadcast_to(t_map, [t_map.shape[0], h, w, 3]) + t_map_show = (t_map_show - t_map_show.min()) / (t_map_show.max() - t_map_show.min() + 1e-10) + if sv_path is not None: + imageio.mimwrite(sv_path + '/coarse_rgb.mp4', to8b(rgb_map), fps=30, quality=8) + imageio.mimwrite(sv_path + '/coarse_depth.mp4', to8b(t_map_show), fps=30, quality=8) + + if args.N_samples_fine > 0: + rgb_map_fine, t_map_fine = np.array(rgb_map_fine).reshape([-1, h, w, 3]), np.array(t_map_fine).reshape([-1, h, w, 1]) + t_map_show = np.broadcast_to(t_map_fine, [t_map_fine.shape[0], h, w, 3]) + t_map_show = (t_map_show - t_map_show.min()) / (t_map_show.max() - t_map_show.min() + 1e-10) + if sv_path is not None: + imageio.mimwrite(sv_path + '/fine_rgb.mp4', to8b(rgb_map), fps=30, quality=8) + imageio.mimwrite(sv_path + '/fine_depth.mp4', to8b(t_map_show), fps=30, quality=8) + + return rgb_map, t_map, rgb_map_fine, t_map_fine + + +def render_train_style(samp_func, model_forward, style_forward, latents_model, dataset, args, device, sv_path=None, model_forward_fine=None, samp_func_fine=None, sigma_scale=0.): + save_makedir(sv_path) + latents_model.rescale_sigma(sigma_scale=sigma_scale) + frame_num, h, w = dataset.frame_num, dataset.h, dataset.w + dataset.mode = 'train_style' + + # The largest factor of h*w closest to chunk. 
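Before the per-image loop below (which reuses the same divisor-based batch-size trick as `render_train`), one aside on the bookkeeping shared by `StyleLatents_variational.execute`, `render_style` and `render_train_style`: a (style, frame) pair is flattened row-major into a single index and recovered with integer division and modulo. A tiny self-contained check:

```python
# The (style, frame) indexing used above is plain row-major flattening:
# flat = style_id * frame_num + frame_id, inverted with divmod.
frame_num = 60                                  # hypothetical frame count
for style_id in range(3):
    for frame_id in (0, frame_num - 1):
        flat = style_id * frame_num + frame_id
        assert divmod(flat, frame_num) == (style_id, frame_id)
```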
+ batch_size = args.chunk + while int(h * w) % batch_size != 0: + batch_size -= 1 + # Iteration times of each image and current iteration + iter_img = int(h * w / batch_size) + iter = 0 + img_count = 0 + print('Pick batch size: ', batch_size) + + dataloader = DataLoader(dataset, shuffle=False, batch_size=batch_size, num_workers=args.num_workers, pin_memory=(args.num_workers > 0)) + + pred_rgb, gt_rgb, pred_t = [], [], [] + pred_rgb_fine, pred_t_fine = [], [] + # Iteration + for batch_idx, batch_data in enumerate(tqdm(dataloader)): + + style_id_check = img_count // frame_num + image_id_check = img_count % frame_num + img_path_check = sv_path + '/style_%05d_fine_%05d.png' % (style_id_check, image_id_check) + if not os.path.exists(img_path_check): + # To Device as Tensor + for key in batch_data: + batch_data[key] = jt.array(batch_data[key].numpy()) + + # Get batch data + rgb_gt, rays_o, rays_d, rgb_origin = batch_data['rgb_gt'], batch_data['rays_o'], batch_data['rays_d'], \ + batch_data['rgb_origin'] + style_id, frame_id = batch_data['style_id'].long(), batch_data['frame_id'].long() + + # Sample + pts, ts = samp_func(rays_o=rays_o, rays_d=rays_d, N_samples=args.N_samples, near=dataset.near, + far=dataset.far, perturb=True) + ray_num, pts_num = rays_o.shape[0], args.N_samples + rays_d_forward = rays_d.unsqueeze(1).expand([ray_num, pts_num, 3]) + + # Forward + ret = model_forward(pts=pts, dirs=rays_d_forward) + pts_sigma, pts_embed = ret['sigma'], ret['pts'] + # Stylize + style_latents = latents_model(style_ids=style_id, frame_ids=frame_id) + style_latents_forward = style_latents.unsqueeze(1).expand([ray_num, pts_num, style_latents.shape[-1]]) + ret_style = style_forward(x=pts_embed, latent=style_latents_forward) + pts_rgb_style = ret_style['rgb'] + # Composition + rgb_exp_style, t_exp, weights = alpha_composition(pts_rgb_style, pts_sigma, ts, 0) + + pred_rgb.append(jt.clamp(rgb_exp_style, 0., 1.).detach().numpy()) + pred_t.append(t_exp.detach().numpy()) + gt_rgb.append(rgb_gt.detach().numpy()) + + if args.N_samples_fine > 0: + # Sample + pts_fine, ts_fine = samp_func_fine(rays_o, rays_d, ts, weights, args.N_samples_fine) + pts_num = args.N_samples + args.N_samples_fine + rays_d_forward = rays_d.unsqueeze(1).expand([ray_num, pts_num, 3]) + # Forward + ret = model_forward_fine(pts=pts_fine, dirs=rays_d_forward) + pts_sigma_fine, pts_embed_fine = ret['sigma'], ret['pts'] + # Stylize + style_latents_forward = style_latents.unsqueeze(1).expand([ray_num, pts_num, style_latents.shape[-1]]) + ret_style = style_forward(x=pts_embed_fine, latent=style_latents_forward) + pts_rgb_style_fine = ret_style['rgb'] + # Composition + rgb_exp_style_fine, t_exp_fine, _ = alpha_composition(pts_rgb_style_fine, pts_sigma_fine, ts_fine, 0) + pred_rgb_fine.append(jt.clamp(rgb_exp_style_fine, 0., 1.).detach().numpy()) + pred_t_fine.append(t_exp_fine.detach().numpy()) + + iter += 1 + + # Complete per image + if iter == iter_img: + if not os.path.exists(img_path_check): + # Reshape + pred_rgb = np.concatenate(pred_rgb, axis=0).reshape([h, w, 3]) + pred_t = np.concatenate(pred_t, axis=0).reshape([h, w]) + gt_rgb = np.concatenate(gt_rgb, axis=0).reshape([h, w, 3]) + + # Broadcast + pred_t = np.broadcast_to(pred_t[..., np.newaxis], [h, w, 3]) + # Normalize + pred_t = (pred_t - np.min(pred_t)) / (np.max(pred_t) - np.min(pred_t)) + + # To 255 + pred_rgb, pred_t = np.array(pred_rgb * 255, np.int32), np.array(pred_t * 255, np.int32) + gt_rgb = np.array(gt_rgb * 255, np.int32) + + # Saving images + style_id = img_count // 
frame_num + image_id = img_count % frame_num + imageio.imwrite(sv_path + '/style_%05d_coarse_%05d.png' % (style_id, image_id), to8b(pred_rgb)) + imageio.imwrite(sv_path + '/style_%05d_coarse_depth_%05d.png' % (style_id, image_id), to8b(pred_t)) + imageio.imwrite(sv_path + '/style_%05d_2d_%05d.png' % (style_id, image_id), to8b(gt_rgb)) + + if args.N_samples_fine > 0: + pred_rgb_fine = np.concatenate(pred_rgb_fine, axis=0).reshape([h, w, 3]) + pred_t_fine = np.concatenate(pred_t_fine, axis=0).reshape([h, w]) + pred_t_fine = np.broadcast_to(pred_t_fine[..., np.newaxis], [h, w, 3]) + pred_t_fine = (pred_t_fine - pred_t_fine.min()) / (pred_t_fine.max() - pred_t_fine.min()) + pred_rgb_fine, pred_t_fine = np.array(pred_rgb_fine * 255, np.int32), np.array(pred_t_fine * 255, np.int32) + imageio.imwrite(sv_path + '/style_%05d_fine_%05d.png' % (style_id, image_id), to8b(pred_rgb_fine)) + imageio.imwrite(sv_path + '/style_%05d_fine_depth_%05d.png' % (style_id, image_id), to8b(pred_t_fine)) + img_count += 1 + print("Finish %d Image ..." % img_count) + iter = 0 + pred_rgb, gt_rgb, pred_t, gt_t, depth_masks = [], [], [], [], [] + pred_rgb_fine, pred_t_fine = [], [] + else: + img_count += 1 + print("Skip %d Image ..." % img_count) + iter = 0 + pred_rgb, gt_rgb, pred_t, gt_t, depth_masks = [], [], [], [], [] + pred_rgb_fine, pred_t_fine = [], [] + diff --git a/contrib/StylizedNeRF/requirements.txt b/contrib/StylizedNeRF/requirements.txt new file mode 100644 index 00000000..acf1ba7f --- /dev/null +++ b/contrib/StylizedNeRF/requirements.txt @@ -0,0 +1,17 @@ +torch +torch-vision +opencv-python +opencv-contrib-python +imageio +psutil +plyfile +pyrender +tqdm +matplotlib +scikit-image +natsort +configargparse +open3d +tensorboardX +pytorch3d +jittor diff --git a/contrib/StylizedNeRF/run_stylenerf.py b/contrib/StylizedNeRF/run_stylenerf.py new file mode 100644 index 00000000..d5b93517 --- /dev/null +++ b/contrib/StylizedNeRF/run_stylenerf.py @@ -0,0 +1,601 @@ +import time +import shutil +import VGGNet +from rendering import * + +import jittor as jt +from jittor import nn + +import torch.nn as torch_nn +from dataset import RaySampler, StyleRaySampler, StyleRaySampler_gen, LightDataLoader +from models_jt import StyleNerf, StyleMLP_Wild_multilayers, VAE, StyleLatents_variational +from train_style_modules import train_temporal_invoke, train_temporal_invoke_pl +from config import config_parser + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + +jt.flags.use_cuda = 1 + + +def train(args): + + """Check Nerf Type""" + nerf_dict = {'style_nerf': StyleNerf} + nerf_type_str = '' + for nerf_type in nerf_dict.keys(): + nerf_type_str += (nerf_type + ' ') + assert args.nerf_type in nerf_dict.keys(), 'Unknown nerf type: ' + args.nerf_type + '. Only support: ' + nerf_type_str + print('Type of nerf: ', args.nerf_type) + + """Style Module Type""" + style_module_dict = {'mlp': StyleMLP_Wild_multilayers} + style_type_str = '' + for style_type in style_module_dict.keys(): + style_type_str += (style_type + ' ') + assert args.style_type in style_module_dict.keys(), 'Unknown style type: ' + args.style_type + '. Only support: ' + style_type_str + print('Type of style: ', args.style_type) + + """Latent Module Type""" + latent_module_dict = {'variational': StyleLatents_variational} + latent_type_str = '' + for latent_type in latent_module_dict.keys(): + latent_type_str += (latent_type + ' ') + assert args.latent_type in latent_module_dict.keys(), 'Unknown latent type: ' + args.latent_type + '. 
Only support: ' + latent_type_str + print('Type of latent: ', args.latent_type) + + """Check Sampling Type""" + samp_dict = {'uniform': sampling_pts_uniform} + samp_type_str = '' + for samp_type in samp_dict.keys(): + samp_type_str += (samp_type + ' ') + assert args.sample_type in samp_dict.keys(), 'Unknown nerf type: ' + args.sample_type + '. Only support: ' + samp_type_str + print('Sampling Strategy: ', args.sample_type) + samp_func = samp_dict[args.sample_type] + if args.N_samples_fine > 0: + samp_func_fine = sampling_pts_fine_jt + + """Saving Configuration""" + use_viewdir_str = '_UseViewDir_' if args.use_viewdir else '' + sv_path = os.path.join(args.basedir, args.expname + '_' + args.nerf_type + '_' + args.act_type + use_viewdir_str + 'ImgFactor' + str(int(args.factor))) + save_makedir(sv_path) + shutil.copy(args.config, sv_path) + nerf_gen_data_path = sv_path + '/nerf_gen_data2/' + + """Create Nerfs""" + nerf = nerf_dict[args.nerf_type] + model = nerf(args=args, mode='coarse') + model.train() + grad_vars = list(model.parameters()) + model_forward = batchify(lambda **kwargs: model(**kwargs), args.chunk) + if args.N_samples_fine > 0: + nerf_fine = nerf_dict[args.nerf_type_fine] + model_fine = nerf_fine(args=args, mode='fine') + model_fine.train() + grad_vars += list(model_fine.parameters()) + model_forward_fine = batchify(lambda **kwargs: model_fine(**kwargs), args.chunk) + optimizer = nn.Adam(params=grad_vars, lr=args.lrate, betas=(0.9, 0.999)) + + """Create Style Module""" + style = style_module_dict[args.style_type] + style_model = style(args) + style_model.train() + style_vars = style_model.parameters() + style_forward = batchify(lambda **kwargs: style_model(**kwargs), args.chunk) + style_optimizer = nn.Adam(params=style_vars, lr=args.lrate, betas=(0.9, 0.999)) + + """VGG and Decoder""" + decoder = VGGNet.decoder + vgg = VGGNet.vgg + decoder.eval() + vgg.eval() + decoder.load_state_dict(torch.load('./pretrained/decoder.pth')) + vgg.load_state_dict(torch.load('./pretrained/vgg_normalised.pth')) + vgg = torch_nn.Sequential(*list(vgg.children())[:31]) + vgg.to(device) + decoder.to(device) + + """Load Check Point""" + global_step = 0 + ckpts_path = sv_path + save_makedir(ckpts_path) + ckpts = [os.path.join(ckpts_path, f) for f in sorted(os.listdir(ckpts_path)) if 'tar' in f and 'style' not in f and 'latent' not in f] + print('Found ckpts', ckpts, ' from ', ckpts_path) + if len(ckpts) > 0 and not args.no_reload: + ckpt_path = ckpts[-1] + print('Reloading Nerf Model from ', ckpt_path) + ckpt = jt.load(ckpt_path) + global_step = ckpt['global_step'] + # Load model + model.load_state_dict(ckpt['model']) + # Load optimizer + optimizer.load_state_dict(ckpt['optimizer']) + if args.N_samples_fine > 0: + model_fine.load_state_dict((ckpt['model_fine'])) + ckpts_style = [os.path.join(ckpts_path, f) for f in sorted(os.listdir(ckpts_path)) if 'tar' in f and 'style' in f and 'latent' not in f] + if len(ckpts_style) > 0 and not args.no_reload: + ckpt_path_style = ckpts_style[-1] + print('Reloading Style Model from ', ckpt_path_style) + ckpt_style = jt.load(ckpt_path_style) + global_step = ckpt_style['global_step'] + style_model.load_state_dict(ckpt_style['model']) + style_optimizer.load_state_dict(ckpt_style['optimizer']) + + def Prepare_Style_data(nerf_gen_data_path): + """Dataset Creation""" + tmp_dataset = StyleRaySampler(data_path=args.datadir, style_path=args.styledir, factor=args.factor, + mode='valid', valid_factor=args.gen_factor, dataset_type=args.dataset_type, + white_bkgd=args.white_bkgd, 
half_res=args.half_res, no_ndc=args.no_ndc, + pixel_alignment=args.pixel_alignment, spherify=args.spherify, TT_far=args.TT_far) + tmp_dataloader = DataLoader(tmp_dataset, args.batch_size_style, shuffle=False, num_workers=args.num_workers, + pin_memory=(args.num_workers > 0)) + print("Preparing nerf data for style training ...") + cal_geometry(model_forward=model_forward, samp_func=samp_func, dataloader=tmp_dataloader, args=args, + device=device, + sv_path=nerf_gen_data_path, model_forward_fine=model_forward_fine, + samp_func_fine=samp_func_fine) + + """Train 2D Style""" + if not global_step + 1 < args.origin_step: + sv_name = '/decoder.pth' + is_ndc = (args.dataset_type == 'llff' and not args.no_ndc) + if not os.path.exists(sv_path + sv_name): + if not os.path.exists(nerf_gen_data_path): + Prepare_Style_data(nerf_gen_data_path=nerf_gen_data_path) + print('Training 2D Style Module') + if args.dataset_type == 'llff': + train_temporal_invoke(save_dir=sv_path, sv_name=sv_name, log_dir=sv_path + '/style_decoder/', is_ndc=is_ndc, + nerf_content_dir=nerf_gen_data_path, style_dir=args.styledir, batch_size=4) + else: + train_temporal_invoke_pl(save_dir=sv_path, sv_name=sv_name, log_dir=sv_path + '/style_decoder/', + nerf_content_dir=nerf_gen_data_path, style_dir=args.styledir, batch_size=4) + + """Dataset Creation""" + if global_step + 1 < args.origin_step and not os.path.exists(nerf_gen_data_path): + train_dataset = RaySampler(data_path=args.datadir, factor=args.factor, + mode='train', valid_factor=args.valid_factor, dataset_type=args.dataset_type, + white_bkgd=args.white_bkgd, half_res=args.half_res, no_ndc=args.no_ndc, + pixel_alignment=args.pixel_alignment, spherify=args.spherify, TT_far=args.TT_far) + else: + + if not os.path.exists(nerf_gen_data_path): + Prepare_Style_data(nerf_gen_data_path=nerf_gen_data_path) + train_dataset = StyleRaySampler_gen(data_path=args.datadir, gen_path=nerf_gen_data_path, style_path=args.styledir, + factor=args.factor, + mode='train', valid_factor=args.valid_factor, dataset_type=args.dataset_type, + white_bkgd=args.white_bkgd, half_res=args.half_res, no_ndc=args.no_ndc, + pixel_alignment=args.pixel_alignment, spherify=args.spherify, + decode_path=sv_path+'/decoder.pth', + store_rays=args.store_rays, TT_far=args.TT_far) + """VAE""" + vae = VAE(data_dim=1024, latent_dim=args.vae_latent, W=args.vae_w, D=args.vae_d, + kl_lambda=args.vae_kl_lambda) + vae.eval() + vae_ckpt = args.vae_pth_path + vae.load_state_dict(torch.load(vae_ckpt)) + + """Latents Module""" + latent_model_class = latent_module_dict[args.latent_type] + latents_model = latent_model_class(style_num=train_dataset.style_num, frame_num=train_dataset.frame_num, latent_dim=args.vae_latent) + vae.to(device) + latent_ckpts = [os.path.join(ckpts_path, f) for f in sorted(os.listdir(ckpts_path)) if 'tar' in f and 'style' not in f and 'latent' in f] + print('Found ckpts', latent_ckpts, ' from ', ckpts_path, ' For Latents Module.') + if len(latent_ckpts) > 0 and not args.no_reload: + latent_ckpt_path = latent_ckpts[-1] + print('Reloading Latent Model from ', latent_ckpt_path) + latent_ckpt = jt.load(latent_ckpt_path) + latents_model.load_state_dict(latent_ckpt['train_set']) + else: + vae.to(device) + print("Initializing Latent Model") + # Calculate and Initialize Style Latents + all_style_features = torch.from_numpy(train_dataset.style_features).float().to(device) + _, style_latents_mu, style_latents_logvar = vae.encode(all_style_features) + # all_style_latents = all_style_latents_mu + # Set Latents + 
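The initialization that follows this comment encodes each style image into a Gaussian `(mu, logvar)` and then draws one latent per frame from it (`set_latents`); at render time `sigma_scale=0` collapses every frame back onto its style mean. A NumPy sketch of that behaviour with made-up sizes:

```python
# Illustrative sketch of per-style latent initialisation and the sigma_scale
# blending used by StyleLatents_variational; sizes here are hypothetical.
import numpy as np

rng = np.random.default_rng(0)
style_num, frame_num, latent_dim = 4, 10, 8
mu = rng.standard_normal((style_num, latent_dim))
logvar = rng.standard_normal((style_num, latent_dim))

# broadcast each style's Gaussian over its frames, then sample once per frame
mu_f = np.broadcast_to(mu[:, None, :], (style_num, frame_num, latent_dim))
std_f = np.exp(0.5 * np.broadcast_to(logvar[:, None, :], mu_f.shape))
latents = mu_f + std_f * rng.standard_normal(mu_f.shape)

# deterministic rendering: sigma_scale = 0 collapses frames onto the style mean
sigma_scale = 0.0
blended = mu_f + sigma_scale * (latents - mu_f)
assert np.allclose(blended, mu_f)
```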
latents_model.style_latents_mu = jt.Var(style_latents_mu.detach().cpu().numpy()) + latents_model.style_latents_logvar = jt.Var(style_latents_logvar.detach().cpu().numpy()) + latents_model.set_latents() + latents_model + vae.cpu() + + train_dataloader = DataLoader(train_dataset, args.batch_size, shuffle=True, num_workers=args.num_workers, + pin_memory=(args.num_workers > 0)) + + # Render valid + if args.render_valid: + render_path = os.path.join(sv_path, 'render_valid_' + str(global_step)) + valid_dataset = train_dataset + valid_dataset.mode = 'valid' + valid_dataloader = DataLoader(valid_dataset, args.batch_size, shuffle=False, num_workers=args.num_workers, pin_memory=(args.num_workers > 0)) + with torch.no_grad(): + if args.N_samples_fine > 0: + rgb_map, t_map, rgb_map_fine, t_map_fine = render(model_forward=model_forward, samp_func=samp_func, dataloader=valid_dataloader, + args=args, device=device, sv_path=render_path, model_forward_fine=model_forward_fine, + samp_func_fine=samp_func_fine) + else: + rgb_map, t_map, _, _ = render(model_forward=model_forward, samp_func=samp_func, dataloader=valid_dataloader, + args=args, device=device, sv_path=render_path) + print('Done, saving', rgb_map.shape, t_map.shape) + exit(0) + + # Render train + if args.render_train: + render_path = os.path.join(sv_path, 'render_train_' + str(global_step)) + render_dataset = train_dataset + if args.N_samples_fine > 0: + render_train(samp_func=samp_func, model_forward=model_forward, dataset=render_dataset, args=args, device=device, sv_path=render_path, model_forward_fine=model_forward_fine, samp_func_fine=samp_func_fine) + else: + render_train(samp_func=samp_func, model_forward=model_forward, dataset=render_dataset, args=args, device=device, sv_path=render_path) + exit(0) + + # Render valid style + if args.render_valid_style: + render_path = os.path.join(sv_path, 'render_valid_' + str(global_step)) + # Enable style + model.set_enable_style(True) + if args.N_samples_fine > 0: + model_fine.set_enable_style(True) + valid_dataset = train_dataset + valid_dataset.mode = 'valid_style' + valid_dataloader = DataLoader(valid_dataset, args.batch_size, shuffle=False, num_workers=args.num_workers, pin_memory=(args.num_workers > 0)) + with torch.no_grad(): + if args.N_samples_fine > 0: + rgb_map, t_map, rgb_map_fine, t_map_fine = render_style(model_forward=model_forward, samp_func=samp_func, style_forward=style_forward, latents_model=latents_model, + dataloader=valid_dataloader, args=args, device=device, sv_path=render_path, + model_forward_fine=model_forward_fine, samp_func_fine=samp_func_fine, sigma_scale=args.sigma_scale) + else: + rgb_map, t_map, _, _ = render_style(model_forward=model_forward, samp_func=samp_func, style_forward=style_forward, latents_model=latents_model, dataloader=valid_dataloader, + args=args, device=device, sv_path=render_path, sigma_scale=args.sigma_scale) + print('Done, saving', rgb_map.shape, t_map.shape) + return + + # Render train style + if args.render_train_style: + render_path = os.path.join(sv_path, 'render_train_' + str(global_step)) + # Enable style + model.set_enable_style(True) + if args.N_samples_fine > 0: + model_fine.set_enable_style(True) + render_dataset = train_dataset + render_dataset.mode = 'train_style' + if args.N_samples_fine > 0: + render_train_style(samp_func=samp_func, model_forward=model_forward, style_forward=style_forward, latents_model=latents_model, dataset=render_dataset, args=args, device=device, sv_path=render_path, model_forward_fine=model_forward_fine, 
samp_func_fine=samp_func_fine, sigma_scale=args.sigma_scale) + else: + render_train_style(samp_func=samp_func, model_forward=model_forward, style_forward=style_forward, latents_model=latents_model, dataset=render_dataset, args=args, device=device, sv_path=render_path, sigma_scale=args.sigma_scale) + return + + # Training Loop + def Origin_train(global_step): + # Elapse Measurement + data_time, model_time, opt_time = 0, 0, 0 + fine_time = 0 + while True: + for batch_idx, batch_data in enumerate(train_dataloader): + + # To Device as Tensor + for key in batch_data: + batch_data[key] = jt.array(batch_data[key].numpy()) + + # Get batch data + start_t = time.time() + rgb_gt, rays_o, rays_d = batch_data['rgb_gt'], batch_data['rays_o'], batch_data['rays_d'] + + pts, ts = samp_func(rays_o=rays_o, rays_d=rays_d, N_samples=args.N_samples, near=train_dataset.near, far=train_dataset.far, perturb=True) + ray_num, pts_num = rays_o.shape[0], args.N_samples + rays_d_forward = rays_d.unsqueeze(1).expand([ray_num, pts_num, 3]) + + # Forward and Composition + forward_t = time.time() + ret = model_forward(pts=pts, dirs=rays_d_forward) + pts_rgb, pts_sigma = ret['rgb'], ret['sigma'] + rgb_exp, t_exp, weights = alpha_composition(pts_rgb, pts_sigma, ts, args.sigma_noise_std) + + # Calculate Loss + loss_rgb = img2mse(rgb_gt, rgb_exp) + # opt_t = time.time() + loss = loss_rgb + + fine_t = time.time() + if args.N_samples_fine > 0: + pts_fine, ts_fine = samp_func_fine(rays_o, rays_d, ts, weights, args.N_samples_fine) + # print(pts_fine) + pts_num = args.N_samples + args.N_samples_fine + rays_d_forward = rays_d.unsqueeze(1).expand([ray_num, pts_num, 3]) + ret = model_forward_fine(pts=pts_fine, dirs=rays_d_forward) + pts_rgb_fine, pts_sigma_fine = ret['rgb'], ret['sigma'] + rgb_exp_fine, t_exp_fine, _ = alpha_composition(pts_rgb_fine, pts_sigma_fine, ts_fine, args.sigma_noise_std) + loss_rgb_fine = img2mse(rgb_gt, rgb_exp_fine) + loss = loss + loss_rgb_fine + + # Backward and Optimize + optimizer.step(loss) + + if global_step % args.i_print == 0: + psnr = mse2psnr(loss_rgb) + if args.N_samples_fine > 0: + psnr_fine = mse2psnr(loss_rgb_fine) + tqdm.write( + f"[ORIGIN TRAIN] Iter: {global_step} Loss: {loss.data[0]} PSNR: {psnr.data[0]} PSNR Fine: {psnr_fine.data[0]} RGB Loss: {loss_rgb.data[0]} RGB Fine Loss: {loss_rgb_fine.data[0]}" + f" Data time: {np.round(data_time, 2)}s Model time: {np.round(model_time, 2)}s Fine time: {np.round(fine_time, 2)}s Optimization time: {np.round(opt_time, 2)}s") + # tqdm.write( + # f"[ORIGIN TRAIN] Iter: {global_step} Loss: {loss_rgb.item()} PSNR: {psnr.item()} RGB Loss: {loss_rgb.item()}" + # f" Data time: {np.round(data_time, 2)}s Model time: {np.round(model_time, 2)}s Fine time: {np.round(fine_time, 2)}s Optimization time: {np.round(opt_time, 2)}s") + else: + # tqdm.write(f"[ORIGIN TRAIN] Iter: {global_step}") + tqdm.write( + f"[ORIGIN TRAIN] Iter: {global_step} Loss: {loss_rgb.item()} PSNR: {psnr.item()} RGB Loss: {loss_rgb.item()}" + f" Data time: {np.round(data_time, 2)}s Model time: {np.round(model_time, 2)}s Fine time: {np.round(fine_time, 2)}s Optimization time: {np.round(opt_time, 2)}s") + + data_time, model_time, opt_time = 0, 0, 0 + fine_time = 0 + + # Update Learning Rate + decay_rate = 0.1 + decay_steps = args.lrate_decay + new_lrate = args.lrate * (decay_rate ** (global_step / decay_steps)) + for param_group in optimizer.param_groups: + param_group['lr'] = new_lrate + + # Time Measuring + end_t = time.time() + data_time += (forward_t - start_t) + model_time += (fine_t - 
forward_t) + fine_time += 0 + opt_time += (end_t - fine_t) + + # Rest is logging + if global_step % args.i_weights == 0 and global_step > 0 or global_step >= args.origin_step: + path = os.path.join(ckpts_path, '{:06d}.tar'.format(global_step)) + if args.N_samples_fine > 0: + jt.save({ + 'global_step': global_step, + 'model': model.state_dict(), + 'model_fine': model_fine.state_dict(), + 'optimizer': optimizer.state_dict(), + 'style_optimizer': style_optimizer.state_dict() + }, path) + else: + jt.save({ + 'global_step': global_step, + 'model': model.state_dict(), + 'optimizer': optimizer.state_dict(), + 'style_optimizer': style_optimizer.state_dict() + }, path) + print('Saved checkpoints at', path) + + # Delete ckpts + ckpts = [os.path.join(ckpts_path, f) for f in sorted(os.listdir(ckpts_path)) if 'tar' in f] + if len(ckpts) > args.ckp_num: + os.remove(ckpts[0]) + + global_step += 1 + if global_step > args.origin_step: + return global_step + + def Style_train(global_step, train_dataset): + # Elapse Measurement + data_time, model_time, opt_time = 0, 0, 0 + fine_time = 0 + + """VGG Net""" + decoder = VGGNet.decoder + vgg = VGGNet.vgg + + decoder_data = torch.load(sv_path+'/decoder.pth') + if 'decoder' in decoder_data.keys(): + decoder.load_state_dict(decoder_data['decoder']) + else: + decoder.load_state_dict(decoder_data) + vgg.load_state_dict(torch.load(args.vgg_pth_path)) + vgg = torch_nn.Sequential(*list(vgg.children())[:31]) + style_net = VGGNet.Net(vgg, decoder) + # style_net.eval() + style_net.to(device) + # decoder_optimizer = torch.optim.Adam(decoder.parameters(), lr=1e-7) + + """Dataset Mode for Style""" + if not type(train_dataset) is StyleRaySampler_gen: + train_dataset = StyleRaySampler_gen(data_path=args.datadir, gen_path=nerf_gen_data_path, style_path=args.styledir, factor=args.factor, + mode='train', valid_factor=args.valid_factor, dataset_type=args.dataset_type, + white_bkgd=args.white_bkgd, half_res=args.half_res, no_ndc=args.no_ndc, TT_far=args.TT_far, + pixel_alignment=args.pixel_alignment, spherify=args.spherify, decode_path=sv_path+'/decoder.pth', store_rays=args.store_rays) + else: + train_dataset.collect_all_stylized_images() + train_dataset.set_mode('train_style') + train_dataloader = LightDataLoader(train_dataset, batch_size=args.batch_size_style, shuffle=True, num_workers=args.num_workers, pin_memory=(args.num_workers > 0)) + rounds_per_epoch = int(train_dataloader.data_num / train_dataloader.batch_size) + print('DataLoader Creation Done !') + + """Model Mode for Style""" + model.set_enable_style(True) + if args.N_samples_fine > 0: + model_fine.set_enable_style(True) + model.eval() + + latents_model.set_optimizer() + + patch_size = 512 # for 512 * 512 (or = 1024 for 1024 * 1024) + resample_layer = torch_nn.Upsample(size=(patch_size, patch_size), mode='bilinear', align_corners=True) + + loss_c, loss_s = torch.tensor(0.), torch.tensor(0.) 
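Both training loops adjust the learning rate with the same exponential schedule, `lr = lrate * 0.1 ** (step / lrate_decay)`, i.e. one decade of decay every `lrate_decay` steps. A quick sketch with hypothetical hyper-parameter values:

```python
# Sketch of the exponential learning-rate schedule used in Origin_train and
# Style_train; lrate and lrate_decay below are made-up example values.
lrate, lrate_decay = 5e-4, 250_000

def lr_at(step, lrate=lrate, decay_steps=lrate_decay, decay_rate=0.1):
    return lrate * decay_rate ** (step / decay_steps)

assert abs(lr_at(0) - lrate) < 1e-12
assert abs(lr_at(lrate_decay) - 0.1 * lrate) < 1e-12  # one decade per lrate_decay steps
```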
+ while True: + for _ in range(rounds_per_epoch): + batch_data = train_dataloader.get_batch() + # # Patch Sampling + # if global_step > args.decoder_step: + # style_id, fid, hid, wid = np.random.randint(0, train_dataset.style_num), \ + # np.random.randint(0, train_dataset.frame_num), \ + # np.random.randint(0, train_dataset.h), \ + # np.random.randint(0, train_dataset.w) + # batch_data = train_dataset.get_patch_train_style(style_id=style_id, fid=fid, hid=hid, wid=wid, patch_size=patch_size) + # content_images = torch.movedim(batch_data['rgb_origin'].to(device).float().reshape([1, patch_size, patch_size, 3]), -1, 1) + # style_images = torch.movedim(batch_data['style_image'].to(device).float(), -1, 1) + # loss_c, loss_s, stylized_content = style_net(content_images, style_images, + # return_stylized_content=True) + # loss_c, loss_s = args.content_loss_lambda * loss_c, args.style_loss_lambda * loss_s + # stylized_content = resample_layer(stylized_content) + # + # samp_idx = np.random.choice(np.arange(patch_size ** 2), [args.batch_size_style], replace=False) + # batch_data['rgb_gt'] = torch.clip(torch.movedim(stylized_content, 1, -1).reshape([-1, 3])[samp_idx].detach(), 0, 1) + # rgb_2d = torch.clip(torch.movedim(stylized_content, 1, -1).reshape([-1, 3])[samp_idx], 0, 1) + # + # samp_keys = ['rays_o', 'rays_d', 'frame_id', 'style_id', 'rgb_origin'] + # for key in samp_keys: + # batch_data[key] = batch_data[key][samp_idx] + + # To Device as Tensor + # To Device as Tensor + for key in batch_data: + batch_data[key] = jt.array(batch_data[key].numpy()) + + # Get batch data + start_t = time.time() + rgb_gt, rays_o, rays_d, rgb_origin = batch_data['rgb_gt'], batch_data['rays_o'], batch_data['rays_d'], batch_data['rgb_origin'] + style_id, frame_id = batch_data['style_id'].long(), batch_data['frame_id'].long() + + # Sample + pts, ts = samp_func(rays_o=rays_o, rays_d=rays_d, N_samples=args.N_samples, near=train_dataset.near, far=train_dataset.far, perturb=True) + ray_num, pts_num = rays_o.shape[0], args.N_samples + rays_d_forward = rays_d.unsqueeze(1).expand([ray_num, pts_num, 3]) + + # Forward + forward_t = time.time() + ret = model_forward(pts=pts, dirs=rays_d_forward) + pts_sigma, pts_embed = ret['sigma'], ret['pts'] + # Stylize + style_latents = latents_model(style_ids=style_id, frame_ids=frame_id) + style_latents_forward = style_latents.unsqueeze(1).expand([ray_num, pts_num, style_latents.shape[-1]]) + ret_style = style_forward(x=pts_embed, latent=style_latents_forward) + pts_rgb_style = ret_style['rgb'] + # Composition + rgb_exp_style, _, weights = alpha_composition(pts_rgb_style, pts_sigma, ts, args.sigma_noise_std) + # Pixel-wise Loss + loss_rgb = args.rgb_loss_lambda * img2mse(rgb_exp_style, rgb_gt) + # Latent LogP loss + logp_loss_lambda = args.logp_loss_lambda * (args.logp_loss_decay ** int((global_step - args.origin_step) / 1000)) + loss_logp = logp_loss_lambda * latents_model.minus_logp(style_ids=style_id, frame_ids=frame_id) + + fine_t = time.time() + if args.N_samples_fine > 0: + # Sample + pts_fine, ts_fine = samp_func_fine(rays_o, rays_d, ts, weights, args.N_samples_fine) + pts_num = args.N_samples + args.N_samples_fine + rays_d_forward = rays_d.unsqueeze(1).expand([ray_num, pts_num, 3]) + # Forward + ret = model_forward_fine(pts=pts_fine, dirs=rays_d_forward) + pts_sigma_fine, pts_embed_fine = ret['sigma'], ret['pts'] + # Stylize + style_latents_forward = style_latents.unsqueeze(1).expand([ray_num, pts_num, style_latents.shape[-1]]) + ret_style = style_forward(x=pts_embed_fine, 
latent=style_latents_forward) + pts_rgb_style_fine = ret_style['rgb'] + # Composition + rgb_exp_style_fine, _, _ = alpha_composition(pts_rgb_style_fine, pts_sigma_fine, ts_fine, args.sigma_noise_std) + loss_rgb_fine = args.rgb_loss_lambda * img2mse(rgb_exp_style_fine, rgb_gt) + loss_rgb += loss_rgb_fine + + # Loss for stylized NeRF + loss_mimic = loss_rgb + loss = loss_mimic + loss_logp + + # # Loss for 2D stylization network + # if global_step > args.decoder_step: + # loss_rgb_2d = args.rgb_loss_lambda_2d * img2mse(rgb_2d, rgb_exp_style.detach()) + # if args.N_samples_fine > 0: + # loss_rgb_2d_fine = args.rgb_loss_lambda_2d * img2mse(rgb_2d, rgb_exp_style_fine.detach()) + # loss_rgb_2d += loss_rgb_2d + # loss_mimic = loss_rgb_2d + # loss_2d = loss_mimic + loss_c + loss_s + + # Backward and Optimize + opt_t = time.time() + # if global_step > args.decoder_step: + # loss_2d.backward() + # decoder_optimizer.step() + style_optimizer.step(loss) + latents_model.optimize(loss) + + # Update Learning Rate + decay_rate = 0.1 + decay_steps = args.lrate_decay + new_lrate = args.lrate * (decay_rate ** ((global_step - args.origin_step) / decay_steps)) + for param_group in optimizer.param_groups: + param_group['lr'] = new_lrate + + # Time Measuring + end_t = time.time() + data_time += (forward_t - start_t) + model_time += (fine_t - forward_t) + fine_time += (opt_t - fine_t) + opt_time += (end_t - fine_t) + + # Rest is logging + if global_step % args.i_weights == 0 and global_step > 0: + # Saving Style module + path = os.path.join(ckpts_path, 'style_{:06d}.tar'.format(global_step)) + jt.save({ + 'global_step': global_step, + 'model': style_model.state_dict(), + 'optimizer': style_optimizer.state_dict() + }, path) + print('Saved checkpoints at', path) + # Delete ckpts + ckpts = [os.path.join(ckpts_path, f) for f in sorted(os.listdir(ckpts_path)) if 'tar' in f and 'style' in f and 'latent' not in f] + if len(ckpts) > args.ckp_num: + os.remove(ckpts[0]) + + # Saving Latent Model + path = os.path.join(ckpts_path, 'latent_{:06d}.tar'.format(global_step)) + jt.save({ + 'global_step': global_step, + 'train_set': latents_model.state_dict() + }, path) + print('Saved checkpoints at', path) + # Delete ckpts + ckpts = [os.path.join(ckpts_path, f) for f in sorted(os.listdir(ckpts_path)) if 'tar' in f and 'style' not in f and 'latent' in f] + if len(ckpts) > args.ckp_num: + os.remove(ckpts[0]) + + # # Saving 2D stylization method + # state_dict = style_net.decoder.state_dict() + # for key in state_dict.keys(): + # state_dict[key] = state_dict[key] + # sv_dict = {'decoder': state_dict, 'step': global_step} + # torch.save(sv_dict, ckpts_path + '/decoder.pth') + + if global_step % args.i_print == 0: + tqdm.write(f"[STYLE TRAIN] Iter: {global_step} Loss: {loss.item()} Pixel RGB Loss: {loss_rgb.item()} -Log(p) Loss: {loss_logp.item()} Loss Content: {loss_c.item()} Loss Style: {loss_s.item()}" + f" Data time: {np.round(data_time, 2)}s Model time: {np.round(model_time, 2)}s Fine time: {np.round(fine_time, 2)}s Optimization time: {np.round(opt_time, 2)}s") + data_time, model_time, opt_time = 0, 0, 0 + fine_time = 0 + + global_step += 1 + if global_step > args.total_step: + return global_step + + if global_step + 1 < args.origin_step: + print('Global Step: ', global_step, ' Origin Step: ', args.origin_step) + print('Origin Train') + Origin_train(global_step) + else: + sv_name = '/decoder.pth' + is_ndc = (args.dataset_type == 'llff' and not args.no_ndc) + if not os.path.exists(sv_path + sv_name): + if args.dataset_type == 
'llff': + train_temporal_invoke(save_dir=sv_path, sv_name=sv_name, log_dir=sv_path + '/style_decoder/', is_ndc=is_ndc, + nerf_content_dir=nerf_gen_data_path, style_dir=args.styledir, batch_size=4) + else: + train_temporal_invoke_pl(save_dir=sv_path, sv_name=sv_name, log_dir=sv_path + '/style_decoder/', + nerf_content_dir=nerf_gen_data_path, style_dir=args.styledir, batch_size=4) + + if not os.path.exists(nerf_gen_data_path): + Prepare_Style_data(nerf_gen_data_path=nerf_gen_data_path) + + Style_train(global_step, train_dataset) + exit(0) + return + + +if __name__ == '__main__': + args = config_parser() + while True: + train(args=args) diff --git a/contrib/StylizedNeRF/teaser.png b/contrib/StylizedNeRF/teaser.png new file mode 100644 index 00000000..254fc5a4 Binary files /dev/null and b/contrib/StylizedNeRF/teaser.png differ diff --git a/contrib/StylizedNeRF/train_style_modules.py b/contrib/StylizedNeRF/train_style_modules.py new file mode 100644 index 00000000..139dff81 --- /dev/null +++ b/contrib/StylizedNeRF/train_style_modules.py @@ -0,0 +1,809 @@ +import os +import torch +import shutil +import VGGNet +import argparse +import numpy as np +from tqdm import tqdm +import torch.nn as nn +from models_jt import VAE +from pathlib import Path +from models_jt import Camera +import torch.utils.data as data +from PIL import Image, ImageFile +from torchvision import transforms +import torch.backends.cudnn as cudnn +from tensorboardX import SummaryWriter +from plyfile import PlyElement, PlyData +from Style_function import calc_mean_std + +# from pytorch3d.structures import Pointclouds +# from pytorch3d.renderer import compositing +# from pytorch3d.renderer.points import rasterize_points + +# cudnn.benchmark = True +# Image.MAX_IMAGE_PIXELS = None # Disable DecompressionBombError +# # Disable OSError: image file is truncated +# ImageFile.LOAD_TRUNCATED_IMAGES = True + + +def InfiniteSampler(n): + # i = 0 + i = n - 1 + order = np.random.permutation(n) + while True: + yield order[i] + i += 1 + if i >= n: + np.random.seed() + order = np.random.permutation(n) + i = 0 + + +class InfiniteSamplerWrapper(data.sampler.Sampler): + def __init__(self, data_source): + # super(InfiniteSamplerWrapper, self).__init__() + self.num_samples = len(data_source) + + def __iter__(self): + return iter(InfiniteSampler(self.num_samples)) + + def __len__(self): + return 2 ** 31 + + +def train_transform(): + transform_list = [ + transforms.Resize(size=(512, 512)), + transforms.RandomCrop(256), + transforms.ToTensor() + ] + return transforms.Compose(transform_list) + + +def train_transform2(): + transform_list = [ + transforms.Resize(size=(512, 512)), + transforms.ToTensor() + ] + return transforms.Compose(transform_list) + + +def default_transform(): + transform_list = [ + transforms.ToTensor() + ] + return transforms.Compose(transform_list) + + +class FlatFolderDataset(data.Dataset): + def __init__(self, root, transform=None): + super(FlatFolderDataset, self).__init__() + self.root = root + self.paths = list(Path(self.root).glob('*')) + transform = default_transform() if transform is None else transform + self.transform = transform + + def __getitem__(self, index): + path = self.paths[index] + img = Image.open(str(path)).convert('RGB') + img = self.transform(img) + return img + + def __len__(self): + return len(self.paths) + + def name(self): + return 'FlatFolderDataset' + + +class CoorImageDataset(data.Dataset): + def __init__(self, root): + super(CoorImageDataset, self).__init__() + self.root = root + self.image_paths = 
sorted(list(Path(self.root).glob('rgb_*.png'))) + self.geo_paths = sorted(list(Path(self.root).glob('geometry_*.npz'))) + data = np.load(str(self.geo_paths[0])) + self.hwf = data['hwf'] + # self.near, self.far = data['near'], data['far'] + self.near, self.far = 0., 1. + self.transform = default_transform() + + def __getitem__(self, index): + image_path, geo_path = self.image_paths[index], self.geo_paths[index] + img = Image.open(str(image_path)).convert('RGB') + img = self.transform(img) + geo = np.load(str(geo_path)) + coor_map, cps = geo['coor_map'], geo['cps'] + return img, coor_map, cps + + def __len__(self): + return len(self.image_paths) + + def name(self): + return 'FlatFolderDataset' + + +class CoorImageDataset_pl(data.Dataset): + def __init__(self, root, factor=0.01): + super(CoorImageDataset_pl, self).__init__() + self.root = root + self.image_paths = sorted(list(Path(self.root).glob('rgb_*.png'))) + self.geo_paths = sorted(list(Path(self.root).glob('geometry_*.npz'))) + data = np.load(str(self.geo_paths[0])) + self.hwf = data['hwf'] + # self.near, self.far = data['near'], data['far'] + self.near, self.far = 0., 1. + self.factor = factor + self.transform = default_transform() + + ts = np.zeros([len(self.geo_paths), 3], dtype=np.float32) + for i in range(len(self.geo_paths)): + ts[i] = np.load(str(self.geo_paths[i]))['cps'][:3, 3] + + dist = ts[np.newaxis] - ts[:, np.newaxis] + dist = dist ** 2 + dist = dist.sum(-1) ** 0.5 + self.dist = dist + + def get_batch(self, batch_size, index=None): + if index is None: + index = np.random.randint(0, len(self.image_paths)) + dists = self.dist[index] + inds = np.argsort(dists) + prange = max(int(self.factor*len(self.image_paths)), batch_size) + inds = inds[:prange] + inds = np.random.choice(inds, [batch_size], replace=(prange <= batch_size)) + imgs, coor_maps, cps = [], [], [] + for i in range(batch_size): + img, coor_map, cp = self.__getitem__(inds[i]) + imgs.append(img) + coor_maps.append(coor_map) + cps.append(cp) + imgs = torch.stack(imgs).float() + coor_maps = torch.from_numpy(np.stack(coor_maps)).float() + cps = torch.from_numpy(np.stack(cps)).float() + return imgs, coor_maps, cps + + def __getitem__(self, index): + image_path, geo_path = self.image_paths[index], self.geo_paths[index] + img = Image.open(str(image_path)).convert('RGB') + img = self.transform(img) + geo = np.load(str(geo_path)) + coor_map, cps = geo['coor_map'], geo['cps'] + return img, coor_map, cps + + def __len__(self): + return len(self.image_paths) + + def name(self): + return 'FlatFolderDataset' + + +def adjust_learning_rate(optimizer, iteration_count): + """Imitating the original implementation""" + lr = args.lr / (1.0 + args.lr_decay * iteration_count) + for param_group in optimizer.param_groups: + param_group['lr'] = lr + + +def finetune_decoder(args): + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + save_dir = Path(args.save_dir) + save_dir.mkdir(exist_ok=True, parents=True) + log_dir = Path(args.log_dir) + log_dir.mkdir(exist_ok=True, parents=True) + writer = SummaryWriter(log_dir=str(log_dir)) + + decoder = VGGNet.decoder + vgg = VGGNet.vgg + + decoder.load_state_dict(torch.load('./models/decoder.pth')) + vgg.load_state_dict(torch.load('./models/vgg_normalised.pth')) + + vgg.load_state_dict(torch.load(args.vgg)) + vgg = nn.Sequential(*list(vgg.children())[:31]) + network = VGGNet.Net(vgg, decoder) + network.train() + network.to(device) + + content_tf = train_transform() + style_tf = train_transform() + + content_dataset = 
FlatFolderDataset(args.content_dir, content_tf) + style_dataset = FlatFolderDataset(args.style_dir, style_tf) + + content_iter = iter(data.DataLoader( + content_dataset, batch_size=args.batch_size, + sampler=InfiniteSamplerWrapper(content_dataset), + num_workers=args.n_threads)) + style_iter = iter(data.DataLoader( + style_dataset, batch_size=args.batch_size, + sampler=InfiniteSamplerWrapper(style_dataset), + num_workers=args.n_threads)) + + optimizer = torch.optim.Adam(network.decoder.parameters(), lr=args.lr) + + for i in tqdm(range(args.max_iter)): + adjust_learning_rate(optimizer, iteration_count=i) + content_images = next(content_iter).to(device) + style_images = next(style_iter).to(device) + loss_c, loss_s = network(content_images, style_images) + loss_c = args.content_weight * loss_c + loss_s = args.style_weight * loss_s + loss = loss_c + loss_s + + optimizer.zero_grad() + loss.backward() + optimizer.step() + + writer.add_scalar('loss_content', loss_c.item(), i + 1) + writer.add_scalar('loss_style', loss_s.item(), i + 1) + + if (i + 1) % args.save_model_interval == 0 or (i + 1) == args.max_iter: + state_dict = network.decoder.state_dict() + for key in state_dict.keys(): + state_dict[key] = state_dict[key].to(torch.device('cpu')) + torch.save(state_dict, save_dir / + 'decoder_iter_{:d}.pth.tar'.format(i + 1)) + writer.close() + + +def train_vae(args): + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + save_dir = Path(args.save_dir) + save_dir.mkdir(exist_ok=True, parents=True) + log_dir = Path(args.log_dir) + log_dir.mkdir(exist_ok=True, parents=True) + writer = SummaryWriter(log_dir=str(log_dir)) + + vgg = VGGNet.vgg + vgg.load_state_dict(torch.load('./pretrained/vgg_normalised.pth')) + vgg.load_state_dict(torch.load(args.vgg)) + vgg = nn.Sequential(*list(vgg.children())[:31]) + vgg.eval() + vgg.to(device) + + style_tf = train_transform() + style_dataset = FlatFolderDataset(args.style_dir, style_tf) + style_iter = iter(data.DataLoader( + style_dataset, batch_size=args.batch_size, + sampler=InfiniteSamplerWrapper(style_dataset), + num_workers=args.n_threads)) + + vae = VAE(data_dim=1024, latent_dim=args.vae_latent, W=args.vae_w, D=args.vae_d, kl_lambda=args.vae_kl_lambda) + vae.train() + vae.to(device) + vae_ckpt = './pretrained/vae.pth' + if os.path.exists(vae_ckpt): + vae.load_state_dict(torch.load(vae_ckpt)) + optimizer = torch.optim.Adam(vae.parameters(), lr=args.lr) + + for i in tqdm(range(args.max_iter)): + adjust_learning_rate(optimizer, iteration_count=i) + style_images = next(style_iter).to(device) + style_features = vgg(style_images) + style_mean, style_std = calc_mean_std(style_features) + style_features = torch.cat([style_mean.squeeze(), style_std.squeeze()], dim=-1) + recon, _, mu, logvar = vae(style_features) + loss, recon_loss, kl_loss = vae.loss(style_features, recon, mu, logvar, return_losses=True) + + optimizer.zero_grad() + loss.backward() + optimizer.step() + + writer.add_scalar('Reconstruction Loss', recon_loss.item(), i + 1) + writer.add_scalar('KL Loss', kl_loss.item(), i + 1) + + if (i + 1) % 100 == 0: + print("Loss: %.3f | Recon Loss: %.3f| KL Loss: %.3f" % (loss.item(), recon_loss.item(), kl_loss.item())) + + if (i + 1) % args.save_model_interval == 0 or (i + 1) == args.max_iter: + state_dict = vae.state_dict() + for key in state_dict.keys(): + state_dict[key] = state_dict[key].to(torch.device('cpu')) + torch.save(state_dict, vae_ckpt) + writer.close() + + +def train_temporal_invoke(save_dir, sv_name, log_dir, is_ndc, nerf_content_dir, 
style_dir, batch_size, n_threads=8, lr=1e-3, max_iter=1000): + if is_ndc: + print("Using NDC Coordinate System! Check Nerf and dataset to be LLFF !!!!!!!") + temporal_weight, content_weight, style_weight = 50., 1.0, 1. + else: + temporal_weight, content_weight, style_weight = 50., 1.0, 1. + + print_interval = 20 + save_model_interval = 200 + + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + save_dir = Path(save_dir) + save_dir.mkdir(exist_ok=True, parents=True) + log_dir = Path(log_dir) + log_dir.mkdir(exist_ok=True, parents=True) + writer = SummaryWriter(log_dir=str(log_dir)) + save_dir, log_dir = str(save_dir), str(log_dir) + + decoder = VGGNet.decoder + vgg = VGGNet.vgg + + ckpts = [os.path.join(save_dir, f) for f in sorted(os.listdir(save_dir)) if sv_name in f] + if len(ckpts) > 0: + ld_dict = torch.load(ckpts[-1]) + decoder.load_state_dict(ld_dict['decoder']) + step = ld_dict['step'] + else: + print('From original pth file') + decoder.load_state_dict(torch.load('./pretrained/decoder.pth')) + shutil.copy('./pretrained/decoder.pth', save_dir + '/' + sv_name) + step = 0 + vgg.load_state_dict(torch.load('./pretrained/vgg_normalised.pth')) + vgg = nn.Sequential(*list(vgg.children())[:31]) + network = VGGNet.Net(vgg, decoder) + network.train() + network.to(device) + + style_tf = train_transform2() + + content_dataset = CoorImageDataset(nerf_content_dir) + style_dataset = FlatFolderDataset(style_dir, style_tf) + + # Camera for Rendering + h, w, focal = content_dataset.hwf + h, w = int(h), int(w) + cx, cy = w/2, h/2 + near_prj, far_prj = 1e-3, 1e5 + projectionMatrix = np.array([[-2*focal/w, 0, 1-2*cx/w, 0], + [0, 2*focal/h, 2*cy/h-1, 0], + [0, 0, -(far_prj+near_prj)/(far_prj-near_prj), -2*far_prj*near_prj/(far_prj-near_prj)], + [0, 0, -1, 0]]) + camera = Camera(projectionMatrix=projectionMatrix) + camera.to(device) + + content_iter = iter(data.DataLoader( + content_dataset, batch_size=batch_size, + sampler=InfiniteSamplerWrapper(content_dataset), + num_workers=n_threads)) + style_iter = iter(data.DataLoader( + style_dataset, batch_size=1, + sampler=InfiniteSamplerWrapper(style_dataset), + num_workers=n_threads)) + + optimizer = torch.optim.Adam(network.decoder.parameters(), lr=lr) + + space_dist_threshold = 5e-2 + + def adjust_learning_rate_local(optimizer, iteration_count): + """Imitating the original implementation""" + lr = 1e-4 / (1.0 + 5e-5 * iteration_count) + for param_group in optimizer.param_groups: + param_group['lr'] = lr + + for i in tqdm(range(step, max_iter)): + # Sampling Patch + patch_size = 512 + if patch_size > 0: + patch_h_min, patch_w_min = np.random.randint(0, h - patch_size), np.random.randint(0, w - patch_size) + patch_h_max, patch_w_max = patch_h_min + patch_size, patch_w_min + patch_size + else: + patch_h_min, patch_w_min = 0, 0 + patch_h_max, patch_w_max = h, w + + resample_layer = nn.Upsample(size=(int(patch_h_max - patch_h_min), int(patch_w_max - patch_w_min)), + mode='bilinear', align_corners=True) + + adjust_learning_rate_local(optimizer, iteration_count=i) + content_images, coor_maps, cps = next(content_iter) + content_images, coor_maps, cps = content_images[..., patch_h_min: patch_h_max, patch_w_min: patch_w_max].to(device),\ + coor_maps[:, patch_h_min: patch_h_max, patch_w_min: patch_w_max].to(device),\ + cps.to(device) + if is_ndc: + coor_maps = ndc2world(coor_maps, h, w, focal) + + # The same style image + style_images = next(style_iter).to(device) + style_images = style_images[:1].expand([batch_size, * style_images.shape[1:]]) + + 
loss_c, loss_s, stylized_content = network(content_images, style_images, return_stylized_content=True) + stylized_content = resample_layer(stylized_content) + + # Set camera pose + camera.set(cameraPose=cps) + pcl_coor_world0 = coor_maps[0].reshape([-1, 3]) + pcl_rgb0 = torch.movedim(stylized_content[0], 0, -1).reshape([-1, 3]) + + warped_stylized_content0, warped_coor_map0, warped_msks = camera.rasterize(pcl_coor_world0, pcl_rgb0, h=h, w=w) + warped_stylized_content0, warped_coor_map0, warped_msks = warped_stylized_content0[:, patch_h_min: patch_h_max, patch_w_min: patch_w_max],\ + warped_coor_map0[:, patch_h_min: patch_h_max, patch_w_min: patch_w_max],\ + warped_msks[:, patch_h_min: patch_h_max, patch_w_min: patch_w_max] + coor_dist_msk = (((warped_coor_map0 - coor_maps) ** 2).sum(-1, keepdim=True) < space_dist_threshold ** 2).float() + + loss_t = (((torch.movedim(stylized_content, 1, -1) - warped_stylized_content0) ** 2) * warped_msks * coor_dist_msk).mean() + loss_t = temporal_weight * loss_t + + loss_c = content_weight * loss_c + loss_s = style_weight * loss_s + loss = loss_c + loss_s + loss_t + + optimizer.zero_grad() + loss.backward() + optimizer.step() + + writer.add_scalar('loss_content', loss_c.item(), i + 1) + writer.add_scalar('loss_style', loss_s.item(), i + 1) + writer.add_scalar('loss_temporal', loss_t.item(), i + 1) + + if (i + 1) % print_interval == 0: + print('Iter %d Content Loss: %.3f Style Loss: %.3f Temporal Loss: %.3f' % (i, loss_c.item(), loss_s.item(), loss_t.item())) + + if i == 0 or (i + 1) % save_model_interval == 0 or (i + 1) == max_iter: + state_dict = network.decoder.state_dict() + for key in state_dict.keys(): + state_dict[key] = state_dict[key].to(torch.device('cpu')) + sv_dict = {'decoder': state_dict, 'step': (i+1)} + torch.save(sv_dict, save_dir + '/' + sv_name) + + warped_stylized_content0 = torch.clamp(warped_stylized_content0, 0, 1).detach().cpu().numpy() + coor_dist_msk = np.broadcast_to(coor_dist_msk.detach().cpu().numpy(), [*coor_dist_msk.shape[:-1], 3]) + warped_msks = np.broadcast_to(warped_msks.detach().cpu().numpy(), [*warped_msks.shape[:-1], 3]) + stylized_content = torch.movedim(torch.clamp(stylized_content, 0., 1.), 1, -1).detach().cpu().numpy() + for i in range(warped_stylized_content0.shape[0]): + Image.fromarray(np.uint8(255 * warped_stylized_content0[i])).save(log_dir + '/warped_stylized_content_%03d.png' % i) + Image.fromarray(np.uint8(255 * stylized_content[i])).save(log_dir + '/stylized_content_%03d.png' % i) + Image.fromarray(np.uint8(255 * coor_dist_msk[i])).save(log_dir + '/coor_dist_msk_%03d.png' % i) + Image.fromarray(np.uint8(255 * warped_msks[i])).save(log_dir + '/warped_mask_%03d.png' % i) + Image.fromarray(np.uint8(255*torch.movedim(style_images[0], 0, -1).detach().cpu().numpy())).save(log_dir + '/style_image.png') + writer.close() + + +def train_temporal_invoke_pl(save_dir, sv_name, log_dir, nerf_content_dir, style_dir, batch_size, n_threads=8, lr=1e-3, max_iter=5000): + + temporal_weight, content_weight, style_weight = 100., 1.0, 1. 
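+    # Descriptive note: as in `train_temporal_invoke` above, the total loss optimized below is
+    # content_weight * loss_c + style_weight * loss_s + temporal_weight * loss_t, so the
+    # reprojection-based temporal consistency term is weighted far more heavily than the
+    # AdaIN content and style terms.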
+ print_interval = 20 + save_model_interval = 200 + + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + save_dir = Path(save_dir) + save_dir.mkdir(exist_ok=True, parents=True) + log_dir = Path(log_dir) + log_dir.mkdir(exist_ok=True, parents=True) + writer = SummaryWriter(log_dir=str(log_dir)) + save_dir, log_dir = str(save_dir), str(log_dir) + + decoder = VGGNet.decoder + vgg = VGGNet.vgg + + ckpts = [os.path.join(save_dir, f) for f in sorted(os.listdir(save_dir)) if sv_name in f] + if len(ckpts) > 0: + ld_dict = torch.load(ckpts[-1]) + decoder.load_state_dict(ld_dict['decoder']) + step = ld_dict['step'] + else: + print('From original pth file') + decoder.load_state_dict(torch.load('./pretrained/decoder.pth')) + shutil.copy('./pretrained/decoder.pth', save_dir + '/' + sv_name) + step = 0 + vgg.load_state_dict(torch.load('./pretrained/vgg_normalised.pth')) + vgg = nn.Sequential(*list(vgg.children())[:31]) + network = VGGNet.Net(vgg, decoder) + network.train() + network.to(device) + + style_tf = train_transform2() + + content_dataset = CoorImageDataset_pl(nerf_content_dir) + style_dataset = FlatFolderDataset(style_dir, style_tf) + + # Camera for Rendering + h, w, focal = content_dataset.hwf + h, w = int(h), int(w) + cx, cy = w/2, h/2 + near_prj, far_prj = 1e-3, 1e5 + projectionMatrix = np.array([[-2*focal/w, 0, 1-2*cx/w, 0], + [0, 2*focal/h, 2*cy/h-1, 0], + [0, 0, -(far_prj+near_prj)/(far_prj-near_prj), -2*far_prj*near_prj/(far_prj-near_prj)], + [0, 0, -1, 0]]) + camera = Camera(projectionMatrix=projectionMatrix) + camera.to(device) + + style_iter = iter(data.DataLoader( + style_dataset, batch_size=1, + sampler=InfiniteSamplerWrapper(style_dataset), + num_workers=n_threads)) + + optimizer = torch.optim.Adam(network.decoder.parameters(), lr=lr) + + space_dist_threshold = 5e-2 + + def adjust_learning_rate_local(optimizer, iteration_count): + """Imitating the original implementation""" + lr = 1e-4 / (1.0 + 5e-5 * iteration_count) + for param_group in optimizer.param_groups: + param_group['lr'] = lr + + for i in tqdm(range(step, max_iter)): + # Sampling Patch + patch_size = 512 + if patch_size > 0: + patch_h_min, patch_w_min = np.random.randint(0, h - patch_size), np.random.randint(0, w - patch_size) + patch_h_max, patch_w_max = patch_h_min + patch_size, patch_w_min + patch_size + else: + patch_h_min, patch_w_min = 0, 0 + patch_h_max, patch_w_max = h, w + + resample_layer = nn.Upsample(size=(int(patch_h_max - patch_h_min), int(patch_w_max - patch_w_min)), + mode='bilinear', align_corners=True) + + adjust_learning_rate_local(optimizer, iteration_count=i) + content_images, coor_maps, cps = content_dataset.get_batch(batch_size=batch_size) + content_images, coor_maps, cps = content_images[..., patch_h_min: patch_h_max, patch_w_min: patch_w_max].to(device),\ + coor_maps[:, patch_h_min: patch_h_max, patch_w_min: patch_w_max].to(device),\ + cps.to(device) + # The same style image + style_images = next(style_iter).to(device) + style_images = style_images[:1].expand([batch_size, * style_images.shape[1:]]) + + loss_c, loss_s, stylized_content = network(content_images, style_images, return_stylized_content=True) + stylized_content = resample_layer(stylized_content) + + # Set camera pose + camera.set(cameraPose=cps) + pcl_coor_world0 = coor_maps[0].reshape([-1, 3]) + pcl_rgb0 = torch.movedim(stylized_content[0], 0, -1).reshape([-1, 3]) + + warped_stylized_content0, warped_coor_map0, warped_msks = camera.rasterize(pcl_coor_world0, pcl_rgb0, h=h, w=w) + warped_stylized_content0, 
warped_coor_map0, warped_msks = warped_stylized_content0[:, patch_h_min: patch_h_max, patch_w_min: patch_w_max],\ + warped_coor_map0[:, patch_h_min: patch_h_max, patch_w_min: patch_w_max],\ + warped_msks[:, patch_h_min: patch_h_max, patch_w_min: patch_w_max] + coor_dist_msk = (((warped_coor_map0 - coor_maps) ** 2).sum(-1, keepdim=True) < space_dist_threshold ** 2).float() + + loss_t = (((torch.movedim(stylized_content, 1, -1) - warped_stylized_content0) ** 2) * warped_msks * coor_dist_msk).mean() + loss_t = temporal_weight * loss_t + + loss_c = content_weight * loss_c + loss_s = style_weight * loss_s + loss = loss_c + loss_s + loss_t + + optimizer.zero_grad() + loss.backward() + optimizer.step() + + writer.add_scalar('loss_content', loss_c.item(), i + 1) + writer.add_scalar('loss_style', loss_s.item(), i + 1) + writer.add_scalar('loss_temporal', loss_t.item(), i + 1) + + if (i + 1) % print_interval == 0: + print('Iter %d Content Loss: %.3f Style Loss: %.3f Temporal Loss: %.3f' % (i, loss_c.item(), loss_s.item(), loss_t.item())) + + if i == 0 or (i + 1) % save_model_interval == 0 or (i + 1) == max_iter: + state_dict = network.decoder.state_dict() + for key in state_dict.keys(): + state_dict[key] = state_dict[key].to(torch.device('cpu')) + sv_dict = {'decoder': state_dict, 'step': (i+1)} + torch.save(sv_dict, save_dir + '/' + sv_name) + + warped_stylized_content0 = torch.clamp(warped_stylized_content0, 0, 1).detach().cpu().numpy() + coor_dist_msk = np.broadcast_to(coor_dist_msk.detach().cpu().numpy(), [*coor_dist_msk.shape[:-1], 3]) + warped_msks = np.broadcast_to(warped_msks.detach().cpu().numpy(), [*warped_msks.shape[:-1], 3]) + stylized_content = torch.movedim(torch.clamp(stylized_content, 0., 1.), 1, -1).detach().cpu().numpy() + for i in range(warped_stylized_content0.shape[0]): + Image.fromarray(np.uint8(255 * warped_stylized_content0[i])).save(log_dir + '/warped_stylized_content_%03d.png' % i) + Image.fromarray(np.uint8(255 * stylized_content[i])).save(log_dir + '/stylized_content_%03d.png' % i) + Image.fromarray(np.uint8(255 * coor_dist_msk[i])).save(log_dir + '/coor_dist_msk_%03d.png' % i) + Image.fromarray(np.uint8(255 * warped_msks[i])).save(log_dir + '/warped_mask_%03d.png' % i) + Image.fromarray(np.uint8(255*torch.movedim(style_images[0], 0, -1).detach().cpu().numpy())).save(log_dir + '/style_image.png') + + writer.close() + + +def write_ply_rgb(points, RGB, filename): + """ Color (N,3) points with labels (N) within range 0 ~ num_classes-1 as PLY file """ + N = points.shape[0] + vertex = [] + for i in range(N): + vertex.append((points[i, 0], points[i, 1], points[i, 2], RGB[i][0], RGB[i][1], RGB[i][2])) + vertex = np.array(vertex, + dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]) + + el = PlyElement.describe(vertex, 'vertex', comments=['vertices']) + PlyData([el], text=True).write(filename) + + +def ndc2world(coor_ndc, h, w, focal): + z = 2 / (coor_ndc[..., -1] - 1) + x = - w / 2 / focal * z * coor_ndc[..., 0] + y = - h / 2 / focal * z * coor_ndc[..., 1] + coor_world = torch.stack([x, y, z], dim=-1) + return coor_world + + +def train_temporal_decoder(args): + if not args.no_ndc: + print("Using NDC Coordinate System! 
Check Nerf and dataset to be LLFF !!!!!!!") + + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + save_dir = Path(args.save_dir) + save_dir.mkdir(exist_ok=True, parents=True) + log_dir = Path(args.log_dir) + log_dir.mkdir(exist_ok=True, parents=True) + writer = SummaryWriter(log_dir=str(log_dir)) + + decoder = VGGNet.decoder + vgg = VGGNet.vgg + + ckpts = [os.path.join(save_dir, f) for f in sorted(os.listdir(save_dir)) if 'decoder_iter_' in f] + if len(ckpts) > 0 and not args.no_reload: + ld_dict = torch.load(ckpts[-1]) + decoder.load_state_dict(ld_dict['decoder']) + step = ld_dict['step'] + else: + print('From original pth file') + decoder.load_state_dict(torch.load('./pretrained/decoder.pth')) + step = 0 + vgg.load_state_dict(torch.load('./pretrained/vgg_normalised.pth')) + + vgg.load_state_dict(torch.load(args.vgg)) + vgg = nn.Sequential(*list(vgg.children())[:31]) + network = VGGNet.Net(vgg, decoder) + network.train() + network.to(device) + + style_tf = train_transform2() + + content_dataset = CoorImageDataset(args.nerf_content_dir) + style_dataset = FlatFolderDataset(args.style_dir, style_tf) + + # Camera for Rendering + h, w, focal = content_dataset.hwf + h, w = int(h), int(w) + cx, cy = w/2, h/2 + near_prj, far_prj = 1e-3, 1e5 + projectionMatrix = np.array([[-2*focal/w, 0, 1-2*cx/w, 0], + [0, 2*focal/h, 2*cy/h-1, 0], + [0, 0, -(far_prj+near_prj)/(far_prj-near_prj), -2*far_prj*near_prj/(far_prj-near_prj)], + [0, 0, -1, 0]]) + camera = Camera(projectionMatrix=projectionMatrix) + camera.to(device) + + content_iter = iter(data.DataLoader( + content_dataset, batch_size=args.batch_size, + sampler=InfiniteSamplerWrapper(content_dataset), + num_workers=args.n_threads)) + style_iter = iter(data.DataLoader( + style_dataset, batch_size=1, + sampler=InfiniteSamplerWrapper(style_dataset), + num_workers=args.n_threads)) + + # Sampling Patch + patch_size = 512 + if patch_size > 0: + patch_h_min, patch_w_min = np.random.randint(0, h-patch_size), np.random.randint(0, w-patch_size) + patch_h_max, patch_w_max = patch_h_min + patch_size, patch_w_min + patch_size + else: + patch_h_min, patch_w_min = 0, 0 + patch_h_max, patch_w_max = h, w + + resample_layer = nn.Upsample(size=(int(patch_h_max - patch_h_min), int(patch_w_max - patch_w_min)), mode='bilinear', align_corners=True) + optimizer = torch.optim.Adam(network.decoder.parameters(), lr=args.lr) + + space_dist_threshold = 5e-2 + + for i in tqdm(range(step, args.max_iter)): + + adjust_learning_rate(optimizer, iteration_count=i) + content_images, coor_maps, cps = next(content_iter) + content_images, coor_maps, cps = content_images[..., patch_h_min: patch_h_max, patch_w_min: patch_w_max].to(device),\ + coor_maps[:, patch_h_min: patch_h_max, patch_w_min: patch_w_max].to(device),\ + cps.to(device) + if not args.no_ndc: + coor_maps = ndc2world(coor_maps, h, w, focal) + + # The same style image + style_images = next(style_iter).to(device) + style_images = style_images[:1].expand([args.batch_size, * style_images.shape[1:]]) + + loss_c, loss_s, stylized_content = network(content_images, style_images, return_stylized_content=True) + stylized_content = resample_layer(stylized_content) + + # Set camera pose + camera.set(cameraPose=cps) + pcl_coor_world0 = coor_maps[0].reshape([-1, 3]) + pcl_rgb0 = torch.movedim(stylized_content[0], 0, -1).reshape([-1, 3]) + + warped_stylized_content0, warped_coor_map0, warped_msks = camera.rasterize(pcl_coor_world0, pcl_rgb0, h=h, w=w) + warped_stylized_content0, warped_coor_map0, warped_msks = 
warped_stylized_content0[:, patch_h_min: patch_h_max, patch_w_min: patch_w_max],\ + warped_coor_map0[:, patch_h_min: patch_h_max, patch_w_min: patch_w_max],\ + warped_msks[:, patch_h_min: patch_h_max, patch_w_min: patch_w_max] + coor_dist_msk = (((warped_coor_map0 - coor_maps) ** 2).sum(-1, keepdim=True) < space_dist_threshold ** 2).float() + + loss_t = (((torch.movedim(stylized_content, 1, -1) - warped_stylized_content0) ** 2) * warped_msks * coor_dist_msk).mean() + loss_t = args.temporal_weight * loss_t + + loss_c = args.content_weight * loss_c + loss_s = args.style_weight * loss_s + loss = loss_c + loss_s + loss_t + + optimizer.zero_grad() + loss.backward() + optimizer.step() + + writer.add_scalar('loss_content', loss_c.item(), i + 1) + writer.add_scalar('loss_style', loss_s.item(), i + 1) + writer.add_scalar('loss_temporal', loss_t.item(), i + 1) + + if (i + 1) % args.print_interval == 0: + print('Iter %d Content Loss: %.3f Style Loss: %.3f Temporal Loss: %.3f' % (i, loss_c.item(), loss_s.item(), loss_t.item())) + + if i == 0 or (i + 1) % args.save_model_interval == 0 or (i + 1) == args.max_iter: + state_dict = network.decoder.state_dict() + for key in state_dict.keys(): + state_dict[key] = state_dict[key].to(torch.device('cpu')) + sv_dict = {'decoder': state_dict, 'step': (i+1)} + torch.save(sv_dict, save_dir / + 'decoder_iter_{:d}.pth.tar'.format(i + 1)) + # Delete ckpts + ckpts = [os.path.join(save_dir, f) for f in sorted(os.listdir(save_dir)) if 'decoder_iter_' in f] + if len(ckpts) > args.ckp_num: + os.remove(ckpts[0]) + + warped_stylized_content0 = torch.clamp(warped_stylized_content0, 0, 1).detach().cpu().numpy() + coor_dist_msk = np.broadcast_to(coor_dist_msk.detach().cpu().numpy(), [*coor_dist_msk.shape[:-1], 3]) + warped_msks = np.broadcast_to(warped_msks.detach().cpu().numpy(), [*warped_msks.shape[:-1], 3]) + stylized_content = torch.movedim(torch.clamp(stylized_content, 0., 1.), 1, -1).detach().cpu().numpy() + for i in range(warped_stylized_content0.shape[0]): + Image.fromarray(np.uint8(255 * warped_stylized_content0[i])).save(args.log_dir + '/warped_stylized_content_%03d.png' % i) + Image.fromarray(np.uint8(255 * stylized_content[i])).save(args.log_dir + '/stylized_content_%03d.png' % i) + Image.fromarray(np.uint8(255 * coor_dist_msk[i])).save(args.log_dir + '/coor_dist_msk_%03d.png' % i) + Image.fromarray(np.uint8(255 * warped_msks[i])).save(args.log_dir + '/warped_mask_%03d.png' % i) + Image.fromarray(np.uint8(255*torch.movedim(style_images[0], 0, -1).detach().cpu().numpy())).save(args.log_dir + '/style_image.png') + + writer.close() + + +if __name__ == '__main__': + + parser = argparse.ArgumentParser() + parser.add_argument('--task', type=str, default='vae', + help='vae or finetune_decoder') + # Basic options + parser.add_argument('--content_dir', type=str, default='./all_contents/', + help='Directory path to a batch of content images') + parser.add_argument('--nerf_content_dir', type=str, default='./nerf_gen_data2/', + help='Directory path to a batch of content images') + parser.add_argument('--style_dir', type=str, default='./all_styles/', + help='Directory path to a batch of style images') + parser.add_argument('--vgg', type=str, default='./pretrained/vgg_normalised.pth') + + parser.add_argument('--no_ndc', action='store_true') + parser.add_argument('--no_reload', action='store_true') + + # training options + parser.add_argument('--save_dir', default='./pretrained/', + help='Directory to save the model') + parser.add_argument('--ckp_num', type=int, default=3) + 
parser.add_argument('--log_dir', default='./logs/stylenet/', + help='Directory to save the log') + parser.add_argument('--lr', type=float, default=1e-4) + parser.add_argument('--lr_decay', type=float, default=5e-5) + parser.add_argument('--max_iter', type=int, default=160000) + parser.add_argument('--batch_size', type=int, default=8) + parser.add_argument('--style_weight', type=float, default=2.) + parser.add_argument('--content_weight', type=float, default=1.0) + parser.add_argument('--temporal_weight', type=float, default=50.) + parser.add_argument('--n_threads', type=int, default=16) + parser.add_argument('--save_model_interval', type=int, default=200) + parser.add_argument('--print_interval', type=int, default=20) + + # train vae options + parser.add_argument('--vae_d', type=int, default=4) + parser.add_argument('--vae_w', type=int, default=512) + parser.add_argument('--vae_latent', type=int, default=32) + parser.add_argument('--vae_kl_lambda', type=float, default=0.1) + args = parser.parse_args() + + if args.task == 'finetune_decoder': + finetune_decoder(args) + elif args.task == 'vae': + train_vae(args) + else: + train_temporal_decoder() diff --git a/contrib/StylizedNeRF/utils.py b/contrib/StylizedNeRF/utils.py new file mode 100644 index 00000000..00d95653 --- /dev/null +++ b/contrib/StylizedNeRF/utils.py @@ -0,0 +1,620 @@ +import os +import cv2 +import glob +import json +import torch +import pickle +import imageio +import plyfile +import pyrender +import numpy as np +from tqdm import tqdm +from PIL import Image +import matplotlib as mpl +# mpl.use('Agg') +from skimage import feature +from natsort import natsorted +import matplotlib.pyplot as plt +from plyfile import PlyElement, PlyData +from torch.utils.data import Dataset, DataLoader + +import jittor as jt +from jittor import nn, Module + + +def json_read_rgbd(DepthImg_path, RgbImg_path, factor=1.): + with open(DepthImg_path, 'r') as file: + depth = np.array(json.load(file)) + rgb = Image.open(RgbImg_path).convert('RGB') + w, h = rgb.size + rgb = rgb.resize((int(w / factor), int(h / factor))) + depth = cv2.resize(depth, (rgb.size[0], rgb.size[1])) + rgb, depth = np.array(rgb, np.float32), np.array(depth, np.float32) + return depth, rgb + + +def read_rgbd(DepthImg_path, RgbImg_path): + depth_img = np.array(Image.open(DepthImg_path), np.float32) + rgb_image = Image.open(RgbImg_path).convert('RGB') + rgb_image = rgb_image.resize((depth_img.shape[1], depth_img.shape[0])) + rgb_image = np.array(rgb_image, np.float32) + return depth_img, rgb_image + + +def json_save_depth(path, depth): + h, w = depth.shape[0], depth.shape[1] + depth_list = [] + for i in range(h): + depth_list.append(depth[i].reshape([-1]).tolist()) + with open(path, 'w') as file: + json.dump(depth_list, file) + + +def write_obj(v, path, f=None): + v = np.array(v) + if v.shape[-1] == 3: + str_v = [f"v {vv[0]} {vv[1]} {vv[2]}\n" for vv in v] + else: + str_v = [f"v {vv[0]} {vv[1]} {vv[2]} {vv[3]} {vv[4]} {vv[5]}\n" for vv in v] + if f is not None: + str_f = [f"f {ff[0]} {ff[1]} {ff[2]}\n" for ff in f] + else: + str_f = [] + + with open(path, 'w') as meshfile: + meshfile.write(f'{"".join(str_v)}{"".join(str_f)}') + + +def write_ply_rgb(points, RGB, filename): + """ Color (N,3) points with labels (N) within range 0 ~ num_classes-1 as PLY file """ + N = points.shape[0] + vertex = [] + for i in range(N): + vertex.append((points[i, 0], points[i, 1], points[i, 2], RGB[i][0], RGB[i][1], RGB[i][2])) + vertex = np.array(vertex, + dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 
'u1'), ('green', 'u1'), ('blue', 'u1')]) + + el = PlyElement.describe(vertex, 'vertex', comments=['vertices']) + PlyData([el], text=True).write(filename) + + +def read_ply(path): + data = PlyData.read(path) + coor = np.stack([data['vertex']['x'], data['vertex']['y'], data['vertex']['z']], axis=-1) + return coor + + +def read_frame_pose(path): + """Read frame information from json file""" + """ + Input: + path: json path of frame. i.e. 'frame_00000.json' + Output: + projectionMatrix: (4*4 ndarray) matrix of projection matrix for clipping + intrinsic: (3*3 ndarray) intrinsic matrix of camera + cameraPose: (4*4 ndarray) matrix of camera pose + time: (float) time of frame + index: (int) index of frame + """ + with open(path, 'r') as file: + data = json.load(file) + projectionMatrix = np.reshape(data['projectionMatrix'], [4, 4]) + intrinsic = np.reshape(data['intrinsics'], [3, 3]) + cameraPose = np.reshape(data['cameraPoseARFrame'], [4, 4]) + time = float(data['time']) + index = int(data['frame_index']) + return projectionMatrix, intrinsic, cameraPose, time, index + + +def json_read_camera_parameters2(path, printout=False): + with open(path, 'r') as file: + data = json.load(file) + timeStamp = data['timeStamp'] + cameraEulerAngle = data['cameraEulerAngle'] + imageResolution = data['imageResolution'] + cameraTransform = np.reshape(data['cameraTransform'], [4, 4]) + cameraPos = data['cameraPos'] + cameraIntrinsics = np.reshape(data['cameraIntrinsics'], [3, 3]) + cameraView = np.reshape(data['cameraView'], [4, 4]) + cameraProjection = np.reshape(data['cameraProjection'], [4, 4]) + + if printout: + parameters = [timeStamp, cameraEulerAngle, imageResolution, cameraTransform, cameraPos, cameraIntrinsics, cameraView, cameraProjection] + names = ['timeStamp', 'cameraEulerAngle', 'imageResolution', 'cameraTransform', 'cameraPos', 'cameraIntrinsics', 'cameraView', 'cameraProjection'] + for i in range(len(parameters)): + print('******************************************************************************************') + print(names[i]) + print(parameters[i]) + print('******************************************************************************************') + + return timeStamp, cameraEulerAngle, imageResolution, cameraTransform, cameraPos, cameraIntrinsics, cameraView, cameraProjection + + +def json_read_camera_parameters(path, printout=False): + with open(path, 'r') as file: + data = json.load(file) + timeStamp = [] + cameraEulerAngle = [] + imageResolution = [] + cameraTransform = np.reshape(data['cameraTransform'], [4, 4]) + cameraPos = [] + cameraIntrinsics = np.reshape(data['cameraIntrinsics'], [3, 3]) + cameraView = [] + cameraProjection = [] + + if printout: + parameters = [timeStamp, cameraEulerAngle, imageResolution, cameraTransform, cameraPos, cameraIntrinsics, cameraView, cameraProjection] + names = ['timeStamp', 'cameraEulerAngle', 'imageResolution', 'cameraTransform', 'cameraPos', 'cameraIntrinsics', 'cameraView', 'cameraProjection'] + for i in range(len(parameters)): + print('******************************************************************************************') + print(names[i]) + print(parameters[i]) + print('******************************************************************************************') + + return timeStamp, cameraEulerAngle, imageResolution, cameraTransform, cameraPos, cameraIntrinsics, cameraView, cameraProjection + + +def json_save_camera_parameters(path, cp, intr): + timeStamp = [] + cameraEulerAngle = [] + imageResolution = [] + cameraTransform = np.reshape(cp, 
[-1]).tolist() + cameraPos = [] + cameraIntrinsics = np.reshape(intr, [-1]).tolist() + cameraView = [] + cameraProjection = [] + + parameters = [timeStamp, cameraEulerAngle, imageResolution, cameraTransform, cameraPos, cameraIntrinsics, cameraView, cameraProjection] + names = ['timeStamp', 'cameraEulerAngle', 'imageResolution', 'cameraTransform', 'cameraPos', 'cameraIntrinsics', 'cameraView', 'cameraProjection'] + save_dict = {} + for i in range(len(parameters)): + save_dict[names[i]] = parameters[i] + with open(path, 'w') as file: + json.dump(save_dict, file) + + +def write_ply(v, path): + header = f"ply\nformat ascii 1.0\nelement vertex {len(v)}\nproperty double x\nproperty double y\nproperty double z\nend_header\n" + str_v = [f"{vv[0]} {vv[1]} {vv[2]}\n" for vv in v] + + with open(path, 'w') as meshfile: + meshfile.write(f'{header}{"".join(str_v)}') + + +def load_ply(path): + data = plyfile.PlyData.read(path) + pcls = np.array([data['vertex']['x'], data['vertex']['y'], data['vertex']['z']], np.float32).T + rgbs = np.array([data['vertex']['red'], data['vertex']['green'], data['vertex']['blue']], np.float32).T + return pcls, rgbs + + +def save_makedir(path): + if not os.path.exists(path): + os.makedirs(path) + + +def view_synthesis(cps, factor=10): + frame_num = cps.shape[0] + cps = np.array(cps) + from scipy.spatial.transform import Slerp + from scipy.spatial.transform import Rotation as R + from scipy import interpolate as intp + rots = R.from_matrix(cps[:, :3, :3]) + slerp = Slerp(np.arange(frame_num), rots) + tran = cps[:, :3, -1] + f_tran = intp.interp1d(np.arange(frame_num), tran.T) + + new_num = int(frame_num * factor) + + new_rots = slerp(np.linspace(0, frame_num - 1, new_num)).as_matrix() + new_trans = f_tran(np.linspace(0, frame_num - 1, new_num)).T + + new_cps = np.zeros([new_num, 4, 4], np.float) + new_cps[:, :3, :3] = new_rots + new_cps[:, :3, -1] = new_trans + new_cps[:, 3, 3] = 1 + return new_cps + + +def normalize_cps(cps): + cps = np.array(cps, dtype=np.float32) + avg_center = min_line_dist_center(cps[:, :3, 3], cps[:, :3, 2]) + cps[:, :3, 3] -= avg_center + dists = np.linalg.norm(cps[:, :3, 3], axis=-1) + radius = 1.1 * np.max(dists) + 1e-5 + # Corresponding parameters change + cps[:, :3, 3] /= radius + return cps, radius + + +def min_line_dist_center(rays_o, rays_d): + if len(np.shape(rays_d)) == 2: + rays_o = rays_o[..., np.newaxis] + rays_d = rays_d[..., np.newaxis] + A_i = np.eye(3) - rays_d * np.transpose(rays_d, [0, 2, 1]) + b_i = -A_i @ rays_o + pt_mindist = np.squeeze(-np.linalg.inv((A_i @ A_i).mean(0)) @ (b_i).mean(0)) + return pt_mindist + + +def save_obj(path, obj): + file = open(path, 'wb') + obj_str = pickle.dumps(obj) + file.write(obj_str) + file.close() + + +def load_obj(path): + file = open(path, 'rb') + obj = pickle.loads(file.read()) + file.close() + return obj + + +class plot_chart: + def __init__(self, name='image', path='./plotting/', x_label='iter', y_label='Loss', max_len=100000): + self.name = name + self.path = path + self.x_label = x_label + self.y_label = y_label + self.max_len = max_len + self.ys, self.xs = None, None + self.path = './chart' + + def draw(self, y, x): + self.ys = np.array([y]) if self.ys is None else np.concatenate([self.ys, [y]]) + self.xs = np.array([x]) if self.xs is None else np.concatenate([self.xs, [x]]) + + self.check_len() + + plt.close('all') + plt.plot(self.xs, self.ys, "b.-") + plt.xlabel(self.x_label) + plt.ylabel(self.y_label) + + if not os.path.exists(self.path): + os.makedirs(self.path) + plt.savefig(self.path + 
"/" + self.name + ".png") + + self.save() + + def check_len(self): + if self.ys.shape[0] > self.max_len: + half_ids = np.arange(self.ys.shape[0]//2, self.ys.shape[0]) + self.ys = self.ys[half_ids] + self.xs = self.xs[half_ids] + + def save(self): + save_obj(self.path + '/chart_obj', self) + + +def get_rays_ios_np(H, W, focal, c2w, cx=None, cy=None): + if cx is None or cy is None: + cx, cy = W * .5, H * .5 + # else: + # print("Cx from %.03f to %.03f, Cy from %.03f to %.03f" % (H/2, cx, W/2, cy)) + i, j = np.meshgrid(np.arange(W, dtype=np.float32), np.arange(H, dtype=np.float32), indexing='xy') + dirs = np.stack([(i-cx)/focal, -(j-cy)/focal, -np.ones_like(i)], -1) + # Rotate ray directions from camera frame to the world frame + rays_d = np.sum(dirs[..., np.newaxis, :] * c2w[:3, :3], -1) # dot product, equals to: [c2w.dot(dir) for dir in dirs] + rays_d = rays_d / np.linalg.norm(rays_d, axis=-1, keepdims=True) + # Translate camera frame's origin to the world frame. It is the origin of all rays. + rays_o = np.broadcast_to(c2w[:3, -1], np.shape(rays_d)) + return rays_o, rays_d + + +def get_rays_from_id(hid, wid, focal, c2w, cx, cy): + dir = np.stack([(wid - cx) / focal, - (hid - cy) / focal, -np.ones_like(wid)], axis=-1) + ray_d = np.einsum('wc,c->w', c2w[:3, :3], dir) + ray_d = ray_d / np.linalg.norm(ray_d) + ray_o = c2w[:3, -1] + ray_o, ray_d = np.array(ray_o, dtype=np.float32), np.array(ray_d, dtype=np.float32) + return ray_o, ray_d + + +def dep2pcl(depth, intr, cp, pixel_alignment=True): + intr = intr.copy() + h, w = np.shape(depth)[:2] + if pixel_alignment: + u, v = np.meshgrid(np.arange(w, dtype=np.float32) - 0.5, np.arange(h, dtype=np.float32) - 0.5, indexing='xy') + else: + u, v = np.meshgrid(np.arange(w, dtype=np.float32), np.arange(h, dtype=np.float32), indexing='xy') + z = - depth + uvz = np.stack([u*z, v*z, z], axis=-1).reshape([-1, 3]) + # The z axis is toward the camera and y axis should be conversed + intr[0, 0] = - intr[0, 0] + intr_inverse = np.linalg.inv(intr) + xyz_camera = np.einsum('bu,cu->bc', uvz, intr_inverse) + xyz_camera = np.concatenate([xyz_camera, np.ones([xyz_camera.shape[0], 1])], axis=-1) + xyz_world = np.einsum('bc,wc->bw', xyz_camera, cp) + return xyz_world + + +def get_cos_map(h, w, cx, cy, f): + i, j = np.meshgrid(np.arange(w, dtype=np.float32), np.arange(h, dtype=np.float32), indexing='xy') + dirs = np.stack([(i-cx)/f, -(j-cy)/f, -np.ones_like(i)], -1) + cos = 1 / np.linalg.norm(dirs, axis=-1) + cos = np.array(cos, dtype=np.float32) + return cos + + +def pts2imgcoor(pts, intr): + intr = intr.copy() + intr[0, 0] *= -1 + imgcoor = np.einsum('bc,ic->bi', pts, intr) + imgcoor /= imgcoor[..., -1][..., np.newaxis] + return imgcoor + + +def alpha_composition(pts_rgb, pts_sigma, t_values, sigma_noise_std=0., white_bkgd=False): + """Transforms model's predictions to semantically meaningful values. + Args: + pts_rgb: [num_rays, num_samples along ray, 3]. Prediction from model. + pts_sigma: [num_rays, num_samples along ray]. Prediction from model. + t_values: [num_rays, num_samples along ray]. Integration time. + Returns: + rgb_exp: [num_rays, 3]. Estimated RGB color of a ray. + weights: [num_rays, num_samples]. Weights assigned to each sampled color. + t_exp: [num_rays]. Estimated distance to object. + """ + # sigma2alpha = lambda sigma, dists: 1.-jt.exp(-sigma * dists) + sigma2alpha = lambda raw, dists, act_fn=jt.nn.relu: 1. 
- jt.exp(-act_fn(raw) * dists) + + delta = t_values[..., 1:] - t_values[..., :-1] + delta = jt.concat([delta, jt.array([1e10]).expand(delta[..., :1].shape)], -1) # [N_rays, N_samples] + + noise = 0. + if sigma_noise_std > 0: + # noise = jt.random(pts_sigma.shape) * sigma_noise_std + noise = jt.init.gauss(pts_sigma.shape, pts_sigma.dtype) * sigma_noise_std + + alpha = sigma2alpha(jt.nn.relu(pts_sigma + noise), delta) # [N_rays, N_samples] + weights = alpha * jt.cumprod(jt.concat([jt.ones((alpha.shape[0], 1)), 1.-alpha + 1e-10], -1), -1)[:, :-1] + rgb_exp = jt.sum(weights[..., None] * pts_rgb, -2) # [N_rays, 3] + + t_exp = jt.sum(weights * t_values, -1) + acc_map = jt.sum(weights, -1) + if white_bkgd: + rgb_exp = rgb_exp + (1. - acc_map[..., None]) + + return rgb_exp, t_exp, weights + + +def alpha_composition_wild(pts_rgb, pts_sigma, t_values, pts_transient_rgb, pts_transient_sigma, pts_transient_beta, beta_min=0.03, sigma_noise_std=0., white_bkgd=False): + """Transforms model's predictions to semantically meaningful values. + Args: + pts_rgb: [num_rays, num_samples along ray, 3]. Prediction from model. + pts_sigma: [num_rays, num_samples along ray]. Prediction from model. + t_values: [num_rays, num_samples along ray]. Integration time. + Returns: + rgb_exp: [num_rays, 3]. Estimated RGB color of a ray. + weights: [num_rays, num_samples]. Weights assigned to each sampled color. + t_exp: [num_rays]. Estimated distance to object. + """ + sigma2alpha = lambda sigma, dists: 1.-torch.exp(-sigma * dists) + + delta = t_values[..., 1:] - t_values[..., :-1] + delta = torch.cat([delta, torch.Tensor([1e10]).expand(delta[..., :1].shape).to(pts_rgb.device)], -1) # [N_rays, N_samples] + + noise = 0. + if sigma_noise_std > 0: + noise = torch.randn(pts_sigma.shape, device=pts_sigma.device) * sigma_noise_std + + sigma_static = torch.relu(pts_sigma + noise) + alpha_static = sigma2alpha(sigma_static, delta) + + sigma_transient = torch.relu(pts_transient_sigma) + alpha_transient = sigma2alpha(sigma_transient, delta) + T_transient = torch.cumprod(torch.cat([torch.ones((alpha_transient.shape[0], 1), device=alpha_transient.device), 1. - alpha_transient + 1e-10], -1), -1)[:, :-1] + beta_exp = torch.sum(T_transient[..., None] * alpha_transient[..., None] * torch.relu(pts_transient_beta), -2) + beta_min + + sigma_both = sigma_static + sigma_transient + alpha_both = sigma2alpha(sigma_both, delta) # [N_rays, N_samples] + T_both = torch.cumprod(torch.cat([torch.ones((alpha_both.shape[0], 1), device=alpha_both.device), 1.-alpha_both + 1e-10], -1), -1)[:, :-1] + + rgb_exp = torch.sum(T_both[..., None] * alpha_static[..., None] * pts_rgb + T_both[..., None] * alpha_transient[..., None] * pts_transient_rgb, -2) # [N_rays, 3] + + weights = alpha_both * T_both + t_exp = torch.sum(weights * t_values, -1) + acc_map = torch.sum(weights, -1) + if white_bkgd: + rgb_exp = rgb_exp + (1.-acc_map[..., None]) + + return rgb_exp, t_exp, weights, beta_exp + + +def batchify(fn, chunk=1024*32): + """Render rays in smaller minibatches to avoid OOM. 
+ """ + if chunk is None: + return fn + + def ret_func(**kwargs): + x = kwargs[list(kwargs.keys())[0]] + all_ret = {} + for i in range(0, x.shape[0], chunk): + end = min(i + chunk, x.shape[0]) + chunk_kwargs = dict([[key, kwargs[key][i: end]] for key in kwargs.keys()]) + ret = fn(**chunk_kwargs) + for k in ret: + if k not in all_ret: + all_ret[k] = [] + all_ret[k].append(ret[k]) + + all_ret = {k: jt.concat(all_ret[k], 0) for k in all_ret} + return all_ret + + return ret_func + + +img2mse = lambda x, y: jt.mean((x - y) ** 2) +img2l1 = lambda x, y: (x - y).abs().mean() +mse2psnr = lambda x: -10. * jt.log(x) / jt.log(jt.array([10.])) +to8b = lambda x: np.array(x, dtype=np.uint8) + + +def get_rays(H, W, focal, cps, cx=None, cy=None, chunk=256): + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + H, W = int(H), int(W) + if cx is None or cy is None: + cx, cy = W * .5, H * .5 + j, i = torch.meshgrid(torch.arange(H, dtype=torch.float32), torch.arange(W, dtype=torch.float32)) + dirs = torch.stack([(i-cx)/focal, -(j-cy)/focal, -torch.ones_like(i)], -1).to(device) + cps_tensor = torch.from_numpy(cps).float().to(device) + start = 0 + rays_o_total, rays_d_total = np.zeros([cps.shape[0], H, W, 3], np.float32), np.zeros([cps.shape[0], H, W, 3], np.float32) + while start < cps.shape[0]: + print('\rProcess: %.3f%%' % (start / cps.shape[0] * 100), end='') + end = min(start + chunk, cps.shape[0]) + + rays_d = torch.einsum('hwc,nbc->nhwb', dirs, cps_tensor[start: end, :3, :3]) + rays_d = rays_d / torch.norm(rays_d, dim=-1, keepdim=True) + rays_d = rays_d.cpu().numpy() + rays_o = np.broadcast_to(cps[start: end, :3, -1][:, np.newaxis, np.newaxis], np.shape(rays_d)) + + rays_o_total[start: end] = rays_o + rays_d_total[start: end] = rays_d + start = end + print('\rProcess: 100.000%%') + + return rays_o_total, rays_d_total + + +def empty_loss(ts, sigma, t_gt): + """Empty Loss""" + """ + ts: [ray, N] + sigma: [ray, N] + t_gt: [ray] + """ + delta_ts = ts[:, 1:] - ts[:, :-1] # [ray, N-1] + sigma = torch.relu(sigma[:, :-1]) # [ray, N-1] + boarder_rate = 0.9 + sigma_sum = torch.sum(sigma * delta_ts * (ts[:, :-1] < (t_gt.unsqueeze(-1) * boarder_rate)).float(), dim=-1) + loss_empty = torch.mean(sigma_sum) + return loss_empty + + +def sampling_pts_uniform(rays_o, rays_d, N_samples=64, near=0., far=1.05, harmony=False, perturb=False): + # Intersect, ts_nf of shape [ray, box] and [ray, box, 2] + ray_num = rays_o.shape[0] + + # Uniform sampling ts of shape [ray, N_samples] + ts = jt.linspace(0, 1, N_samples).unsqueeze(0).expand(ray_num, N_samples) + if not harmony: + ts = ts * (far - near) + near + else: + ts = 1. / (1./near * (1 - ts) + 1./far * ts) + + if perturb: + # Add perturb + rand = jt.zeros([ray_num, N_samples]) + jt.init.uniform_(rand, 0, 1) + mid = (ts[..., 1:] + ts[..., :-1]) / 2 + upper = jt.concat([mid, ts[..., -1:]], -1) + lower = jt.concat([ts[..., :1], mid], -1) + ts = lower + (upper - lower) * rand + + # From ts to pts. 
[ray, N_samples, 3] + rays_o, rays_d = rays_o.unsqueeze(1).expand([ray_num, N_samples, 3]), rays_d.unsqueeze(1).expand([ray_num, N_samples, 3]) + ts_expand = ts.unsqueeze(-1).expand([ray_num, N_samples, 3]) + pts = rays_o + ts_expand * rays_d + + return pts, ts + + +def sampling_pts_fine(rays_o, rays_d, ts, weights, N_samples_fine=64): + + ray_num, N_samples = ts.shape + # ts of shape [ray, N_samples], ts_mid of shape [ray, N_samples - 1] + ts_mid = 0.5 * (ts[..., 1:] + ts[..., :-1]) + # pdf of shape [ray, N_samples - 2] + weights = weights[..., 1:-1] + 1e-3 + pdf = weights / jt.sum(weights, -1, keepdims=True) + # cdf of shape [ray, N_samples - 1] + cdf = jt.cumsum(pdf, -1) + cdf = jt.concat([jt.zeros_like(cdf[..., :1]), cdf], -1) + # random sampling of shape [ray, N_samples_fine] + u = jt.random(list(cdf.shape[:-1]) + [N_samples_fine]) * (1-1e-3) # Avoid sample 1 + # inds below of shape [ray, N_samples_fine] in range [0, N_samples - 3] + below = jt.searchsorted(cdf, u, right=True) + below = jt.maximum(jt.zeros_like(below - 1), below - 1) + below = jt.minimum((N_samples - 3) * jt.ones_like(below), below) + # Use below to gather cdf. [ray, N_samples_fine] + ray_Nfine_N_1 = [ray_num, N_samples_fine, N_samples - 1] + cdf_g = jt.gather(cdf.unsqueeze(1).expand(ray_Nfine_N_1), -1, below.unsqueeze(-1)).squeeze(-1) + # Interval t values of cdf (pdf). [ray, N_samples_fine] + ray_Nfine_N_2 = [ray_num, N_samples_fine, N_samples - 2] + pdf_g = jt.gather(pdf.unsqueeze(1).expand(ray_Nfine_N_2), -1, below.unsqueeze(-1)).squeeze(-1) + pdf_g = jt.ternary(pdf_g == 0, jt.ones_like(pdf_g), pdf_g) + # ts in each interval. [ray, N_samples_fine] + ts_interval = (u - cdf_g) / pdf_g + # Above index of shape [ray, N_samples_fine] in range(1, N_samples - 2) + above = jt.minimum((cdf.shape[-1] - 1) * jt.ones_like(below), below + 1) + # ts boarder of each interval. [ray, N_samples_fine] + ts_near = jt.gather(ts_mid.unsqueeze(1).expand([ray_num, N_samples_fine, N_samples-1]), -1, below.unsqueeze(-1)).squeeze(-1) + ts_far = jt.gather(ts_mid.unsqueeze(1).expand([ray_num, N_samples_fine, N_samples-1]), -1, above.unsqueeze(-1)).squeeze(-1) + # ts_fine of shape [ray, N_samples_fine] + ts_fine = ts_near + ts_interval * (ts_far - ts_near) + # Sort from near to far [ray, N_samples + N_samples_fine] + ts = jt.concat([ts, ts_fine], dim=-1) + _, ts = jt.argsort(ts, dim=-1) + # Avoid BP + ts = ts.detach() + + # From ts to pts. 
[ray, N_samples + N_samples_fine, 3] + rays_o, rays_d = rays_o.unsqueeze(1).expand([ray_num, N_samples + N_samples_fine, 3]), rays_d.unsqueeze(1).expand([ray_num, N_samples + N_samples_fine, 3]) + ts_expand = ts.unsqueeze(-1).expand([ray_num, N_samples + N_samples_fine, 3]) + pts = rays_o + ts_expand * rays_d + pts = pts.detach() + + return pts, ts + + +def sampling_pts_fine_jt(rays_o, rays_d, ts, weights, N_samples_fine=64): + + # ts of shape [ray, N_samples], ts_mid of shape [ray, N_samples - 1] + ts_mid = 0.5 * (ts[..., 1:] + ts[..., :-1]) + t_samples = sample_pdf(ts_mid, weights[..., 1:-1], N_samples_fine, det=True) + t_samples = t_samples.detach() + _, t_vals = jt.argsort(jt.concat([ts, t_samples], -1), -1) + pts = rays_o.unsqueeze(-2) + rays_d.unsqueeze(-2) * t_vals.unsqueeze(-1) # [N_rays, N_samples + N_importance, 3] + + # Avoid BP + t_vals = t_vals.detach() + + return pts, t_vals + + +def sample_pdf(bins, weights, N_samples, det=False): + # Get pdf + weights = weights + 1e-5 # prevent nans + pdf = weights / jt.sum(weights, -1, keepdims=True) + cdf = jt.cumsum(pdf, -1) + cdf = jt.concat([jt.zeros_like(cdf[...,:1]), cdf], -1) # (batch, len(bins)) + + # Take uniform samples + if det: + u = jt.linspace(0., 1., steps=N_samples) + u = u.expand(list(cdf.shape[:-1]) + [N_samples]) + else: + u = jt.random(list(cdf.shape[:-1]) + [N_samples]) + + # Invert CDF + inds = jt.searchsorted(cdf, u, right=True) + below = jt.maximum(jt.zeros_like(inds-1), inds-1) + above = jt.minimum((cdf.shape[-1]-1) * jt.ones_like(inds), inds) + inds_g = jt.stack([below, above], -1) # (batch, N_samples, 2) + + matched_shape = [inds_g.shape[0], inds_g.shape[1], cdf.shape[-1]] + cdf_g = jt.gather(cdf.unsqueeze(1).expand(matched_shape), 2, inds_g) + bins_g = jt.gather(bins.unsqueeze(1).expand(matched_shape), 2, inds_g) + + denom = (cdf_g[..., 1]-cdf_g[..., 0]) + cond = jt.where(denom < 1e-5) + denom[cond[0], cond[1]] = 1. + t = (u-cdf_g[..., 0]) / denom + samples = bins_g[..., 0] + t * (bins_g[..., 1]-bins_g[..., 0]) + + return samples +
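+
+
+if __name__ == '__main__':
+    # Illustrative smoke test (a sketch, not part of the original pipeline): hierarchical
+    # sampling with `sample_pdf` draws extra t-values where the coarse alpha-composition
+    # weights are large, mirroring the call pattern of `sampling_pts_fine_jt` above.
+    # The shapes below are assumptions chosen only for this example.
+    rays, n_coarse, n_fine = 4, 64, 32
+    ts = jt.linspace(0., 1., n_coarse).unsqueeze(0).expand(rays, n_coarse)   # coarse sample positions
+    ts_mid = 0.5 * (ts[..., 1:] + ts[..., :-1])                              # bin centers, [rays, n_coarse - 1]
+    weights = jt.random([rays, n_coarse])                                    # stand-in for coarse weights
+    t_fine = sample_pdf(ts_mid, weights[..., 1:-1], n_fine, det=True)        # [rays, n_fine]
+    print('fine sample shape:', t_fine.shape)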