-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathModel_loader_tester.py
117 lines (93 loc) · 4.97 KB
/
Model_loader_tester.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
# -*- coding: utf-8 -*-
"""notebook61ca3ea3e9.ipynb
Automatically generated by Colab.
Original file is located at
https://colab.research.google.com/drive/1eX-FZfhpUbGbv7aUHVqeALeJi-JRRqO-
"""
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
# Enumerate everything under the read-only Kaggle input mount so the
# expected dataset files can be confirmed present before any processing.
for dirpath, _, names in os.walk('/kaggle/input'):
    for fname in names:
        print(os.path.join(dirpath, fname))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import os
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import load_img, img_to_array
import matplotlib.pyplot as plt
from keras.losses import MeanSquaredError
# Function to load images from a directory
def load_images_from_directory(directory, image_size):
    """Load every JPEG/PNG in *directory* as a float array normalized to [0, 1].

    Parameters
    ----------
    directory : str
        Path to a folder containing image files.
    image_size : tuple[int, int]
        (height, width) every image is resized to on load.

    Returns
    -------
    np.ndarray
        Stacked images, shape (num_images, H, W, C), values in [0, 1].
    """
    image_list = []
    # sorted() makes the load order deterministic: the script later pairs
    # LR and HR images positionally via zip(), and os.listdir() alone returns
    # entries in arbitrary, filesystem-dependent order, which could silently
    # mismatch the pairs.
    for filename in sorted(os.listdir(directory)):
        # Case-insensitive check so upper-case extensions (.JPG/.PNG) are
        # not silently skipped.
        if filename.lower().endswith((".jpg", ".png")):
            img_path = os.path.join(directory, filename)
            img = load_img(img_path, target_size=image_size)  # resize on load
            image_list.append(img_to_array(img) / 255.0)      # scale to [0, 1]
    return np.array(image_list)
# Dataset locations (Kaggle input is read-only); the two folders hold the
# low-resolution inputs and their high-resolution ground-truth counterparts.
lr_directory = "/kaggle/input/image-super-resolution/dataset/Raw Data/low_res" # Replace with the path to your LR images directory
hr_directory = "/kaggle/input/image-super-resolution/dataset/Raw Data/high_res" # Replace with the path to your HR images directory
# Load LR and HR images, both resized to 256x256 on load.
# NOTE(review): the evaluation loop below pairs these two arrays positionally
# with zip(), which assumes both directories yield files in matching order —
# confirm the two folders contain identically named, aligned files.
lr_images = load_images_from_directory(lr_directory, (256, 256))
hr_images = load_images_from_directory(hr_directory, (256, 256))
# Function to divide an image into patches
def extract_patches(image, patch_size, stride):
    """Slice an (H, W, C) image into square patches of side *patch_size*.

    Patch top-left corners are placed every *stride* pixels, scanned in
    row-major order. Border strips too small for a full patch are dropped.

    Returns an array of shape (num_patches, patch_size, patch_size, C).
    """
    h, w, _ = image.shape
    row_starts = range(0, h - patch_size + 1, stride)
    col_starts = range(0, w - patch_size + 1, stride)
    return np.array([
        image[r:r + patch_size, c:c + patch_size, :]
        for r in row_starts
        for c in col_starts
    ])
# Function to reassemble patches back into the full image
def reconstruct_image(patches, image_shape, patch_size, stride):
    """Reassemble patches (row-major, as from extract_patches) into an image.

    Parameters
    ----------
    patches : np.ndarray
        (num_patches, patch_size, patch_size, C) patch stack.
    image_shape : tuple[int, int, int]
        Target (H, W, C) of the output image.
    patch_size, stride : int
        Must match the values used when the patches were extracted.

    Returns
    -------
    np.ndarray
        (H, W, C) image; overlapping patch regions are averaged, pixels no
        patch covered are left at 0.
    """
    h, w, c = image_shape
    reconstructed_image = np.zeros((h, w, c))
    patch_count = np.zeros((h, w, c))
    patch_idx = 0
    for i in range(0, h - patch_size + 1, stride):
        for j in range(0, w - patch_size + 1, stride):
            reconstructed_image[i:i + patch_size, j:j + patch_size, :] += patches[patch_idx]
            patch_count[i:i + patch_size, j:j + patch_size, :] += 1
            patch_idx += 1
    # Average overlapping contributions. When patch_size does not divide the
    # image size (e.g. 256x256 with 31x31 stride-31 patches leaves an 8-pixel
    # right/bottom strip uncovered) patch_count is 0 there; the original
    # unconditional division produced NaNs and a runtime warning. Divide only
    # where at least one patch contributed and leave uncovered pixels at 0.
    np.divide(reconstructed_image, patch_count,
              out=reconstructed_image, where=patch_count > 0)
    return reconstructed_image
# Load the pre-trained DRRN super-resolution model (trained on 31x31 patches)
# from the Kaggle model input mount.
# NOTE(review): 'mse' is mapped to MeanSquaredError via custom_objects —
# presumably the model was saved referencing the loss by its short string
# name, which this Keras version no longer resolves on load; confirm against
# the saving notebook.
model = load_model('/kaggle/input/image_superresolution_with_drrn/tensorflow2/default/1/drrn_super_resolution (1).h5',
custom_objects={'mse': MeanSquaredError()})
# Patch geometry used both to split the LR input and to reassemble the
# model's outputs; stride == patch_size means the patches tile the image
# with no overlap (the averaging in reconstruct_image then has no effect).
patch_size = 31
stride = patch_size # No overlap
# Evaluate the model on the first 10 LR/HR pairs: super-resolve each LR
# image patch-by-patch and display LR / SR / HR side by side.
for img_idx, (lr_image, hr_image) in enumerate(zip(lr_images[:10], hr_images[:10])):
    print(f"Processing image {img_idx + 1}/10")

    # Tile the 256x256 LR image into 31x31 patches, run the model on each
    # patch independently (batch of one), then stitch the outputs back into
    # a full-size image.
    lr_patches = extract_patches(lr_image, patch_size, stride)
    sr_patches = np.array(
        [model.predict(p[np.newaxis, ...])[0] for p in lr_patches]
    )
    sr_image = reconstruct_image(sr_patches, lr_image.shape, patch_size, stride)

    # Three-panel comparison; values are clipped to [0, 1] so imshow never
    # complains about out-of-range model outputs.
    panels = [
        (f"Low Resolution Image {img_idx + 1}", lr_image),
        (f"Super-Resolved Image {img_idx + 1}", sr_image),
        (f"High Resolution Image {img_idx + 1}", hr_image),
    ]
    plt.figure(figsize=(15, 5))
    for pos, (title, image) in enumerate(panels, start=1):
        plt.subplot(1, 3, pos)
        plt.title(title)
        plt.imshow(np.clip(image, 0, 1))
    plt.show()