My code takes three images taken with a polarising filter rotated 45° between them and transforms them into a single image encoding the polarization parameters as HSV. What can be improved?
import cv2
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from IPython.display import Image
%matplotlib inline
import glob
import math
imagefiles=glob.glob(r"C:\Users\HP\My Python stuff\openCV\Polarization\IMG*")
imagefiles.sort()
images=[]
for filename in imagefiles:
    img=cv2.imread(filename)
    img=cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
    images.append(img)
num_images=len(images)
pixel0x,pixel0y,pixel0z=cv2.split(images[0])
pixel45x,pixel45y,pixel45z=cv2.split(images[1])
pixel90x,pixel90y,pixel90z=cv2.split(images[2])
i0 = pixel0x * 0.3 + pixel0y * 0.59 + pixel0z * 0.11
i45 = pixel45x * 0.3 + pixel45y * 0.59 + pixel45z * 0.11
i90 = pixel90x * 0.3 + pixel90y * 0.59 + pixel90z * 0.11
stokesI = i0 + i90
stokesQ = i0 - i90
stokesU = (np.ones(stokesI.shape)*(2.0 * i45))- stokesI
polint = np.sqrt(stokesQ*stokesQ+stokesU*stokesU)
poldolp = polint/(stokesI+((np.ones(stokesI.shape)*0.001)))
polaop = 0.5 * np.arctan(stokesU, stokesQ)
h=(polaop+(np.ones(polaop.shape)*(3.1416/2.0)))/3.1416
s=poldolp
v=polint
hsvpolar=cv2.merge((h,s,v))
hsvpolar=np.multiply(hsvpolar, 255)
rgbimg = cv2.cvtColor(np.clip(hsvpolar, 0,255).astype('uint8'),cv2.COLOR_HSV2RGB)
plt.imshow(rgbimg)
cv2.imwrite("hsvpolarized.jpg",rgbimg)
These are the input images: [combined screenshot of the three input photographs]
- Please embed sample input images, and show all of your imports. – Reinderien, Oct 7, 2022 at 12:38
- That's... not really what I meant. The images need to be separate and not in a combined screenshot, so that we can feed them to your code. – Reinderien, Oct 8, 2022 at 3:34
1 Answer
- Not great use of Numpy - for instance, rather than writing your coefficients out in triplicate, this needs to be a single tensor dot product, and ones() is never called for in this program if you correctly use broadcasting (a minimal sketch follows this list).
- Put your code in functions.
- Don't hard-code system-specific file paths.
- Prefer pathlib over os, for glob etc.
- Don't write a (truncated) literal for pi; instead write np.pi.
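For the first point, here is a minimal sketch of the tensordot/broadcasting idea; the random dummy frames are only a stand-in assumption for the real photographs:

import numpy as np

# Three dummy RGB frames standing in for the real 0°/45°/90° photos.
images = [np.random.randint(0, 256, size=(4, 6, 3), dtype=np.uint8) for _ in range(3)]

# One tensordot over the channel axis replaces the three hand-written
# weighted sums of the split channels.
all_images = np.stack(images).astype(float)             # shape (3, H, W, 3)
coefficients = np.array([0.30, 0.59, 0.11])
i0, i45, i90 = np.tensordot(all_images, coefficients, axes=([3], [0]))

# Broadcasting makes every ones(...) array unnecessary: scalars combine
# with arrays element-wise.
stokes_i = i0 + i90
stokes_q = i0 - i90
stokes_u = 2.0 * i45 - stokes_i                          # was ones(shape)*(2.0*i45) - stokesI
poldolp = np.hypot(stokes_q, stokes_u) / (stokes_i + 0.001)  # was ... + ones(shape)*0.001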
Most importantly, your results are basically nonsense because the images don't only change polarisation - they also change alignment. You need to apply feature matching, homography estimation and a perspective de-warp to your images for this to make any sense, and OpenCV offers all of these out of the box. The demonstration code below follows https://docs.opencv.org/4.6.0/d1/de0/tutorial_py_feature_homography.html .
Suggested
from pathlib import Path
from typing import Iterable
import cv2
import matplotlib.pyplot as plt
import numpy as np
def warp_align(images: list[np.ndarray]) -> None:
    sift = cv2.SIFT_create()
    keys: list[tuple] = []
    descriptors: list[np.ndarray] = []
    for image in images:
        key, descriptor = sift.detectAndCompute(image, mask=None)
        keys.append(key)
        descriptors.append(descriptor)

    FLANN_INDEX_KDTREE = 1
    flann = cv2.FlannBasedMatcher(
        indexParams={'algorithm': FLANN_INDEX_KDTREE, 'trees': 5},
        searchParams={'checks': 50},
    )
    LOWES_RATIO = 0.7
    train_desc, *query_descs = descriptors
    matches = [
        [
            m
            for m, n in flann.knnMatch(query_desc, train_desc, k=2)
            if m.distance < LOWES_RATIO*n.distance
        ]
        for query_desc in query_descs
    ]

    def keys_to_points(matched_keys: Iterable[tuple[float, float]]) -> np.ndarray:
        return np.array(tuple(matched_keys), dtype=np.float32)

    train_key, *query_keys = keys
    for query_key, target_matches, image in zip(query_keys, matches, images[1:]):
        query_points = keys_to_points(query_key[m.queryIdx].pt for m in target_matches)
        train_points = keys_to_points(train_key[m.trainIdx].pt for m in target_matches)
        M, mask = cv2.findHomography(query_points, train_points, method=cv2.RANSAC, ransacReprojThreshold=5)
        print(M, '\n')
        cv2.warpPerspective(src=image, dst=image, M=M, dsize=image.shape[1::-1])
def calculate_polarimetry(images: list[np.ndarray]) -> np.ndarray:
    all_images = np.stack(images)
    coefficients = np.array((0.30, 0.59, 0.11))
    i00, i45, i90 = np.tensordot(all_images, coefficients, axes=[3, 0])

    stokes_i = i00 + i90
    stokes_q = i00 - i90
    stokes_u = 2*i45 - stokes_i

    polint = np.sqrt(stokes_q*stokes_q + stokes_u*stokes_u)
    poldolp = polint/(stokes_i + 0.001)
    polaop = 0.5 * np.arctan2(stokes_u, stokes_q)  # arctan2, not arctan: single-argument arctan would treat stokes_q as the out= parameter
    h = polaop/np.pi + 0.5
    s = poldolp
    v = polint
    hsv_polar = 255*np.clip(cv2.merge((h, s, v)), 0, 1)
    return cv2.cvtColor(hsv_polar.astype('uint8'), cv2.COLOR_HSV2RGB)
def main() -> None:
    paths = sorted(Path.cwd().glob("pol*.png"))
    images = [
        cv2.cvtColor(cv2.imread(str(path)), cv2.COLOR_BGR2RGB)
        for path in paths
    ]
    warp_align(images)
    rgb_img = calculate_polarimetry(images)

    fig, ((tl, tr), (bl, br)) = plt.subplots(nrows=2, ncols=2)
    tl.imshow(images[0])
    tr.imshow(images[1])
    bl.imshow(images[2])
    br.imshow(rgb_img)
    plt.show()
    # cv2.imwrite("hsvpolarized.jpg", rgbimg)


if __name__ == '__main__':
    main()
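As a quick sanity check of the Stokes arithmetic (a made-up single-pixel example, not part of the code above): light that is fully horizontally polarised should pass the 0° filter at full intensity, the 45° filter at half intensity, and the 90° filter not at all, and the formulas should then report a degree of polarisation of 1 and an angle of 0.

import numpy as np

# Hypothetical intensities for one fully horizontally polarised pixel.
i0, i45, i90 = 1.0, 0.5, 0.0

stokes_i = i0 + i90              # 1.0
stokes_q = i0 - i90              # 1.0
stokes_u = 2.0 * i45 - stokes_i  # 0.0

dolp = np.hypot(stokes_q, stokes_u) / stokes_i  # 1.0 -> fully polarised
aolp = 0.5 * np.arctan2(stokes_u, stokes_q)     # 0.0 rad -> horizontal
print(dolp, aolp)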
Prior to homographic correction: [output image]
After homographic correction: [output image]
- The result is not nonsense. They do change polarization. The images are taken by strapping a polarising filter to the camera, aligning it to horizontal polarization and taking a picture, then rotating it 45° and taking another picture, then rotating again and taking the third picture, as described in hackaday.io/project/6958-dolpi-raspi-polarization-camera – Omar Morales Rivera, Oct 8, 2022 at 20:00
- I'm not sure that you understood what I mean. The polarisation is important, and spatial displacement is not - so any serious analysis of polarisation requires that the spatial displacement be removed; otherwise the results will not make sense. – Reinderien, Oct 8, 2022 at 20:06
- Ohhh. Yes! You're absolutely right. I thought you meant that there was no polarization information in the images. That's why I explained how they were acquired. Sorry. – Omar Morales Rivera, Oct 8, 2022 at 20:33