-rw-r--r--  decoder.py         144
-rw-r--r--  encoder.py          25
-rw-r--r--  requirements.txt     9
3 files changed, 156 insertions(+), 22 deletions(-)
diff --git a/decoder.py b/decoder.py
index e58e246..6606b03 100644
--- a/decoder.py
+++ b/decoder.py
@@ -1,5 +1,7 @@
import argparse
+import collections
import cv2
+import matplotlib.pyplot as plt
import numpy as np
from creedsolo import RSCodec
from raptorq import Decoder
@@ -14,34 +16,148 @@ parser.add_argument("--level", help="error correction level", default=0.1, type=
parser.add_argument("--device", help="camera device index", default=1, type=int)
args = parser.parse_args()
-cheight = args.height // 10
-cwidth = args.width // 10
+cheight = cwidth = max(args.height // 10, args.width // 10)
frame_size = args.height * args.width - 4 * cheight * cwidth
frame_xor = np.arange(frame_size, dtype=np.uint8)
-rs_size = int(frame_size * (1 - args.level))
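+# Usable bytes per frame: frame_size minus RS parity (level*255 bytes per 255-byte codeword) minus 4 (packet header)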
+rs_size = frame_size - int((frame_size + 254) / 255) * int(args.level * 255) - 4
-rsc = RSCodec(frame_size - rs_size)
-raptor_decoder = Decoder.with_defaults(args.len, rs_size)
+rsc = RSCodec(int(args.level * 255))
+decoder = Decoder.with_defaults(args.len, rs_size)
data = None
-cap = cv2.VideoCapture(args.device)
+# cap = cv2.VideoCapture(args.device)
while data is None:
- ret, raw_frame = cap.read()
- if not ret:
- continue
- color_frame = decode(raw_frame) # TODO
+ # ret, raw_frame = cap.read()
+ # if not ret:
+ # continue
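+    # Camera capture is stubbed out while debugging; decode a fixed test photo instead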
+ raw_frame = cv2.cvtColor(
+ cv2.imread("/home/a/Pictures/Camera/IMG_20240422_000849_027.jpg"),
+ cv2.COLOR_BGR2RGB,
+ ).astype(np.float64)
+
+ X, Y = raw_frame.shape[:2]
+ scale = min(X // 20, Y // 20)
+    # Resize so the smaller dimension is about 20 cells:
+    # a fast default-interpolation downscale by 4 first,
+    # then a slower, higher-quality INTER_AREA pass
+ dframe = cv2.resize(
+ cv2.resize(raw_frame, (Y // 4, X // 4)),
+        (Y // scale, X // scale),  # cv2.resize takes (width, height)
+ interpolation=cv2.INTER_AREA,
+ )
+ plt.imshow(dframe.astype(np.uint8))
+ plt.show()
+
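+    # Position of the largest value in a downscaled score map, mapped back to the centre of its cell in the original image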
+ def max_in_orig(x):
+ return tuple(
+ np.array(np.unravel_index(np.argmax(x), x.shape)) * scale + scale // 2
+ )
+
+ sumframe = np.sum(dframe, axis=2)
+ # TODO: Only search in corner area
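+    # Seed guesses: white = bright pixel with low channel spread; R/G/B = the channel that most exceeds the other two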
+ widx = max_in_orig((np.std(dframe, axis=2) < 35) * sumframe)
+ ridx = max_in_orig(2 * dframe[:, :, 0] - sumframe)
+ gidx = max_in_orig(2 * dframe[:, :, 1] - sumframe)
+ bidx = max_in_orig(2 * dframe[:, :, 2] - sumframe)
+
+ # Flood fill corners
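+    # BFS in 5-pixel steps while the colour stays close to the seed; returns the region's mean position and mean colour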
+ def flood_fill(s):
+ vis = np.full((X, Y), False)
+ vis[s] = True
+ queue = collections.deque([s])
+ pos = np.array(s)
+ col = np.copy(raw_frame[s])
+ n = 1
+ while len(queue) > 0:
+ u = queue.popleft()
+ for d in [(5, 0), (0, 5), (-5, 0), (0, -5)]:
+ v = (u[0] + d[0], u[1] + d[1])
+ if (
+ 0 <= v[0] < X
+ and 0 <= v[1] < Y
+ and not vis[v]
+ and np.linalg.norm(raw_frame[v] - raw_frame[s]) < 100
+ ):
+ vis[v] = True
+ pos += np.array(v)
+ col += raw_frame[v]
+ n += 1
+ queue.append(v)
+ plt.imshow(raw_frame.astype(np.uint8))
+ plt.scatter(*reversed(np.where(vis)))
+ plt.scatter(pos[1] / n, pos[0] / n)
+ plt.show()
+ return pos / n, col / n
+
+ widx, wcol = flood_fill(widx)
+ ridx, rcol = flood_fill(ridx)
+ gidx, gcol = flood_fill(gidx)
+ bidx, bcol = flood_fill(bidx)
+
+ # Find basis of color space
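+    # White is roughly black + R + G + B, so the black point is (rcol + gcol + bcol - wcol) / 2;
+    # F maps camera-space colour offsets back onto the ideal 0-255 R/G/B axes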
+ origin = (rcol + gcol + bcol - wcol) / 2
+ rcol -= origin
+ gcol -= origin
+ bcol -= origin
+ print(origin, rcol, gcol, bcol)
+ F = 255 * np.linalg.inv(np.stack((rcol, gcol, bcol)).T)
+
+ # Dumb perspective transform
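+    # Bilinear blend of the four corner centres; xv/yv extend slightly past [0, 1] because the centroids sit half a corner inside the frame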
+ xv = np.linspace(
+ -(cheight / 2 - 1) / (args.height - cheight + 1),
+ 1 + (cheight / 2 - 1) / (args.height - cheight + 1),
+ args.height,
+ )
+ yv = np.linspace(
+ -(cwidth / 2 - 1) / (args.width - cwidth + 1),
+ 1 + (cwidth / 2 - 1) / (args.width - cwidth + 1),
+ args.width,
+ )
+ xp = (
+ np.outer(1 - xv, 1 - yv) * widx[0]
+ + np.outer(1 - xv, yv) * ridx[0]
+ + np.outer(xv, 1 - yv) * gidx[0]
+ + np.outer(xv, yv) * bidx[0]
+ )
+ yp = (
+ np.outer(1 - xv, 1 - yv) * widx[1]
+ + np.outer(1 - xv, yv) * ridx[1]
+ + np.outer(xv, 1 - yv) * gidx[1]
+ + np.outer(xv, yv) * bidx[1]
+ )
+
+ plt.scatter(widx[1], widx[0])
+ plt.scatter(ridx[1], ridx[0])
+ plt.scatter(gidx[1], gidx[0])
+ plt.scatter(bidx[1], bidx[0])
+ plt.scatter(yp, xp)
+ plt.imshow(raw_frame.astype(np.uint8))
+ plt.show()
+ print(111111111, xp)
+ print(111111111, yp)
+
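+    # Sample the photo at the predicted cell centres, then map the colours back into the ideal RGB cube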
+ raw_color_frame = raw_frame[xp.astype(np.int64), yp.astype(np.int64), :]
+ print(raw_color_frame)
+ color_frame = np.clip(
+ np.squeeze(F @ (raw_color_frame - origin)[..., np.newaxis]), 0, 255
+ ).astype(np.uint8)
+ print(color_frame)
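+    # Repack each pixel into one byte: 3 red bits, 3 green bits, 2 blue bits (the encoder's layout)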
frame = (
- (color_frame[:, :, 0] >> 5 & 0b00000111)
- + (color_frame[:, :, 1] >> 2 & 0b00111000)
+ (color_frame[:, :, 0] >> 5)
+ + (color_frame[:, :, 1] >> 3 & 0b00111000)
+ (color_frame[:, :, 2] & 0b11000000)
)
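+    # Data cells in frame order: top strip between the corners, full-width middle band, bottom strip between the corners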
frame_data = np.concatenate(
(
frame[:cheight, cwidth : args.width - cwidth].flatten(),
frame[cheight : args.height - cheight].flatten(),
- frame[args.heigth - cheight, cwidth : args.width - cwidth].flatten(),
+ frame[args.height - cheight :, cwidth : args.width - cwidth].flatten(),
)
)
- data = raptor_decoder.decode(rsc.decode(frame_data ^ frame_xor))
+ print(list(frame_data))
+ tmp = rsc.decode(frame_data ^ frame_xor)
+ # print(tmp, list(tmp[2]), bytes(tmp[0]))
+    data = decoder.decode(bytes(tmp[0]))  # rsc.decode returns (message, full codeword, errata positions)
+ break
with open(args.file, "wb") as f:
f.write(data)
cap.release()
diff --git a/encoder.py b/encoder.py
index cc4d7cb..a4ef7c1 100644
--- a/encoder.py
+++ b/encoder.py
@@ -17,8 +17,12 @@ parser.add_argument("--level", help="error correction level", default=0.1, type=
args = parser.parse_args()
-cheight = args.height // 10
-cwidth = args.width // 10
+# Make corners
+cheight = cwidth = max(args.height // 10, args.width // 10)
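+# White, red, green and blue blocks, each padded with a one-pixel black edge on the sides facing the data area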
+wcorner = np.pad(np.full((cheight - 1, cwidth - 1), 0b11111111), ((0, 1), (0, 1)))
+rcorner = np.pad(np.full((cheight - 1, cwidth - 1), 0b00000111), ((0, 1), (1, 0)))
+gcorner = np.pad(np.full((cheight - 1, cwidth - 1), 0b00111000), ((1, 0), (0, 1)))
+bcorner = np.pad(np.full((cheight - 1, cwidth - 1), 0b11000000), ((1, 0), (1, 0)))
midwidth = args.width - 2 * cwidth
frame_size = args.height * args.width - 4 * cheight * cwidth
frame_xor = np.arange(frame_size, dtype=np.uint8)
@@ -31,6 +35,7 @@ with open(args.file, "rb") as f:
rsc = RSCodec(int(args.level * 255))
encoder = Encoder.with_defaults(data, rs_size)
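+# Request repair packets in proportion to the error-correction level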
packets = encoder.get_encoded_packets(int(len(data) / rs_size * args.level))
+
print("Data length:", len(data))
print("Packets:", len(packets))
input("Seizure warning!")
@@ -59,9 +64,9 @@ class EncoderWidget(QWidget):
(
np.concatenate(
(
- np.zeros((cheight, cwidth), dtype=np.uint8), # TODO
+ wcorner,
frame_data[: cheight * midwidth].reshape((cheight, midwidth)),
- np.zeros((cheight, cwidth), dtype=np.uint8), # TODO
+ rcorner,
),
axis=1,
),
@@ -70,21 +75,25 @@ class EncoderWidget(QWidget):
].reshape((args.height - 2 * cheight, args.width)),
np.concatenate(
(
- np.zeros((cheight, cwidth), dtype=np.uint8), # TODO
+ gcorner,
frame_data[frame_size - cheight * midwidth :].reshape(
(cheight, midwidth)
),
- np.zeros((cheight, cwidth), dtype=np.uint8), # TODO
+ bcorner,
),
axis=1,
),
)
)
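+        # Expand the 3-3-2 packed bits into full 0-255 channel intensities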
color_frame = np.stack(
- (frame << 5 & 0b11100000, frame << 2 & 0b11100000, frame & 0b11000000),
+ (
+ (frame & 0b00000111) * 255 // 7,
+ (frame >> 3 & 0b00000111) * 255 // 7,
+ (frame >> 6 & 0b00000011) * 255 // 3,
+ ),
axis=-1,
)
- img = Image.fromarray(color_frame)
+ img = Image.fromarray(color_frame.astype(np.uint8))
qt_img = ImageQt.ImageQt(img)
pixmap = QPixmap.fromImage(qt_img).scaled(self.size())
self.label.setPixmap(pixmap)
diff --git a/requirements.txt b/requirements.txt
index 9543501..9dc3725 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,9 +1,18 @@
+contourpy==1.2.1
+cycler==0.12.1
Cython==3.0.10
+fonttools==4.51.0
+kiwisolver==1.4.5
+matplotlib==3.8.4
numpy==1.26.4
opencv-python==4.9.0.80
+packaging==24.0
pillow==10.3.0
+pyparsing==3.1.2
PyQt6==6.6.1
PyQt6-Qt6==6.6.3
PyQt6-sip==13.6.0
+python-dateutil==2.9.0.post0
raptorq==2.0.0
reedsolo==1.7.0
+six==1.16.0