-rw-r--r--  decoder.py |   1
-rw-r--r--  encoder.py | 126
2 files changed, 83 insertions, 44 deletions
diff --git a/decoder.py b/decoder.py
index 6606b03..649220c 100644
--- a/decoder.py
+++ b/decoder.py
@@ -29,6 +29,7 @@ while data is None:
     # ret, raw_frame = cap.read()
     # if not ret:
     #     continue
+    # TODO: Try decoding saved videos
     raw_frame = cv2.cvtColor(
         cv2.imread("/home/a/Pictures/Camera/IMG_20240422_000849_027.jpg"),
         cv2.COLOR_BGR2RGB,
diff --git a/encoder.py b/encoder.py
index a4ef7c1..b20cd56 100644
--- a/encoder.py
+++ b/encoder.py
@@ -1,5 +1,6 @@
 import argparse
 import sys
+import cv2
 import numpy as np
 from creedsolo import RSCodec
 from PyQt6.QtWidgets import QApplication, QWidget, QLabel, QVBoxLayout
@@ -9,14 +10,21 @@ from PIL import Image, ImageQt
 from raptorq import Encoder
 
 parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
-parser.add_argument("file", help="output file for decoded data")
+parser.add_argument("file", help="input file")
 parser.add_argument("--height", help="grid height", default=100, type=int)
 parser.add_argument("--width", help="grid width", default=100, type=int)
 parser.add_argument("--fps", help="framerate", default=30, type=int)
 parser.add_argument("--level", help="error correction level", default=0.1, type=float)
+parser.add_argument("--video", help="output file for encoded video")
+parser.add_argument("--scale", help="scale of new frames", default=2, type=int)
 args = parser.parse_args()
 
 
+if args.video:
+    cap = cv2.VideoCapture(args.file)
+    args.height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT) / args.scale)
+    args.width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH) / args.scale)
+
 # Make corners
 cheight = cwidth = max(args.height // 10, args.width // 10)
 wcorner = np.pad(np.full((cheight - 1, cwidth - 1), 0b11111111), ((0, 1), (0, 1)))
@@ -38,7 +46,49 @@ packets = encoder.get_encoded_packets(int(len(data) / rs_size * args.level))
 
 print("Data length:", len(data))
 print("Packets:", len(packets))
-input("Seizure warning!")
+
+idx = 0
+
+
+def get_frame():
+    global idx
+    frame_data = np.array(rsc.encode(packets[idx]))
+    # Pad frame to fit frame_size since raptorq might not add 4 bytes
+    frame_data = np.pad(frame_data, (0, frame_size - len(frame_data))) ^ frame_xor
+    idx = (idx + 1) % len(packets)
+    frame = np.concatenate(
+        (
+            np.concatenate(
+                (
+                    wcorner,
+                    frame_data[: cheight * midwidth].reshape((cheight, midwidth)),
+                    rcorner,
+                ),
+                axis=1,
+            ),
+            frame_data[cheight * midwidth : frame_size - cheight * midwidth].reshape(
+                (args.height - 2 * cheight, args.width)
+            ),
+            np.concatenate(
+                (
+                    gcorner,
+                    frame_data[frame_size - cheight * midwidth :].reshape(
+                        (cheight, midwidth)
+                    ),
+                    bcorner,
+                ),
+                axis=1,
+            ),
+        )
+    )
+    return np.stack(
+        (
+            (frame & 0b00000111) * 255 // 7,
+            (frame >> 3 & 0b00000111) * 255 // 7,
+            (frame >> 6 & 0b00000011) * 255 // 3,
+        ),
+        axis=-1,
+    ).astype(np.uint8)
 
 
 class EncoderWidget(QWidget):
@@ -47,7 +97,6 @@ class EncoderWidget(QWidget):
         self.timer = QTimer(self)
         self.timer.timeout.connect(self.update)
         self.timer.start(1000 // args.fps)
-        self.idx = 0
         self.label = QLabel(self)
         layout = QVBoxLayout(self)
         layout.addWidget(self.label)
@@ -56,49 +105,38 @@ class EncoderWidget(QWidget):
         self.showFullScreen()
 
     def update(self):
-        frame_data = np.array(rsc.encode(packets[self.idx]))
-        # Pad frame to fit frame_size since raptorq might not add 4 bytes
-        frame_data = np.pad(frame_data, (0, frame_size - len(frame_data))) ^ frame_xor
-        self.idx = (self.idx + 1) % len(packets)
-        frame = np.concatenate(
-            (
-                np.concatenate(
-                    (
-                        wcorner,
-                        frame_data[: cheight * midwidth].reshape((cheight, midwidth)),
-                        rcorner,
-                    ),
-                    axis=1,
-                ),
-                frame_data[
-                    cheight * midwidth : frame_size - cheight * midwidth
-                ].reshape((args.height - 2 * cheight, args.width)),
-                np.concatenate(
-                    (
-                        gcorner,
-                        frame_data[frame_size - cheight * midwidth :].reshape(
-                            (cheight, midwidth)
-                        ),
-                        bcorner,
-                    ),
-                    axis=1,
-                ),
-            )
-        )
-        color_frame = np.stack(
-            (
-                (frame & 0b00000111) * 255 // 7,
-                (frame >> 3 & 0b00000111) * 255 // 7,
-                (frame >> 6 & 0b00000011) * 255 // 3,
-            ),
-            axis=-1,
-        )
-        img = Image.fromarray(color_frame.astype(np.uint8))
+        img = Image.fromarray(get_frame())
         qt_img = ImageQt.ImageQt(img)
         pixmap = QPixmap.fromImage(qt_img).scaled(self.size())
         self.label.setPixmap(pixmap)
 
 
-app = QApplication([])
-widget = EncoderWidget()
-sys.exit(app.exec())
+if args.video:
+    out = cv2.VideoWriter(
+        args.video,
+        cv2.VideoWriter_fourcc(*"mp4v"),
+        cap.get(cv2.CAP_PROP_FPS),
+        (args.scale * args.width, args.scale * args.height),
+    )
+    while cap.isOpened():
+        ret, frame = cap.read()
+        if not ret:
+            break
+        frame[: args.scale * cheight, : args.scale * cwidth] = 255
+        frame[: args.scale * cheight, args.scale * (args.width - cwidth) :] = 255
+        frame[args.scale * (args.height - cheight) :, : args.scale * cwidth] = 255
+        frame[
+            args.scale * (args.height - cheight) :, args.scale * (args.width - cwidth) :
+        ] = 255
+        out.write(
+            (
+                frame.astype(np.int64)
+                * np.repeat(np.repeat(get_frame(), args.scale, 0), args.scale, 1)
+                / 255
+            ).astype(np.uint8)
+        )
+else:
+    input("Seizure warning!")
+    app = QApplication([])
+    widget = EncoderWidget()
+    sys.exit(app.exec())