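# Encode a file into a video of colored grids: the payload is fountain-coded with
# RaptorQ, protected with Reed-Solomon parity, and drawn as a height x width grid
# of 4-bit color cells with reference markers in the four corners.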
import argparse
import sys
import cv2
import numpy as np
from creedsolo import RSCodec
from PyQt6.QtWidgets import QApplication, QWidget, QLabel, QVBoxLayout
from PyQt6.QtGui import QPixmap
from PyQt6.QtCore import QTimer
from PIL import Image, ImageQt
from raptorq import Encoder
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-i", "--input", help="input file")
parser.add_argument("-o", "--output", help="output video file")
parser.add_argument("-x", "--height", help="grid height", default=100, type=int)
parser.add_argument("-y", "--width", help="grid width", default=100, type=int)
parser.add_argument("-f", "--fps", help="frame rate", default=30, type=int)
parser.add_argument("-l", "--level", help="error correction level", default=0.1, type=float)
parser.add_argument("-m", "--mix", help="mix frames with original video", action="store_true")
args = parser.parse_args()
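
# Corner markers are squares whose side is a tenth of the larger grid dimension;
# frame_size is the number of data cells left over, each holding 4 bits.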
cheight = cwidth = max(args.height // 10, args.width // 10)
midwidth = args.width - 2 * cwidth
frame_size = args.height * args.width - 4 * cheight * cwidth
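# Repeating 0-255 XOR mask, presumably to scramble the payload so a frame never
# contains long runs of identical cells.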
frame_xor = np.arange(frame_size // 2, dtype=np.uint8)
# reedsolo breaks message into 255-byte chunks
# raptorq can add up to 4 extra bytes
# Divide by 2 for 4-bit color
rs_size = frame_size - (frame_size // 2 + 254) // 255 * int(args.level * 255) - 4
with open(args.input, "rb") as f:
    data = f.read()
rsc = RSCodec(int(args.level * 255))
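# Fountain-code the file with RaptorQ; each packet carries rs_size data bytes so
# that a packet plus its Reed-Solomon parity is meant to fill one frame.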
encoder = Encoder.with_defaults(data, rs_size)
# This formula is wrong 🤷
packets = encoder.get_encoded_packets(int(len(data) / rs_size * args.level))
# Make corners
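# With the color mapping used in get_frame, these come out white (top left),
# red (top right), green (bottom left) and blue (bottom right), each with a
# one-cell black border along its two inner edges.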
wcorner = np.pad(np.full((cheight - 1, cwidth - 1), 0b1111), ((0, 1), (0, 1)))
rcorner = np.pad(np.full((cheight - 1, cwidth - 1), 0b0001), ((0, 1), (1, 0)))
gcorner = np.pad(np.full((cheight - 1, cwidth - 1), 0b0110), ((1, 0), (0, 1)))
bcorner = np.pad(np.full((cheight - 1, cwidth - 1), 0b1000), ((1, 0), (1, 0)))
print("Data length:", len(data))
print("Packets:", len(packets))
idx = 0
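
# Build the next frame: Reed-Solomon-encode one RaptorQ packet, interleave its
# 255-byte chunks and XOR-scramble them, split each byte into two 4-bit cells,
# wrap the corner markers around the data, and map every cell to RGB
# (R = bit 0, G = bits 1 and 2, B = bit 3).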
def get_frame():
    global idx
    frame = np.array(rsc.encode(packets[idx]))
    idx = (idx + 1) % len(packets)
    # Add 4 bytes, pad frame to be multiple of 255
    frame = np.pad(frame, (0, (len(frame) + 258) // 255 * 255 - len(frame)))
    # Space out elements in each size 255 chunk
    frame = np.ravel(np.reshape(frame, (len(frame) // 255, 255)), "F")[: frame_size // 2] ^ frame_xor
    frame = np.ravel(np.column_stack((frame >> 4, frame & 0b1111)))
    frame = np.concatenate(
        (
            np.concatenate(
                (wcorner, frame[: cheight * midwidth].reshape((cheight, midwidth)), rcorner),
                axis=1,
            ),
            frame[cheight * midwidth : frame_size - cheight * midwidth].reshape(
                (args.height - 2 * cheight, args.width)
            ),
            np.concatenate(
                (gcorner, frame[frame_size - cheight * midwidth :].reshape((cheight, midwidth)), bcorner),
                axis=1,
            ),
        )
    )
    return np.stack(
        ((frame & 0b0001) * 255, (frame >> 1 & 0b0011) * 255 / 3, (frame >> 3) * 255),
        axis=-1,
    ).astype(np.uint8)
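
# Fullscreen Qt widget that redraws itself with a freshly generated frame at
# roughly the requested frame rate.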
class EncoderWidget(QWidget):
    def __init__(self):
        super().__init__()
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.update)
        self.timer.start(1000 // args.fps)
        self.label = QLabel(self)
        layout = QVBoxLayout(self)
        layout.addWidget(self.label)
        layout.setContentsMargins(0, 0, 0, 0)
        self.setLayout(layout)
        self.showFullScreen()

    def update(self):
        img = Image.fromarray(get_frame())
        qt_img = ImageQt.ImageQt(img)
        pixmap = QPixmap.fromImage(qt_img).scaled(self.size())
        self.label.setPixmap(pixmap)
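
# With --output, render frames straight to a video file: --mix multiplies them over
# the frames of the input video, otherwise they are written as-is. Without --output,
# display the frames fullscreen (hence the seizure warning).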
if args.output:
    if args.mix:
        cap = cv2.VideoCapture(args.input)
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        hscale = height // args.height
        wscale = width // args.width
        out = cv2.VideoWriter(args.output, cv2.VideoWriter_fourcc(*"RGBA"), args.fps, (width, height))
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            frame = frame.astype(np.float64) / 255
            # Force the corner regions of the source frame to white so the markers
            # are unaffected by the mix
            frame[: hscale * cheight, : wscale * cwidth] = 1
            frame[: hscale * cheight, wscale * (args.width - cwidth) :] = 1
            frame[hscale * (args.height - cheight) :, : wscale * cwidth] = 1
            frame[hscale * (args.height - cheight) :, wscale * (args.width - cwidth) :] = 1
            out.write(
                cv2.cvtColor(
                    (frame * np.repeat(np.repeat(get_frame(), hscale, 0), wscale, 1)).astype(np.uint8),
                    cv2.COLOR_RGB2BGR,
                )
            )
        cap.release()
    else:
        out = cv2.VideoWriter(args.output, cv2.VideoWriter_fourcc(*"RGBA"), args.fps, (args.width, args.height))
        for _ in packets:
            out.write(cv2.cvtColor(get_frame(), cv2.COLOR_RGB2BGR))
    out.release()
else:
    input("Seizure warning!")
    app = QApplication([])
    widget = EncoderWidget()
    sys.exit(app.exec())