path: root/encoder.py
author     Anthony Wang  2024-04-22 14:36:57 -0400
committer  Anthony Wang  2024-04-22 14:36:57 -0400
commit     51548066f6660ef944afcd6876bf268e385d95c5 (patch)
tree       d2c2a9ba73b05974d2ab2f1e9f618624d50e4081 /encoder.py
parent     c41ca8720532388ef95efad01e1f7fd9eb022f58 (diff)
Short args, clean up code
Diffstat (limited to 'encoder.py')
-rw-r--r--  encoder.py  105
1 file changed, 46 insertions(+), 59 deletions(-)
diff --git a/encoder.py b/encoder.py
index b20cd56..5752d55 100644
--- a/encoder.py
+++ b/encoder.py
@@ -10,27 +10,17 @@ from PIL import Image, ImageQt
from raptorq import Encoder
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
-parser.add_argument("file", help="input file")
-parser.add_argument("--height", help="grid height", default=100, type=int)
-parser.add_argument("--width", help="grid width", default=100, type=int)
-parser.add_argument("--fps", help="framerate", default=30, type=int)
-parser.add_argument("--level", help="error correction level", default=0.1, type=float)
-parser.add_argument("--video", help="output file for encoded video")
-parser.add_argument("--scale", help="scale of new frames", default=2, type=int)
+parser.add_argument("-i", "--input", help="input file")
+parser.add_argument("-o", "--output", help="output video file")
+parser.add_argument("-x", "--height", help="grid height", default=100, type=int)
+parser.add_argument("-y", "--width", help="grid width", default=100, type=int)
+parser.add_argument("-f", "--fps", help="frame rate", default=30, type=int)
+parser.add_argument("-l", "--level", help="error correction level", default=0.1, type=float)
+parser.add_argument("-m", "--mix", help="mix frames with original video", action="store_true")
args = parser.parse_args()
-if args.video:
- cap = cv2.VideoCapture(args.file)
- args.height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT) / args.scale)
- args.width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH) / args.scale)
-
-# Make corners
cheight = cwidth = max(args.height // 10, args.width // 10)
-wcorner = np.pad(np.full((cheight - 1, cwidth - 1), 0b11111111), ((0, 1), (0, 1)))
-rcorner = np.pad(np.full((cheight - 1, cwidth - 1), 0b00000111), ((0, 1), (1, 0)))
-gcorner = np.pad(np.full((cheight - 1, cwidth - 1), 0b00111000), ((1, 0), (0, 1)))
-bcorner = np.pad(np.full((cheight - 1, cwidth - 1), 0b11000000), ((1, 0), (1, 0)))
midwidth = args.width - 2 * cwidth
frame_size = args.height * args.width - 4 * cheight * cwidth
frame_xor = np.arange(frame_size, dtype=np.uint8)
@@ -38,12 +28,18 @@ frame_xor = np.arange(frame_size, dtype=np.uint8)
# raptorq can add up to 4 extra bytes
rs_size = frame_size - int((frame_size + 254) / 255) * int(args.level * 255) - 4
-with open(args.file, "rb") as f:
+with open(args.input, "rb") as f:
data = f.read()
rsc = RSCodec(int(args.level * 255))
encoder = Encoder.with_defaults(data, rs_size)
packets = encoder.get_encoded_packets(int(len(data) / rs_size * args.level))
+# Make corners
+wcorner = np.pad(np.full((cheight - 1, cwidth - 1), 0b11111111), ((0, 1), (0, 1)))
+rcorner = np.pad(np.full((cheight - 1, cwidth - 1), 0b00000111), ((0, 1), (1, 0)))
+gcorner = np.pad(np.full((cheight - 1, cwidth - 1), 0b00111000), ((1, 0), (0, 1)))
+bcorner = np.pad(np.full((cheight - 1, cwidth - 1), 0b11000000), ((1, 0), (1, 0)))
+
print("Data length:", len(data))
print("Packets:", len(packets))
@@ -55,38 +51,26 @@ def get_frame():
frame_data = np.array(rsc.encode(packets[idx]))
# Pad frame to fit frame_size since raptorq might not add 4 bytes
frame_data = np.pad(frame_data, (0, frame_size - len(frame_data))) ^ frame_xor
+ if idx == 0:
+ print(list(frame_data))
idx = (idx + 1) % len(packets)
frame = np.concatenate(
(
np.concatenate(
- (
- wcorner,
- frame_data[: cheight * midwidth].reshape((cheight, midwidth)),
- rcorner,
- ),
+ (wcorner, frame_data[: cheight * midwidth].reshape((cheight, midwidth)), rcorner),
axis=1,
),
frame_data[cheight * midwidth : frame_size - cheight * midwidth].reshape(
(args.height - 2 * cheight, args.width)
),
np.concatenate(
- (
- gcorner,
- frame_data[frame_size - cheight * midwidth :].reshape(
- (cheight, midwidth)
- ),
- bcorner,
- ),
+ (gcorner, frame_data[frame_size - cheight * midwidth :].reshape((cheight, midwidth)), bcorner),
axis=1,
),
)
)
return np.stack(
- (
- (frame & 0b00000111) * 255 // 7,
- (frame >> 3 & 0b00000111) * 255 // 7,
- (frame >> 6 & 0b00000011) * 255 // 3,
- ),
+ ((frame & 0b00000111) * 255 // 7, (frame >> 3 & 0b00000111) * 255 // 7, (frame >> 6) * 255 // 3),
axis=-1,
).astype(np.uint8)
@@ -111,30 +95,33 @@ class EncoderWidget(QWidget):
self.label.setPixmap(pixmap)
-if args.video:
- out = cv2.VideoWriter(
- args.video,
- cv2.VideoWriter_fourcc(*"mp4v"),
- cap.get(cv2.CAP_PROP_FPS),
- (args.scale * args.width, args.scale * args.height),
- )
- while cap.isOpened():
- ret, frame = cap.read()
- if not ret:
- break
- frame[: args.scale * cheight, : args.scale * cwidth] = 255
- frame[: args.scale * cheight, args.scale * (args.width - cwidth) :] = 255
- frame[args.scale * (args.height - cheight) :, : args.scale * cwidth] = 255
- frame[
- args.scale * (args.height - cheight) :, args.scale * (args.width - cwidth) :
- ] = 255
- out.write(
- (
- frame.astype(np.int64)
- * np.repeat(np.repeat(get_frame(), args.scale, 0), args.scale, 1)
- / 255
- ).astype(np.uint8)
- )
+if args.output:
+ if args.mix:
+ cap = cv2.VideoCapture(args.input)
+ height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+ width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+ hscale = height // args.height
+ wscale = width // args.width
+ out = cv2.VideoWriter(args.output, cv2.VideoWriter_fourcc(*"mp4v"), args.fps, (width, height))
+ while cap.isOpened():
+ ret, frame = cap.read()
+ if not ret:
+ break
+ frame = frame.astype(np.float64) / 255
+ frame[: hscale * cheight, : wscale * cwidth] = 1
+ frame[: hscale * cheight, wscale * (args.width - cwidth) :] = 1
+ frame[hscale * (args.height - cheight) :, : wscale * cwidth] = 1
+ frame[hscale * (args.height - cheight) :, wscale * (args.width - cwidth) :] = 1
+ out.write(
+ cv2.cvtColor(
+ (frame * np.repeat(np.repeat(get_frame(), hscale, 0), wscale, 1)).astype(np.uint8),
+ cv2.COLOR_RGB2BGR,
+ )
+ )
+ else:
+        out = cv2.VideoWriter(args.output, cv2.VideoWriter_fourcc(*"mp4v"), args.fps, (4 * args.width, 4 * args.height))
+ for _ in packets:
+ out.write(cv2.cvtColor(np.repeat(np.repeat(get_frame(), 4, 0), 4, 1), cv2.COLOR_RGB2BGR))
else:
input("Seizure warning!")
app = QApplication([])
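
The heart of the new -o/--output path is the -m/--mix branch: the input file doubles as the carrier video (it is read once as raw bytes and opened again with cv2.VideoCapture), and each captured frame is modulated by the generated data frame rather than replaced by it. A minimal sketch of that mixing step, with stand-in arrays in place of cap.read() and get_frame(), assuming a capture resolution that divides evenly by the grid:

    import numpy as np

    grid_h, grid_w = 100, 100          # --height / --width defaults
    height, width = 800, 800           # stand-in capture resolution
    hscale, wscale = height // grid_h, width // grid_w
    cheight = cwidth = max(grid_h // 10, grid_w // 10)

    data_frame = np.random.randint(0, 256, (grid_h, grid_w, 3), dtype=np.uint8)   # stand-in for get_frame()
    video_frame = np.random.randint(0, 256, (height, width, 3), dtype=np.uint8)   # stand-in for cap.read()

    frame = video_frame.astype(np.float64) / 255          # normalize carrier frame to [0, 1]
    frame[: hscale * cheight, : wscale * cwidth] = 1      # whiten a corner (the patch whitens all four)
    # Nearest-neighbour upscale the data frame to the capture resolution and modulate the carrier with it.
    mixed = (frame * np.repeat(np.repeat(data_frame, hscale, 0), wscale, 1)).astype(np.uint8)
    print(mixed.shape)                                     # (800, 800, 3)

Without -m, the data frames are written upscaled by a fixed factor of 4; without -o, the encoder falls back to the on-screen Qt display as before. An illustrative invocation (not from the patch) would be: python encoder.py -i in.mp4 -o out.mp4 -m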