path: root/encoder.py
import argparse
import cv2
import numpy as np
from creedsolo import RSCodec
from raptorq import Encoder

parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-i", "--input", help="input file")
parser.add_argument("-o", "--output", help="output video file", default="vid.mkv")
parser.add_argument("-x", "--height", help="grid height", default=100, type=int)
parser.add_argument("-y", "--width", help="grid width", default=100, type=int)
parser.add_argument("-l", "--level", help="error correction level", default=0.1, type=float)
parser.add_argument("-f", "--fps", help="frame rate", default=30, type=int)
parser.add_argument("-m", "--mix", help="mix frames with original video", action="store_true")
args = parser.parse_args()


# Corner markers are square, one tenth of the larger grid dimension on a side;
# all four corners are excluded from the data area
cheight = cwidth = max(args.height // 10, args.width // 10)
midwidth = args.width - 2 * cwidth
frame_size = args.height * args.width - 4 * cheight * cwidth
# Each pixel stores 3 bits (one per color channel), so capacity in bytes is pixels * 3 / 8
frame_bytes = frame_size * 3 // 8
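# Fixed byte pattern XORed onto each frame's payload in mkframe (lightweight whitening of the data)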
frame_xor = np.arange(frame_bytes, dtype=np.uint8)
# reedsolo breaks the message into 255-byte chunks, each carrying level * 255 parity bytes;
# raptorq can add up to 4 extra bytes of packet overhead
rs_bytes = frame_bytes - (frame_bytes + 254) // 255 * int(args.level * 255) - 4

with open(args.input, "rb") as f:
    data = f.read()
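# level * 255 parity bytes per 255-byte Reed-Solomon chunk; rs_bytes is the RaptorQ
# packet payload size, so each packet plus its overhead and parity fits in one frame.
# The repair-packet count below adds roughly level / (1 - level) overhead on top of the source packets.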
rsc = RSCodec(int(args.level * 255))
encoder = Encoder.with_defaults(data, rs_bytes)
packets = encoder.get_encoded_packets(int(len(data) / rs_bytes * (1 / (1 - args.level) - 1)))

# Make the corner markers: solid white, red, green, and blue squares, each with a
# one-pixel black border on its inward-facing edges so the decoder can find and orient the grid
ones = np.ones((cheight - 1, cwidth - 1))
zeros = np.zeros((cheight - 1, cwidth - 1))
wcorner = np.pad(np.dstack((ones, ones, ones)), ((0, 1), (0, 1), (0, 0)))
rcorner = np.pad(np.dstack((ones, zeros, zeros)), ((0, 1), (1, 0), (0, 0)))
gcorner = np.pad(np.dstack((zeros, ones, zeros)), ((1, 0), (0, 1), (0, 0)))
bcorner = np.pad(np.dstack((zeros, zeros, ones)), ((1, 0), (1, 0), (0, 0)))

# Output flags for decoder
print(f"-x {args.height} -y {args.width} -l {args.level} -s {len(data)} -p {len(packets[0])}", end="")


def mkframe(packet):
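    # Reed-Solomon encode the packet, pad it to the frame's byte capacity, XOR with
    # frame_xor, unpack to bits, and map each group of 3 bits to one RGB pixel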
    frame = np.array(rsc.encode(bytearray(packet)))
    frame = np.unpackbits(np.pad(frame, (0, frame_bytes - len(frame))) ^ frame_xor)
    # Pad to be multiple of 3 so we can reshape into RGB channels
    frame = np.pad(frame, (0, (3 - len(frame)) % 3))
    frame = np.reshape(frame, (frame_size, 3))
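    # Assemble the grid: the top and bottom strips hold data flanked by corner markers,
    # and the rows in between are all data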
    frame = np.concatenate(
        (
            np.concatenate(
                (wcorner, frame[: cheight * midwidth].reshape((cheight, midwidth, 3)), rcorner),
                axis=1,
            ),
            frame[cheight * midwidth : frame_size - cheight * midwidth].reshape(
                (args.height - 2 * cheight, args.width, 3)
            ),
            np.concatenate(
                (gcorner, frame[frame_size - cheight * midwidth :].reshape((cheight, midwidth, 3)), bcorner),
                axis=1,
            ),
        )
    )
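    # Scale the 0/1 bit values to 0/255 pixel intensities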
    return frame.astype(np.uint8) * 255


if args.mix:
    # Mix frames with original video
    cap = cv2.VideoCapture(args.input)
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
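    # Integer factors for upscaling the data grid to the source video's resolution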
    hscale = height // args.height
    wscale = width // args.width
    out = cv2.VideoWriter(args.output, cv2.VideoWriter_fourcc(*"FFV1"), args.fps, (width, height))
    i = 0
    while cap.isOpened():
        ret, vidframe = cap.read()
        if not ret:
            break
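        # Black out the corner regions of the source frame so the corner markers are
        # never masked out below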
        vidframe[: hscale * cheight, : wscale * cwidth] = 0
        vidframe[: hscale * cheight, wscale * (args.width - cwidth) :] = 0
        vidframe[hscale * (args.height - cheight) :, : wscale * cwidth] = 0
        vidframe[hscale * (args.height - cheight) :, wscale * (args.width - cwidth) :] = 0
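        # Upscale the data frame to the source resolution by repeating pixels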
        frame = np.repeat(np.repeat(mkframe(packets[i]), hscale, 0), wscale, 1)
        # Zero data pixels wherever the source frame is neither pure black nor pure white,
        # so the anti-aliased edges of the original video show through as black
        frame[vidframe % 255 != 0] = 0
        out.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
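        # Cycle through the packets for as long as the source video runs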
        i = (i + 1) % len(packets)
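    # Release the capture now that the source video is exhausted
    cap.release()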
else:
    # Create a new video
    out = cv2.VideoWriter(args.output, cv2.VideoWriter_fourcc(*"FFV1"), args.fps, (args.width, args.height))
    for packet in packets:
        out.write(cv2.cvtColor(mkframe(packet), cv2.COLOR_RGB2BGR))
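
# Release the writer so the output file is flushed and finalized
out.release()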