|
1 | 1 | from __future__ import annotations |
| 2 | +from dataclasses import dataclass |
2 | 3 | from datetime import datetime |
| 4 | +import io |
3 | 5 | import json |
4 | 6 | import logging |
5 | 7 | import os |
6 | 8 | import tempfile |
7 | 9 | import time |
8 | 10 | from tempfile import TemporaryDirectory |
| 11 | +import uuid |
9 | 12 |
|
10 | 13 | from pydantic import BaseModel, BeforeValidator, RootModel |
11 | 14 |
|
|
17 | 20 | from labthings_fastapi.types.numpy import NDArray |
18 | 21 | from labthings_fastapi.dependencies.metadata import GetThingStates |
19 | 22 | from labthings_fastapi.dependencies.blocking_portal import BlockingPortal |
20 | | -from labthings_fastapi.outputs.blob import blob_type |
21 | | -from typing import Annotated, Any, Iterator, Literal, Mapping, Optional |
| 23 | +from labthings_fastapi.outputs.blob import Blob, BlobBytes |
| 24 | +from typing import Annotated, Any, Iterator, Literal, Mapping, Optional, Self |
22 | 25 | from contextlib import contextmanager |
23 | 26 | import piexif |
| 27 | +from scipy.ndimage import zoom |
| 28 | +from scipy.interpolate import interp1d |
| 29 | +from PIL import Image |
24 | 30 | from threading import RLock |
25 | 31 | import picamera2 |
26 | 32 | from picamera2 import Picamera2 |
|
30 | 36 | from . import recalibrate_utils |
31 | 37 |
|
32 | 38 |
|
33 | | -JPEGBlob = blob_type("image/jpeg") |
| 39 | +class JPEGBlob(Blob): |
| 40 | + media_type: str = "image/jpeg" |
34 | 41 |
|
35 | 42 |
|
| 43 | +class PNGBlob(Blob): |
| 44 | + media_type: str = "image/png" |
| 45 | + |
| 46 | + |
| 47 | +class RawBlob(Blob): |
| 48 | + media_type: str = "image/raw" |
| 49 | + |
| 50 | + |
| 51 | +class RawImageModel(BaseModel): |
| | +    """Raw sensor data from a capture, plus what's needed to process it""" |
| 52 | + image_data: RawBlob |
| 53 | + thing_states: Optional[Mapping[str, Mapping]] |
| 54 | + metadata: Optional[Mapping[str, Mapping]] |
| 55 | + processing_inputs: Optional[ImageProcessingInputs] = None |
| 56 | + size: tuple[int, int] |
| 57 | + stride: int |
| 58 | + format: str |
| 59 | + |
36 | 60 | class PicameraControl(PropertyDescriptor): |
37 | 61 | def __init__( |
38 | 62 | self, control_name: str, model: type = float, description: Optional[str] = None |
@@ -68,7 +92,7 @@ def __init__(self, stream: MJPEGStream, portal: BlockingPortal): |
68 | 92 | self.stream = stream |
69 | 93 | self.portal = portal |
70 | 94 |
|
71 | | - def outputframe(self, frame, _keyframe=True, _timestamp=None): |
| 95 | +    def outputframe( |
| | +        self, frame, _keyframe=True, _timestamp=None, _packet=None, _audio=False |
| | +    ): |
72 | 96 | """Add a frame to the stream's ringbuffer""" |
73 | 97 | self.stream.add_frame(frame, self.portal) |
74 | 98 |
|
@@ -98,6 +122,78 @@ class LensShading(BaseModel): |
98 | 122 | Cb: list[list[float]] |
99 | 123 |
|
100 | 124 |
|
| 125 | +class ImageProcessingInputs(BaseModel): |
| | +    """The calibration data and settings needed to process a raw image""" |
| 126 | + lens_shading: LensShading |
| 127 | + colour_gains: tuple[float, float] |
| 128 | + white_norm_lores: NDArray |
| 129 | + raw_size: tuple[int, int] |
| 130 | +    colour_correction_matrix: tuple[ |
| | +        float, float, float, float, float, float, float, float, float |
| | +    ] |
| 131 | + gamma: NDArray |
| 132 | + |
| 133 | + |
| 134 | +@dataclass |
| 135 | +class ImageProcessingCache: |
| | +    """Precomputed arrays that speed up repeated processing of raw images""" |
| 136 | + white_norm: np.ndarray |
| 137 | + gamma: interp1d |
| 138 | + ccm: np.ndarray |
| 139 | + |
| 140 | + |
| 141 | +class BlobNumpyDict(BlobBytes): |
| | +    """A Blob whose binary content is generated lazily from numpy arrays""" |
| | + |
| | +    def __init__( |
| | +        self, arrays: Mapping[str, np.ndarray], media_type: str = "application/npz" |
| | +    ): |
| | +        self._arrays = arrays |
| | +        self._bytes_cache: Optional[bytes] = None |
| | +        self.media_type = media_type |
| | + |
| | +    @property |
| | +    def arrays(self) -> Mapping[str, np.ndarray]: |
| | +        """The numpy arrays wrapped by this blob""" |
| | +        return self._arrays |
| | + |
| | +    @property |
| | +    def _bytes(self) -> bytes:  # noqa mypy: override |
| | +        """Generate (and cache) the ``.npz`` binary content from the arrays""" |
| | +        if self._bytes_cache is None: |
| | +            out = io.BytesIO() |
| | +            np.savez(out, **self.arrays) |
| | +            self._bytes_cache = out.getvalue() |
| | +        return self._bytes_cache |
| 159 | + |
| 160 | + |
| 161 | +class NumpyBlob(Blob): |
| 162 | + media_type: str = "application/npz" |
| 163 | + |
| 164 | + @classmethod |
| 165 | +    def from_arrays(cls, arrays: Mapping[str, np.ndarray]) -> Self: |
| | +        """Create a NumpyBlob wrapping `arrays`, without serialising them yet""" |
| | +        return cls.model_construct(  # type: ignore[return-value] |
| 167 | + href="blob://local", |
| 168 | + _data=BlobNumpyDict( |
| 169 | + arrays, |
| 170 | + media_type=cls.default_media_type() |
| 171 | + ), |
| 172 | + ) |
| 173 | + |
| 174 | + |
| 175 | + |
| 176 | +def raw2rggb(raw: np.ndarray, size: tuple[int, int]) -> np.ndarray: |
| | +    """Convert a packed 10 bit BGGR raw frame to an 8 bit RGGB array |
| | + |
| | +    SBGGR10_CSI2P packs each group of four 10 bit pixels into 5 bytes: the first |
| | +    four bytes hold the 8 most significant bits of each pixel, and the fifth |
| | +    byte holds the least significant bits, which are discarded here. The output |
| | +    has one pixel per 2x2 Bayer block, with four channels (R, G, G, B). |
| | +    """ |
| | +    raw = np.asarray(raw)  # ensure it's an array |
| | +    output_shape = (size[1] // 2, size[0] // 2, 4) |
| | +    rggb = np.empty(output_shape, dtype=np.uint8) |
| | +    raw_w = rggb.shape[1] // 2 * 5  # bytes of packed pixel data per row |
| | +    # Each channel comes from alternate rows (offset[0]) and alternate columns |
| | +    # (offset[1]) of the Bayer pattern; every fifth (low-bits) byte is skipped. |
| | +    for plane, offset in enumerate([(1, 1), (0, 1), (1, 0), (0, 0)]): |
| | +        rggb[:, ::2, plane] = raw[offset[0] :: 2, offset[1] : raw_w + offset[1] : 5] |
| | +        rggb[:, 1::2, plane] = raw[ |
| | +            offset[0] :: 2, offset[1] + 2 : raw_w + offset[1] + 2 : 5 |
| | +        ] |
| | +    return rggb |
| 188 | + |
| 189 | + |
| 190 | +def rggb2rgb(rggb: np.ndarray) -> np.ndarray: |
| | +    """Convert RGGB to RGB by averaging the two green channels |
| | + |
| | +    Each green value is halved before the addition, to avoid overflowing uint8. |
| | +    """ |
| | +    return np.stack( |
| | +        [rggb[..., 0], rggb[..., 1] // 2 + rggb[..., 2] // 2, rggb[..., 3]], axis=2 |
| | +    ) |
| 195 | + |
| 196 | + |
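| | +# Worked example of the shapes above (sizes are illustrative, not specific to |
| | +# any particular sensor): a 3280x2464 packed raw frame gives raw2rggb(...) an |
| | +# output of shape (1232, 1640, 4), i.e. one value per channel per 2x2 Bayer |
| | +# block, and rggb2rgb(...) then returns an array of shape (1232, 1640, 3). |
| | + |
| | + |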
101 | 197 | class StreamingPiCamera2(Thing): |
102 | 198 | """A Thing that represents an OpenCV camera""" |
103 | 199 |
|
@@ -442,6 +538,146 @@ def capture_array( |
442 | 538 | with self.picamera() as cam: |
443 | 539 | return cam.capture_array(stream_name) |
444 | 540 |
|
| 541 | + @thing_action |
| 542 | + def capture_raw( |
| 543 | + self, |
| 544 | + states_getter: GetThingStates, |
| 545 | +        get_states: bool = True, |
| 546 | +        get_processing_inputs: bool = True, |
| 547 | + ) -> RawImageModel: |
| 548 | + """Capture a raw image |
| 549 | + |
| 550 | + This function is intended to be as fast as possible, and will return |
| 551 | + as soon as an image has been captured. The output format is not intended |
| 552 | + to be useful, except as input to `raw_to_png`. |
| 553 | + |
| 554 | + When used via the HTTP interface, this function returns the data as a |
| 555 | + `Blob` object, meaning it can be passed to another action without |
| 556 | + transferring it over the network. |
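| | + |
| | +        A rough usage sketch (`client` is an illustrative client object for |
| | +        this Thing, not something defined in this module): |
| | + |
| | +            raw = client.capture_raw(get_processing_inputs=True) |
| | +            png = client.raw_to_png(raw=raw) |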
| 557 | + """ |
| 558 | + with self.picamera() as cam: |
| 559 | + (buffer, ), parameters = cam.capture_buffers(["raw"]) |
| 560 | + configuration = cam.camera_configuration() |
| 561 | + return RawImageModel( |
| 562 | + image_data = RawBlob.from_bytes(buffer.tobytes()), |
| 563 | + thing_states = states_getter() if get_states else None, |
| 564 | + metadata = { "parameters": parameters, "sensor": configuration["sensor"] }, |
| 565 | + processing_inputs = ( |
| 566 | + self.image_processing_inputs if get_processing_inputs else None |
| 567 | + ), |
| 568 | + size = configuration["raw"]["size"], |
| 569 | + format = configuration["raw"]["format"], |
| 570 | + stride = configuration["raw"]["stride"], |
| 571 | + ) |
| 572 | + |
| 573 | + @thing_property |
| 574 | + def image_processing_inputs(self) -> ImageProcessingInputs: |
| 575 | + """The information needed to turn raw images into processed ones""" |
| 576 | + lst = self.lens_shading_tables |
| 577 | + lum = np.array(lst.luminance) |
| 578 | + Cr = np.array(lst.Cr) |
| 579 | + Cb = np.array(lst.Cb) |
| 580 | + gr, gb = self.colour_gains |
| 581 | + G = 1 / lum |
| 582 | +        # The extra factor of np.min() emulates the quirky handling of the Cr |
| 583 | +        # and Cb lens shading tables in the picamera2 pipeline. |
| 584 | +        R = G / Cr / gr * np.min(Cr) |
| 585 | +        B = G / Cb / gb * np.min(Cb) |
| 586 | + white_norm_lores = np.stack([R, G, B], axis=2) |
| 587 | + |
| 588 | + with self.picamera() as cam: |
| 589 | + size: tuple[int, int] = cam.camera_configuration()["raw"]["size"] |
| 590 | + |
| 591 | + contrast_algorithm = Picamera2.find_tuning_algo(self.tuning, "rpi.contrast") |
| 592 | + gamma = np.array(contrast_algorithm["gamma_curve"]).reshape((-1, 2)) |
| 593 | + |
| 594 | + return ImageProcessingInputs( |
| 595 | + lens_shading=lst, |
| 596 | + colour_gains=(gr, gb), |
| 597 | + colour_correction_matrix=self.colour_correction_matrix, |
| 598 | + white_norm_lores=white_norm_lores, |
| 599 | + raw_size=size, |
| 600 | + gamma=gamma, |
| 601 | + ) |
| 602 | + |
| 603 | + @staticmethod |
| 604 | + def generate_image_processing_cache( |
| 605 | + p: ImageProcessingInputs, |
| 606 | + ) -> ImageProcessingCache: |
| 607 | + """Prepare to process raw images |
| 608 | + |
| 609 | + This is a static method to ensure its outputs depend only on its |
| 610 | + inputs.""" |
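| | +        # Example of the zoom arithmetic (numbers are illustrative only): for a |
| | +        # 3280x2464 raw frame and a 16x12 lens shading grid, zoom_factors is |
| | +        # [2464 / 2 / 12, 3280 / 2 / 16, 1], and white_norm is cropped to shape |
| | +        # (1232, 1640, 3) to match the half-resolution RGB image. |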
| 611 | + zoom_factors = [ |
| 612 | + i / 2 / n for i, n in zip(p.raw_size[::-1], p.white_norm_lores.shape[:2]) |
| 613 | + ] + [1] |
| 614 | + white_norm = zoom(p.white_norm_lores, zoom_factors, order=1)[ |
| 615 | + : (p.raw_size[1]//2), : (p.raw_size[0]//2), : |
| 616 | + ] |
| 617 | +        ccm = np.array(p.colour_correction_matrix).reshape((3, 3)) |
| 618 | +        gamma = interp1d(p.gamma[:, 0] / 255, p.gamma[:, 1] / 255) |
| 619 | +        return ImageProcessingCache( |
| 620 | +            white_norm=white_norm, |
| 621 | +            ccm=ccm, |
| 622 | +            gamma=gamma, |
| 623 | + ) |
| 624 | + |
| 625 | +    _image_processing_cache: ImageProcessingCache | None = None |
| | + |
| 626 | +    @thing_action |
| 627 | + def prepare_image_normalisation( |
| 628 | + self, |
| 629 | + inputs: ImageProcessingInputs | None = None |
| 630 | + ) -> ImageProcessingInputs: |
| 631 | +        """Prepare and cache the parameters for converting raw data to images |
| 632 | + |
| 633 | + NB this method uses only information from `inputs` or |
| 634 | + `self.image_processing_inputs`, to ensure repeatability |
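| | + |
| | +        A rough sketch of the intended use (from within this class; the name |
| | +        `captured_raws` is illustrative): |
| | + |
| | +            self.prepare_image_normalisation() |
| | +            for raw in captured_raws: |
| | +                processed = self.process_raw_array(raw, use_cache=True) |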
| 635 | + """ |
| 636 | + p = inputs or self.image_processing_inputs |
| 637 | + self._image_processing_cache = self.generate_image_processing_cache(p) |
| 638 | + return p |
| 639 | + |
| 640 | + @thing_action |
| 641 | + def process_raw_array( |
| 642 | + self, |
| 643 | + raw: RawImageModel, |
| 644 | + use_cache: bool = False, |
| 645 | +    ) -> NDArray: |
| 646 | + """Convert a raw image to a processed array""" |
| 647 | + if not use_cache: |
| 648 | + if raw.processing_inputs is None: |
| 649 | + raise ValueError( |
| 650 | + "The raw image does not contain processing inputs, " |
| 651 | + "and we are not using the cache. This may be solved by " |
| 652 | + "capturing with `get_processing_inputs=True`." |
| 653 | + ) |
| 654 | + self.prepare_image_normalisation( |
| 655 | + raw.processing_inputs |
| 656 | + ) |
| 657 | +        p = self._image_processing_cache |
| | +        assert p is not None, "prepare_image_normalisation must be run first" |
| | +        assert raw.format == "SBGGR10_CSI2P", "Unsupported raw format" |
| | +        # Unpack the packed 10 bit data into a half-resolution 8 bit RGB image |
| | +        buffer = np.frombuffer(raw.image_data.content, dtype=np.uint8) |
| | +        packed = buffer.reshape((-1, raw.stride)) |
| | +        rgb = rggb2rgb(raw2rggb(packed, raw.size)) |
| | +        # Correct for lens shading and white balance, apply the colour correction |
| | +        # matrix, clip to 0-255, then apply the gamma curve |
| | +        normed = rgb / p.white_norm |
| | +        corrected = np.dot(p.ccm, normed.reshape((-1, 3)).T).T.reshape(normed.shape) |
| | +        corrected[corrected < 0] = 0 |
| | +        corrected[corrected > 255] = 255 |
| | +        processed_image = p.gamma(corrected) |
| | +        return processed_image.astype(np.uint8) |
| 671 | + |
| 672 | + @thing_action |
| 673 | +    def raw_to_png(self, raw: RawImageModel, use_cache: bool = False) -> PNGBlob: |
| 674 | + """Process a raw image to a PNG""" |
| 675 | + arr = self.process_raw_array(raw=raw, use_cache=use_cache) |
| 676 | + image = Image.fromarray(arr.astype(np.uint8), mode="RGB") |
| 677 | + out = io.BytesIO() |
| 678 | + image.save(out, format="png") |
| 679 | + return PNGBlob.from_bytes(out.getvalue()) |
| 680 | + |
445 | 681 | @thing_property |
446 | 682 | def camera_configuration(self) -> Mapping: |
447 | 683 | """The "configuration" dictionary of the picamera2 object |
|