diff --git a/src/components/tools/polygon/PolygonWidget2D.vue b/src/components/tools/polygon/PolygonWidget2D.vue
index da252fc3c..28311eb4d 100644
--- a/src/components/tools/polygon/PolygonWidget2D.vue
+++ b/src/components/tools/polygon/PolygonWidget2D.vue
@@ -30,6 +30,7 @@ import type { Vector3 } from '@kitware/vtk.js/types';
import { ToolID } from '@/src/types/annotation-tool';
import { VtkViewContext } from '@/src/components/vtk/context';
import { useSliceInfo } from '@/src/composables/useSliceInfo';
+import { getRenderSlice } from '@/src/core/cine/getRenderSlice';
import SVG2DComponent from './PolygonSVG2D.vue';
export default defineComponent({
@@ -134,10 +135,11 @@ export default defineComponent({
widget.setManipulator(manipulator);
watchEffect(() => {
+ const semantic = tool.value?.slice ?? slice.value ?? 0;
updatePlaneManipulatorFor2DView(
manipulator,
viewDirection.value,
- tool.value?.slice ?? slice.value ?? 0,
+ getRenderSlice(imageId.value, semantic),
imageMetadata.value
);
});
diff --git a/src/components/tools/rectangle/RectangleWidget2D.vue b/src/components/tools/rectangle/RectangleWidget2D.vue
index a41f28b54..320f1c83d 100644
--- a/src/components/tools/rectangle/RectangleWidget2D.vue
+++ b/src/components/tools/rectangle/RectangleWidget2D.vue
@@ -32,6 +32,7 @@ import { VtkViewContext } from '@/src/components/vtk/context';
import { useSliceInfo } from '@/src/composables/useSliceInfo';
import { Maybe } from '@/src/types';
import { whenever } from '@vueuse/core';
+import { getRenderSlice } from '@/src/core/cine/getRenderSlice';
const useStore = useRectangleStore;
const vtkWidgetFactory = vtkRectangleWidget;
@@ -122,10 +123,11 @@ export default defineComponent({
widget.setManipulator(manipulator);
watchEffect(() => {
+ const semantic = tool.value?.slice ?? slice.value;
updatePlaneManipulatorFor2DView(
manipulator,
viewDirection.value,
- tool.value?.slice ?? slice.value,
+ getRenderSlice(imageId.value, semantic),
imageMetadata.value
);
});
diff --git a/src/components/tools/ruler/RulerWidget2D.vue b/src/components/tools/ruler/RulerWidget2D.vue
index b24b8d976..ae42ab7c9 100644
--- a/src/components/tools/ruler/RulerWidget2D.vue
+++ b/src/components/tools/ruler/RulerWidget2D.vue
@@ -33,6 +33,7 @@ import { Maybe } from '@/src/types';
import { useSliceInfo } from '@/src/composables/useSliceInfo';
import { VtkViewContext } from '@/src/components/vtk/context';
import { whenever } from '@vueuse/core';
+import { getRenderSlice } from '@/src/core/cine/getRenderSlice';
export default defineComponent({
name: 'RulerWidget2D',
@@ -121,10 +122,11 @@ export default defineComponent({
widget.setManipulator(manipulator);
watchEffect(() => {
+ const semantic = ruler.value?.slice ?? slice.value;
updatePlaneManipulatorFor2DView(
manipulator,
viewDirection.value,
- ruler.value?.slice ?? slice.value,
+ getRenderSlice(imageId.value, semantic),
imageMetadata.value
);
});
diff --git a/src/components/vtk/VtkBaseSliceRepresentation.vue b/src/components/vtk/VtkBaseSliceRepresentation.vue
index b8c63569c..b66cdc19c 100644
--- a/src/components/vtk/VtkBaseSliceRepresentation.vue
+++ b/src/components/vtk/VtkBaseSliceRepresentation.vue
@@ -1,5 +1,14 @@
diff --git a/src/components/vtk/VtkSliceViewSlicingKeyManipulator.vue b/src/components/vtk/VtkSliceViewSlicingKeyManipulator.vue
index b7cfa8797..848bd32c4 100644
--- a/src/components/vtk/VtkSliceViewSlicingKeyManipulator.vue
+++ b/src/components/vtk/VtkSliceViewSlicingKeyManipulator.vue
@@ -67,16 +67,14 @@ const scroll = useMouseRangeManipulatorListener(
sliceConfig.range,
1,
sliceConfig.slice.value,
- -1
+ -1,
+ // Set the scrolled view as the active view — only on real user input.
+ () => {
+ useViewStore().setActiveView(unref(viewId));
+ }
);
syncRef(scroll, sliceConfig.slice, { immediate: true });
-
-// set just scrolled view as active view
-watch(scroll, () => {
- const viewStore = useViewStore();
- viewStore.setActiveView(unref(viewId));
-});
diff --git a/src/components/vtk/VtkSliceViewSlicingManipulator.vue b/src/components/vtk/VtkSliceViewSlicingManipulator.vue
index a7f92f089..b389bc408 100644
--- a/src/components/vtk/VtkSliceViewSlicingManipulator.vue
+++ b/src/components/vtk/VtkSliceViewSlicingManipulator.vue
@@ -10,7 +10,7 @@ import vtkMouseRangeManipulator, {
} from '@kitware/vtk.js/Interaction/Manipulators/MouseRangeManipulator';
import vtkInteractorStyleManipulator from '@kitware/vtk.js/Interaction/Style/InteractorStyleManipulator';
import { syncRef } from '@vueuse/core';
-import { inject, toRefs, unref, watch, computed } from 'vue';
+import { inject, toRefs, unref, computed } from 'vue';
import { useViewStore } from '@/src/store/views';
interface Props {
@@ -55,14 +55,12 @@ const scroll = useMouseRangeManipulatorListener(
sliceConfig.range,
1,
sliceConfig.slice.value,
- -1 // Invert scroll: scroll down = decrease slice for anatomical consistency
+ -1, // Invert scroll: scroll down = decrease slice for anatomical consistency
+ () => {
+ useViewStore().setActiveView(unref(viewId));
+ }
);
-watch(scroll, () => {
- const viewStore = useViewStore();
- viewStore.setActiveView(unref(viewId));
-});
-
syncRef(scroll, sliceConfig.slice, { immediate: true });
diff --git a/src/core/cine/DicomCineImage.ts b/src/core/cine/DicomCineImage.ts
new file mode 100644
index 000000000..8f8f0b7d7
--- /dev/null
+++ b/src/core/cine/DicomCineImage.ts
@@ -0,0 +1,292 @@
+// DicomCineImage — backing image for multi-frame ultrasound DICOM clips.
+//
+// Treats the clip as a normal VolView image selection: extends BaseProgressiveImage,
+// owns a single 2D vtkImageData (extent [0, cols-1, 0, rows-1, 0, 0], 3-component
+// RGB uint8 scalars), and swaps the scalars in-place whenever the selected frame
+// changes. The original compressed frame bytes live in this object too, so we never
+// hold all decoded frames at once — only the LRU's worth.
+
+import vtkImageData from '@kitware/vtk.js/Common/DataModel/ImageData';
+import vtkDataArray from '@kitware/vtk.js/Common/Core/DataArray';
+import mitt, { Emitter } from 'mitt';
+import { computed } from 'vue';
+
+import {
+ BaseProgressiveImage,
+ ProgressiveImageEvents,
+} from '@/src/core/progressiveImage';
+import {
+ CineHeader,
+ CineParseResult,
+ isNativeTransferSyntax,
+ isSupportedCineTransferSyntax,
+} from './parseCineDicom';
+import {
+ DecodedFrame,
+ FrameCache,
+ decodeJpegFrame,
+ decodeNativeFrame,
+} from './frameCache';
+
+// DICOM PhysicalUnits code 3 = centimetres. Other codes (dB, %, m/s, ...) are
+// used for Doppler regions and aren't a length we can express as VTK spacing.
+const PHYSICAL_UNITS_CM = 3;
+
+function pickPixelSpacing(header: CineHeader): [number, number] {
+ const region = header.regions[0];
+ if (!region) return [1, 1];
+ if (
+ region.physicalUnitsX !== PHYSICAL_UNITS_CM ||
+ region.physicalUnitsY !== PHYSICAL_UNITS_CM
+ ) {
+ return [1, 1];
+ }
+ const dx = region.physicalDeltaX;
+ const dy = region.physicalDeltaY;
+ if (!dx || !dy || !Number.isFinite(dx) || !Number.isFinite(dy)) {
+ return [1, 1];
+ }
+ // Convert centimetres-per-pixel to millimetres-per-pixel.
+ return [Math.abs(dx) * 10, Math.abs(dy) * 10];
+}
+
+// Convert RGBA (from createImageBitmap or our native decoder) to a 3-component
+// RGB Uint8Array suitable for VTK scalar swap.
+function rgbaToRgb(
+ rgba: Uint8ClampedArray,
+ out: Uint8Array
+): void {
+ const pixels = rgba.length >> 2;
+ for (let i = 0; i < pixels; i++) {
+ const src = i * 4;
+ const dst = i * 3;
+ out[dst] = rgba[src];
+ out[dst + 1] = rgba[src + 1];
+ out[dst + 2] = rgba[src + 2];
+ }
+}
+
+export default class DicomCineImage extends BaseProgressiveImage {
+ public readonly header: CineHeader;
+
+ private readonly frames: Uint8Array[];
+ private readonly encapsulated: boolean;
+ private readonly cache: FrameCache;
+  private readonly inFlightFrames: Map<number, Promise<DecodedFrame>>;
+  private readonly events: Emitter<ProgressiveImageEvents>;
+ private readonly scalarBuffer: Uint8Array;
+
+ private disposed = false;
+
+ constructor(init: CineParseResult) {
+ super();
+
+ const { header, frames, encapsulated } = init;
+ this.header = header;
+ this.frames = frames;
+ this.encapsulated = encapsulated;
+ this.cache = new FrameCache();
+ this.inFlightFrames = new Map();
+    this.events = mitt<ProgressiveImageEvents>();
+
+ // Build the single 2D vtkImageData. 3-component RGB uint8.
+ const cols = header.cols;
+ const rows = header.rows;
+ if (!cols || !rows) {
+ throw new Error('Cine DICOM has invalid Rows/Columns');
+ }
+ this.scalarBuffer = new Uint8Array(cols * rows * 3);
+ const imageData = vtkImageData.newInstance();
+ imageData.setExtent([0, cols - 1, 0, rows - 1, 0, 0]);
+ // Region calibration — DICOM C.8.5.5.1: PhysicalUnits=3 means cm. We only
+ // apply spacing when both axes report cm, so length measurements come out
+ // in millimetres. Anything else (missing region, percent, dB, m/s, mixed
+ // units) leaves spacing at 1 — the ruler then reports pixels, which is
+ // the documented v1 behavior.
+ const [spacingX, spacingY] = pickPixelSpacing(header);
+ imageData.setSpacing([spacingX, spacingY, 1]);
+ imageData.setOrigin([0, 0, 0]);
+ imageData.setDirection([1, 0, 0, 0, 1, 0, 0, 0, 1]);
+
+ const scalars = vtkDataArray.newInstance({
+ numberOfComponents: 3,
+ values: this.scalarBuffer,
+ });
+ scalars.setRange({ min: 0, max: 255 }, 0);
+ scalars.setRange({ min: 0, max: 255 }, 1);
+ scalars.setRange({ min: 0, max: 255 }, 2);
+ imageData.getPointData().setScalars(scalars);
+
+ this.vtkImageData.value = imageData;
+
+ // The cine clip has all metadata up-front, so it is "complete" once the
+ // first frame is decoded. Volume views still see status='incomplete' until
+ // we kick off the initial decode in startLoad().
+ this.status.value = 'incomplete';
+ this.loaded = computed(
+ () => !this.loading.value && this.status.value === 'complete'
+ );
+
+ this.events.on('loading', (loading) => {
+ this.loading.value = loading;
+ });
+ this.events.on('status', (status) => {
+ this.status.value = status;
+ });
+ }
+
+  addEventListener<T extends keyof ProgressiveImageEvents>(
+ type: T,
+ callback: (info: ProgressiveImageEvents[T]) => void
+ ): void {
+ this.events.on(type, callback);
+ }
+
+  removeEventListener<T extends keyof ProgressiveImageEvents>(
+ type: T,
+ callback: (info: ProgressiveImageEvents[T]) => void
+ ): void {
+ this.events.off(type, callback);
+ }
+
+ startLoad(): void {
+ // No streaming step — the compressed bytes are already in memory. We
+ // decode the first frame so the canonical compatibility image has valid
+ // scalars for older consumers (thumbnail, getVtkImageData, metadata).
+ if (this.disposed) return;
+ if (!isSupportedCineTransferSyntax(this.header.transferSyntaxUID)) {
+ this.reportError(
+ new Error(
+ `Unsupported cine transfer syntax: ${this.header.transferSyntaxUID}`
+ )
+ );
+ return;
+ }
+ this.events.emit('loading', true);
+ this.getFrame(0)
+ .then((frame) => {
+ if (this.disposed) return;
+ this.applyDecodedFrame(frame.rgba);
+ this.markComplete();
+ })
+ .catch(() => {
+ // getFrame already routed the error through reportError.
+ });
+ }
+
+ stopLoad(): void {
+ // Nothing to interrupt — frames are decoded one at a time on demand.
+ }
+
+ dispose(): void {
+ if (this.disposed) return;
+ this.disposed = true;
+ super.dispose();
+ this.events.all.clear();
+ this.cache.clear();
+ this.inFlightFrames.clear();
+ // Clear the compressed-frame views; we don't own the underlying buffer
+ // (it was sliced from the original ArrayBuffer), but null'ing the array
+ // lets GC reclaim the wrapper objects.
+ this.frames.length = 0;
+ this.vtkImageData.value.delete();
+ }
+
+ getNumberOfFrames(): number {
+ return this.header.numberOfFrames;
+ }
+
+ // Decoded-frame access for per-view render buffers. Concurrent requests
+ // for the same uncached frame share a single decode via inFlightFrames.
+  async getFrame(frameIndex: number): Promise<DecodedFrame> {
+ if (this.disposed) {
+ throw new Error('DicomCineImage is disposed');
+ }
+ const clamped = Math.max(
+ 0,
+ Math.min(this.header.numberOfFrames - 1, frameIndex | 0)
+ );
+
+ const cached = this.cache.get(clamped);
+ if (cached) return cached;
+
+ const inFlight = this.inFlightFrames.get(clamped);
+ if (inFlight) return inFlight;
+
+ const decodePromise = (async () => {
+ try {
+ const decoded = await this.decode(clamped);
+ if (this.disposed) {
+ throw new Error('DicomCineImage is disposed');
+ }
+ this.cache.set(clamped, decoded);
+ return decoded;
+ } catch (err) {
+ // Disposal isn't a decode failure; don't emit a user-visible error.
+ if (!this.disposed) {
+ this.reportError(err);
+ }
+ throw err;
+ } finally {
+ this.inFlightFrames.delete(clamped);
+ }
+ })();
+
+ this.inFlightFrames.set(clamped, decodePromise);
+ return decodePromise;
+ }
+
+ // Emit an error and clear the loading flag — otherwise consumers stay stuck
+ // on the spinner forever after a bad first-frame decode.
+ private reportError(err: unknown): void {
+ const error = err instanceof Error ? err : new Error(String(err));
+ this.events.emit('error', error);
+ if (this.loading.value) {
+ this.events.emit('loading', false);
+ }
+ }
+
+ private async decode(frameIndex: number) {
+ const bytes = this.frames[frameIndex];
+ if (!bytes) throw new Error(`Frame ${frameIndex} is missing`);
+ if (this.encapsulated) {
+ return decodeJpegFrame(bytes, this.header.cols, this.header.rows);
+ }
+ return decodeNativeFrame(bytes, {
+ width: this.header.cols,
+ height: this.header.rows,
+ samplesPerPixel: this.header.samplesPerPixel,
+ photometric: this.header.photometricInterpretation,
+ planarConfiguration: this.header.planarConfiguration,
+ });
+ }
+
+ private applyDecodedFrame(rgba: Uint8ClampedArray): void {
+ rgbaToRgb(rgba, this.scalarBuffer);
+ const scalars = this.vtkImageData.value.getPointData().getScalars();
+ scalars.modified();
+ this.vtkImageData.value.modified();
+ }
+
+ private markComplete(): void {
+ if (this.status.value !== 'complete') {
+ this.events.emit('status', 'complete');
+ }
+ if (this.loading.value) {
+ this.events.emit('loading', false);
+ }
+ }
+
+ // Helper used by import routing to decide whether the file is cine-renderable.
+ static isSupported(header: CineHeader): boolean {
+ if (!isSupportedCineTransferSyntax(header.transferSyntaxUID)) return false;
+ if (isNativeTransferSyntax(header.transferSyntaxUID)) {
+ // Native pixel data: only support 8-bit RGB or grayscale for v1.
+ if (header.bitsAllocated !== 8) return false;
+ if (header.samplesPerPixel !== 1 && header.samplesPerPixel !== 3) {
+ return false;
+ }
+ }
+ return true;
+ }
+}
diff --git a/src/core/cine/__tests__/parseCineDicom.spec.ts b/src/core/cine/__tests__/parseCineDicom.spec.ts
new file mode 100644
index 000000000..22213e3ad
--- /dev/null
+++ b/src/core/cine/__tests__/parseCineDicom.spec.ts
@@ -0,0 +1,243 @@
+import { describe, it, expect } from 'vitest';
+import { parseCineDicom } from '../parseCineDicom';
+
+// =================================================================
+// Synthetic DICOM builder (Explicit VR LE)
+// =================================================================
+//
+// Lets the parser smoke tests run anywhere without committing real ultrasound
+// fixtures. Produces the minimum byte layout the parser cares about: 128-byte
+// preamble, DICM magic, a TransferSyntaxUID file-meta element, and a hand-
+// rolled dataset.
+
+const TS_EXPLICIT_VR_LE = '1.2.840.10008.1.2.1';
+const TS_JPEG_BASELINE = '1.2.840.10008.1.2.4.50';
+
+function ascii(s: string): Uint8Array {
+ const out = new Uint8Array(s.length);
+ for (let i = 0; i < s.length; i++) out[i] = s.charCodeAt(i);
+ return out;
+}
+
+function evenPad(bytes: Uint8Array): Uint8Array {
+ if (bytes.byteLength % 2 === 0) return bytes;
+ const out = new Uint8Array(bytes.byteLength + 1);
+ out.set(bytes);
+ out[bytes.byteLength] = 0x20;
+ return out;
+}
+
+function concat(parts: Uint8Array[]): Uint8Array {
+ const total = parts.reduce((s, p) => s + p.byteLength, 0);
+ const out = new Uint8Array(total);
+ let off = 0;
+ for (const p of parts) {
+ out.set(p, off);
+ off += p.byteLength;
+ }
+ return out;
+}
+
+const VR4 = new Set(['OB', 'OW', 'OF', 'OD', 'OL', 'SQ', 'UC', 'UR', 'UT', 'UN']);
+
+function elementShort(
+ group: number,
+ element: number,
+ vr: string,
+ value: Uint8Array
+): Uint8Array {
+ const padded = evenPad(value);
+ const header = new Uint8Array(8);
+ const dv = new DataView(header.buffer);
+ dv.setUint16(0, group, true);
+ dv.setUint16(2, element, true);
+ header[4] = vr.charCodeAt(0);
+ header[5] = vr.charCodeAt(1);
+ dv.setUint16(6, padded.byteLength, true);
+ return concat([header, padded]);
+}
+
+function elementLong(
+ group: number,
+ element: number,
+ vr: string,
+ value: Uint8Array
+): Uint8Array {
+ if (!VR4.has(vr)) throw new Error(`VR ${vr} is not 4-byte length`);
+ const header = new Uint8Array(12);
+ const dv = new DataView(header.buffer);
+ dv.setUint16(0, group, true);
+ dv.setUint16(2, element, true);
+ header[4] = vr.charCodeAt(0);
+ header[5] = vr.charCodeAt(1);
+ // header[6..8] reserved zero
+ dv.setUint32(8, value.byteLength, true);
+ return concat([header, value]);
+}
+
+function u16Bytes(value: number): Uint8Array {
+ const out = new Uint8Array(2);
+ new DataView(out.buffer).setUint16(0, value, true);
+ return out;
+}
+
+function fileMeta(transferSyntaxUID: string): Uint8Array {
+ return elementShort(0x0002, 0x0010, 'UI', ascii(transferSyntaxUID));
+}
+
+const COMMON_TAGS = (numberOfFrames: number, rows: number, cols: number) =>
+ concat([
+ elementShort(0x0008, 0x0016, 'UI', ascii('1.2.840.10008.5.1.4.1.1.3.1')),
+ elementShort(0x0008, 0x0018, 'UI', ascii('1.2.3.4.5.6.7.8.9')),
+ elementShort(0x0008, 0x0060, 'CS', ascii('US')),
+ elementShort(0x0020, 0x000d, 'UI', ascii('1.2.3.4.5.6.7.8.9.1')),
+ elementShort(0x0020, 0x000e, 'UI', ascii('1.2.3.4.5.6.7.8.9.2')),
+ elementShort(0x0028, 0x0002, 'US', u16Bytes(1)),
+ elementShort(0x0028, 0x0004, 'CS', ascii('MONOCHROME2')),
+ elementShort(0x0028, 0x0008, 'IS', ascii(String(numberOfFrames))),
+ elementShort(0x0028, 0x0010, 'US', u16Bytes(rows)),
+ elementShort(0x0028, 0x0011, 'US', u16Bytes(cols)),
+ elementShort(0x0028, 0x0100, 'US', u16Bytes(8)),
+ elementShort(0x0028, 0x0101, 'US', u16Bytes(8)),
+ ]);
+
+function buildNativeDicom(opts: {
+ numberOfFrames: number;
+ rows: number;
+ cols: number;
+ fillByte?: number;
+}): Uint8Array {
+ const { numberOfFrames, rows, cols, fillByte = 0x42 } = opts;
+ const preamble = new Uint8Array(128);
+ const dicm = ascii('DICM');
+ const meta = fileMeta(TS_EXPLICIT_VR_LE);
+ const dataset = COMMON_TAGS(numberOfFrames, rows, cols);
+
+ const pixelBytes = new Uint8Array(numberOfFrames * rows * cols);
+ pixelBytes.fill(fillByte);
+ const pixelData = elementLong(0x7fe0, 0x0010, 'OB', pixelBytes);
+
+ return concat([preamble, dicm, meta, dataset, pixelData]);
+}
+
+function buildEncapsulatedDicom(opts: {
+ rows: number;
+ cols: number;
+ frameJpegBytes: Uint8Array[];
+ populateBot: boolean;
+}): Uint8Array {
+ const { rows, cols, frameJpegBytes, populateBot } = opts;
+ const preamble = new Uint8Array(128);
+ const dicm = ascii('DICM');
+ const meta = fileMeta(TS_JPEG_BASELINE);
+ const dataset = COMMON_TAGS(frameJpegBytes.length, rows, cols);
+
+ // Build the encapsulated PixelData: undefined length (0xFFFFFFFF), BOT item,
+ // one fragment item per frame, sequence delimitation item.
+ const fragmentItems = frameJpegBytes.map((bytes) => {
+ const padded = evenPad(bytes);
+ const header = new Uint8Array(8);
+ const dv = new DataView(header.buffer);
+ dv.setUint16(0, 0xfffe, true);
+ dv.setUint16(2, 0xe000, true);
+ dv.setUint32(4, padded.byteLength, true);
+ return concat([header, padded]);
+ });
+
+ const botEntries = new Uint8Array(
+ populateBot ? frameJpegBytes.length * 4 : 0
+ );
+ if (populateBot) {
+ const dv = new DataView(botEntries.buffer);
+ let relOffset = 0;
+ frameJpegBytes.forEach((bytes, i) => {
+ dv.setUint32(i * 4, relOffset, true);
+ relOffset += 8 + evenPad(bytes).byteLength;
+ });
+ }
+ const botHeader = new Uint8Array(8);
+ const botDv = new DataView(botHeader.buffer);
+ botDv.setUint16(0, 0xfffe, true);
+ botDv.setUint16(2, 0xe000, true);
+ botDv.setUint32(4, botEntries.byteLength, true);
+ const botItem = concat([botHeader, botEntries]);
+
+ const seqDelim = new Uint8Array(8);
+ const sdv = new DataView(seqDelim.buffer);
+ sdv.setUint16(0, 0xfffe, true);
+ sdv.setUint16(2, 0xe0dd, true);
+ sdv.setUint32(4, 0, true);
+
+ const pdHeader = new Uint8Array(12);
+ const pdDv = new DataView(pdHeader.buffer);
+ pdDv.setUint16(0, 0x7fe0, true);
+ pdDv.setUint16(2, 0x0010, true);
+ pdHeader[4] = 'O'.charCodeAt(0);
+ pdHeader[5] = 'B'.charCodeAt(0);
+ pdDv.setUint32(8, 0xffffffff, true);
+
+ const pdBody = concat([botItem, ...fragmentItems, seqDelim]);
+ return concat([preamble, dicm, meta, dataset, pdHeader, pdBody]);
+}
+
+// Marker bytes that look like a JPEG SOI/EOI. The parser doesn't actually
+// decode them, so any payload with the right framing works.
+const fakeJpegFrame = (n: number): Uint8Array =>
+ Uint8Array.from([0xff, 0xd8, 0xff, 0xe0, n & 0xff, 0xff, 0xd9]);
+
+// =================================================================
+// In-test (synthetic) fixtures — run everywhere
+// =================================================================
+
+describe('parseCineDicom on synthetic fixtures', () => {
+ it('parses Explicit VR LE native multi-frame', () => {
+ const bytes = buildNativeDicom({
+ numberOfFrames: 5,
+ rows: 4,
+ cols: 3,
+ fillByte: 0x7f,
+ });
+ const { header, frames, encapsulated } = parseCineDicom(bytes);
+ expect(encapsulated).toBe(false);
+ expect(header.numberOfFrames).toBe(5);
+ expect(header.rows).toBe(4);
+ expect(header.cols).toBe(3);
+ expect(header.transferSyntaxUID).toBe(TS_EXPLICIT_VR_LE);
+ expect(header.photometricInterpretation).toBe('MONOCHROME2');
+ expect(frames.length).toBe(5);
+ expect(frames[0].byteLength).toBe(4 * 3);
+ expect(frames[0][0]).toBe(0x7f);
+ });
+
+ it('parses encapsulated PixelData with a populated BOT', () => {
+ const frameBytes = [fakeJpegFrame(1), fakeJpegFrame(2), fakeJpegFrame(3)];
+ const bytes = buildEncapsulatedDicom({
+ rows: 2,
+ cols: 2,
+ frameJpegBytes: frameBytes,
+ populateBot: true,
+ });
+ const { header, frames, encapsulated } = parseCineDicom(bytes);
+ expect(encapsulated).toBe(true);
+ expect(header.numberOfFrames).toBe(3);
+ expect(frames.length).toBe(3);
+ expect(frames[0][0]).toBe(0xff);
+ expect(frames[0][1]).toBe(0xd8);
+ expect(frames[2][4]).toBe(3); // payload byte from fakeJpegFrame(3)
+ });
+
+ it('parses encapsulated PixelData with an empty BOT (1-fragment-per-frame)', () => {
+ const frameBytes = [fakeJpegFrame(10), fakeJpegFrame(20)];
+ const bytes = buildEncapsulatedDicom({
+ rows: 2,
+ cols: 2,
+ frameJpegBytes: frameBytes,
+ populateBot: false,
+ });
+ const { header, frames } = parseCineDicom(bytes);
+ expect(header.numberOfFrames).toBe(2);
+ expect(frames.length).toBe(2);
+ expect(frames[1][4]).toBe(20);
+ });
+});
+
diff --git a/src/core/cine/frameCache.ts b/src/core/cine/frameCache.ts
new file mode 100644
index 000000000..48860f199
--- /dev/null
+++ b/src/core/cine/frameCache.ts
@@ -0,0 +1,184 @@
+// Decoded-frame cache for cine playback.
+//
+// Holds CPU-visible RGBA bytes (not ImageBitmaps) because VTK's scalar buffer
+// needs to copy them in. Bounded by a byte budget; LRU eviction. Per-image
+// instance — disposed alongside the DicomCineImage that owns it.
+
+export type DecodedFrame = {
+ width: number;
+ height: number;
+ // RGBA, 8-bit per channel, row-major (matches OffscreenCanvas getImageData).
+ rgba: Uint8ClampedArray;
+};
+
+const DEFAULT_BUDGET_BYTES = 256 * 1024 * 1024;
+
+export class FrameCache {
+ private readonly budgetBytes: number;
+ private bytesInUse: number;
+ // Insertion-ordered Map gives O(1) LRU behavior.
+  private readonly entries: Map<number, DecodedFrame>;
+
+ constructor(budgetBytes: number = DEFAULT_BUDGET_BYTES) {
+ this.budgetBytes = budgetBytes;
+ this.bytesInUse = 0;
+ this.entries = new Map();
+ }
+
+ get(frameIndex: number): DecodedFrame | null {
+ const entry = this.entries.get(frameIndex);
+ if (!entry) return null;
+ // Move to "most recent" by re-inserting.
+ this.entries.delete(frameIndex);
+ this.entries.set(frameIndex, entry);
+ return entry;
+ }
+
+ set(frameIndex: number, frame: DecodedFrame): void {
+ const existing = this.entries.get(frameIndex);
+ if (existing) {
+ this.bytesInUse -= existing.rgba.byteLength;
+ this.entries.delete(frameIndex);
+ }
+ this.entries.set(frameIndex, frame);
+ this.bytesInUse += frame.rgba.byteLength;
+ this.evictUntilUnderBudget();
+ }
+
+ has(frameIndex: number): boolean {
+ return this.entries.has(frameIndex);
+ }
+
+ clear(): void {
+ this.entries.clear();
+ this.bytesInUse = 0;
+ }
+
+ size(): number {
+ return this.entries.size;
+ }
+
+ getBytesInUse(): number {
+ return this.bytesInUse;
+ }
+
+ private evictUntilUnderBudget(): void {
+ if (this.bytesInUse <= this.budgetBytes) return;
+ // Map iteration order is insertion order; the oldest entry is the first.
+ for (const key of this.entries.keys()) {
+ if (this.bytesInUse <= this.budgetBytes) break;
+ const entry = this.entries.get(key)!;
+ this.bytesInUse -= entry.rgba.byteLength;
+ this.entries.delete(key);
+ }
+ }
+}
+
+// =================================================================
+// Decoders
+// =================================================================
+
+// Decode a single JPEG-Baseline compressed frame to RGBA using the browser's
+// native JPEG decoder. Returns the decoded pixel bytes that can be copied
+// directly into a VTK scalar buffer.
+export async function decodeJpegFrame(
+ bytes: Uint8Array,
+ expectedWidth: number,
+ expectedHeight: number
+): Promise<DecodedFrame> {
+ // The Uint8Array is a view into the original DICOM ArrayBuffer; the cast
+ // satisfies the lib.dom typing of BlobPart (which excludes SharedArrayBuffer-
+ // backed views) without forcing a buffer copy.
+ const blob = new Blob([bytes as BlobPart], { type: 'image/jpeg' });
+ const bitmap = await createImageBitmap(blob);
+ try {
+ const canvas = new OffscreenCanvas(bitmap.width, bitmap.height);
+ const ctx = canvas.getContext('2d');
+ if (!ctx) throw new Error('OffscreenCanvas 2D context unavailable');
+ ctx.drawImage(bitmap, 0, 0);
+ const imageData = ctx.getImageData(0, 0, bitmap.width, bitmap.height);
+ if (
+ (expectedWidth && bitmap.width !== expectedWidth) ||
+ (expectedHeight && bitmap.height !== expectedHeight)
+ ) {
+ // Mismatch is unusual for valid DICOM US clips. Keep the data but flag
+ // it in the console — the caller will still copy whatever it returns.
+ console.warn(
+ `JPEG frame size ${bitmap.width}x${bitmap.height} does not match DICOM-declared ${expectedWidth}x${expectedHeight}`
+ );
+ }
+ return {
+ width: bitmap.width,
+ height: bitmap.height,
+ rgba: imageData.data,
+ };
+ } finally {
+ bitmap.close();
+ }
+}
+
+type NativeFrameLayout = {
+ width: number;
+ height: number;
+ samplesPerPixel: number;
+ photometric: string;
+ // DICOM PlanarConfiguration (0028,0006): 0 = pixel-interleaved (RGBRGB...),
+ // 1 = plane-interleaved (RRR...GGG...BBB...).
+ planarConfiguration: number;
+};
+
+// Convert a raw uncompressed frame to RGBA. Supports the two photometric
+// interpretations our sample corpus shows in the native PixelData path:
+// - 'RGB' with samplesPerPixel=3 (interleaved RGB)
+// - 'MONOCHROME2' with samplesPerPixel=1 (grayscale, replicated to RGB)
+export function decodeNativeFrame(
+ bytes: Uint8Array,
+ layout: NativeFrameLayout
+): DecodedFrame {
+ const { width, height, samplesPerPixel, photometric, planarConfiguration } =
+ layout;
+ const pixelCount = width * height;
+ const out = new Uint8ClampedArray(pixelCount * 4);
+
+ if (samplesPerPixel === 1) {
+ // Grayscale — replicate luminance to R, G, B; alpha = 255.
+ for (let i = 0; i < pixelCount; i++) {
+ const v = bytes[i] ?? 0;
+ const j = i * 4;
+ out[j] = v;
+ out[j + 1] = v;
+ out[j + 2] = v;
+ out[j + 3] = 255;
+ }
+ } else if (
+ samplesPerPixel === 3 &&
+ (photometric === 'RGB' || photometric === 'PALETTE COLOR')
+ ) {
+ if (planarConfiguration === 1) {
+ // RRRR...GGGG...BBBB...
+ const plane = pixelCount;
+ for (let i = 0; i < pixelCount; i++) {
+ const j = i * 4;
+ out[j] = bytes[i] ?? 0;
+ out[j + 1] = bytes[plane + i] ?? 0;
+ out[j + 2] = bytes[2 * plane + i] ?? 0;
+ out[j + 3] = 255;
+ }
+ } else {
+ for (let i = 0; i < pixelCount; i++) {
+ const k = i * 3;
+ const j = i * 4;
+ out[j] = bytes[k] ?? 0;
+ out[j + 1] = bytes[k + 1] ?? 0;
+ out[j + 2] = bytes[k + 2] ?? 0;
+ out[j + 3] = 255;
+ }
+ }
+ } else {
+ throw new Error(
+ `Unsupported native frame format: ${photometric} samplesPerPixel=${samplesPerPixel}`
+ );
+ }
+
+ return { width, height, rgba: out };
+}
diff --git a/src/core/cine/getRenderSlice.ts b/src/core/cine/getRenderSlice.ts
new file mode 100644
index 000000000..64e9908f2
--- /dev/null
+++ b/src/core/cine/getRenderSlice.ts
@@ -0,0 +1,17 @@
+import { Maybe } from '@/src/types';
+import { isCineImage } from './isCineImage';
+
+// VTK render-slice index versus VolView's semantic slice value.
+//
+// For a normal 3D volume the two are identical.
+//
+// For a cine image, the semantic slice is the frame cursor and ranges over
+// [0, numberOfFrames-1], but the underlying vtkImageData has only one Z slice
+// at index 0. Anything that pokes a VTK slice mapper or 2D widget plane needs
+// the render slice (always 0 for cine).
+export function getRenderSlice(
+ imageID: Maybe,
+ semanticSlice: number
+): number {
+ return isCineImage(imageID) ? 0 : semanticSlice;
+}
diff --git a/src/core/cine/isCineImage.ts b/src/core/cine/isCineImage.ts
new file mode 100644
index 000000000..526529747
--- /dev/null
+++ b/src/core/cine/isCineImage.ts
@@ -0,0 +1,14 @@
+import { Maybe } from '@/src/types';
+import { useImageCacheStore } from '@/src/store/image-cache';
+import { useDICOMStore } from '@/src/store/datasets-dicom';
+import type DicomCineImage from './DicomCineImage';
+
+export function isCineImage(imageID: Maybe<string>): boolean {
+ if (!imageID) return false;
+ return useDICOMStore().volumeInfo[imageID]?.kind === 'cine';
+}
+
+export function getCineImage(imageID: Maybe<string>): DicomCineImage | null {
+ if (!isCineImage(imageID)) return null;
+ return (useImageCacheStore().imageById[imageID!] as DicomCineImage) ?? null;
+}
diff --git a/src/core/cine/parseCineDicom.ts b/src/core/cine/parseCineDicom.ts
new file mode 100644
index 000000000..c8a4ec57e
--- /dev/null
+++ b/src/core/cine/parseCineDicom.ts
@@ -0,0 +1,309 @@
+import dicomParser, { DataSet, Element } from 'dicom-parser';
+
+// =================================================================
+// Tag constants (dicom-parser format: 'xGGGGEEEE', lowercase hex)
+// =================================================================
+
+const TAG_PIXEL_DATA = 'x7fe00010';
+const TAG_SOP_CLASS_UID = 'x00080016';
+const TAG_SOP_INSTANCE_UID = 'x00080018';
+const TAG_PATIENT_NAME = 'x00100010';
+const TAG_PATIENT_ID = 'x00100020';
+const TAG_PATIENT_BIRTH_DATE = 'x00100030';
+const TAG_PATIENT_SEX = 'x00100040';
+const TAG_STUDY_INSTANCE_UID = 'x0020000d';
+const TAG_STUDY_DATE = 'x00080020';
+const TAG_STUDY_TIME = 'x00080030';
+const TAG_STUDY_ID = 'x00200010';
+const TAG_ACCESSION_NUMBER = 'x00080050';
+const TAG_STUDY_DESCRIPTION = 'x00081030';
+const TAG_MODALITY = 'x00080060';
+const TAG_SERIES_INSTANCE_UID = 'x0020000e';
+const TAG_SERIES_NUMBER = 'x00200011';
+const TAG_SERIES_DESCRIPTION = 'x0008103e';
+const TAG_NUMBER_OF_FRAMES = 'x00280008';
+const TAG_ROWS = 'x00280010';
+const TAG_COLUMNS = 'x00280011';
+const TAG_BITS_ALLOCATED = 'x00280100';
+const TAG_SAMPLES_PER_PIXEL = 'x00280002';
+const TAG_PHOTOMETRIC = 'x00280004';
+const TAG_FRAME_TIME = 'x00181063';
+const TAG_SEQUENCE_OF_ULTRASOUND_REGIONS = 'x00186011';
+const TAG_PLANAR_CONFIGURATION = 'x00280006';
+
+const TAG_REGION_LOCATION_MIN_X0 = 'x00186018';
+const TAG_REGION_LOCATION_MIN_Y0 = 'x0018601a';
+const TAG_REGION_LOCATION_MAX_X1 = 'x0018601c';
+const TAG_REGION_LOCATION_MAX_Y1 = 'x0018601e';
+const TAG_PHYSICAL_UNITS_X = 'x00186024';
+const TAG_PHYSICAL_UNITS_Y = 'x00186026';
+const TAG_PHYSICAL_DELTA_X = 'x0018602c';
+const TAG_PHYSICAL_DELTA_Y = 'x0018602e';
+
+const TAG_TRANSFER_SYNTAX_UID = 'x00020010';
+
+// =================================================================
+// Public types
+// =================================================================
+
+type CineUltrasoundRegion = {
+ minX0: number;
+ minY0: number;
+ maxX1: number;
+ maxY1: number;
+ physicalDeltaX: number | null;
+ physicalDeltaY: number | null;
+ physicalUnitsX: number | null;
+ physicalUnitsY: number | null;
+};
+
+type CinePatientInfo = {
+ PatientID: string;
+ PatientName: string;
+ PatientBirthDate: string;
+ PatientSex: string;
+};
+
+type CineStudyInfo = {
+ StudyID: string;
+ StudyInstanceUID: string;
+ StudyDate: string;
+ StudyTime: string;
+ AccessionNumber: string;
+ StudyDescription: string;
+};
+
+type CineSeriesInfo = {
+ SeriesInstanceUID: string;
+ SeriesNumber: string;
+ SeriesDescription: string;
+ Modality: string;
+ SOPInstanceUID: string;
+ SOPClassUID: string;
+};
+
+export type CineHeader = {
+ transferSyntaxUID: string;
+ rows: number;
+ cols: number;
+ numberOfFrames: number;
+ samplesPerPixel: number;
+ bitsAllocated: number;
+ planarConfiguration: number;
+ photometricInterpretation: string;
+ frameTimeMs: number | null;
+ patient: CinePatientInfo;
+ study: CineStudyInfo;
+ series: CineSeriesInfo;
+ regions: CineUltrasoundRegion[];
+};
+
+export type CineParseResult = {
+ header: CineHeader;
+ // Native: zero-copy view into the source buffer. Encapsulated: bytes for
+ // one frame, possibly assembled from multiple fragments.
+ frames: Uint8Array[];
+ encapsulated: boolean;
+};
+
+// =================================================================
+// Tag readers
+// =================================================================
+
+const str = (ds: DataSet, tag: string): string =>
+ (ds.string(tag) ?? '').trim();
+
+const intStr = (ds: DataSet, tag: string): number =>
+ ds.intString(tag) ?? 0;
+
+const u16 = (ds: DataSet, tag: string): number =>
+ ds.uint16(tag) ?? 0;
+
+// VR=FD (PhysicalDeltaX/Y in ultrasound regions)
+const readDouble = (ds: DataSet, tag: string): number | null => {
+ const v = ds.double(tag);
+ return v !== undefined && Number.isFinite(v) ? v : null;
+};
+
+// VR=DS (FrameTime, decimal string)
+const readDecimalString = (ds: DataSet, tag: string): number | null => {
+ const v = ds.floatString(tag);
+ return v !== undefined && Number.isFinite(v) ? v : null;
+};
+
+function buildRegion(item: DataSet): CineUltrasoundRegion {
+ return {
+ minX0: u16(item, TAG_REGION_LOCATION_MIN_X0),
+ minY0: u16(item, TAG_REGION_LOCATION_MIN_Y0),
+ maxX1: u16(item, TAG_REGION_LOCATION_MAX_X1),
+ maxY1: u16(item, TAG_REGION_LOCATION_MAX_Y1),
+ physicalDeltaX: readDouble(item, TAG_PHYSICAL_DELTA_X),
+ physicalDeltaY: readDouble(item, TAG_PHYSICAL_DELTA_Y),
+ physicalUnitsX: u16(item, TAG_PHYSICAL_UNITS_X) || null,
+ physicalUnitsY: u16(item, TAG_PHYSICAL_UNITS_Y) || null,
+ };
+}
+
+// =================================================================
+// PixelData extraction
+// =================================================================
+
+function extractEncapsulatedFrames(
+ ds: DataSet,
+ pd: Element,
+ numberOfFrames: number
+): Uint8Array[] {
+ const bot = pd.basicOffsetTable ?? [];
+ const fragments = pd.fragments ?? [];
+
+ let offsets: number[];
+ if (bot.length === numberOfFrames) {
+ offsets = bot;
+ } else if (fragments.length === numberOfFrames) {
+ offsets = fragments.map((f) => f.offset);
+ } else {
+ // Empty BOT and fragments don't map 1:1 — scan for JPEG SOI markers.
+ offsets = dicomParser.createJPEGBasicOffsetTable(ds, pd);
+ }
+
+ const frames: Uint8Array[] = [];
+ for (let i = 0; i < numberOfFrames; i++) {
+ // ByteArray = Uint8Array | Buffer in the TS shim; in browser it's always Uint8Array.
+ frames.push(
+ dicomParser.readEncapsulatedImageFrame(
+ ds,
+ pd,
+ i,
+ offsets,
+ fragments
+ ) as Uint8Array
+ );
+ }
+ return frames;
+}
+
+function extractNativeFrames(
+ ds: DataSet,
+ pd: Element,
+ numberOfFrames: number,
+ frameBytes: number
+): Uint8Array[] {
+ const byteArray = ds.byteArray;
+ const frames: Uint8Array[] = [];
+ for (let i = 0; i < numberOfFrames; i++) {
+ frames.push(
+ new Uint8Array(
+ byteArray.buffer,
+ byteArray.byteOffset + pd.dataOffset + i * frameBytes,
+ frameBytes
+ )
+ );
+ }
+ return frames;
+}
+
+// =================================================================
+// Public entry point
+// =================================================================
+
+export function parseCineDicom(
+ input: ArrayBuffer | Uint8Array
+): CineParseResult {
+ const u8 =
+ input instanceof Uint8Array ? input : new Uint8Array(input as ArrayBuffer);
+ const ds = dicomParser.parseDicom(u8);
+
+ const pd = ds.elements[TAG_PIXEL_DATA];
+ if (!pd) {
+ throw new Error('PixelData (7FE0,0010) not found in DICOM file');
+ }
+
+ const numberOfFrames = intStr(ds, TAG_NUMBER_OF_FRAMES) || 1;
+ const rows = u16(ds, TAG_ROWS);
+ const cols = u16(ds, TAG_COLUMNS);
+ const samplesPerPixel = u16(ds, TAG_SAMPLES_PER_PIXEL) || 1;
+ const bitsAllocated = u16(ds, TAG_BITS_ALLOCATED) || 8;
+ const planarConfiguration = u16(ds, TAG_PLANAR_CONFIGURATION);
+ const transferSyntaxUID = str(ds, TAG_TRANSFER_SYNTAX_UID);
+
+ const regionItems = ds.elements[TAG_SEQUENCE_OF_ULTRASOUND_REGIONS]?.items ?? [];
+ const regions = regionItems
+ .map((item) => item.dataSet)
+ .filter((d): d is DataSet => !!d)
+ .map(buildRegion);
+
+ const header: CineHeader = {
+ transferSyntaxUID,
+ rows,
+ cols,
+ numberOfFrames,
+ samplesPerPixel,
+ bitsAllocated,
+ planarConfiguration,
+ photometricInterpretation: str(ds, TAG_PHOTOMETRIC),
+ frameTimeMs: readDecimalString(ds, TAG_FRAME_TIME),
+ patient: {
+ PatientID: str(ds, TAG_PATIENT_ID),
+ PatientName: str(ds, TAG_PATIENT_NAME),
+ PatientBirthDate: str(ds, TAG_PATIENT_BIRTH_DATE),
+ PatientSex: str(ds, TAG_PATIENT_SEX),
+ },
+ study: {
+ StudyID: str(ds, TAG_STUDY_ID),
+ StudyInstanceUID: str(ds, TAG_STUDY_INSTANCE_UID),
+ StudyDate: str(ds, TAG_STUDY_DATE),
+ StudyTime: str(ds, TAG_STUDY_TIME),
+ AccessionNumber: str(ds, TAG_ACCESSION_NUMBER),
+ StudyDescription: str(ds, TAG_STUDY_DESCRIPTION),
+ },
+ series: {
+ SeriesInstanceUID: str(ds, TAG_SERIES_INSTANCE_UID),
+ SeriesNumber: str(ds, TAG_SERIES_NUMBER),
+ SeriesDescription: str(ds, TAG_SERIES_DESCRIPTION),
+ Modality: str(ds, TAG_MODALITY),
+ SOPInstanceUID: str(ds, TAG_SOP_INSTANCE_UID),
+ SOPClassUID: str(ds, TAG_SOP_CLASS_UID),
+ },
+ regions,
+ };
+
+ const encapsulated = !!pd.encapsulatedPixelData;
+ const frames = encapsulated
+ ? extractEncapsulatedFrames(ds, pd, numberOfFrames)
+ : extractNativeFrames(
+ ds,
+ pd,
+ numberOfFrames,
+ rows * cols * samplesPerPixel * Math.ceil(bitsAllocated / 8)
+ );
+
+ return { header, frames, encapsulated };
+}
+
+// =================================================================
+// Supported transfer syntaxes
+// =================================================================
+
+const TRANSFER_SYNTAX_IMPLICIT_VR_LE = '1.2.840.10008.1.2';
+const TRANSFER_SYNTAX_EXPLICIT_VR_LE = '1.2.840.10008.1.2.1';
+const TRANSFER_SYNTAX_JPEG_BASELINE_1 = '1.2.840.10008.1.2.4.50';
+const TRANSFER_SYNTAX_JPEG_BASELINE_2_4 = '1.2.840.10008.1.2.4.51';
+
+const NATIVE_TRANSFER_SYNTAXES = new Set([
+ TRANSFER_SYNTAX_IMPLICIT_VR_LE,
+ TRANSFER_SYNTAX_EXPLICIT_VR_LE,
+]);
+
+const SUPPORTED_TRANSFER_SYNTAXES = new Set([
+ ...NATIVE_TRANSFER_SYNTAXES,
+ TRANSFER_SYNTAX_JPEG_BASELINE_1,
+ TRANSFER_SYNTAX_JPEG_BASELINE_2_4,
+]);
+
+export function isNativeTransferSyntax(uid: string): boolean {
+ return NATIVE_TRANSFER_SYNTAXES.has(uid);
+}
+
+export function isSupportedCineTransferSyntax(uid: string): boolean {
+ return SUPPORTED_TRANSFER_SYNTAXES.has(uid);
+}
diff --git a/src/core/dicomTags.ts b/src/core/dicomTags.ts
index 7f599f262..aa544203d 100644
--- a/src/core/dicomTags.ts
+++ b/src/core/dicomTags.ts
@@ -33,13 +33,35 @@ const tags: Tag[] = [
{ name: 'RescaleIntercept', tag: '0028|1052' },
{ name: 'RescaleSlope', tag: '0028|1053' },
{ name: 'NumberOfFrames', tag: '0028|0008' },
+ { name: 'SOPClassUID', tag: '0008|0016' },
+ { name: 'PhotometricInterpretation', tag: '0028|0004' },
+ { name: 'FrameTime', tag: '0018|1063' },
{ name: 'SequenceOfUltrasoundRegions', tag: '0018|6011' },
- { name: 'PhysicalDeltaX', tag: '0018|602c' },
- { name: 'PhysicalDeltaY', tag: '0018|602e' },
+ { name: 'RegionLocationMinX0', tag: '0018|6018' },
+ { name: 'RegionLocationMinY0', tag: '0018|601a' },
+ { name: 'RegionLocationMaxX1', tag: '0018|601c' },
+ { name: 'RegionLocationMaxY1', tag: '0018|601e' },
{ name: 'PhysicalUnitsXDirection', tag: '0018|6024' },
{ name: 'PhysicalUnitsYDirection', tag: '0018|6026' },
+ { name: 'PhysicalDeltaX', tag: '0018|602c' },
+ { name: 'PhysicalDeltaY', tag: '0018|602e' },
];
+// DICOM SOP Class UIDs for Ultrasound Multi-frame Image Storage. The current
+// identifier is .4.1.1.3.1; the retired pre-1993 identifier is .4.1.1.3.
+// Some legacy clinical archives and well-known test corpora (e.g. GDCM's
+// US-MONO2-8-8x-execho) still emit the retired UID.
+export const SOP_CLASS_ULTRASOUND_MULTIFRAME = '1.2.840.10008.5.1.4.1.1.3.1';
+export const SOP_CLASS_ULTRASOUND_MULTIFRAME_RETIRED = '1.2.840.10008.5.1.4.1.1.3';
+
+export function isUltrasoundMultiframeSopClass(uid: string): boolean {
+ const trimmed = uid.trim();
+ return (
+ trimmed === SOP_CLASS_ULTRASOUND_MULTIFRAME ||
+ trimmed === SOP_CLASS_ULTRASOUND_MULTIFRAME_RETIRED
+ );
+}
+
export const TAG_TO_NAME = new Map(tags.map((t) => [t.tag, t.name]));
export const NAME_TO_TAG = new Map(tags.map((t) => [t.name, t.tag]));
export const Tags = Object.fromEntries(tags.map((t) => [t.name, t.tag]));
diff --git a/src/core/progressiveImage.ts b/src/core/progressiveImage.ts
index 59f7c3755..757a7e1d9 100644
--- a/src/core/progressiveImage.ts
+++ b/src/core/progressiveImage.ts
@@ -27,6 +27,10 @@ export interface ProgressiveImage {
getStatus(): ProgressiveImageStatus;
isLoading(): boolean;
isLoaded(): boolean;
+ // Returns a data-URI thumbnail, or null if this image has no thumbnail.
+ // The base implementation returns null so consumers can call this on any
+ // ProgressiveImage without type-narrowing.
+ getThumbnail(): Promise<string | null>;
addEventListener<T extends keyof ProgressiveImageEvents>(
type: T,
callback: (info: ProgressiveImageEvents[T]) => void
@@ -162,6 +166,10 @@ export abstract class BaseProgressiveImage implements ProgressiveImage {
this.cleanupListeners();
}
+ getThumbnail(): Promise<string | null> {
+ return Promise.resolve(null);
+ }
+
abstract startLoad(): void;
abstract stopLoad(): void;
diff --git a/src/core/streaming/chunkImage.ts b/src/core/streaming/chunkImage.ts
index 8cedce1ba..bd180e2ff 100644
--- a/src/core/streaming/chunkImage.ts
+++ b/src/core/streaming/chunkImage.ts
@@ -5,10 +5,6 @@ import {
import { Chunk } from '@/src/core/streaming/chunk';
import { Extent } from '@kitware/vtk.js/types';
-export enum ThumbnailStrategy {
- MiddleSlice,
-}
-
export enum ChunkStatus {
NotLoaded,
Loading,
@@ -33,7 +29,6 @@ export type ChunkImageEvents = {
export interface ChunkImage extends ProgressiveImage {
addChunks(chunks: Chunk[]): void;
- getThumbnail(strategy: ThumbnailStrategy): Promise<string>;
addEventListener<T extends keyof ChunkImageEvents>(
type: T,
callback: (info: ChunkImageEvents[T]) => void
diff --git a/src/core/streaming/dicomChunkImage.ts b/src/core/streaming/dicomChunkImage.ts
index a49d176e4..16fbef9a3 100644
--- a/src/core/streaming/dicomChunkImage.ts
+++ b/src/core/streaming/dicomChunkImage.ts
@@ -14,7 +14,6 @@ import vtkDataArray from '@kitware/vtk.js/Common/Core/DataArray';
import { ChunkState } from '@/src/core/streaming/chunkStateMachine';
import {
type ChunkImage,
- ThumbnailStrategy,
ChunkStatus,
ChunkImageEvents,
} from '@/src/core/streaming/chunkImage';
@@ -78,7 +77,7 @@ export default class DicomChunkImage
{
protected chunks: Chunk[];
private chunkListeners: Array<() => void>;
- private thumbnailCache: WeakMap<Chunk, Promise<string>>;
+ private thumbnailCache: WeakMap<Chunk, Promise<string | null>>;
private events: Emitter<ChunkImageEvents>;
private chunkStatus: ChunkStatus[];
@@ -220,10 +219,7 @@ export default class DicomChunkImage
}
}
- getThumbnail(strategy: ThumbnailStrategy): Promise<string> {
- if (strategy !== ThumbnailStrategy.MiddleSlice)
- throw new Error('Can only handle MiddleSlice thumbnailing strategy');
-
+ getThumbnail(): Promise<string | null> {
const middle = Math.floor(this.chunks.length / 2);
const chunk = this.chunks[middle];
diff --git a/src/core/vtk/useMouseRangeManipulatorListener.ts b/src/core/vtk/useMouseRangeManipulatorListener.ts
index fa5c39cce..80db2ef74 100644
--- a/src/core/vtk/useMouseRangeManipulatorListener.ts
+++ b/src/core/vtk/useMouseRangeManipulatorListener.ts
@@ -15,7 +15,12 @@ export function useMouseRangeManipulatorListener(
range: MaybeRef<Maybe<Vector2>>,
step: MaybeRef<Maybe<number>>,
initialValue?: number,
- scale: number = 1 // Negative scale inverts scroll direction
+ scale: number = 1, // Negative scale inverts scroll direction
+ // Fires only on real wheel/drag input — not on programmatic writes that
+ // come back through a bidirectional sync. Use this to drive side effects
+ // like setActiveView so they don't fire during cine playback or other
+ // programmatic slice updates.
+ onUserInput?: () => void
) {
const internalValue = ref(initialValue ?? 0);
@@ -38,6 +43,7 @@ export function useMouseRangeManipulatorListener(
() => internalValue.value,
(val) => {
internalValue.value = val;
+ onUserInput?.();
},
scale
);
diff --git a/src/store/datasets-dicom.ts b/src/store/datasets-dicom.ts
index 6ba636310..94de879fa 100644
--- a/src/store/datasets-dicom.ts
+++ b/src/store/datasets-dicom.ts
@@ -4,7 +4,9 @@ import * as DICOM from '@/src/io/dicom';
import { Chunk } from '@/src/core/streaming/chunk';
import { useImageCacheStore } from '@/src/store/image-cache';
import DicomChunkImage from '@/src/core/streaming/dicomChunkImage';
-import { Tags } from '@/src/core/dicomTags';
+import DicomCineImage from '@/src/core/cine/DicomCineImage';
+import { parseCineDicom } from '@/src/core/cine/parseCineDicom';
+import { isUltrasoundMultiframeSopClass, Tags } from '@/src/core/dicomTags';
import { removeFromArray } from '../utils';
export const ANONYMOUS_PATIENT = 'Anonymous';
@@ -45,6 +47,10 @@ export interface VolumeInfo {
SeriesDescription: string;
WindowLevel: string;
WindowWidth: string;
+ // 'cine' marks an ultrasound multi-frame image whose NumberOfSlices is the
+ // number of cine frames (not Z-axis slices). Absent/undefined means a normal
+ // 3D volume.
+ kind?: 'volume' | 'cine';
}
interface State {
@@ -90,6 +96,19 @@ export const getDisplayName = (info: VolumeInfo) => {
);
};
+function isCineChunkGroup(chunks: Chunk[]): boolean {
+ if (chunks.length !== 1) return false;
+ const meta = chunks[0].metadata;
+ if (!meta) return false;
+ const lookup = Object.fromEntries(meta);
+ const sopClass = lookup[Tags.SOPClassUID] ?? '';
+ const numberOfFrames = parseInt(
+ (lookup[Tags.NumberOfFrames] ?? '0').trim(),
+ 10
+ );
+ return isUltrasoundMultiframeSopClass(sopClass) && numberOfFrames > 1;
+}
+
export const getWindowLevels = (info: VolumeInfo) => {
const { WindowWidth, WindowLevel } = info;
if (
@@ -141,11 +160,19 @@ export const useDICOMStore = defineStore('dicom', {
await Promise.all(
Object.entries(chunksByVolume).map(async ([id, sortedChunks]) => {
- const image = imageCacheStore.imageById[id] ?? new DicomChunkImage();
- if (!(image instanceof DicomChunkImage)) {
- throw new Error('image is not a DicomChunkImage');
+ if (isCineChunkGroup(sortedChunks)) {
+ await this._importCineChunk(id, sortedChunks[0]);
+ return;
}
+ if (this.volumeInfo[id]?.kind === 'cine') {
+ throw new Error(
+ `Volume ${id} is already loaded as a cine clip; cannot re-import as a chunk volume.`
+ );
+ }
+ const image = (imageCacheStore.imageById[id] ??
+ new DicomChunkImage()) as DicomChunkImage;
+
await image.addChunks(sortedChunks);
imageCacheStore.addProgressiveImage(image, { id });
@@ -191,6 +218,68 @@ export const useDICOMStore = defineStore('dicom', {
return chunksByVolume;
},
+ async _importCineChunk(id: string, chunk: Chunk) {
+ const imageCacheStore = useImageCacheStore();
+
+ // If we already created this cine image (state-file reload), bail.
+ if (this.volumeInfo[id]?.kind === 'cine') {
+ return;
+ }
+
+ await chunk.loadData();
+ const blob = chunk.dataBlob;
+ if (!blob) throw new Error('Cine DICOM chunk has no data');
+ const buffer = await blob.arrayBuffer();
+ const parsed = parseCineDicom(buffer);
+
+ if (!DicomCineImage.isSupported(parsed.header)) {
+ throw new Error(
+ `Unsupported cine transfer syntax / pixel format: ` +
+ `${parsed.header.transferSyntaxUID} (` +
+ `${parsed.header.bitsAllocated}-bit, ` +
+ `${parsed.header.samplesPerPixel}-sample ` +
+ `${parsed.header.photometricInterpretation})`
+ );
+ }
+
+ const image = new DicomCineImage(parsed);
+ imageCacheStore.addProgressiveImage(image, { id });
+
+ const { patient, study, series } = parsed.header;
+
+ const patientInfo: PatientInfo = {
+ PatientID: patient.PatientID,
+ PatientName: patient.PatientName,
+ PatientBirthDate: patient.PatientBirthDate,
+ PatientSex: patient.PatientSex,
+ };
+
+ const studyInfo: StudyInfo = {
+ StudyID: study.StudyID,
+ StudyInstanceUID: study.StudyInstanceUID,
+ StudyDate: study.StudyDate,
+ StudyTime: study.StudyTime,
+ AccessionNumber: study.AccessionNumber,
+ StudyDescription: study.StudyDescription,
+ };
+
+ const volumeInfo: VolumeInfo = {
+ NumberOfSlices: parsed.header.numberOfFrames,
+ VolumeID: id,
+ Modality: series.Modality,
+ SeriesInstanceUID: series.SeriesInstanceUID,
+ SeriesNumber: series.SeriesNumber,
+ SeriesDescription: series.SeriesDescription,
+ WindowLevel: '',
+ WindowWidth: '',
+ kind: 'cine',
+ };
+
+ this._updateDatabase(patientInfo, studyInfo, volumeInfo);
+
+ image.setName(getDisplayName(volumeInfo));
+ },
+
_updateDatabase(
patient: PatientInfo,
study: StudyInfo,
diff --git a/src/store/image-cache.ts b/src/store/image-cache.ts
index 278339c0f..916c69488 100644
--- a/src/store/image-cache.ts
+++ b/src/store/image-cache.ts
@@ -110,6 +110,11 @@ export const useImageCacheStore = defineStore('image-cache', () => {
if (!(id in imageById)) return;
unregisterListeners(id);
+ // Release vtk data and any per-image caches (e.g. cine compressed frames
+ // and decoded-frame LRU). Without this, removing a dataset leaks all of
+ // its memory until the page reloads.
+ imageById[id].dispose();
+
const idx = imageIds.value.indexOf(id);
if (idx > -1) imageIds.value.splice(idx, 1);
delete imageById[id];
diff --git a/src/store/image-stats.ts b/src/store/image-stats.ts
index def7fa68a..b80e2b33b 100644
--- a/src/store/image-stats.ts
+++ b/src/store/image-stats.ts
@@ -19,6 +19,7 @@ import { useImage } from '@/src/composables/useCurrentImage';
import { ensureError } from '@/src/utils';
import { useImageCacheStore } from './image-cache';
import { useMessageStore } from './messages';
+import { isCineImage } from '@/src/core/cine/isCineImage';
export type ImageStats = {
scalarMin: number;
@@ -127,6 +128,10 @@ export const useImageStatsStore = defineStore('image-stats', () => {
};
const setupImageWatchers = (id: string) => {
+ // Cine images are 8-bit display-encoded; histograms and auto-range are
+ // meaningless and would allocate a huge Float64Array. Skip them.
+ if (isCineImage(id)) return;
+
const { imageData, isLoading: isImageLoading } = useImage(
computed(() => id)
);
diff --git a/src/store/segmentGroups.ts b/src/store/segmentGroups.ts
index 14ba4dc3d..3b345dbda 100644
--- a/src/store/segmentGroups.ts
+++ b/src/store/segmentGroups.ts
@@ -19,6 +19,7 @@ import {
import vtkImageExtractComponents from '@/src/utils/imageExtractComponentsFilter';
import { useImageCacheStore } from '@/src/store/image-cache';
import DicomChunkImage from '@/src/core/streaming/dicomChunkImage';
+import { useDICOMStore } from '@/src/store/datasets-dicom';
import vtkLabelMap from '../vtk/LabelMap';
import {
StateFile,
@@ -266,7 +267,11 @@ export const useSegmentGroupStore = defineStore('segmentGroup', () => {
image: vtkLabelMap,
component = 0
) {
- if (!isRegularImage(imageId)) {
+ const dicomStore = useDICOMStore();
+ if (
+ !isRegularImage(imageId) &&
+ dicomStore.volumeInfo[imageId]?.kind !== 'cine'
+ ) {
await untilLoaded(imageId);
const chunkImage = imageCacheStore.imageById[imageId] as DicomChunkImage;
diff --git a/src/store/tools/useAnnotationTool.ts b/src/store/tools/useAnnotationTool.ts
index 842c23c61..41f65fe51 100644
--- a/src/store/tools/useAnnotationTool.ts
+++ b/src/store/tools/useAnnotationTool.ts
@@ -134,9 +134,12 @@ export const useAnnotationTool = <
const imageID = currentImageID.value;
if (!imageID || tool.imageID !== imageID) return;
+ // Cine images report dimensions [cols, rows, 1] but tools may live on
+ // any frame; allow out-of-bounds so we still resolve the axis.
const toolImageFrame = frameOfReferenceToImageSliceAndAxis(
tool.frameOfReference,
- currentImageMetadata.value
+ currentImageMetadata.value,
+ { allowOutOfBoundsSlice: true }
);
if (!toolImageFrame) return;
diff --git a/src/store/view-configs/slicing.ts b/src/store/view-configs/slicing.ts
index 2849699ac..0431e27ae 100644
--- a/src/store/view-configs/slicing.ts
+++ b/src/store/view-configs/slicing.ts
@@ -14,6 +14,7 @@ import { ViewConfig } from '@/src/io/state-file/schema';
import { SliceConfig } from '@/src/store/view-configs/types';
import { useImageStore } from '@/src/store/datasets-images';
import { useViewStore } from '@/src/store/views';
+import { getCineImage } from '@/src/core/cine/isCineImage';
export const defaultSliceConfig = (): SliceConfig => ({
slice: 0,
@@ -36,6 +37,19 @@ export const useViewSliceStore = defineStore('viewSlice', () => {
const view = viewStore.getView(viewID);
if (view?.type !== '2D') return defaultSliceConfig();
+ // Cine images report dimensions [cols, rows, 1] but the slice axis is the
+ // frame index, so override using the cine clip's frame count.
+ const cine = getCineImage(imageID);
+ if (cine) {
+ const max = Math.max(0, cine.getNumberOfFrames() - 1);
+ return {
+ min: 0,
+ slice: 0,
+ max,
+ syncState: false,
+ };
+ }
+
const { orientation } = view.options;
const { metadata } = useImage(imageID);
const { lpsOrientation, dimensions } = metadata.value;
diff --git a/tests/specs/cine-rendering.e2e.ts b/tests/specs/cine-rendering.e2e.ts
new file mode 100644
index 000000000..9f85321b5
--- /dev/null
+++ b/tests/specs/cine-rendering.e2e.ts
@@ -0,0 +1,38 @@
+import { Key } from 'webdriverio';
+import { volViewPage } from '../pageobjects/volview.page';
+import { CINE_US_DATASET } from './configTestUtils';
+import { openUrls } from './utils';
+
+const PLAY_CONTROLS = '.play-controls';
+const FRAME_LABEL = '.view-annotations .frame-label';
+
+describe('VolView cine playback', () => {
+ it('loads a multi-frame ultrasound and scrubs frames', async () => {
+ await openUrls([CINE_US_DATASET]);
+
+ const playControls = $(PLAY_CONTROLS);
+ await playControls.waitForDisplayed({ timeout: 10000 });
+
+ const frameLabel = await $(FRAME_LABEL);
+ const initialText = (await frameLabel.getText()).trim();
+ // GDCM US-MONO2-8-8x-execho is 8 frames.
+ expect(initialText).toBe('Frame: 1 / 8');
+
+ // Scrub one frame forward via arrow key and verify the counter advances.
+ // Cine starts at frame 1, so ArrowUp is the direction with headroom.
+ await volViewPage.focusFirst2DView();
+ await browser.keys([Key.ArrowUp]);
+ await browser.waitUntil(
+ async () => (await frameLabel.getText()).trim() !== initialText,
+ {
+ timeout: 5000,
+ timeoutMsg: 'Expected cine frame counter to change after arrow key',
+ }
+ );
+
+ // The new label should still be "Frame: N / 8" with N in [1, 8].
+ const after = (await frameLabel.getText()).trim();
+ expect(after).toMatch(/^Frame: [1-8] \/ 8$/);
+ expect(after).not.toBe(initialText);
+ });
+});
diff --git a/tests/specs/configTestUtils.ts b/tests/specs/configTestUtils.ts
index 6e045dfb2..07002d2d0 100644
--- a/tests/specs/configTestUtils.ts
+++ b/tests/specs/configTestUtils.ts
@@ -81,6 +81,13 @@ export const US_MULTIFRAME_DICOM = {
name: 'US_multiframe_30frames.dcm',
} as const;
+// 8-frame echocardiogram from the BSD-licensed GDCM test corpus. Native
+// Explicit VR LE, MONOCHROME2, retired Ultrasound Multi-frame SOP UID.
+export const CINE_US_DATASET = {
+ url: 'https://sourceforge.net/p/gdcm/gdcmdata/ci/master/tree/US-MONO2-8-8x-execho.dcm?format=raw',
+ name: 'US-MONO2-8-8x-echo.dcm',
+} as const;
+
export type DatasetResource = {
url: string;
name?: string;
diff --git a/tests/specs/reveal-slice.e2e.ts b/tests/specs/reveal-slice.e2e.ts
new file mode 100644
index 000000000..09e10bc92
--- /dev/null
+++ b/tests/specs/reveal-slice.e2e.ts
@@ -0,0 +1,170 @@
+import { Key } from 'webdriverio';
+import { CINE_US_DATASET, PROSTATEX_DATASET } from './configTestUtils';
+import { downloadFile, openUrls } from './utils';
+import { volViewPage } from '../pageobjects/volview.page';
+
+const openMeasurementsTab = async () => {
+ const annotationsTab = await $(
+ 'button[data-testid="module-tab-Annotations"]'
+ );
+ await annotationsTab.click();
+
+ const measurementsTab = await $('button.v-tab*=Measurements');
+ await measurementsTab.waitForClickable();
+ await measurementsTab.click();
+};
+
+const waitForToolEntry = async (iconClass: string) => {
+ await browser.waitUntil(
+ async () => {
+ const entries = await $$(`.v-list-item i.${iconClass}.tool-icon`);
+ return (await entries.length) >= 1;
+ },
+ { timeoutMsg: `Tool entry with icon ${iconClass} not found` }
+ );
+};
+
+const clickRevealSliceButton = async () => {
+ // The reveal-slice button is the v-btn wrapping the mdi-target icon
+ // inside the measurement tool list entry.
+ const button = await $('.v-list-item button .mdi-target');
+ await button.waitForClickable();
+ await button.click();
+};
+
+const getCanvasCenter = async () => {
+ const views2D = await volViewPage.getViews2D();
+ const view = views2D[0];
+ const canvas = await view.$('canvas');
+ const location = await canvas.getLocation();
+ const size = await canvas.getSize();
+ return {
+ centerX: location.x + size.width / 2,
+ centerY: location.y + size.height / 2,
+ };
+};
+
+const clickAt = async (x: number, y: number) => {
+ await browser
+ .action('pointer')
+ .move({ x: Math.round(x), y: Math.round(y) })
+ .down()
+ .up()
+ .perform();
+};
+
+const placeRulerAtCanvasCenter = async () => {
+ const rulerToolButton = await $('button span i[class~=mdi-ruler]');
+ await rulerToolButton.click();
+
+ const { centerX, centerY } = await getCanvasCenter();
+ await clickAt(centerX - 40, centerY);
+ await clickAt(centerX + 40, centerY);
+};
+
+// --- normal volume image ----------------------------------------------
+
+const waitForSliceOverlay = async () => {
+ await browser.waitUntil(
+ async () => (await volViewPage.getFirst2DSlice()) !== null,
+ { timeoutMsg: 'Slice overlay never appeared' }
+ );
+};
+
+const waitForSlice = async (expectedSlice1Indexed: number) => {
+ await browser.waitUntil(
+ async () => {
+ const slice = await volViewPage.getFirst2DSlice();
+ return slice === expectedSlice1Indexed;
+ },
+ { timeoutMsg: `Expected view slice to reach ${expectedSlice1Indexed}` }
+ );
+};
+
+describe('Reveal Slice on a volume image', () => {
+ it('ruler Reveal Slice jumps the view back to the placed slice', async () => {
+ await downloadFile(PROSTATEX_DATASET.url, PROSTATEX_DATASET.name);
+ await openUrls([PROSTATEX_DATASET]);
+
+ await volViewPage.focusFirst2DView();
+ await waitForSliceOverlay();
+ const placementSlice = await volViewPage.getFirst2DSlice();
+ expect(placementSlice).not.toBeNull();
+
+ await placeRulerAtCanvasCenter();
+
+ // Advance to a different slice so reveal has somewhere to jump back to.
+ await volViewPage.advanceSliceAndWait();
+ await volViewPage.advanceSliceAndWait();
+ const movedSlice = await volViewPage.getFirst2DSlice();
+ expect(movedSlice).not.toBe(placementSlice);
+
+ await openMeasurementsTab();
+ await waitForToolEntry('mdi-ruler');
+
+ await clickRevealSliceButton();
+ await waitForSlice(placementSlice!);
+ });
+});
+
+// --- cine ultrasound --------------------------------------------------
+
+const FRAME_LABEL_SELECTOR = '.view-annotations .frame-label';
+
+const getCineFrame = async () => {
+ const frameLabel = await $(FRAME_LABEL_SELECTOR);
+ const text = (await frameLabel.getText()).trim();
+ const match = text.match(/^Frame:\s*(\d+)\s*\/\s*\d+/);
+ return match ? parseInt(match[1], 10) : null;
+};
+
+const waitForFrame = async (expected: number) => {
+ await browser.waitUntil(
+ async () => (await getCineFrame()) === expected,
+ { timeoutMsg: `Expected cine frame to reach ${expected}` }
+ );
+};
+
+const advanceCineFrame = async () => {
+ const before = await getCineFrame();
+ await browser.keys([Key.ArrowUp]);
+ await browser.waitUntil(
+ async () => {
+ const now = await getCineFrame();
+ return now !== null && now !== before;
+ },
+ { timeoutMsg: 'Cine frame counter did not advance' }
+ );
+};
+
+describe('Reveal Slice on cine ultrasound', () => {
+ it('ruler Reveal Slice jumps to the cine frame the ruler was placed on', async () => {
+ await openUrls([CINE_US_DATASET]);
+
+ const playControls = $('.play-controls');
+ await playControls.waitForDisplayed();
+ await waitForFrame(1);
+
+ await volViewPage.focusFirst2DView();
+
+ // Cine images report dimensions [cols, rows, 1]; placing the tool past
+ // frame 1 (slice index > 0) is what exercises the reveal-slice bug.
+ await advanceCineFrame();
+ await advanceCineFrame();
+ const placementFrame = await getCineFrame();
+ expect(placementFrame).not.toBe(1);
+
+ await placeRulerAtCanvasCenter();
+
+ await advanceCineFrame();
+ await advanceCineFrame();
+ const movedFrame = await getCineFrame();
+ expect(movedFrame).not.toBe(placementFrame);
+
+ await openMeasurementsTab();
+ await waitForToolEntry('mdi-ruler');
+
+ await clickRevealSliceButton();
+ await waitForFrame(placementFrame!);
+ });
+});
diff --git a/wdio.shared.conf.ts b/wdio.shared.conf.ts
index 1dfb43a02..e11f9183d 100644
--- a/wdio.shared.conf.ts
+++ b/wdio.shared.conf.ts
@@ -24,6 +24,10 @@ const TEST_DATASETS = [
url: 'https://data.kitware.com/api/v1/item/635679c311dab8142820a4f4/download',
name: 'fetus.zip',
},
+ {
+ url: 'https://sourceforge.net/p/gdcm/gdcmdata/ci/master/tree/US-MONO2-8-8x-execho.dcm?format=raw',
+ name: 'US-MONO2-8-8x-echo.dcm',
+ },
];
export const WINDOW_SIZE = [1200, 800] as const;