rlm@3: /** rlm@3: * @(#)AVIOutputStream.java 1.5.1 2011-01-17 rlm@3: * rlm@3: * Copyright (c) 2008-2011 Werner Randelshofer, Immensee, Switzerland. rlm@3: * All rights reserved. rlm@3: * rlm@3: * You may not use, copy or modify this file, except in compliance with the rlm@3: * license agreement you entered into with Werner Randelshofer. rlm@3: * For details see accompanying license terms. rlm@3: */ rlm@3: package com.aurellem.capture; rlm@3: rlm@3: import java.awt.Dimension; rlm@3: import java.awt.image.BufferedImage; rlm@3: import java.awt.image.DataBufferByte; rlm@3: import java.awt.image.IndexColorModel; rlm@3: import java.awt.image.WritableRaster; rlm@3: import java.io.File; rlm@3: import java.io.FileInputStream; rlm@3: import java.io.IOException; rlm@3: import java.io.InputStream; rlm@3: import java.io.OutputStream; rlm@3: import java.util.Arrays; rlm@3: import java.util.Date; rlm@3: import java.util.LinkedList; rlm@3: rlm@3: import javax.imageio.IIOImage; rlm@3: import javax.imageio.ImageIO; rlm@3: import javax.imageio.ImageWriteParam; rlm@3: import javax.imageio.ImageWriter; rlm@3: import javax.imageio.stream.FileImageOutputStream; rlm@3: import javax.imageio.stream.ImageOutputStream; rlm@3: import javax.imageio.stream.MemoryCacheImageOutputStream; rlm@3: rlm@3: /** rlm@3: * This class supports writing of images into an AVI 1.0 video file. rlm@3: *
rlm@3: * The images are written as video frames. rlm@3: *
rlm@3: * Video frames can be encoded with one of the following formats: rlm@3: *
rlm@3: * All frames in an AVI file must have the same duration. The duration can rlm@3: * be set by setting an appropriate pair of values using methods rlm@3: * {@link #setFrameRate} and {@link #setTimeScale}. rlm@3: *
rlm@3: * The length of an AVI 1.0 file is limited to 1 GB. rlm@3: * This class supports lengths of up to 4 GB, but such files may not work on rlm@3: * all players. rlm@3: *
rlm@3: * For detailed information about the AVI RIFF file format see:
rlm@3: * msdn.microsoft.com AVI RIFF
rlm@3: * www.microsoft.com FOURCC for Video Compression
rlm@3: * www.saettler.com RIFF
rlm@3: *
rlm@3: * @author Werner Randelshofer
rlm@3:  * @version 1.5.1 2011-01-17 Fixes unintended closing of output stream.<br>
rlm@3: *
1.5 2011-01-06 Adds support for RLE 8-bit video format.
rlm@3: *
1.4 2011-01-04 Adds support for RAW 4-bit and 8-bit video format. Fixes offsets
rlm@3: * in "idx1" chunk.
rlm@3: *
1.3.2 2010-12-27 File size limit is 1 GB.
rlm@3: *
1.3.1 2010-07-19 Fixes seeking and calculation of offsets.
rlm@3: *
1.3 2010-07-08 Adds constructor with ImageOutputStream.
rlm@3: * Added method getVideoDimension().
rlm@3: *
1.2 2009-08-29 Adds support for RAW video format.
rlm@3: *
1.1 2008-08-27 Fixes computation of dwMicroSecPerFrame in avih
rlm@3: * chunk. Changed the API to reflect that AVI works with frame rates instead of
rlm@3: * with frame durations.
rlm@3: *
1.0.1 2008-08-13 Uses FourCC "MJPG" instead of "jpg " for JPG
rlm@3: * encoded video.
rlm@3: *
1.0 2008-08-11 Created.
rlm@3: */
rlm@3: public class AVIOutputStream {
rlm@3:
/**
 * Underlying output stream that receives the AVI RIFF data.
 */
private ImageOutputStream out;
/** The offset of the AVI stream in the underlying ImageOutputStream.
 * Normally this is 0 unless the underlying stream already contained data
 * when it was passed to the constructor.
 */
private long streamOffset;
/** Previous frame for delta compression. */
private Object previousData;
rlm@3:
/**
 * Supported video encodings: uncompressed DIB (RAW), Microsoft run-length
 * encoding (RLE), Motion JPEG (JPG) and PNG-per-frame (PNG).
 */
public static enum VideoFormat {

    RAW, RLE, JPG, PNG;
}
/**
 * Current video format; selects the encoder used by writeFrame.
 */
private VideoFormat videoFormat;
/**
 * Quality of JPEG encoded video frames, in the range [0, 1].
 * Only has an effect when videoFormat is JPG.
 */
private float quality = 0.9f;
/**
 * Creation time of the movie output stream; set when the prolog is written.
 */
private Date creationTime;
/**
 * Width of the video frames. All frames must have the same width.
 * The value -1 is used to mark unspecified width.
 */
private int imgWidth = -1;
/**
 * Height of the video frames. All frames must have the same height.
 * The value -1 is used to mark unspecified height.
 */
private int imgHeight = -1;
/** Number of bits per pixel; the RAW encoder handles 4, 8 and (default) 24. */
private int imgDepth = 24;
/** Index color model for RAW_RGB4 and RAW_RGB8 formats. */
private IndexColorModel palette;
/** Palette of the previously written frame; used to detect palette changes. */
private IndexColorModel previousPalette;
/**
 * The timeScale of the movie.
 * <p>
 * Used with frameRate to specify the time scale that this stream will use.
 * Dividing frameRate by timeScale gives the number of samples per second.
 * For video streams, this is the frame rate. For audio streams, this rate
 * corresponds to the time needed to play nBlockAlign bytes of audio, which
 * for PCM audio is the just the sample rate.
 */
private int timeScale = 1;
/**
 * The frameRate of the movie in timeScale units.
 *
 * @see #timeScale
 */
private int frameRate = 30;
/** Interval between keyframes. */
private int syncInterval = 30;
rlm@3:
/**
 * The states of the movie output stream.
 * STARTED: prolog written, frames may be added.
 * FINISHED: epilog written (or nothing written yet); stream still open.
 * CLOSED: underlying stream closed; no further operations allowed.
 */
private static enum States {

    STARTED, FINISHED, CLOSED;
}
/**
 * The current state of the movie output stream.
 */
private States state = States.FINISHED;
rlm@3:
rlm@3: /**
rlm@3: * AVI stores media data in samples.
rlm@3: * A sample is a single element in a sequence of time-ordered data.
rlm@3: */
rlm@3: private static class Sample {
rlm@3:
rlm@3: String chunkType;
rlm@3: /** Offset of the sample relative to the start of the AVI file.
rlm@3: */
rlm@3: long offset;
rlm@3: /** Data length of the sample. */
rlm@3: long length;
rlm@3: /**
rlm@3: * The duration of the sample in time scale units.
rlm@3: */
rlm@3: int duration;
rlm@3: /** Whether the sample is a sync-sample. */
rlm@3: boolean isSync;
rlm@3:
rlm@3: /**
rlm@3: * Creates a new sample.
rlm@3: * @param duration
rlm@3: * @param offset
rlm@3: * @param length
rlm@3: */
rlm@3: public Sample(String chunkId, int duration, long offset, long length, boolean isSync) {
rlm@3: this.chunkType = chunkId;
rlm@3: this.duration = duration;
rlm@3: this.offset = offset;
rlm@3: this.length = length;
rlm@3: this.isSync = isSync;
rlm@3: }
rlm@3: }
rlm@3: /**
rlm@3: * List of video frames.
rlm@3: */
rlm@3: private LinkedList
rlm@3: * The default value is 1.
rlm@3: *
rlm@3: * @param newValue
rlm@3: */
rlm@3: public void setTimeScale(int newValue) {
rlm@3: if (newValue <= 0) {
rlm@3: throw new IllegalArgumentException("timeScale must be greater 0");
rlm@3: }
rlm@3: this.timeScale = newValue;
rlm@3: }
rlm@3:
rlm@3: /**
rlm@3: * Returns the time scale of this media.
rlm@3: *
rlm@3: * @return time scale
rlm@3: */
rlm@3: public int getTimeScale() {
rlm@3: return timeScale;
rlm@3: }
rlm@3:
rlm@3: /**
rlm@3: * Sets the rate of video frames in time scale units.
rlm@3: *
rlm@3: * The default value is 30. Together with the default value 1 of timeScale
rlm@3: * this results in 30 frames pers second.
rlm@3: *
rlm@3: * @param newValue
rlm@3: */
rlm@3: public void setFrameRate(int newValue) {
rlm@3: if (newValue <= 0) {
rlm@3: throw new IllegalArgumentException("frameDuration must be greater 0");
rlm@3: }
rlm@3: if (state == States.STARTED) {
rlm@3: throw new IllegalStateException("frameDuration must be set before the first frame is written");
rlm@3: }
rlm@3: this.frameRate = newValue;
rlm@3: }
rlm@3:
rlm@3: /**
rlm@3: * Returns the frame rate of this media.
rlm@3: *
rlm@3: * @return frame rate
rlm@3: */
rlm@3: public int getFrameRate() {
rlm@3: return frameRate;
rlm@3: }
rlm@3:
rlm@3: /** Sets the global color palette. */
rlm@3: public void setPalette(IndexColorModel palette) {
rlm@3: this.palette = palette;
rlm@3: }
rlm@3:
rlm@3: /**
rlm@3: * Sets the compression quality of the video track.
rlm@3: * A value of 0 stands for "high compression is important" a value of
rlm@3: * 1 for "high image quality is important".
rlm@3: *
rlm@3: * Changing this value affects frames which are subsequently written
rlm@3: * to the AVIOutputStream. Frames which have already been written
rlm@3: * are not changed.
rlm@3: *
rlm@3: * This value has only effect on videos encoded with JPG format.
rlm@3: *
rlm@3: * The default value is 0.9.
rlm@3: *
rlm@3: * @param newValue
rlm@3: */
rlm@3: public void setVideoCompressionQuality(float newValue) {
rlm@3: this.quality = newValue;
rlm@3: }
rlm@3:
rlm@3: /**
rlm@3: * Returns the video compression quality.
rlm@3: *
rlm@3: * @return video compression quality
rlm@3: */
rlm@3: public float getVideoCompressionQuality() {
rlm@3: return quality;
rlm@3: }
rlm@3:
rlm@3: /**
rlm@3: * Sets the dimension of the video track.
rlm@3: *
rlm@3: * You need to explicitly set the dimension, if you add all frames from
rlm@3: * files or input streams.
rlm@3: *
rlm@3: * If you add frames from buffered images, then AVIOutputStream
rlm@3: * can determine the video dimension from the image width and height.
rlm@3: *
rlm@3: * @param width Must be greater than 0.
rlm@3: * @param height Must be greater than 0.
rlm@3: */
rlm@3: public void setVideoDimension(int width, int height) {
rlm@3: if (width < 1 || height < 1) {
rlm@3: throw new IllegalArgumentException("width and height must be greater zero.");
rlm@3: }
rlm@3: this.imgWidth = width;
rlm@3: this.imgHeight = height;
rlm@3: }
rlm@3:
rlm@3: /**
rlm@3: * Gets the dimension of the video track.
rlm@3: *
rlm@3: * Returns null if the dimension is not known.
rlm@3: */
rlm@3: public Dimension getVideoDimension() {
rlm@3: if (imgWidth < 1 || imgHeight < 1) {
rlm@3: return null;
rlm@3: }
rlm@3: return new Dimension(imgWidth, imgHeight);
rlm@3: }
rlm@3:
rlm@3: /**
rlm@3: * Sets the state of the QuickTimeOutpuStream to started.
rlm@3: *
rlm@3: * If the state is changed by this method, the prolog is
rlm@3: * written.
rlm@3: */
rlm@3: private void ensureStarted() throws IOException {
rlm@3: if (state != States.STARTED) {
rlm@3: creationTime = new Date();
rlm@3: writeProlog();
rlm@3: state = States.STARTED;
rlm@3: }
rlm@3: }
rlm@3:
/**
 * Writes a frame to the video track.
 * <p>
 * If the dimension of the video track has not been specified yet, it
 * is derived from the first buffered image added to the AVIOutputStream.
 *
 * @param image The frame image.
 *
 * @throws IllegalArgumentException if the duration is less than 1, or
 * if the dimension of the frame does not match the dimension of the video
 * track.
 * @throws IOException if writing the image failed.
 */
public void writeFrame(BufferedImage image) throws IOException {
    ensureOpen();
    ensureStarted();

    // Get the dimensions of the first image
    if (imgWidth == -1) {
        imgWidth = image.getWidth();
        imgHeight = image.getHeight();
    } else {
        // The dimension of the image must match the dimension of the video track
        // NOTE(review): the message below is missing a closing ")" — cosmetic only.
        if (imgWidth != image.getWidth() || imgHeight != image.getHeight()) {
            throw new IllegalArgumentException("Dimensions of image[" + videoFrames.size()
                    + "] (width=" + image.getWidth() + ", height=" + image.getHeight()
                    + ") differs from image[0] (width="
                    + imgWidth + ", height=" + imgHeight);
        }
    }

    DataChunk videoFrameChunk;
    // Offset of the frame chunk, relative to the start of the AVI file;
    // recorded in the Sample for the "idx1" index written by the epilog.
    long offset = getRelativeStreamPosition();
    boolean isSync = true;
    // NOTE(review): there is no RLE case here — VideoFormat.RLE falls
    // through to the PNG default branch below. Confirm whether RLE
    // encoding is implemented elsewhere.
    switch (videoFormat) {
        case RAW: {
            switch (imgDepth) {
                case 4: {
                    // 4-bit indexed color: detect palette changes and emit a
                    // "00pc" palette-change chunk before the frame if needed.
                    IndexColorModel imgPalette = (IndexColorModel) image.getColorModel();
                    int[] imgRGBs = new int[16];
                    imgPalette.getRGBs(imgRGBs);
                    int[] previousRGBs = new int[16];
                    if (previousPalette == null) {
                        previousPalette = palette;
                    }
                    previousPalette.getRGBs(previousRGBs);
                    if (!Arrays.equals(imgRGBs, previousRGBs)) {
                        previousPalette = imgPalette;
                        DataChunk paletteChangeChunk = new DataChunk("00pc");
                        /* Disabled optimization: write only the changed range
                         * [first, last] of the palette instead of all entries.
                        int first = imgPalette.getMapSize();
                        int last = -1;
                        for (int i = 0; i < 16; i++) {
                            if (previousRGBs[i] != imgRGBs[i] && i < first) {
                                first = i;
                            }
                            if (previousRGBs[i] != imgRGBs[i] && i > last) {
                                last = i;
                            }
                        }*/
                        int first = 0;
                        int last = imgPalette.getMapSize() - 1;
                        /*
                         * typedef struct {
                        BYTE bFirstEntry;
                        BYTE bNumEntries;
                        WORD wFlags;
                        PALETTEENTRY peNew[];
                        } AVIPALCHANGE;
                        *
                        * typedef struct tagPALETTEENTRY {
                        BYTE peRed;
                        BYTE peGreen;
                        BYTE peBlue;
                        BYTE peFlags;
                        } PALETTEENTRY;
                        */
                        DataChunkOutputStream pOut = paletteChangeChunk.getOutputStream();
                        pOut.writeByte(first);//bFirstEntry
                        pOut.writeByte(last - first + 1);//bNumEntries
                        pOut.writeShort(0);//wFlags

                        for (int i = first; i <= last; i++) {
                            pOut.writeByte((imgRGBs[i] >>> 16) & 0xff); // red
                            pOut.writeByte((imgRGBs[i] >>> 8) & 0xff); // green
                            pOut.writeByte(imgRGBs[i] & 0xff); // blue
                            pOut.writeByte(0); // reserved
                        }

                        moviChunk.add(paletteChangeChunk);
                        paletteChangeChunk.finish();
                        // length - 8 excludes the 8-byte RIFF chunk header.
                        long length = getRelativeStreamPosition() - offset;
                        videoFrames.add(new Sample(paletteChangeChunk.chunkType, 0, offset, length - 8, false));
                        offset = getRelativeStreamPosition();
                    }

                    videoFrameChunk = new DataChunk("00db");
                    // Pack two 4-bit pixels per output byte, writing scanlines
                    // bottom-up as required by the DIB format.
                    // NOTE(review): assumes the raster stores one pixel per
                    // byte and that imgWidth is even — rgb4 has imgWidth / 2
                    // bytes and the inner loop reads x and x + 1. An odd
                    // width would read past the end of a scanline. Confirm.
                    byte[] rgb8 = ((DataBufferByte) image.getRaster().getDataBuffer()).getData();
                    byte[] rgb4 = new byte[imgWidth / 2];
                    for (int y = (imgHeight - 1) * imgWidth; y >= 0; y -= imgWidth) { // Upside down
                        for (int x = 0, xx = 0, n = imgWidth; x < n; x += 2, ++xx) {
                            rgb4[xx] = (byte) (((rgb8[y + x] & 0xf) << 4) | (rgb8[y + x + 1] & 0xf));
                        }
                        videoFrameChunk.getOutputStream().write(rgb4);
                    }
                    break;
                }
                case 8: {
                    // 8-bit indexed color: same palette-change handling as the
                    // 4-bit case, but with 256 palette entries.
                    IndexColorModel imgPalette = (IndexColorModel) image.getColorModel();
                    int[] imgRGBs = new int[256];
                    imgPalette.getRGBs(imgRGBs);
                    int[] previousRGBs = new int[256];
                    if (previousPalette == null) {
                        previousPalette = palette;
                    }
                    previousPalette.getRGBs(previousRGBs);
                    if (!Arrays.equals(imgRGBs, previousRGBs)) {
                        previousPalette = imgPalette;
                        DataChunk paletteChangeChunk = new DataChunk("00pc");
                        /* Disabled optimization: write only the changed range
                         * of the palette (note the loop bound 16 looks like a
                         * leftover from the 4-bit case).
                        int first = imgPalette.getMapSize();
                        int last = -1;
                        for (int i = 0; i < 16; i++) {
                            if (previousRGBs[i] != imgRGBs[i] && i < first) {
                                first = i;
                            }
                            if (previousRGBs[i] != imgRGBs[i] && i > last) {
                                last = i;
                            }
                        }*/
                        int first = 0;
                        int last = imgPalette.getMapSize() - 1;
                        /*
                         * typedef struct {
                        BYTE bFirstEntry;
                        BYTE bNumEntries;
                        WORD wFlags;
                        PALETTEENTRY peNew[];
                        } AVIPALCHANGE;
                        *
                        * typedef struct tagPALETTEENTRY {
                        BYTE peRed;
                        BYTE peGreen;
                        BYTE peBlue;
                        BYTE peFlags;
                        } PALETTEENTRY;
                        */
                        DataChunkOutputStream pOut = paletteChangeChunk.getOutputStream();
                        pOut.writeByte(first);//bFirstEntry
                        pOut.writeByte(last - first + 1);//bNumEntries
                        pOut.writeShort(0);//wFlags

                        for (int i = first; i <= last; i++) {
                            pOut.writeByte((imgRGBs[i] >>> 16) & 0xff); // red
                            pOut.writeByte((imgRGBs[i] >>> 8) & 0xff); // green
                            pOut.writeByte(imgRGBs[i] & 0xff); // blue
                            pOut.writeByte(0); // reserved
                        }

                        moviChunk.add(paletteChangeChunk);
                        paletteChangeChunk.finish();
                        // length - 8 excludes the 8-byte RIFF chunk header.
                        long length = getRelativeStreamPosition() - offset;
                        videoFrames.add(new Sample(paletteChangeChunk.chunkType, 0, offset, length - 8, false));
                        offset = getRelativeStreamPosition();
                    }

                    videoFrameChunk = new DataChunk("00db");
                    // Write scanlines bottom-up (DIB order), one row at a time.
                    byte[] rgb8 = ((DataBufferByte) image.getRaster().getDataBuffer()).getData();
                    for (int y = (imgHeight - 1) * imgWidth; y >= 0; y -= imgWidth) { // Upside down
                        videoFrameChunk.getOutputStream().write(rgb8, y, imgWidth);
                    }
                    break;
                }
                default: {
                    // 24-bit true color: convert RGB samples to the BGR byte
                    // order of a DIB, writing scanlines bottom-up.
                    videoFrameChunk = new DataChunk("00db");
                    WritableRaster raster = image.getRaster();
                    int[] raw = new int[imgWidth * 3]; // holds a scanline of raw image data with 3 channels of 32 bit data
                    byte[] bytes = new byte[imgWidth * 3]; // holds a scanline of raw image data with 3 channels of 8 bit data
                    for (int y = imgHeight - 1; y >= 0; --y) { // Upside down
                        raster.getPixels(0, y, imgWidth, 1, raw);
                        for (int x = 0, n = imgWidth * 3; x < n; x += 3) {
                            bytes[x + 2] = (byte) raw[x]; // Blue
                            bytes[x + 1] = (byte) raw[x + 1]; // Green
                            bytes[x] = (byte) raw[x + 2]; // Red
                        }
                        videoFrameChunk.getOutputStream().write(bytes);
                    }
                    break;
                }
            }
            break;
        }

        case JPG: {
            // Compressed frame ("00dc"): encode via the platform JPEG writer
            // using the configured quality.
            videoFrameChunk = new DataChunk("00dc");
            ImageWriter iw = (ImageWriter) ImageIO.getImageWritersByMIMEType("image/jpeg").next();
            ImageWriteParam iwParam = iw.getDefaultWriteParam();
            iwParam.setCompressionMode(ImageWriteParam.MODE_EXPLICIT);
            iwParam.setCompressionQuality(quality);
            MemoryCacheImageOutputStream imgOut = new MemoryCacheImageOutputStream(videoFrameChunk.getOutputStream());
            iw.setOutput(imgOut);
            IIOImage img = new IIOImage(image, null, null);
            iw.write(null, img, iwParam);
            iw.dispose();
            break;
        }
        case PNG:
        default: {
            // Compressed frame ("00dc"): encode via the platform PNG writer.
            videoFrameChunk = new DataChunk("00dc");
            ImageWriter iw = (ImageWriter) ImageIO.getImageWritersByMIMEType("image/png").next();
            ImageWriteParam iwParam = iw.getDefaultWriteParam();
            MemoryCacheImageOutputStream imgOut = new MemoryCacheImageOutputStream(videoFrameChunk.getOutputStream());
            iw.setOutput(imgOut);
            IIOImage img = new IIOImage(image, null, null);
            iw.write(null, img, iwParam);
            iw.dispose();
            break;
        }
    }
    // length - 8 excludes the 8-byte RIFF chunk header from the sample size.
    long length = getRelativeStreamPosition() - offset;
    moviChunk.add(videoFrameChunk);
    videoFrameChunk.finish();

    videoFrames.add(new Sample(videoFrameChunk.chunkType, frameRate, offset, length - 8, isSync));
    // Hard cap: 4 GB is the maximum this writer supports (see class javadoc;
    // strict AVI 1.0 players may already fail beyond 1 GB).
    if (getRelativeStreamPosition() > 1L << 32) {
        throw new IOException("AVI file is larger than 4 GB");
    }
}
rlm@3:
rlm@3: /**
rlm@3: * Writes a frame from a file to the video track.
rlm@3: *
rlm@3: * This method does not inspect the contents of the file.
rlm@3: * For example, Its your responsibility to only add JPG files if you have
rlm@3: * chosen the JPEG video format.
rlm@3: *
rlm@3: * If you add all frames from files or from input streams, then you
rlm@3: * have to explicitly set the dimension of the video track before you
rlm@3: * call finish() or close().
rlm@3: *
rlm@3: * @param file The file which holds the image data.
rlm@3: *
rlm@3: * @throws IllegalStateException if the duration is less than 1.
rlm@3: * @throws IOException if writing the image failed.
rlm@3: */
rlm@3: public void writeFrame(File file) throws IOException {
rlm@3: FileInputStream in = null;
rlm@3: try {
rlm@3: in = new FileInputStream(file);
rlm@3: writeFrame(in);
rlm@3: } finally {
rlm@3: if (in != null) {
rlm@3: in.close();
rlm@3: }
rlm@3: }
rlm@3: }
rlm@3:
/**
 * Writes a frame to the video track.
 * <p>
 * This method does not inspect the contents of the file.
 * For example, its your responsibility to only add JPG files if you have
 * chosen the JPEG video format.
 * <p>
 * If you add all frames from files or from input streams, then you
 * have to explicitly set the dimension of the video track before you
 * call finish() or close().
 *
 * @param in The input stream which holds the image data.
 *
 * @throws IllegalArgumentException if the duration is less than 1.
 * @throws IOException if writing the image failed.
 */
public void writeFrame(InputStream in) throws IOException {
    ensureOpen();
    ensureStarted();

    // RAW frames use the uncompressed chunk id "00db"; everything else "00dc".
    DataChunk videoFrameChunk = new DataChunk(
            videoFormat == VideoFormat.RAW ? "00db" : "00dc");
    moviChunk.add(videoFrameChunk);
    OutputStream mdatOut = videoFrameChunk.getOutputStream();
    // NOTE(review): the offset is taken AFTER the chunk is created and added,
    // whereas writeFrame(BufferedImage) records it BEFORE creating the chunk.
    // If chunk creation writes its header to the stream, the index offsets of
    // the two paths disagree by the header size — confirm against DataChunk.
    long offset = getRelativeStreamPosition();
    // Copy the stream contents verbatim into the chunk body.
    byte[] buf = new byte[512];
    int len;
    while ((len = in.read(buf)) != -1) {
        mdatOut.write(buf, 0, len);
    }
    // length - 8 excludes the 8-byte RIFF chunk header from the sample size.
    long length = getRelativeStreamPosition() - offset;
    videoFrameChunk.finish();
    videoFrames.add(new Sample(videoFrameChunk.chunkType, frameRate, offset, length - 8, true));
    // Hard cap: this writer supports at most 4 GB (see class javadoc).
    if (getRelativeStreamPosition() > 1L << 32) {
        throw new IOException("AVI file is larger than 4 GB");
    }
}
rlm@3:
rlm@3: /**
rlm@3: * Closes the movie file as well as the stream being filtered.
rlm@3: *
rlm@3: * @exception IOException if an I/O error has occurred
rlm@3: */
rlm@3: public void close() throws IOException {
rlm@3: if (state == States.STARTED) {
rlm@3: finish();
rlm@3: }
rlm@3: if (state != States.CLOSED) {
rlm@3: out.close();
rlm@3: state = States.CLOSED;
rlm@3: }
rlm@3: }
rlm@3:
rlm@3: /**
rlm@3: * Finishes writing the contents of the AVI output stream without closing
rlm@3: * the underlying stream. Use this method when applying multiple filters
rlm@3: * in succession to the same output stream.
rlm@3: *
rlm@3: * @exception IllegalStateException if the dimension of the video track
rlm@3: * has not been specified or determined yet.
rlm@3: * @exception IOException if an I/O exception has occurred
rlm@3: */
rlm@3: public void finish() throws IOException {
rlm@3: ensureOpen();
rlm@3: if (state != States.FINISHED) {
rlm@3: if (imgWidth == -1 || imgHeight == -1) {
rlm@3: throw new IllegalStateException("image width and height must be specified");
rlm@3: }
rlm@3:
rlm@3: moviChunk.finish();
rlm@3: writeEpilog();
rlm@3: state = States.FINISHED;
rlm@3: imgWidth = imgHeight = -1;
rlm@3: }
rlm@3: }
rlm@3:
rlm@3: /**
rlm@3: * Check to make sure that this stream has not been closed
rlm@3: */
rlm@3: private void ensureOpen() throws IOException {
rlm@3: if (state == States.CLOSED) {
rlm@3: throw new IOException("Stream closed");
rlm@3: }
rlm@3: }
rlm@3:
rlm@3: /** Gets the position relative to the beginning of the QuickTime stream.
rlm@3: *
rlm@3: * Usually this value is equal to the stream position of the underlying
rlm@3: * ImageOutputStream, but can be larger if the underlying stream already
rlm@3: * contained data.
rlm@3: *
rlm@3: * @return The relative stream position.
rlm@3: * @throws IOException
rlm@3: */
rlm@3: private long getRelativeStreamPosition() throws IOException {
rlm@3: return out.getStreamPosition() - streamOffset;
rlm@3: }
rlm@3:
rlm@3: /** Seeks relative to the beginning of the QuickTime stream.
rlm@3: *
rlm@3: * Usually this equal to seeking in the underlying ImageOutputStream, but
rlm@3: * can be different if the underlying stream already contained data.
rlm@3: *
rlm@3: */
rlm@3: private void seekRelative(long newPosition) throws IOException {
rlm@3: out.seek(newPosition + streamOffset);
rlm@3: }
rlm@3:
/**
 * Writes the beginning of the AVI file structure and creates the chunk
 * objects that the rest of the writer fills in. The header chunks are
 * written empty (seeked past) here; their data is filled in by the epilog
 * once the frame count and sizes are known.
 *
 * @throws IOException if writing to the underlying stream fails.
 */
private void writeProlog() throws IOException {
    // The file has the following structure:
    //
    // .RIFF AVI
    // ..avih (AVI Header Chunk)
    // ..LIST strl
    // ...strh (Stream Header Chunk)
    // ...strf (Stream Format Chunk)
    // ..LIST movi
    // ...00dc (Compressed video data chunk in Track 00, repeated for each frame)
    // ..idx1 (List of video data chunks and their location in the file)

    // The RIFF AVI Chunk holds the complete movie
    aviChunk = new CompositeChunk("RIFF", "AVI ");
    CompositeChunk hdrlChunk = new CompositeChunk("LIST", "hdrl");

    // Write empty AVI Main Header Chunk - we fill the data in later
    aviChunk.add(hdrlChunk);
    avihChunk = new FixedSizeDataChunk("avih", 56);
    avihChunk.seekToEndOfChunk();
    hdrlChunk.add(avihChunk);

    CompositeChunk strlChunk = new CompositeChunk("LIST", "strl");
    hdrlChunk.add(strlChunk);

    // Write empty AVI Stream Header Chunk - we fill the data in later
    strhChunk = new FixedSizeDataChunk("strh", 56);
    strhChunk.seekToEndOfChunk();
    strlChunk.add(strhChunk);
    // The strf chunk is 40 bytes (BITMAPINFOHEADER) plus 4 bytes per
    // palette entry when a global palette has been set.
    strfChunk = new FixedSizeDataChunk("strf", palette == null ? 40 : 40 + palette.getMapSize() * 4);
    strfChunk.seekToEndOfChunk();
    strlChunk.add(strfChunk);

    // The movi list will receive the frame data chunks as frames are written.
    moviChunk = new CompositeChunk("LIST", "movi");
    aviChunk.add(moviChunk);


}
rlm@3:
rlm@3: private void writeEpilog() throws IOException {
rlm@3: // Compute values
rlm@3: int duration = 0;
rlm@3: for (Sample s : videoFrames) {
rlm@3: duration += s.duration;
rlm@3: }
rlm@3: long bufferSize = 0;
rlm@3: for (Sample s : videoFrames) {
rlm@3: if (s.length > bufferSize) {
rlm@3: bufferSize = s.length;
rlm@3: }
rlm@3: }
rlm@3:
rlm@3:
rlm@3: DataChunkOutputStream d;
rlm@3:
rlm@3: /* Create Idx1 Chunk and write data
rlm@3: * -------------
rlm@3: typedef struct _avioldindex {
rlm@3: FOURCC fcc;
rlm@3: DWORD cb;
rlm@3: struct _avioldindex_entry {
rlm@3: DWORD dwChunkId;
rlm@3: DWORD dwFlags;
rlm@3: DWORD dwOffset;
rlm@3: DWORD dwSize;
rlm@3: } aIndex[];
rlm@3: } AVIOLDINDEX;
rlm@3: */
rlm@3: DataChunk idx1Chunk = new DataChunk("idx1");
rlm@3: aviChunk.add(idx1Chunk);
rlm@3: d = idx1Chunk.getOutputStream();
rlm@3: long moviListOffset = moviChunk.offset + 8;
rlm@3: //moviListOffset = 0;
rlm@3: for (Sample f : videoFrames) {
rlm@3:
rlm@3: d.writeType(f.chunkType); // dwChunkId
rlm@3: // Specifies a FOURCC that identifies a stream in the AVI file. The
rlm@3: // FOURCC must have the form 'xxyy' where xx is the stream number and yy
rlm@3: // is a two-character code that identifies the contents of the stream:
rlm@3: //
rlm@3: // Two-character code Description
rlm@3: // db Uncompressed video frame
rlm@3: // dc Compressed video frame
rlm@3: // pc Palette change
rlm@3: // wb Audio data
rlm@3:
rlm@3: d.writeUInt((f.chunkType.endsWith("pc") ? 0x100 : 0x0)//
rlm@3: | (f.isSync ? 0x10 : 0x0)); // dwFlags
rlm@3: // Specifies a bitwise combination of zero or more of the following
rlm@3: // flags:
rlm@3: //
rlm@3: // Value Name Description
rlm@3: // 0x10 AVIIF_KEYFRAME The data chunk is a key frame.
rlm@3: // 0x1 AVIIF_LIST The data chunk is a 'rec ' list.
rlm@3: // 0x100 AVIIF_NO_TIME The data chunk does not affect the timing of the
rlm@3: // stream. For example, this flag should be set for
rlm@3: // palette changes.
rlm@3:
rlm@3: d.writeUInt(f.offset - moviListOffset); // dwOffset
rlm@3: // Specifies the location of the data chunk in the file. The value
rlm@3: // should be specified as an offset, in bytes, from the start of the
rlm@3: // 'movi' list; however, in some AVI files it is given as an offset from
rlm@3: // the start of the file.
rlm@3:
rlm@3: d.writeUInt(f.length); // dwSize
rlm@3: // Specifies the size of the data chunk, in bytes.
rlm@3: }
rlm@3: idx1Chunk.finish();
rlm@3:
rlm@3: /* Write Data into AVI Main Header Chunk
rlm@3: * -------------
rlm@3: * The AVIMAINHEADER structure defines global information in an AVI file.
rlm@3: * see http://msdn.microsoft.com/en-us/library/ms779632(VS.85).aspx
rlm@3: typedef struct _avimainheader {
rlm@3: FOURCC fcc;
rlm@3: DWORD cb;
rlm@3: DWORD dwMicroSecPerFrame;
rlm@3: DWORD dwMaxBytesPerSec;
rlm@3: DWORD dwPaddingGranularity;
rlm@3: DWORD dwFlags;
rlm@3: DWORD dwTotalFrames;
rlm@3: DWORD dwInitialFrames;
rlm@3: DWORD dwStreams;
rlm@3: DWORD dwSuggestedBufferSize;
rlm@3: DWORD dwWidth;
rlm@3: DWORD dwHeight;
rlm@3: DWORD dwReserved[4];
rlm@3: } AVIMAINHEADER; */
rlm@3: avihChunk.seekToStartOfData();
rlm@3: d = avihChunk.getOutputStream();
rlm@3:
rlm@3: d.writeUInt((1000000L * (long) timeScale) / (long) frameRate); // dwMicroSecPerFrame
rlm@3: // Specifies the number of microseconds between frames.
rlm@3: // This value indicates the overall timing for the file.
rlm@3:
rlm@3: d.writeUInt(0); // dwMaxBytesPerSec
rlm@3: // Specifies the approximate maximum data rate of the file.
rlm@3: // This value indicates the number of bytes per second the system
rlm@3: // must handle to present an AVI sequence as specified by the other
rlm@3: // parameters contained in the main header and stream header chunks.
rlm@3:
rlm@3: d.writeUInt(0); // dwPaddingGranularity
rlm@3: // Specifies the alignment for data, in bytes. Pad the data to multiples
rlm@3: // of this value.
rlm@3:
rlm@3: d.writeUInt(0x10); // dwFlags (0x10 == hasIndex)
rlm@3: // Contains a bitwise combination of zero or more of the following
rlm@3: // flags:
rlm@3: //
rlm@3: // Value Name Description
rlm@3: // 0x10 AVIF_HASINDEX Indicates the AVI file has an index.
rlm@3: // 0x20 AVIF_MUSTUSEINDEX Indicates that application should use the
rlm@3: // index, rather than the physical ordering of the
rlm@3: // chunks in the file, to determine the order of
rlm@3: // presentation of the data. For example, this flag
rlm@3: // could be used to create a list of frames for
rlm@3: // editing.
rlm@3: // 0x100 AVIF_ISINTERLEAVED Indicates the AVI file is interleaved.
rlm@3: // 0x1000 AVIF_WASCAPTUREFILE Indicates the AVI file is a specially
rlm@3: // allocated file used for capturing real-time
rlm@3: // video. Applications should warn the user before
rlm@3: // writing over a file with this flag set because
rlm@3: // the user probably defragmented this file.
rlm@3: // 0x20000 AVIF_COPYRIGHTED Indicates the AVI file contains copyrighted
rlm@3: // data and software. When this flag is used,
rlm@3: // software should not permit the data to be
rlm@3: // duplicated.
rlm@3:
rlm@3: d.writeUInt(videoFrames.size()); // dwTotalFrames
rlm@3: // Specifies the total number of frames of data in the file.
rlm@3:
rlm@3: d.writeUInt(0); // dwInitialFrames
rlm@3: // Specifies the initial frame for interleaved files. Noninterleaved
rlm@3: // files should specify zero. If you are creating interleaved files,
rlm@3: // specify the number of frames in the file prior to the initial frame
rlm@3: // of the AVI sequence in this member.
rlm@3: // To give the audio driver enough audio to work with, the audio data in
rlm@3: // an interleaved file must be skewed from the video data. Typically,
rlm@3: // the audio data should be moved forward enough frames to allow
rlm@3: // approximately 0.75 seconds of audio data to be preloaded. The
rlm@3: // dwInitialRecords member should be set to the number of frames the
rlm@3: // audio is skewed. Also set the same value for the dwInitialFrames
rlm@3: // member of the AVISTREAMHEADER structure in the audio stream header
rlm@3:
rlm@3: d.writeUInt(1); // dwStreams
rlm@3: // Specifies the number of streams in the file. For example, a file with
rlm@3: // audio and video has two streams.
rlm@3:
rlm@3: d.writeUInt(bufferSize); // dwSuggestedBufferSize
rlm@3: // Specifies the suggested buffer size for reading the file. Generally,
rlm@3: // this size should be large enough to contain the largest chunk in the
rlm@3: // file. If set to zero, or if it is too small, the playback software
rlm@3: // will have to reallocate memory during playback, which will reduce
rlm@3: // performance. For an interleaved file, the buffer size should be large
rlm@3: // enough to read an entire record, and not just a chunk.
rlm@3:
rlm@3:
rlm@3: d.writeUInt(imgWidth); // dwWidth
rlm@3: // Specifies the width of the AVI file in pixels.
rlm@3:
rlm@3: d.writeUInt(imgHeight); // dwHeight
rlm@3: // Specifies the height of the AVI file in pixels.
rlm@3:
rlm@3: d.writeUInt(0); // dwReserved[0]
rlm@3: d.writeUInt(0); // dwReserved[1]
rlm@3: d.writeUInt(0); // dwReserved[2]
rlm@3: d.writeUInt(0); // dwReserved[3]
rlm@3: // Reserved. Set this array to zero.
rlm@3:
rlm@3: /* Write Data into AVI Stream Header Chunk
rlm@3: * -------------
rlm@3: * The AVISTREAMHEADER structure contains information about one stream
rlm@3: * in an AVI file.
rlm@3: * see http://msdn.microsoft.com/en-us/library/ms779638(VS.85).aspx
rlm@3: typedef struct _avistreamheader {
rlm@3: FOURCC fcc;
rlm@3: DWORD cb;
rlm@3: FOURCC fccType;
rlm@3: FOURCC fccHandler;
rlm@3: DWORD dwFlags;
rlm@3: WORD wPriority;
rlm@3: WORD wLanguage;
rlm@3: DWORD dwInitialFrames;
rlm@3: DWORD dwScale;
rlm@3: DWORD dwRate;
rlm@3: DWORD dwStart;
rlm@3: DWORD dwLength;
rlm@3: DWORD dwSuggestedBufferSize;
rlm@3: DWORD dwQuality;
rlm@3: DWORD dwSampleSize;
rlm@3: struct {
rlm@3: short int left;
rlm@3: short int top;
rlm@3: short int right;
rlm@3: short int bottom;
rlm@3: } rcFrame;
rlm@3: } AVISTREAMHEADER;
rlm@3: */
rlm@3: strhChunk.seekToStartOfData();
rlm@3: d = strhChunk.getOutputStream();
rlm@3: d.writeType("vids"); // fccType - vids for video stream
rlm@3: // Contains a FOURCC that specifies the type of the data contained in
rlm@3: // the stream. The following standard AVI values for video and audio are
rlm@3: // defined:
rlm@3: //
rlm@3: // FOURCC Description
rlm@3: // 'auds' Audio stream
rlm@3: // 'mids' MIDI stream
rlm@3: // 'txts' Text stream
rlm@3: // 'vids' Video stream
rlm@3:
rlm@3: switch (videoFormat) {
rlm@3: case RAW:
rlm@3: d.writeType("DIB "); // fccHandler - DIB for Raw RGB
rlm@3: break;
rlm@3: case RLE:
rlm@3: d.writeType("RLE "); // fccHandler - Microsoft RLE
rlm@3: break;
rlm@3: case JPG:
rlm@3: d.writeType("MJPG"); // fccHandler - MJPG for Motion JPEG
rlm@3: break;
rlm@3: case PNG:
rlm@3: default:
rlm@3: d.writeType("png "); // fccHandler - png for PNG
rlm@3: break;
rlm@3: }
rlm@3: // Optionally, contains a FOURCC that identifies a specific data
rlm@3: // handler. The data handler is the preferred handler for the stream.
rlm@3: // For audio and video streams, this specifies the codec for decoding
rlm@3: // the stream.
rlm@3:
rlm@3: if (imgDepth <= 8) {
rlm@3: d.writeUInt(0x00010000); // dwFlags - AVISF_VIDEO_PALCHANGES
rlm@3: } else {
rlm@3: d.writeUInt(0); // dwFlags
rlm@3: }
rlm@3:
rlm@3: // Contains any flags for the data stream. The bits in the high-order
rlm@3: // word of these flags are specific to the type of data contained in the
rlm@3: // stream. The following standard flags are defined:
rlm@3: //
rlm@3: // Value Name Description
rlm@3: // AVISF_DISABLED 0x00000001 Indicates this stream should not
rlm@3: // be enabled by default.
rlm@3: // AVISF_VIDEO_PALCHANGES 0x00010000
rlm@3: // Indicates this video stream contains
rlm@3: // palette changes. This flag warns the playback
rlm@3: // software that it will need to animate the
rlm@3: // palette.
rlm@3:
rlm@3: d.writeUShort(0); // wPriority
rlm@3: // Specifies priority of a stream type. For example, in a file with
rlm@3: // multiple audio streams, the one with the highest priority might be
rlm@3: // the default stream.
rlm@3:
rlm@3: d.writeUShort(0); // wLanguage
rlm@3: // Language tag.
rlm@3:
rlm@3: d.writeUInt(0); // dwInitialFrames
rlm@3: // Specifies how far audio data is skewed ahead of the video frames in
rlm@3: // interleaved files. Typically, this is about 0.75 seconds. If you are
rlm@3: // creating interleaved files, specify the number of frames in the file
rlm@3: // prior to the initial frame of the AVI sequence in this member. For
rlm@3: // more information, see the remarks for the dwInitialFrames member of
rlm@3: // the AVIMAINHEADER structure.
rlm@3:
rlm@3: d.writeUInt(timeScale); // dwScale
rlm@3: // Used with dwRate to specify the time scale that this stream will use.
rlm@3: // Dividing dwRate by dwScale gives the number of samples per second.
rlm@3: // For video streams, this is the frame rate. For audio streams, this
rlm@3: // rate corresponds to the time needed to play nBlockAlign bytes of
rlm@3: // audio, which for PCM audio is just the sample rate.
rlm@3:
rlm@3: d.writeUInt(frameRate); // dwRate
rlm@3: // See dwScale.
rlm@3:
rlm@3: d.writeUInt(0); // dwStart
rlm@3: // Specifies the starting time for this stream. The units are defined by
rlm@3: // the dwRate and dwScale members in the main file header. Usually, this
rlm@3: // is zero, but it can specify a delay time for a stream that does not
rlm@3: // start concurrently with the file.
rlm@3:
rlm@3: d.writeUInt(videoFrames.size()); // dwLength
rlm@3: // Specifies the length of this stream. The units are defined by the
rlm@3: // dwRate and dwScale members of the stream's header.
rlm@3:
rlm@3: d.writeUInt(bufferSize); // dwSuggestedBufferSize
rlm@3: // Specifies how large a buffer should be used to read this stream.
rlm@3: // Typically, this contains a value corresponding to the largest chunk
rlm@3: // present in the stream. Using the correct buffer size makes playback
rlm@3: // more efficient. Use zero if you do not know the correct buffer size.
rlm@3:
rlm@3: d.writeInt(-1); // dwQuality
rlm@3: // Specifies an indicator of the quality of the data in the stream.
rlm@3: // Quality is represented as a number between 0 and 10,000.
rlm@3: // For compressed data, this typically represents the value of the
rlm@3: // quality parameter passed to the compression software. If set to -1,
rlm@3: // drivers use the default quality value.
rlm@3:
rlm@3: d.writeUInt(0); // dwSampleSize
rlm@3: // Specifies the size of a single sample of data. This is set to zero
rlm@3: // if the samples can vary in size. If this number is nonzero, then
rlm@3: // multiple samples of data can be grouped into a single chunk within
rlm@3: // the file. If it is zero, each sample of data (such as a video frame)
rlm@3: // must be in a separate chunk. For video streams, this number is
rlm@3: // typically zero, although it can be nonzero if all video frames are
rlm@3: // the same size. For audio streams, this number should be the same as
rlm@3: // the nBlockAlign member of the WAVEFORMATEX structure describing the
rlm@3: // audio.
rlm@3:
rlm@3: d.writeUShort(0); // rcFrame.left
rlm@3: d.writeUShort(0); // rcFrame.top
rlm@3: d.writeUShort(imgWidth); // rcFrame.right
rlm@3: d.writeUShort(imgHeight); // rcFrame.bottom
rlm@3: // Specifies the destination rectangle for a text or video stream within
rlm@3: // the movie rectangle specified by the dwWidth and dwHeight members of
rlm@3: // the AVI main header structure. The rcFrame member is typically used
rlm@3: // in support of multiple video streams. Set this rectangle to the
rlm@3: // coordinates corresponding to the movie rectangle to update the whole
rlm@3: // movie rectangle. Units for this member are pixels. The upper-left
rlm@3: // corner of the destination rectangle is relative to the upper-left
rlm@3: // corner of the movie rectangle.
rlm@3:
rlm@3: /* Write BITMAPINFOHEADER Data into AVI Stream Format Chunk
rlm@3: * -------------
rlm@3: * see http://msdn.microsoft.com/en-us/library/ms779712(VS.85).aspx
rlm@3: typedef struct tagBITMAPINFOHEADER {
rlm@3: DWORD biSize;
rlm@3: LONG biWidth;
rlm@3: LONG biHeight;
rlm@3: WORD biPlanes;
rlm@3: WORD biBitCount;
rlm@3: DWORD biCompression;
rlm@3: DWORD biSizeImage;
rlm@3: LONG biXPelsPerMeter;
rlm@3: LONG biYPelsPerMeter;
rlm@3: DWORD biClrUsed;
rlm@3: DWORD biClrImportant;
rlm@3: } BITMAPINFOHEADER;
rlm@3: */
rlm@3: strfChunk.seekToStartOfData();
rlm@3: d = strfChunk.getOutputStream();
rlm@3: d.writeUInt(40); // biSize
rlm@3: // Specifies the number of bytes required by the structure. This value
rlm@3: // does not include the size of the color table or the size of the color
rlm@3: // masks, if they are appended to the end of structure.
rlm@3:
rlm@3: d.writeInt(imgWidth); // biWidth
rlm@3: // Specifies the width of the bitmap, in pixels.
rlm@3:
rlm@3: d.writeInt(imgHeight); // biHeight
rlm@3: // Specifies the height of the bitmap, in pixels.
rlm@3: //
rlm@3: // For uncompressed RGB bitmaps, if biHeight is positive, the bitmap is
rlm@3: // a bottom-up DIB with the origin at the lower left corner. If biHeight
rlm@3: // is negative, the bitmap is a top-down DIB with the origin at the
rlm@3: // upper left corner.
rlm@3: // For YUV bitmaps, the bitmap is always top-down, regardless of the
rlm@3: // sign of biHeight. Decoders should offer YUV formats with positive
rlm@3: // biHeight, but for backward compatibility they should accept YUV
rlm@3: // formats with either positive or negative biHeight.
rlm@3: // For compressed formats, biHeight must be positive, regardless of
rlm@3: // image orientation.
rlm@3:
rlm@3: d.writeShort(1); // biPlanes
rlm@3: // Specifies the number of planes for the target device. This value must
rlm@3: // be set to 1.
rlm@3:
rlm@3: d.writeShort(imgDepth); // biBitCount
rlm@3: // Specifies the number of bits per pixel (bpp). For uncompressed
rlm@3: // formats, this value is the average number of bits per pixel. For
rlm@3: // compressed formats, this value is the implied bit depth of the
rlm@3: // uncompressed image, after the image has been decoded.
rlm@3:
rlm@3: switch (videoFormat) {
rlm@3: case RAW:
rlm@3: default:
rlm@3: d.writeInt(0); // biCompression - BI_RGB for uncompressed RGB
rlm@3: break;
rlm@3: case RLE:
rlm@3: if (imgDepth == 8) {
rlm@3: d.writeInt(1); // biCompression - BI_RLE8
rlm@3: } else if (imgDepth == 4) {
rlm@3: d.writeInt(2); // biCompression - BI_RLE4
rlm@3: } else {
rlm@3: throw new UnsupportedOperationException("RLE only supports 4-bit and 8-bit images");
rlm@3: }
rlm@3: break;
rlm@3: case JPG:
rlm@3: d.writeType("MJPG"); // biCompression - MJPG for Motion JPEG
rlm@3: break;
rlm@3: case PNG:
rlm@3: d.writeType("png "); // biCompression - png for PNG
rlm@3: break;
rlm@3: }
rlm@3: // For compressed video and YUV formats, this member is a FOURCC code,
rlm@3: // specified as a DWORD in little-endian order. For example, YUYV video
rlm@3: // has the FOURCC 'VYUY' or 0x56595559. For more information, see FOURCC
rlm@3: // Codes.
rlm@3: //
rlm@3: // For uncompressed RGB formats, the following values are possible:
rlm@3: //
rlm@3: // Value Description
rlm@3: // BI_RGB 0x00000000 Uncompressed RGB.
rlm@3: // BI_BITFIELDS 0x00000003 Uncompressed RGB with color masks.
rlm@3: // Valid for 16-bpp and 32-bpp bitmaps.
rlm@3: //
rlm@3: // Note that BI_JPG and BI_PNG are not valid video formats.
rlm@3: //
rlm@3: // For 16-bpp bitmaps, if biCompression equals BI_RGB, the format is
rlm@3: // always RGB 555. If biCompression equals BI_BITFIELDS, the format is
rlm@3: // either RGB 555 or RGB 565. Use the subtype GUID in the AM_MEDIA_TYPE
rlm@3: // structure to determine the specific RGB type.
rlm@3:
rlm@3: switch (videoFormat) {
rlm@3: case RAW:
rlm@3: d.writeInt(0); // biSizeImage
rlm@3: break;
rlm@3: case RLE:
rlm@3: case JPG:
rlm@3: case PNG:
rlm@3: default:
rlm@3: if (imgDepth == 4) {
rlm@3: d.writeInt(imgWidth * imgHeight / 2); // biSizeImage
rlm@3: } else {
rlm@3: int bytesPerPixel = Math.max(1, imgDepth / 8);
rlm@3: d.writeInt(imgWidth * imgHeight * bytesPerPixel); // biSizeImage
rlm@3: }
rlm@3: break;
rlm@3: }
rlm@3: // Specifies the size, in bytes, of the image. This can be set to 0 for
rlm@3: // uncompressed RGB bitmaps.
rlm@3:
rlm@3: d.writeInt(0); // biXPelsPerMeter
rlm@3: // Specifies the horizontal resolution, in pixels per meter, of the
rlm@3: // target device for the bitmap.
rlm@3:
rlm@3: d.writeInt(0); // biYPelsPerMeter
rlm@3: // Specifies the vertical resolution, in pixels per meter, of the target
rlm@3: // device for the bitmap.
rlm@3:
rlm@3: d.writeInt(palette == null ? 0 : palette.getMapSize()); // biClrUsed
rlm@3: // Specifies the number of color indices in the color table that are
rlm@3: // actually used by the bitmap.
rlm@3:
rlm@3: d.writeInt(0); // biClrImportant
rlm@3: // Specifies the number of color indices that are considered important
rlm@3: // for displaying the bitmap. If this value is zero, all colors are
rlm@3: // important.
rlm@3:
rlm@3: if (palette != null) {
rlm@3: for (int i = 0, n = palette.getMapSize(); i < n; ++i) {
rlm@3: /*
rlm@3: * typedef struct tagRGBQUAD {
rlm@3: BYTE rgbBlue;
rlm@3: BYTE rgbGreen;
rlm@3: BYTE rgbRed;
rlm@3: BYTE rgbReserved; // This member is reserved and must be zero.
rlm@3: } RGBQUAD;
rlm@3: */
rlm@3: d.write(palette.getBlue(i));
rlm@3: d.write(palette.getGreen(i));
rlm@3: d.write(palette.getRed(i));
rlm@3: d.write(0);
rlm@3: }
rlm@3: }
rlm@3:
rlm@3:
rlm@3: // -----------------
rlm@3: aviChunk.finish();
rlm@3: }
rlm@3: }