// NOTE(review): diff fragment of YUVMonochromeBitmapSource. The patch moves dimension
// bookkeeping into BaseMonochromeBitmapSource: mCropBottom/mCropRight and the
// getHeight()/getWidth() overrides are deleted, and the crop dimensions are handed to
// the new super(height, width) constructor instead.
private final int mDataWidth;
private final int mCropTop;
private final int mCropLeft;
- private final int mCropBottom;
- private final int mCropRight;
/**
* Builds an object around a YUV buffer from the camera. The image is not cropped.
int cropLeft,
int cropBottom,
int cropRight) {
// Height first, then width — matches the BaseMonochromeBitmapSource(int height, int width)
// parameter order used by every other subclass in this patch.
+ super(cropBottom - cropTop, cropRight - cropLeft);
// Crop rectangle must fit inside the source buffer. The check necessarily runs after
// the super(...) call, since super(...) must be the first statement in a Java constructor.
if (cropRight - cropLeft > dataWidth || cropBottom - cropTop > dataHeight) {
throw new IllegalArgumentException();
}
mDataWidth = dataWidth;
this.mCropTop = cropTop;
this.mCropLeft = cropLeft;
- this.mCropBottom = cropBottom;
- this.mCropRight = cropRight;
- }
-
- @Override
- public int getHeight() {
- return mCropBottom - mCropTop;
- }
-
- @Override
- public int getWidth() {
- return mCropRight - mCropLeft;
}
/**
public final class RGBMonochromeBitmapSource extends BaseMonochromeBitmapSource {
- private final int mWidth;
- private final int mHeight;
private final byte[] mLuminances;
// Delegates to the Bitmap constructor via the this(...) chain. The loadBitmap helper
// exists because this(...) must be the first statement, so decode-and-null-check
// cannot happen inline before the delegation.
public RGBMonochromeBitmapSource(String path) throws FileNotFoundException {
+ this(loadBitmap(path));
+ }
+
+ // Decodes the file at the given path, translating decode failure (null return from
+ // BitmapFactory.decodeFile) into a FileNotFoundException for the caller.
+ private static Bitmap loadBitmap(String path) throws FileNotFoundException {
Bitmap bitmap = BitmapFactory.decodeFile(path);
if (bitmap == null) {
throw new FileNotFoundException("Couldn't open " + path);
}
+ return bitmap;
+ }
// Converts the whole bitmap to a greyscale luminance array up front; dimensions are
// now supplied to the base class rather than stored locally (mWidth/mHeight removed).
+ public RGBMonochromeBitmapSource(Bitmap bitmap) {
+ super(bitmap.getHeight(), bitmap.getWidth());
int width = bitmap.getWidth();
int height = bitmap.getHeight();
int[] pixels = new int[width * height];
// In order to measure pure decoding speed, we convert the entire image to a greyscale array up
// front, which is the same as the Y channel of the YUVMonochromeBitmapSource in the real app.
mLuminances = new byte[width * height];
- mWidth = width;
- mHeight = height;
for (int y = 0; y < height; y++) {
// BUG FIX: the row offset must use the row stride (width), not height.
// getLuminance() reads mLuminances[y * getWidth() + x], so writing rows with a
// stride of `height` scrambles the luminance data for any non-square bitmap.
- int offset = y * height;
+ int offset = y * width;
for (int x = 0; x < width; x++) {
}
}
// The getHeight()/getWidth() overrides are removed; the final base-class accessors
// introduced by this patch replace them.
- @Override
- public int getHeight() {
- return mHeight;
- }
-
- @Override
- public int getWidth() {
- return mWidth;
- }
-
@Override
protected int getLuminance(int x, int y) {
// Row-major lookup; & 0xff widens the stored byte to an unsigned 0-255 value.
- return mLuminances[y * mWidth + x] & 0xff;
+ return mLuminances[y * getWidth() + x] & 0xff;
}
@Override
protected int[] getLuminanceRow(int y, int[] row) {
// Reuse the caller-supplied buffer when it is big enough; otherwise reallocate.
- int width = mWidth;
+ int width = getWidth();
if (row == null || row.length < width) {
row = new int[width];
}
@Override
protected int[] getLuminanceColumn(int x, int[] column) {
- int width = mWidth;
- int height = mHeight;
+ int width = getWidth();
+ int height = getHeight();
if (column == null || column.length < height) {
column = new int[height];
}
*/
public final class AWTImageMonochromeBitmapSource extends BaseMonochromeBitmapSource {
- private final int height;
- private final int width;
private final int[] pixels;
public AWTImageMonochromeBitmapSource(Image image) throws ReaderException {
- height = image.getHeight(null);
- width = image.getWidth(null);
// Dimensions now live in the base class; read them back via the accessors for the
// local allocations below.
+ super(image.getHeight(null), image.getWidth(null));
+ int height = getHeight();
+ int width = getWidth();
pixels = new int[height * width];
// Seems best in this situation to grab all pixels upfront. Grabbing any individual pixel
// entails creating a relatively expensive object and calling through several methods.
}
}
- public int getHeight() {
- return height;
- }
-
- public int getWidth() {
- return width;
- }
-
/**
* See <code>com.google.zxing.client.j2me.LCDUIImageMonochromeBitmapSource</code> for more explanation
* of the computation used in this method.
*/
protected int getLuminance(int x, int y) {
- int pixel = pixels[y * width + x];
+ int pixel = pixels[y * getWidth() + x];
// Weighted greyscale: (R + 2*G + B) / 4 — the >> 7 on the green byte doubles its
// weight relative to red and blue before the final >> 2 divide.
return (((pixel & 0x00FF0000) >> 16) +
((pixel & 0x0000FF00) >> 7) +
(pixel & 0x000000FF )) >> 2;
}
protected int[] getLuminanceRow(int y, int[] row) {
+ int width = getWidth();
if (row == null || row.length < width) {
row = new int[width];
}
}
protected int[] getLuminanceColumn(int x, int[] column) {
+ int height = getHeight();
+ int width = getWidth();
if (column == null || column.length < height) {
column = new int[height];
}
private static final int LUMINANCE_SHIFT = 8 - LUMINANCE_BITS;
private static final int LUMINANCE_BUCKETS = 1 << LUMINANCE_BITS;
// Dimensions are now owned by this base class; subclasses pass them to the new
// constructor instead of each storing their own copies.
+ private final int height;
+ private final int width;
private int blackPoint;
private BlackPointEstimationMethod lastMethod;
private int lastArgument;
private int[] luminances;
// Constructor takes height FIRST, then width — every subclass call site in this
// patch follows that order.
- protected BaseMonochromeBitmapSource() {
+ protected BaseMonochromeBitmapSource(int height, int width) {
+ this.height = height;
+ this.width = width;
blackPoint = 0x7F;
lastMethod = null;
lastArgument = 0;
return false;
}
- // These two methods should not need to exist because they are defined in the interface that
- // this abstract class implements. However this seems to cause problems on some Nokias.
- // So we write these redundant declarations.
// Final concrete accessors replace the previous abstract declarations, so subclasses
// no longer override them (and cannot, being final).
+ public final int getHeight() {
+ return height;
+ }
- public abstract int getHeight();
+ public final int getWidth() {
+ return width;
+ }
- public abstract int getWidth();
+ // These methods below should not need to exist because they are defined in the interface that
+ // this abstract class implements. However this seems to cause problems on some Nokias.
+ // So we write these redundant declarations.
/**
* Retrieves the luminance at the pixel x,y in the bitmap. This method is only used for estimating
public final class LCDUIImageMonochromeBitmapSource extends BaseMonochromeBitmapSource {
private final Image image;
- private final int height;
- private final int width;
private final int[] pixelHolder;
public LCDUIImageMonochromeBitmapSource(Image image) {
+ super(image.getHeight(), image.getWidth());
this.image = image;
- height = image.getHeight();
- width = image.getWidth();
// Single-element scratch buffer reused by getLuminance() to avoid per-pixel allocation.
pixelHolder = new int[1];
}
- public int getHeight() {
- return height;
- }
-
- public int getWidth() {
- return width;
- }
-
// This is expensive and should be used very sparingly.
protected int getLuminance(int x, int y) {
// getRGB's third argument is the scanline stride of the destination array;
// the base-class width replaces the deleted local field.
- image.getRGB(pixelHolder, 0, width, x, y, 1, 1);
+ image.getRGB(pixelHolder, 0, getWidth(), x, y, 1, 1);
int pixel = pixelHolder[0];
// Instead of multiplying by 306, 601, 117, we multiply by 256, 512, 256, so that
// For efficiency, the RGB data and the luminance data share the same array.
protected int[] getLuminanceRow(int y, int[] row) {
+ int width = getWidth();
if (row == null || row.length < width) {
row = new int[width];
}
}
protected int[] getLuminanceColumn(int x, int[] column) {
+ int height = getHeight();
if (column == null || column.length < height) {
column = new int[height];
}
private final BufferedImage image;
private final int left;
private final int top;
- private final int width;
- private final int height;
/**
* Creates an instance that uses the entire given image as a source of pixels to decode.
* @param bottom likewise, one more than the y coordinate of the bottommost pixels to decode
*/
public BufferedImageMonochromeBitmapSource(BufferedImage image, int left, int top, int right, int bottom) {
// Base class now holds the crop dimensions: height = bottom - top, width = right - left.
+ super(bottom - top, right - left);
this.image = image;
int sourceHeight = image.getHeight();
int sourceWidth = image.getWidth();
}
this.left = left;
this.top = top;
- this.width = right - left;
- this.height = bottom - top;
}
/**
return image;
}
- @Override
- public int getHeight() {
- return height;
- }
-
- @Override
- public int getWidth() {
- return width;
- }
-
@Override
public MonochromeBitmapSource rotateCounterClockwise() {
if (!isRotateSupported()) {
op.filter(image, rotatedImage);
// Crop remapping for a 90° CCW rotation; argument order is (image, left, top,
// right, bottom). New right - left = height and new bottom - top = width, i.e. the
// rotated crop swaps dimensions as expected.
return new BufferedImageMonochromeBitmapSource(rotatedImage,
top,
- sourceWidth - (left + width),
- top + height,
+ sourceWidth - (left + getWidth()),
+ top + getHeight(),
sourceWidth - left);
}
@Override
protected int[] getLuminanceRow(int y, int[] row) {
+ int width = getWidth();
if (row == null || row.length < width) {
row = new int[width];
}
@Override
protected int[] getLuminanceColumn(int x, int[] column) {
+ int height = getHeight();
if (column == null || column.length < height) {
column = new int[height];
}