import com.google.zxing.common.BlackPointEstimator;
/**
- * This object implements MonochromeBitmapSource around an Android Bitmap. Rather than capturing an
- * RGB image and calculating the grey value at each pixel, we ask the camera driver for YUV data and
- * strip out the luminance channel directly. This should be faster but provides fewer bits, i.e.
- * fewer grey levels.
+ * This object implements MonochromeBitmapSource around an Android Bitmap.
*
* @author dswitkin@google.com (Daniel Switkin)
* @author srowen@google.com (Sean Owen)
}
// Returns one horizontal row of the image as a thresholded bit array: a bit is set where the
// (optionally sharpened) luminance of the pixel falls below the current blackPoint.
// NOTE(review): this is a diff hunk; the method's closing brace lies outside the visible
// context after "return row;".
public BitArray getBlackRow(int y, BitArray row, int startX, int getWidth) {
- if (row == null) {
+ // Reuse the caller's BitArray only if it is large enough for this request.
+ if (row == null || row.getSize() < getWidth) {
row = new BitArray(getWidth);
} else {
row.clear();
}
int[] pixelRow = new int[getWidth];
- image.getPixels(pixelRow, 0, getWidth, startX, y, getWidth, 1);
- for (int i = 0; i < getWidth; i++) {
- if (computeRGBLuminance(pixelRow[i]) < blackPoint) {
- row.set(i);
+ image.getPixels(pixelRow, 0, getWidth, startX, y, getWidth, 1);
+
+ // If the current decoder calculated the blackPoint based on one row, assume we're trying to
+ // decode a 1D barcode, and apply some sharpening.
+ // TODO: We may want to add a fifth parameter to request the amount of sharpening to be done.
+ if (lastMethod == BlackPointEstimationMethod.ROW_SAMPLING) {
+ // NOTE(review): pixelRow[1] assumes getWidth >= 2, and the loop below never classifies
+ // pixels 0 and getWidth - 1 (they stay white) — confirm both are intended.
+ int left = computeRGBLuminance(pixelRow[0]);
+ int center = computeRGBLuminance(pixelRow[1]);
+ for (int i = 1; i < getWidth - 1; i++) {
+ int right = computeRGBLuminance(pixelRow[i + 1]);
+ // [-1, 4, -1] sharpening kernel; its net weight of 2 is normalized out by the >> 1.
+ int luminance = ((center << 2) - left - right) >> 1;
+ if (luminance < blackPoint) {
+ row.set(i);
+ }
+ // Slide the three-tap window one pixel to the right.
+ left = center;
+ center = right;
+ }
+ } else {
+ // Any other estimation method: plain per-pixel thresholding, no sharpening.
+ for (int i = 0; i < getWidth; i++) {
+ if (computeRGBLuminance(pixelRow[i]) < blackPoint) {
+ row.set(i);
+ }
}
}
return row;
// NOTE(review): interior of the black-point estimation method — its signature (presumably
// estimateBlackPoint(BlackPointEstimationMethod method, int argument)) is outside this hunk.
// The diff also elides the histogram-accumulation loop bodies below; the truncated
// "for (int x ...)" at the ROW_SAMPLING branch and the unused startI are artifacts of that
// elision, not dead code — verify against the full file.
int width = image.width();
int height = image.height();
int[] histogram = new int[LUMINANCE_BUCKETS];
- float biasTowardsWhite = 1.0f;
if (method.equals(BlackPointEstimationMethod.TWO_D_SAMPLING)) {
int minDimension = width < height ? width : height;
int startI = height == minDimension ? 0 : (height - width) >> 1;
// Row-sampling path (branch header elided by the diff): 'argument' is the row index.
if (argument < 0 || argument >= height) {
throw new IllegalArgumentException("Row is not within the image: " + argument);
}
- biasTowardsWhite = 2.0f;
int[] pixelRow = new int[width];
image.getPixels(pixelRow, 0, width, 0, argument, width, 1);
for (int x = 0; x < width; x++) {
} else {
throw new IllegalArgumentException("Unknown method: " + method);
}
// The diff drops the biasTowardsWhite argument: estimate() now takes the histogram alone.
// LUMINANCE_SHIFT maps the bucket index back to the full 8-bit luminance range.
- blackPoint = BlackPointEstimator.estimate(histogram, biasTowardsWhite) << LUMINANCE_SHIFT;
+ blackPoint = BlackPointEstimator.estimate(histogram) << LUMINANCE_SHIFT;
// Remember how the black point was derived so getBlackRow() can pick the matching
// thresholding strategy (sharpened for ROW_SAMPLING, plain otherwise).
lastMethod = method;
lastArgument = argument;
}
/**
* An optimized approximation of a more proper conversion from RGB to luminance which
* only uses shifts. See BufferedImageMonochromeBitmapSource for an original version.
+ *
+ * @param pixel An ARGB input pixel
+ * @return An eight bit luminance value
*/
private static int computeRGBLuminance(int pixel) {
// Instead of multiplying by 306, 601, 117, we multiply by 256, 512, 256, so that
// the multiplies and the divide by 1024 all become shifts; over-weighting green slightly
// corrupts the conversion. Not significant for our purposes.
//
// But we can get even cleverer and eliminate a few shifts:
- return (((pixel & 0x00FF0000) >> 8) +
- ((pixel & 0x0000FF00) << 1) +
- ((pixel & 0x000000FF) << 8)) >> 10;
+ // R lands in bits 0-7, G in bits 1-8 (i.e. 2*G), B stays in bits 0-7, so
+ // (R + 2G + B) >> 2 is identical to the old (256R + 512G + 256B) >> 10.
+ return (((pixel & 0x00FF0000) >> 16) +
+ ((pixel & 0x0000FF00) >> 7) +
+ ( pixel & 0x000000FF )) >> 2;
}
}
\ No newline at end of file