import com.google.zxing.ReaderException;
import com.google.zxing.common.BitSource;
import com.google.zxing.common.CharacterSetECI;
+import com.google.zxing.common.DecoderResult;
import java.io.UnsupportedEncodingException;
+import java.util.Vector;
/**
* <p>QR Codes can encode text as bits in one of several modes, and can use multiple modes
*
* <p>See ISO 18004:2006, 6.4.3 - 6.4.7</p>
*
- * @author srowen@google.com (Sean Owen)
+ * @author Sean Owen
*/
final class DecodedBitStreamParser {
private DecodedBitStreamParser() {
}
- static String decode(byte[] bytes, Version version) throws ReaderException {
+ static DecoderResult decode(byte[] bytes, Version version, ErrorCorrectionLevel ecLevel) throws ReaderException {
BitSource bits = new BitSource(bytes);
StringBuffer result = new StringBuffer();
CharacterSetECI currentCharacterSetECI = null;
boolean fc1InEffect = false;
+ Vector byteSegments = new Vector(1);
Mode mode;
do {
// While still another segment to read...
// OK, assume we're done. Really, a TERMINATOR mode should have been recorded here
mode = Mode.TERMINATOR;
} else {
- mode = Mode.forBits(bits.readBits(4)); // mode is encoded by 4 bits
+ try {
+ mode = Mode.forBits(bits.readBits(4)); // mode is encoded by 4 bits
+ } catch (IllegalArgumentException iae) {
+ throw ReaderException.getInstance();
+ }
}
if (!mode.equals(Mode.TERMINATOR)) {
if (mode.equals(Mode.FNC1_FIRST_POSITION) || mode.equals(Mode.FNC1_SECOND_POSITION)) {
// We do little with FNC1 except alter the parsed result a bit according to the spec
fc1InEffect = true;
+ } else if (mode.equals(Mode.STRUCTURED_APPEND)) {
+ // not really supported; all we do is ignore it
+ // Read next 8 bits (symbol sequence #) and 8 bits (parity data), then continue
+ bits.readBits(16);
} else if (mode.equals(Mode.ECI)) {
// Count doesn't apply to ECI
int value = parseECIValue(bits);
- try {
- currentCharacterSetECI = CharacterSetECI.getCharacterSetECIByValue(value);
- } catch (IllegalArgumentException iae) {
- // unsupported... just continue?
+ currentCharacterSetECI = CharacterSetECI.getCharacterSetECIByValue(value);
+ if (currentCharacterSetECI == null) {
+ throw ReaderException.getInstance();
}
} else {
// How many characters will follow, encoded in this mode?
} else if (mode.equals(Mode.ALPHANUMERIC)) {
decodeAlphanumericSegment(bits, result, count, fc1InEffect);
} else if (mode.equals(Mode.BYTE)) {
- decodeByteSegment(bits, result, count, currentCharacterSetECI);
+ decodeByteSegment(bits, result, count, currentCharacterSetECI, byteSegments);
} else if (mode.equals(Mode.KANJI)) {
decodeKanjiSegment(bits, result, count);
} else {
- throw new ReaderException("Unsupported mode indicator");
+ throw ReaderException.getInstance();
}
}
}
} while (!mode.equals(Mode.TERMINATOR));
- // I thought it wasn't allowed to leave extra bytes after the terminator but it happens
- /*
- int bitsLeft = bits.available();
- if (bitsLeft > 0) {
- if (bitsLeft > 6 || bits.readBits(bitsLeft) != 0) {
- throw new ReaderException("Excess bits or non-zero bits after terminator mode indicator");
- }
- }
- */
- return result.toString();
+ return new DecoderResult(bytes, result.toString(), byteSegments.isEmpty() ? null : byteSegments, ecLevel);
}
private static void decodeKanjiSegment(BitSource bits,
try {
result.append(new String(buffer, SHIFT_JIS));
} catch (UnsupportedEncodingException uee) {
- throw new ReaderException(SHIFT_JIS + " encoding is not supported on this device");
+ throw ReaderException.getInstance();
}
}
private static void decodeByteSegment(BitSource bits,
StringBuffer result,
int count,
- CharacterSetECI currentCharacterSetECI) throws ReaderException {
+ CharacterSetECI currentCharacterSetECI,
+ Vector byteSegments) throws ReaderException {
byte[] readBytes = new byte[count];
if (count << 3 > bits.available()) {
- throw new ReaderException("Count too large: " + count);
+ throw ReaderException.getInstance();
}
for (int i = 0; i < count; i++) {
readBytes[i] = (byte) bits.readBits(8);
try {
result.append(new String(readBytes, encoding));
} catch (UnsupportedEncodingException uce) {
- throw new ReaderException(uce.toString());
+ throw ReaderException.getInstance();
}
+ byteSegments.addElement(readBytes);
}
private static void decodeAlphanumericSegment(BitSource bits,
// Each 10 bits encodes three digits
int threeDigitsBits = bits.readBits(10);
if (threeDigitsBits >= 1000) {
- throw new ReaderException("Illegal value for 3-digit unit: " + threeDigitsBits);
+ throw ReaderException.getInstance();
}
result.append(ALPHANUMERIC_CHARS[threeDigitsBits / 100]);
result.append(ALPHANUMERIC_CHARS[(threeDigitsBits / 10) % 10]);
// Two digits left over to read, encoded in 7 bits
int twoDigitsBits = bits.readBits(7);
if (twoDigitsBits >= 100) {
- throw new ReaderException("Illegal value for 2-digit unit: " + twoDigitsBits);
+ throw ReaderException.getInstance();
}
result.append(ALPHANUMERIC_CHARS[twoDigitsBits / 10]);
result.append(ALPHANUMERIC_CHARS[twoDigitsBits % 10]);
// One digit left over to read
int digitBits = bits.readBits(4);
if (digitBits >= 10) {
- throw new ReaderException("Illegal value for digit unit: " + digitBits);
+ throw ReaderException.getInstance();
}
result.append(ALPHANUMERIC_CHARS[digitBits]);
}
// that it's UTF-8.
int length = bytes.length;
boolean canBeISO88591 = true;
+ boolean canBeShiftJIS = true;
+ int maybeDoubleByteCount = 0;
+ int maybeSingleByteKatakanaCount = 0;
+ boolean sawLatin1Supplement = false;
boolean lastWasPossibleDoubleByteStart = false;
- for (int i = 0; i < length; i++) {
+ for (int i = 0; i < length && (canBeISO88591 || canBeShiftJIS); i++) {
int value = bytes[i] & 0xFF;
- if (value >= 0x80 && value <= 0x9F && i < length - 1) {
+ if ((value == 0xC2 || value == 0xC3) && i < length - 1) {
+ // This is really a poor hack. The slightly more exotic characters people might want to put in
+ // a QR Code, by which I mean the Latin-1 supplement characters (e.g. u-umlaut) have encodings
+ // that start with 0xC2 followed by [0xA0,0xBF], or start with 0xC3 followed by [0x80,0xBF].
+ int nextValue = bytes[i + 1] & 0xFF;
+ if (nextValue <= 0xBF && ((value == 0xC2 && nextValue >= 0xA0) || (value == 0xC3 && nextValue >= 0x80))) {
+ sawLatin1Supplement = true;
+ }
+ }
+ if (value >= 0x7F && value <= 0x9F) {
canBeISO88591 = false;
- // ISO-8859-1 shouldn't use this, but before we decide it is Shift_JIS,
- // just double check that it is followed by a byte that's valid in
- // the Shift_JIS encoding
+ }
+ if (value >= 0xA1 && value <= 0xDF) {
+ // count the number of characters that might be a Shift_JIS single-byte Katakana character
+ if (!lastWasPossibleDoubleByteStart) {
+ maybeSingleByteKatakanaCount++;
+ }
+ }
+ if (!lastWasPossibleDoubleByteStart && ((value >= 0xF0 && value <= 0xFF) || value == 0x80 || value == 0xA0)) {
+ canBeShiftJIS = false;
+ }
+ if (((value >= 0x81 && value <= 0x9F) || (value >= 0xE0 && value <= 0xEF))) {
+ // These start double-byte characters in Shift_JIS. Let's see if it's followed by a valid
+ // second byte.
if (lastWasPossibleDoubleByteStart) {
// If we just checked this and the last byte for being a valid double-byte
// char, don't check starting on this byte. If this and the last byte
// ... otherwise do check to see if this plus the next byte form a valid
// double byte pair encoding a character.
lastWasPossibleDoubleByteStart = true;
- int nextValue = bytes[i + 1] & 0xFF;
- if (nextValue < 0x40 || nextValue > 0xFC) {
- return UTF8;
- }
- // There is some conflicting information out there about which bytes can follow which in
- // double-byte Shift_JIS characters. The rule above seems to be the one that matches practice.
- // The stricter rule below, however, is given by other resources.
- /*
- if ((value & 0x1) == 0) {
- // if even, next value should be in [0x9F,0xFC]
- // if not, we'll guess UTF-8
- if (nextValue < 0x9F || nextValue > 0xFC) {
- return UTF8;
- }
+ if (i >= bytes.length - 1) {
+ canBeShiftJIS = false;
} else {
- // if odd, next value should be in [0x40,0x9E]
- // if not, we'll guess UTF-8
- if (nextValue < 0x40 || nextValue > 0x9E) {
- return UTF8;
+ int nextValue = bytes[i + 1] & 0xFF;
+ if (nextValue < 0x40 || nextValue > 0xFC) {
+ canBeShiftJIS = false;
+ } else {
+ maybeDoubleByteCount++;
}
+ // There is some conflicting information out there about which bytes can follow which in
+ // double-byte Shift_JIS characters. The rule above seems to be the one that matches practice.
}
- */
}
+ } else {
+ lastWasPossibleDoubleByteStart = false;
}
}
- return canBeISO88591 ? ISO88591 : SHIFT_JIS;
+ // Distinguishing Shift_JIS and ISO-8859-1 can be a little tough. The crude heuristic is:
+  // - If we saw
+  //   - at least three bytes that start a double-byte value (bytes that are rare in ISO-8859-1), or
+  //   - over 5% of bytes that could be single-byte Katakana (also rare in ISO-8859-1),
+  //   - and we saw no sequences that are invalid in Shift_JIS, then we conclude Shift_JIS
+ if (canBeShiftJIS && (maybeDoubleByteCount >= 3 || 20 * maybeSingleByteKatakanaCount > length)) {
+ return SHIFT_JIS;
+ }
+ // Otherwise, we default to ISO-8859-1 unless we know it can't be
+ if (!sawLatin1Supplement && canBeISO88591) {
+ return ISO88591;
+ }
+ // Otherwise, we take a wild guess with UTF-8
+ return UTF8;
}
private static int parseECIValue(BitSource bits) {