Re: New utf8string design may make UTF-8 the superior encoding
On 17 May, 22:58, Joshua Maurice <joshuamaur...@gmail.com> wrote:
On May 17, 12:25 pm, I V <ivle...@gmail.com> wrote:
[...]
Can't you just do something like the following? I understand that it
is, in fact, a finite state machine, but it uses no frameworks, no
regular expressions, etc. I'd expect it to be pretty good in terms of
speed and readability. It would be quite simple to add some code using
bit operations to convert from the UTF-8 array to Unicode code points.
//COMPLETELY UNTESTED
bool validate_utf8(unsigned char* utf8str_start, unsigned char* utf8str_end)
{
    for (unsigned char* x = utf8str_start; x != utf8str_end; )
    {
        if ((*x & 0x80) == 0)
        {
            // 0xxxxxxx: single byte (ASCII)
            ++x;
        }
        else if ((*x & (0x80 + 0x40 + 0x20)) == (0x80 + 0x40))
        {
            // 110xxxxx: lead byte of a two-byte sequence
            if (++x == utf8str_end || (*x & (0x80 + 0x40)) != 0x80)
                return false;
            ++x;
        }
        else if ((*x & (0x80 + 0x40 + 0x20 + 0x10)) == (0x80 + 0x40 + 0x20))
        {
            // 1110xxxx: lead byte of a three-byte sequence
            if (++x == utf8str_end || (*x & (0x80 + 0x40)) != 0x80)
                return false;
            if (++x == utf8str_end || (*x & (0x80 + 0x40)) != 0x80)
                return false;
            ++x;
        }
        else if ((*x & (0x80 + 0x40 + 0x20 + 0x10 + 0x08)) == (0x80 + 0x40 + 0x20 + 0x10))
        {
            // 11110xxx: lead byte of a four-byte sequence
            if (++x == utf8str_end || (*x & (0x80 + 0x40)) != 0x80)
                return false;
            if (++x == utf8str_end || (*x & (0x80 + 0x40)) != 0x80)
                return false;
            if (++x == utf8str_end || (*x & (0x80 + 0x40)) != 0x80)
                return false;
            ++x;
        }
        else
            return false;
    }
    return true;
}
First, this doesn't actually "validate" UTF-8, since it accepts
non-minimal sequences (e.g. 0xC0 0x80 as an over-long encoding of
U+0000), encodings for surrogates, etc. I'm not sure that this is
really an issue, however, since you'd normally only do such
validation on input (when IO speed dominates). For the rest, I'd
still use a loop:
int byteCount = byteCountTable[*p];
if (byteCount == 0) {
    error...
} else {
    ++ p;
    -- byteCount;
    while (byteCount != 0) {
        if ((*p & 0xC0) != 0x80) {
            error...
        }
        ++ p;
        -- byteCount;
    }
}
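The byteCountTable itself isn't shown here; as a rough sketch (my own,
not from the post), an equivalent lead-byte classification, written as
a function rather than a 256-entry table, could be:

// Sketch only: total sequence length implied by a lead byte, 0 for
// bytes that cannot begin a well-formed sequence (continuation bytes,
// the always over-long 0xC0/0xC1, and 0xF5..0xFF).
inline int leadByteCount( unsigned char b )
{
    if ( b < 0x80 ) return 1 ;      // 0xxxxxxx: ASCII
    if ( b < 0xC2 ) return 0 ;      // continuation byte or over-long lead
    if ( b < 0xE0 ) return 2 ;      // 110xxxxx
    if ( b < 0xF0 ) return 3 ;      // 1110xxxx
    if ( b < 0xF5 ) return 4 ;      // 11110xxx, up to U+10FFFF
    return 0 ;                      // 0xF5..0xFF never occur in UTF-8
}

A table built that way already rejects 0xC0, 0xC1 and 0xF5..0xFF at
the first byte, which catches part of the over-long problem up front.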
I don't know if it's faster or slower than yours (although both
are almost certainly faster than a DFA), but you can't get much
simpler. (Note that you can easily modify it to generate UTF-32
on the fly, almost as quickly, thus killing two birds with one
stone.)
My complete conversion routine is:
//!@cond implementation
struct EncodingInfos
{
    CodePoint limitValue ;
    Byte firstBytePrefix ;
    Byte firstByteMask ;
} ;

extern EncodingInfos const infoTable[ 7 ] ;
extern size_t const byteCountTable[] ;

Byte const nextBytePrefix = 0x80U ;
Byte const nextByteDataMask = 0x3FU ;
Byte const nextBytePrefixMask = 0xC0U ;
int const nextByteShift = 6 ;
//!@endcond

template< typename InputIterator >
CodePoint
codePoint(
    InputIterator begin,
    InputIterator end )
{
    size_t byteCount = begin != end
                       ? size( *begin )
                       : 0 ;
    EncodingInfos const* const
                        info = infoTable + byteCount ;
    CodePoint result = byteCount > 0
                       ? *begin ++ & info->firstByteMask
                       : error ;
    while ( result != error && -- byteCount > 0 ) {
        if ( begin == end
                || (*begin & nextBytePrefixMask) != nextBytePrefix ) {
            result = error ;
        } else {
            result = (result << nextByteShift)
                     | (*begin ++ & nextByteDataMask) ;
        }
    }
    if ( result != error ) {
        if ( result < (info - 1)->limitValue
                || result >= info->limitValue ) {
            result = error ;
        }
    }
    return result ;
}
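The contents of infoTable aren't shown either. Purely as an
illustration (my values, not necessarily his), entries that make the
range check at the end reject over-long forms and code points above
U+10FFFF might look like this:

// Illustrative values only.  For each sequence length, limitValue is
// one past the largest code point allowed for that length (capped at
// 0x110000, one past the last Unicode code point); the check
//     result < (info - 1)->limitValue || result >= info->limitValue
// then rejects over-long encodings and out-of-range results.
// firstBytePrefix would presumably be used on the encoding side.
EncodingInfos const infoTable[ 7 ] =
{
    { 0x00000000, 0x00, 0x00 },     // 0: invalid lead byte (sentinel)
    { 0x00000080, 0x00, 0x7F },     // 1 byte : U+0000  .. U+007F
    { 0x00000800, 0xC0, 0x1F },     // 2 bytes: U+0080  .. U+07FF
    { 0x00010000, 0xE0, 0x0F },     // 3 bytes: U+0800  .. U+FFFF
    { 0x00110000, 0xF0, 0x07 },     // 4 bytes: U+10000 .. U+10FFFF
    { 0x00110000, 0xF8, 0x03 },     // 5 bytes: never valid any more
    { 0x00110000, 0xFC, 0x01 },     // 6 bytes: never valid any more
} ;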
Note that this still lets encodings for surrogates through, but
it catches most of the rest. (And testing for surrogates is
simple once you have the UTF-32.)
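For completeness, that surrogate test is just a range check on the
decoded value, something along these lines:

// Surrogate code points (U+D800..U+DFFF) are reserved for UTF-16 and
// must never appear in well-formed UTF-8, even if they decode cleanly.
inline bool isSurrogate( CodePoint cp )
{
    return cp >= 0xD800 && cp <= 0xDFFF ;
}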
--
James Kanze