--- uniform_int.hpp.orig	2004-07-26 23:43:34.000000000 -0400
+++ uniform_int.hpp	2006-03-15 09:20:32.000000000 -0500
@@ -97,21 +97,37 @@
         if(result <= _range)
           return result + _min;
       }
-    } else {                   // brange > range
-      if(brange / _range > 4 /* quantization_cutoff */ ) {
-        // the new range is vastly smaller than the source range,
-        // so quantization effects are not relevant
-        return boost::uniform_smallint<result_type>(_min, _max)(eng);
-      } else {
-        // use rejection method to handle cases like 0..5 -> 0..4
-        for(;;) {
-          base_result result = eng() - bmin;
-          // result and range are non-negative, and result is possibly larger
-          // than range, so the cast is safe
-          if(result <= static_cast<base_result>(_range))
-            return result + _min;
-        }
+    } else {                   // brange > _range
+      base_result _rangep1(_range);
+      ++_rangep1;  // _rangep1 = _range + 1.
+      // _range < brange, so it didn't overflow.
+
+      base_result bucket_width = brange / _rangep1;
+      // Note bucket_width >= 1
+      base_result cutoff = (_rangep1 * bucket_width + bmin) - 1;
+      if (brange % _rangep1 == (base_result) _range) {
+        ++bucket_width;
+        cutoff += _rangep1;
       }
+      // Above is equivalent to
+      //   bucket_width = (brange + 1) / (_range + 1);
+      //   cutoff = (_range + 1) * bucket_width + bmin - 1;
+      // without overflow (brange might be a maximal value).
+
+      // We only accept a random number at most cutoff.
+      // Note cutoff <= bmax
+      base_result result;
+
+      do {
+        result = eng();
+      } while (result > cutoff);
+      // Now result-bmin is uniformly distributed in
+      // [0, (_range + 1) * bucket_width - 1], so
+      // (result-bmin)/bucket_width is uniformly distributed in
+      // [0, _range].
+      // Also, this division uses high-order bits, instead of
+      // low-order bits.
+      return (result - bmin) / bucket_width + _min;
     }
   }
 