diff --git a/changelog.md b/changelog.md index cb87e960e9..ea2c8a927d 100644 --- a/changelog.md +++ b/changelog.md @@ -45,6 +45,7 @@ - A bug allowed `macro foo(): int = 123` to compile even though a macros has to return a `NimNode`. This has been fixed. +- With the exception of `uint` and `uint64`, conversions to unsigned types are now range checked during runtime. #### Breaking changes in the standard library diff --git a/compiler/ccgexprs.nim b/compiler/ccgexprs.nim index 2991ca2e26..2312ce9fd7 100644 --- a/compiler/ccgexprs.nim +++ b/compiler/ccgexprs.nim @@ -1959,9 +1959,7 @@ proc genCast(p: BProc, e: PNode, d: var TLoc) = proc genRangeChck(p: BProc, n: PNode, d: var TLoc, magic: string) = var a: TLoc var dest = skipTypes(n.typ, abstractVar) - # range checks for unsigned turned out to be buggy and annoying: - if optRangeCheck notin p.options or dest.skipTypes({tyRange}).kind in - {tyUInt..tyUInt64}: + if optRangeCheck notin p.options: initLocExpr(p, n.sons[0], a) putIntoDest(p, d, n, "(($1) ($2))" % [getTypeDesc(p.module, dest), rdCharLoc(a)], a.storage) diff --git a/compiler/lexer.nim b/compiler/lexer.nim index 0dd6245b02..52559dad51 100644 --- a/compiler/lexer.nim +++ b/compiler/lexer.nim @@ -538,9 +538,9 @@ proc getNumber(L: var TLexer, result: var TToken) = of tkInt16Lit: result.iNumber = BiggestInt(int16(toU16(int(xi)))) of tkInt32Lit: result.iNumber = BiggestInt(int32(toU32(int64(xi)))) of tkUIntLit, tkUInt64Lit: result.iNumber = xi - of tkUInt8Lit: result.iNumber = BiggestInt(uint8(toU8(int(xi)))) - of tkUInt16Lit: result.iNumber = BiggestInt(uint16(toU16(int(xi)))) - of tkUInt32Lit: result.iNumber = BiggestInt(uint32(toU32(int64(xi)))) + of tkUInt8Lit: result.iNumber = BiggestInt(cast[uint8](toU8(int(xi)))) + of tkUInt16Lit: result.iNumber = BiggestInt(cast[uint16](toU16(int(xi)))) + of tkUInt32Lit: result.iNumber = BiggestInt(cast[uint32](toU32(int64(xi)))) of tkFloat32Lit: result.fNumber = (cast[PFloat32](addr(xi)))[] # note: this code is endian 
neutral! diff --git a/lib/pure/bitops.nim b/lib/pure/bitops.nim index b32b5dc671..51e4a6c6a0 100644 --- a/lib/pure/bitops.nim +++ b/lib/pure/bitops.nim @@ -33,6 +33,12 @@ const useICC_builtins = defined(icc) and useBuiltins const useVCC_builtins = defined(vcc) and useBuiltins const arch64 = sizeof(int) == 8 +template toUnsigned(x: int8): uint8 = cast[uint8](x) +template toUnsigned(x: int16): uint16 = cast[uint16](x) +template toUnsigned(x: int32): uint32 = cast[uint32](x) +template toUnsigned(x: int64): uint64 = cast[uint64](x) +template toUnsigned(x: int): uint = cast[uint](x) + template forwardImpl(impl, arg) {.dirty.} = when sizeof(x) <= 4: when x is SomeSignedInt: @@ -242,6 +248,8 @@ proc countSetBits*(x: SomeInteger): int {.inline, nosideeffect.} = ## Counts the set bits in integer. (also called `Hamming weight`:idx:.) # TODO: figure out if ICC support _popcnt32/_popcnt64 on platform without POPCNT. # like GCC and MSVC + when x is SomeSignedInt: + let x = x.toUnsigned when nimvm: result = forwardImpl(countSetBits_nim, x) else: @@ -272,6 +280,8 @@ proc parityBits*(x: SomeInteger): int {.inline, nosideeffect.} = ## is odd parity is 1, otherwise 0. # Can be used a base if creating ASM version. # https://stackoverflow.com/questions/21617970/how-to-check-if-value-has-even-parity-of-bits-or-odd + when x is SomeSignedInt: + let x = x.toUnsigned when nimvm: result = forwardImpl(parity_impl, x) else: @@ -287,6 +297,8 @@ proc firstSetBit*(x: SomeInteger): int {.inline, nosideeffect.} = ## If `x` is zero, when ``noUndefinedBitOpts`` is set, result is 0, ## otherwise result is undefined. # GCC builtin 'builtin_ffs' already handle zero input. + when x is SomeSignedInt: + let x = x.toUnsigned when nimvm: when noUndefined: if x == 0: @@ -321,6 +333,8 @@ proc fastLog2*(x: SomeInteger): int {.inline, nosideeffect.} = ## Quickly find the log base 2 of an integer. ## If `x` is zero, when ``noUndefinedBitOpts`` is set, result is -1, ## otherwise result is undefined. 
+ when x is SomeSignedInt: + let x = x.toUnsigned when noUndefined: if x == 0: return -1 @@ -352,6 +366,8 @@ proc countLeadingZeroBits*(x: SomeInteger): int {.inline, nosideeffect.} = ## Returns the number of leading zero bits in integer. ## If `x` is zero, when ``noUndefinedBitOpts`` is set, result is 0, ## otherwise result is undefined. + when x is SomeSignedInt: + let x = x.toUnsigned when noUndefined: if x == 0: return 0 @@ -369,6 +385,8 @@ proc countTrailingZeroBits*(x: SomeInteger): int {.inline, nosideeffect.} = ## Returns the number of trailing zeros in integer. ## If `x` is zero, when ``noUndefinedBitOpts`` is set, result is 0, ## otherwise result is undefined. + when x is SomeSignedInt: + let x = x.toUnsigned when noUndefined: if x == 0: return 0 diff --git a/tests/misc/tsizeof3.nim b/tests/misc/tsizeof3.nim index 50ad883713..e41e0a2688 100644 --- a/tests/misc/tsizeof3.nim +++ b/tests/misc/tsizeof3.nim @@ -13,7 +13,7 @@ proc toByteArrayBE*[T: SomeInteger](num: T): ByteArrayBE[sizeof(T)]= ## Notice the result type const N = T.sizeof for i in 0 ..< N: - result[i] = byte(num shr ((N-1-i) * 8)) + result[i] = byte((num shr ((N-1-i) * 8)) and high(int8)) let a = 12345.toByteArrayBE echo a[^2 .. ^1] # to make it work on both 32-bit and 64-bit