renderer: use the biggest integer type for masking literals (#15482)

On 32-bit systems the mask would only be 32 bits wide, which is smaller
than the BiggestInt (usually 64 bits) it was masked against.

For some reason this only affects 32-bit Windows but not 32-bit Linux.
It might just be a difference in how gcc handles out-of-range shifts on
Windows and Linux.
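
Illustrative sketch of the shift-width issue (the variable names below are
made up for this example; BiggestInt is the int64 alias from system.nim):

import std/strutils

# A sketch of the shift-width problem, not the compiler code itself.
let size = 4                                      # an int32 literal occupies 4 bytes
let narrowMask = (1 shl (size * 8)) - 1           # `1` is `int`; on a 32-bit target this shifts a 32-bit value by 32 bits
let wideMask = (1.BiggestInt shl (size * 8)) - 1  # the shift happens at 64 bits, so the low 32 bits are reliably all ones
echo toHex(narrowMask, 16)                        # matches wideMask on a 64-bit build, unreliable when int is 32 bits wide
echo toHex(wideMask, 16)                          # 00000000FFFFFFFF on any target
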
alaviss
2020-10-03 17:39:56 -05:00
committed by GitHub
parent 2288188fe9
commit fc973b2c0c
2 changed files with 3 additions and 1 deletion


@@ -336,7 +336,7 @@ proc litAux(g: TSrcGen; n: PNode, x: BiggestInt, size: int): string =
   if nfBase2 in n.flags: result = "0b" & toBin(x, size * 8)
   elif nfBase8 in n.flags:
-    var y = if size < sizeof(BiggestInt): x and ((1 shl (size*8)) - 1)
+    var y = if size < sizeof(BiggestInt): x and ((1.BiggestInt shl (size*8)) - 1)
             else: x
     result = "0o" & toOct(y, size * 3)
   elif nfBase16 in n.flags: result = "0x" & toHex(x, size * 2)
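
For reference, a standalone sketch of the fixed octal branch; renderOctal is
a hypothetical name used only for this example, and litAux itself also covers
the base-2/base-16 cases while the renderer appends the type suffix elsewhere:

import std/strutils

# Hypothetical helper mirroring the fixed branch above (not litAux itself).
proc renderOctal(x: BiggestInt; size: int): string =
  let y = if size < sizeof(BiggestInt): x and ((1.BiggestInt shl (size * 8)) - 1)
          else: x
  result = "0o" & toOct(y, size * 3)

echo renderOctal(0o755, 4)   # 0o000000000755, the digits expected by the new test line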


@@ -29,6 +29,7 @@ array[0 .. 100, int]
 10
 test
 0o377'i8
+0o000000000755'i32
 1
 2
 3
@@ -257,6 +258,7 @@ macro toRendererBug(n): untyped =
   result = newLit repr(n)
 echo toRendererBug(0o377'i8)
+echo toRendererBug(0o755'i32)
 # bug #12129
 macro foobar() =