The maximum digest size for the foreseeable future is 512 bits, and the maximum block size is currently 1152 bits (SHA3-224). If more exotic hash algorithms are added without bumping these constants when required, the tests will fail. The stream buffer is currently 576 bytes, which is "fine" to keep on the stack, and is a sensible multiple of the more common 64-byte block size.
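As a rough sketch, the constants described above could look like the following (sizes in bytes; `MAX_BLOCK_SIZE` and `DIGEST_SIZES` are referenced by the code below, while `MAX_DIGEST_SIZE` is an assumed name used only for illustration):

// Sketch only: byte values derived from the bit sizes quoted above.
MAX_DIGEST_SIZE :: 64  // 512 bits, e.g. SHA-512
MAX_BLOCK_SIZE  :: 144 // 1152 bits, the SHA3-224 block (rate)

// hash_stream buffers reads in a [MAX_BLOCK_SIZE * 4]byte array,
// i.e. 576 bytes, a multiple of the common 64-byte block size.
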
package crypto_hash

/*
	Copyright 2021 zhibog
	Made available under the BSD-3 license.

	List of contributors:
		zhibog, dotbmp: Initial implementation.
*/

import "core:io"
import "core:mem"
import "core:os"

// hash_string will hash the given input and return the computed digest
// in a newly allocated slice.
hash_string :: proc(algorithm: Algorithm, data: string, allocator := context.allocator) -> []byte {
	return hash_bytes(algorithm, transmute([]byte)(data), allocator)
}

// hash_bytes will hash the given input and return the computed digest
// in a newly allocated slice.
hash_bytes :: proc(algorithm: Algorithm, data: []byte, allocator := context.allocator) -> []byte {
	dst := make([]byte, DIGEST_SIZES[algorithm], allocator)
	hash_bytes_to_buffer(algorithm, data, dst)
	return dst
}

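/*
	Minimal usage sketch for the one-shot helpers: the digest is allocated
	with the supplied allocator, so the caller owns (and should eventually
	delete) the returned slice. `.SHA256` is an assumed member of
	`Algorithm`; substitute whatever variants the enum actually defines.

		digest := hash_string(.SHA256, "The quick brown fox jumps over the lazy dog")
		defer delete(digest)
*/
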
// hash_string_to_buffer will hash the given input and assign the
// computed digest to the third parameter. It requires that the
// destination buffer is at least as big as the digest size.
hash_string_to_buffer :: proc(algorithm: Algorithm, data: string, hash: []byte) {
	hash_bytes_to_buffer(algorithm, transmute([]byte)(data), hash)
}

// hash_bytes_to_buffer will hash the given input and write the
// computed digest into the third parameter. It requires that the
// destination buffer is at least as big as the digest size.
hash_bytes_to_buffer :: proc(algorithm: Algorithm, data, hash: []byte) {
	ctx: Context

	init(&ctx, algorithm)
	update(&ctx, data)
	final(&ctx, hash)
}

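/*
	Minimal usage sketch for the *_to_buffer variants: no allocation is
	performed, the destination only has to be at least
	DIGEST_SIZES[algorithm] bytes long. `.SHA256` is an assumed member of
	`Algorithm`.

		algo := Algorithm.SHA256
		buf: [64]byte // large enough for any digest up to 512 bits
		hash_string_to_buffer(algo, "message", buf[:DIGEST_SIZES[algo]])
*/
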
// hash_stream will incrementally fully consume a stream, and return the
// computed digest in a newly allocated slice.
hash_stream :: proc(
	algorithm: Algorithm,
	s: io.Stream,
	allocator := context.allocator,
) -> (
	[]byte,
	io.Error,
) {
	ctx: Context

	buf: [MAX_BLOCK_SIZE * 4]byte
	defer mem.zero_explicit(&buf, size_of(buf))

	init(&ctx, algorithm)

	loop: for {
		n, err := io.read(s, buf[:])
		if n > 0 {
			// XXX/yawning: Can io.read return n > 0 and EOF?
			update(&ctx, buf[:n])
		}
		#partial switch err {
		case .None:
		case .EOF:
			break loop
		case:
			return nil, err
		}
	}

	dst := make([]byte, DIGEST_SIZES[algorithm], allocator)
	final(&ctx, dst)

	return dst, io.Error.None
}

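/*
	Minimal usage sketch for hash_stream: any io.Stream works here, and
	os.stream_from_handle (also used by hash_file below) is one way to get
	one. `fd` is assumed to be an already-open os.Handle, and `.SHA256` an
	assumed member of `Algorithm`.

		digest, err := hash_stream(.SHA256, os.stream_from_handle(fd))
		if err == .None {
			defer delete(digest)
			// use digest ...
		}
*/
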
// hash_file will read the file provided by the given handle and return the
// computed digest in a newly allocated slice.
hash_file :: proc(
	algorithm: Algorithm,
	hd: os.Handle,
	load_at_once := false,
	allocator := context.allocator,
) -> (
	[]byte,
	io.Error,
) {
	if !load_at_once {
		return hash_stream(algorithm, os.stream_from_handle(hd), allocator)
	}

	buf, ok := os.read_entire_file(hd, allocator)
	if !ok {
		return nil, io.Error.Unknown
	}
	defer delete(buf, allocator)

	return hash_bytes(algorithm, buf, allocator), io.Error.None
}

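/*
	Minimal usage sketch for hash_file: with load_at_once the whole file is
	read into memory and hashed in one shot, otherwise it is streamed
	through hash_stream's fixed-size buffer. The file name and `.SHA256`
	are placeholders.

		fd, errno := os.open("input.bin")
		if errno == os.ERROR_NONE {
			defer os.close(fd)
			if digest, err := hash_file(.SHA256, fd, load_at_once = true); err == .None {
				defer delete(digest)
				// use digest ...
			}
		}
*/
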
hash :: proc {
	hash_stream,
	hash_file,
	hash_bytes,
	hash_string,
	hash_bytes_to_buffer,
	hash_string_to_buffer,
}
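
/*
	`hash` is an explicit procedure group, so every helper above is
	reachable through the single `hash` name, with the overload picked
	from the argument types. A sketch, again assuming an `Algorithm.SHA256`
	member and an existing io.Stream `my_stream`:

		digest := hash(.SHA256, "message")        // resolves to hash_string
		digest2, err := hash(.SHA256, my_stream)  // resolves to hash_stream
*/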