Mirror of https://github.com/go-gitea/gitea.git, synced 2025-10-26 12:27:06 +00:00
Integrate public as bindata optionally (#293)

* Dropped unused codekit config
* Integrated dynamic and static bindata for public
* Ignore public bindata
* Add a general generate make task
* Integrated flexible public assets into web command
* Updated vendoring, added all missing govendor deps
* Made the linter happy with the bindata and dynamic code
* Moved public bindata definition to modules directory
* Ignoring the new bindata path now
* Updated to the new public modules import path
* Updated public bindata command and drop the new prefix
Authored by Thomas Boerger, committed by Lunny Xiao

parent 4680c349dd
commit b6a95a8cb3
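The "dynamic and static bindata" split described in the commit message is the usual Go pattern of deciding at build time between assets embedded in the binary and assets read from disk. The sketch below only illustrates that pattern under assumed names (the package name public, the Asset helper, and the embedded map are inventions for this example); the actual change wires a go-bindata generated file into the modules directory and selects it with a build tag.

// Minimal sketch of a flexible public-asset loader (assumed names, not
// the real Gitea code). A generated bindata file, built only when the
// "bindata" tag is set, would populate the embedded map; in the default
// build it stays empty and assets are read from the public/ directory.
package public

import (
	"io/ioutil"
	"path/filepath"
)

// embedded maps asset names to their contents; it is filled in by the
// generated bindata file when assets are compiled into the binary.
var embedded = map[string][]byte{}

// Asset returns the contents of a public asset, preferring the embedded
// copy and falling back to reading from disk during development.
func Asset(name string) ([]byte, error) {
	if data, ok := embedded[name]; ok {
		return data, nil
	}
	return ioutil.ReadFile(filepath.Join("public", name))
}

A "make generate" style task, as the commit message describes, would then regenerate the embedded table before building with the bindata tag.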
				
			
							
								
								
									
15  vendor/github.com/golang/snappy/AUTHORS  generated  vendored  Normal file
							| @@ -0,0 +1,15 @@ | ||||
| # This is the official list of Snappy-Go authors for copyright purposes. | ||||
| # This file is distinct from the CONTRIBUTORS files. | ||||
| # See the latter for an explanation. | ||||
|  | ||||
| # Names should be added to this file as | ||||
| #	Name or Organization <email address> | ||||
| # The email address is not required for organizations. | ||||
|  | ||||
| # Please keep the list sorted. | ||||
|  | ||||
| Damian Gryski <dgryski@gmail.com> | ||||
| Google Inc. | ||||
| Jan Mercl <0xjnml@gmail.com> | ||||
| Rodolfo Carvalho <rhcarvalho@gmail.com> | ||||
| Sebastien Binet <seb.binet@gmail.com> | ||||
							
								
								
									
37  vendor/github.com/golang/snappy/CONTRIBUTORS  generated  vendored  Normal file
							| @@ -0,0 +1,37 @@ | ||||
| # This is the official list of people who can contribute | ||||
| # (and typically have contributed) code to the Snappy-Go repository. | ||||
| # The AUTHORS file lists the copyright holders; this file | ||||
| # lists people.  For example, Google employees are listed here | ||||
| # but not in AUTHORS, because Google holds the copyright. | ||||
| # | ||||
| # The submission process automatically checks to make sure | ||||
| # that people submitting code are listed in this file (by email address). | ||||
| # | ||||
| # Names should be added to this file only after verifying that | ||||
| # the individual or the individual's organization has agreed to | ||||
| # the appropriate Contributor License Agreement, found here: | ||||
| # | ||||
| #     http://code.google.com/legal/individual-cla-v1.0.html | ||||
| #     http://code.google.com/legal/corporate-cla-v1.0.html | ||||
| # | ||||
| # The agreement for individuals can be filled out on the web. | ||||
| # | ||||
| # When adding J Random Contributor's name to this file, | ||||
| # either J's name or J's organization's name should be | ||||
| # added to the AUTHORS file, depending on whether the | ||||
| # individual or corporate CLA was used. | ||||
|  | ||||
| # Names should be added to this file like so: | ||||
| #     Name <email address> | ||||
|  | ||||
| # Please keep the list sorted. | ||||
|  | ||||
| Damian Gryski <dgryski@gmail.com> | ||||
| Jan Mercl <0xjnml@gmail.com> | ||||
| Kai Backman <kaib@golang.org> | ||||
| Marc-Antoine Ruel <maruel@chromium.org> | ||||
| Nigel Tao <nigeltao@golang.org> | ||||
| Rob Pike <r@golang.org> | ||||
| Rodolfo Carvalho <rhcarvalho@gmail.com> | ||||
| Russ Cox <rsc@golang.org> | ||||
| Sebastien Binet <seb.binet@gmail.com> | ||||
							
								
								
									
27  vendor/github.com/golang/snappy/LICENSE  generated  vendored  Normal file
							| @@ -0,0 +1,27 @@ | ||||
| Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. | ||||
|  | ||||
| Redistribution and use in source and binary forms, with or without | ||||
| modification, are permitted provided that the following conditions are | ||||
| met: | ||||
|  | ||||
|    * Redistributions of source code must retain the above copyright | ||||
| notice, this list of conditions and the following disclaimer. | ||||
|    * Redistributions in binary form must reproduce the above | ||||
| copyright notice, this list of conditions and the following disclaimer | ||||
| in the documentation and/or other materials provided with the | ||||
| distribution. | ||||
|    * Neither the name of Google Inc. nor the names of its | ||||
| contributors may be used to endorse or promote products derived from | ||||
| this software without specific prior written permission. | ||||
|  | ||||
| THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||||
| "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||||
| LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||||
| A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||||
| OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||||
| SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||||
| LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||||
| DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||||
| THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||||
| (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||||
| OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||||
							
								
								
									
7  vendor/github.com/golang/snappy/README  generated  vendored  Normal file
							| @@ -0,0 +1,7 @@ | ||||
| The Snappy compression format in the Go programming language. | ||||
|  | ||||
| To download and install from source: | ||||
| $ go get github.com/golang/snappy | ||||
|  | ||||
| Unless otherwise noted, the Snappy-Go source files are distributed | ||||
| under the BSD-style license found in the LICENSE file. | ||||
							
								
								
									
237  vendor/github.com/golang/snappy/decode.go  generated  vendored  Normal file
							| @@ -0,0 +1,237 @@ | ||||
| // Copyright 2011 The Snappy-Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package snappy | ||||
|  | ||||
| import ( | ||||
| 	"encoding/binary" | ||||
| 	"errors" | ||||
| 	"io" | ||||
| ) | ||||
|  | ||||
| var ( | ||||
| 	// ErrCorrupt reports that the input is invalid. | ||||
| 	ErrCorrupt = errors.New("snappy: corrupt input") | ||||
| 	// ErrTooLarge reports that the uncompressed length is too large. | ||||
| 	ErrTooLarge = errors.New("snappy: decoded block is too large") | ||||
| 	// ErrUnsupported reports that the input isn't supported. | ||||
| 	ErrUnsupported = errors.New("snappy: unsupported input") | ||||
|  | ||||
| 	errUnsupportedCopy4Tag      = errors.New("snappy: unsupported COPY_4 tag") | ||||
| 	errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") | ||||
| ) | ||||
|  | ||||
| // DecodedLen returns the length of the decoded block. | ||||
| func DecodedLen(src []byte) (int, error) { | ||||
| 	v, _, err := decodedLen(src) | ||||
| 	return v, err | ||||
| } | ||||
|  | ||||
| // decodedLen returns the length of the decoded block and the number of bytes | ||||
| // that the length header occupied. | ||||
| func decodedLen(src []byte) (blockLen, headerLen int, err error) { | ||||
| 	v, n := binary.Uvarint(src) | ||||
| 	if n <= 0 || v > 0xffffffff { | ||||
| 		return 0, 0, ErrCorrupt | ||||
| 	} | ||||
|  | ||||
| 	const wordSize = 32 << (^uint(0) >> 32 & 1) | ||||
| 	if wordSize == 32 && v > 0x7fffffff { | ||||
| 		return 0, 0, ErrTooLarge | ||||
| 	} | ||||
| 	return int(v), n, nil | ||||
| } | ||||
|  | ||||
| const ( | ||||
| 	decodeErrCodeCorrupt                  = 1 | ||||
| 	decodeErrCodeUnsupportedLiteralLength = 2 | ||||
| 	decodeErrCodeUnsupportedCopy4Tag      = 3 | ||||
| ) | ||||
|  | ||||
| // Decode returns the decoded form of src. The returned slice may be a sub- | ||||
| // slice of dst if dst was large enough to hold the entire decoded block. | ||||
| // Otherwise, a newly allocated slice will be returned. | ||||
| // | ||||
| // The dst and src must not overlap. It is valid to pass a nil dst. | ||||
| func Decode(dst, src []byte) ([]byte, error) { | ||||
| 	dLen, s, err := decodedLen(src) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	if dLen <= len(dst) { | ||||
| 		dst = dst[:dLen] | ||||
| 	} else { | ||||
| 		dst = make([]byte, dLen) | ||||
| 	} | ||||
| 	switch decode(dst, src[s:]) { | ||||
| 	case 0: | ||||
| 		return dst, nil | ||||
| 	case decodeErrCodeUnsupportedLiteralLength: | ||||
| 		return nil, errUnsupportedLiteralLength | ||||
| 	case decodeErrCodeUnsupportedCopy4Tag: | ||||
| 		return nil, errUnsupportedCopy4Tag | ||||
| 	} | ||||
| 	return nil, ErrCorrupt | ||||
| } | ||||
|  | ||||
| // NewReader returns a new Reader that decompresses from r, using the framing | ||||
| // format described at | ||||
| // https://github.com/google/snappy/blob/master/framing_format.txt | ||||
| func NewReader(r io.Reader) *Reader { | ||||
| 	return &Reader{ | ||||
| 		r:       r, | ||||
| 		decoded: make([]byte, maxBlockSize), | ||||
| 		buf:     make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Reader is an io.Reader that can read Snappy-compressed bytes. | ||||
| type Reader struct { | ||||
| 	r       io.Reader | ||||
| 	err     error | ||||
| 	decoded []byte | ||||
| 	buf     []byte | ||||
| 	// decoded[i:j] contains decoded bytes that have not yet been passed on. | ||||
| 	i, j       int | ||||
| 	readHeader bool | ||||
| } | ||||
|  | ||||
| // Reset discards any buffered data, resets all state, and switches the Snappy | ||||
| // reader to read from r. This permits reusing a Reader rather than allocating | ||||
| // a new one. | ||||
| func (r *Reader) Reset(reader io.Reader) { | ||||
| 	r.r = reader | ||||
| 	r.err = nil | ||||
| 	r.i = 0 | ||||
| 	r.j = 0 | ||||
| 	r.readHeader = false | ||||
| } | ||||
|  | ||||
| func (r *Reader) readFull(p []byte) (ok bool) { | ||||
| 	if _, r.err = io.ReadFull(r.r, p); r.err != nil { | ||||
| 		if r.err == io.ErrUnexpectedEOF { | ||||
| 			r.err = ErrCorrupt | ||||
| 		} | ||||
| 		return false | ||||
| 	} | ||||
| 	return true | ||||
| } | ||||
|  | ||||
| // Read satisfies the io.Reader interface. | ||||
| func (r *Reader) Read(p []byte) (int, error) { | ||||
| 	if r.err != nil { | ||||
| 		return 0, r.err | ||||
| 	} | ||||
| 	for { | ||||
| 		if r.i < r.j { | ||||
| 			n := copy(p, r.decoded[r.i:r.j]) | ||||
| 			r.i += n | ||||
| 			return n, nil | ||||
| 		} | ||||
| 		if !r.readFull(r.buf[:4]) { | ||||
| 			return 0, r.err | ||||
| 		} | ||||
| 		chunkType := r.buf[0] | ||||
| 		if !r.readHeader { | ||||
| 			if chunkType != chunkTypeStreamIdentifier { | ||||
| 				r.err = ErrCorrupt | ||||
| 				return 0, r.err | ||||
| 			} | ||||
| 			r.readHeader = true | ||||
| 		} | ||||
| 		chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 | ||||
| 		if chunkLen > len(r.buf) { | ||||
| 			r.err = ErrUnsupported | ||||
| 			return 0, r.err | ||||
| 		} | ||||
|  | ||||
| 		// The chunk types are specified at | ||||
| 		// https://github.com/google/snappy/blob/master/framing_format.txt | ||||
| 		switch chunkType { | ||||
| 		case chunkTypeCompressedData: | ||||
| 			// Section 4.2. Compressed data (chunk type 0x00). | ||||
| 			if chunkLen < checksumSize { | ||||
| 				r.err = ErrCorrupt | ||||
| 				return 0, r.err | ||||
| 			} | ||||
| 			buf := r.buf[:chunkLen] | ||||
| 			if !r.readFull(buf) { | ||||
| 				return 0, r.err | ||||
| 			} | ||||
| 			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 | ||||
| 			buf = buf[checksumSize:] | ||||
|  | ||||
| 			n, err := DecodedLen(buf) | ||||
| 			if err != nil { | ||||
| 				r.err = err | ||||
| 				return 0, r.err | ||||
| 			} | ||||
| 			if n > len(r.decoded) { | ||||
| 				r.err = ErrCorrupt | ||||
| 				return 0, r.err | ||||
| 			} | ||||
| 			if _, err := Decode(r.decoded, buf); err != nil { | ||||
| 				r.err = err | ||||
| 				return 0, r.err | ||||
| 			} | ||||
| 			if crc(r.decoded[:n]) != checksum { | ||||
| 				r.err = ErrCorrupt | ||||
| 				return 0, r.err | ||||
| 			} | ||||
| 			r.i, r.j = 0, n | ||||
| 			continue | ||||
|  | ||||
| 		case chunkTypeUncompressedData: | ||||
| 			// Section 4.3. Uncompressed data (chunk type 0x01). | ||||
| 			if chunkLen < checksumSize { | ||||
| 				r.err = ErrCorrupt | ||||
| 				return 0, r.err | ||||
| 			} | ||||
| 			buf := r.buf[:checksumSize] | ||||
| 			if !r.readFull(buf) { | ||||
| 				return 0, r.err | ||||
| 			} | ||||
| 			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 | ||||
| 			// Read directly into r.decoded instead of via r.buf. | ||||
| 			n := chunkLen - checksumSize | ||||
| 			if !r.readFull(r.decoded[:n]) { | ||||
| 				return 0, r.err | ||||
| 			} | ||||
| 			if crc(r.decoded[:n]) != checksum { | ||||
| 				r.err = ErrCorrupt | ||||
| 				return 0, r.err | ||||
| 			} | ||||
| 			r.i, r.j = 0, n | ||||
| 			continue | ||||
|  | ||||
| 		case chunkTypeStreamIdentifier: | ||||
| 			// Section 4.1. Stream identifier (chunk type 0xff). | ||||
| 			if chunkLen != len(magicBody) { | ||||
| 				r.err = ErrCorrupt | ||||
| 				return 0, r.err | ||||
| 			} | ||||
| 			if !r.readFull(r.buf[:len(magicBody)]) { | ||||
| 				return 0, r.err | ||||
| 			} | ||||
| 			for i := 0; i < len(magicBody); i++ { | ||||
| 				if r.buf[i] != magicBody[i] { | ||||
| 					r.err = ErrCorrupt | ||||
| 					return 0, r.err | ||||
| 				} | ||||
| 			} | ||||
| 			continue | ||||
| 		} | ||||
|  | ||||
| 		if chunkType <= 0x7f { | ||||
| 			// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). | ||||
| 			r.err = ErrUnsupported | ||||
| 			return 0, r.err | ||||
| 		} | ||||
| 		// Section 4.4 Padding (chunk type 0xfe). | ||||
| 		// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). | ||||
| 		if !r.readFull(r.buf[:chunkLen]) { | ||||
| 			return 0, r.err | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
							
								
								
									
10  vendor/github.com/golang/snappy/decode_amd64.go  generated  vendored  Normal file
							| @@ -0,0 +1,10 @@ | ||||
| // Copyright 2016 The Snappy-Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package snappy | ||||
|  | ||||
| // decode has the same semantics as in decode_other.go. | ||||
| // | ||||
| //go:noescape | ||||
| func decode(dst, src []byte) int | ||||
							
								
								
									
472  vendor/github.com/golang/snappy/decode_amd64.s  generated  vendored  Normal file
							| @@ -0,0 +1,472 @@ | ||||
| // Copyright 2016 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| #include "textflag.h" | ||||
|  | ||||
| // func decode(dst, src []byte) int | ||||
| // | ||||
| // The asm code generally follows the pure Go code in decode_other.go, except | ||||
| // where marked with a "!!!". | ||||
| // | ||||
| // All local variables fit into registers. The non-zero stack size is only to | ||||
| // spill registers and push args when issuing a CALL. The register allocation: | ||||
| //	- AX	scratch | ||||
| //	- BX	scratch | ||||
| //	- CX	length or x | ||||
| //	- DX	offset | ||||
| //	- SI	&src[s] | ||||
| //	- DI	&dst[d] | ||||
| //	+ R8	dst_base | ||||
| //	+ R9	dst_len | ||||
| //	+ R10	dst_base + dst_len | ||||
| //	+ R11	src_base | ||||
| //	+ R12	src_len | ||||
| //	+ R13	src_base + src_len | ||||
| //	- R14	used by doCopy | ||||
| //	- R15	used by doCopy | ||||
| // | ||||
| // The registers R8-R13 (marked with a "+") are set at the start of the | ||||
| // function, and after a CALL returns, and are not otherwise modified. | ||||
| // | ||||
| // The d variable is implicitly DI - R8,  and len(dst)-d is R10 - DI. | ||||
| // The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. | ||||
| TEXT ·decode(SB), NOSPLIT, $48-56 | ||||
| 	// Initialize SI, DI and R8-R13. | ||||
| 	MOVQ dst_base+0(FP), R8 | ||||
| 	MOVQ dst_len+8(FP), R9 | ||||
| 	MOVQ R8, DI | ||||
| 	MOVQ R8, R10 | ||||
| 	ADDQ R9, R10 | ||||
| 	MOVQ src_base+24(FP), R11 | ||||
| 	MOVQ src_len+32(FP), R12 | ||||
| 	MOVQ R11, SI | ||||
| 	MOVQ R11, R13 | ||||
| 	ADDQ R12, R13 | ||||
|  | ||||
| loop: | ||||
| 	// for s < len(src) | ||||
| 	CMPQ SI, R13 | ||||
| 	JEQ  end | ||||
|  | ||||
| 	// CX = uint32(src[s]) | ||||
| 	// | ||||
| 	// switch src[s] & 0x03 | ||||
| 	MOVBLZX (SI), CX | ||||
| 	MOVL    CX, BX | ||||
| 	ANDL    $3, BX | ||||
| 	CMPL    BX, $1 | ||||
| 	JAE     tagCopy | ||||
|  | ||||
| 	// ---------------------------------------- | ||||
| 	// The code below handles literal tags. | ||||
|  | ||||
| 	// case tagLiteral: | ||||
| 	// x := uint32(src[s] >> 2) | ||||
| 	// switch | ||||
| 	SHRL $2, CX | ||||
| 	CMPL CX, $60 | ||||
| 	JAE  tagLit60Plus | ||||
|  | ||||
| 	// case x < 60: | ||||
| 	// s++ | ||||
| 	INCQ SI | ||||
|  | ||||
| doLit: | ||||
| 	// This is the end of the inner "switch", when we have a literal tag. | ||||
| 	// | ||||
| 	// We assume that CX == x and x fits in a uint32, where x is the variable | ||||
| 	// used in the pure Go decode_other.go code. | ||||
|  | ||||
| 	// length = int(x) + 1 | ||||
| 	// | ||||
| 	// Unlike the pure Go code, we don't need to check if length <= 0 because | ||||
| 	// CX can hold 64 bits, so the increment cannot overflow. | ||||
| 	INCQ CX | ||||
|  | ||||
| 	// Prepare to check if copying length bytes will run past the end of dst or | ||||
| 	// src. | ||||
| 	// | ||||
| 	// AX = len(dst) - d | ||||
| 	// BX = len(src) - s | ||||
| 	MOVQ R10, AX | ||||
| 	SUBQ DI, AX | ||||
| 	MOVQ R13, BX | ||||
| 	SUBQ SI, BX | ||||
|  | ||||
| 	// !!! Try a faster technique for short (16 or fewer bytes) copies. | ||||
| 	// | ||||
| 	// if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { | ||||
| 	//   goto callMemmove // Fall back on calling runtime·memmove. | ||||
| 	// } | ||||
| 	// | ||||
| 	// The C++ snappy code calls this TryFastAppend. It also checks len(src)-s | ||||
| 	// against 21 instead of 16, because it cannot assume that all of its input | ||||
| 	// is contiguous in memory and so it needs to leave enough source bytes to | ||||
| 	// read the next tag without refilling buffers, but Go's Decode assumes | ||||
| 	// contiguousness (the src argument is a []byte). | ||||
| 	CMPQ CX, $16 | ||||
| 	JGT  callMemmove | ||||
| 	CMPQ AX, $16 | ||||
| 	JLT  callMemmove | ||||
| 	CMPQ BX, $16 | ||||
| 	JLT  callMemmove | ||||
|  | ||||
| 	// !!! Implement the copy from src to dst as a 16-byte load and store. | ||||
| 	// (Decode's documentation says that dst and src must not overlap.) | ||||
| 	// | ||||
| 	// This always copies 16 bytes, instead of only length bytes, but that's | ||||
| 	// OK. If the input is a valid Snappy encoding then subsequent iterations | ||||
| 	// will fix up the overrun. Otherwise, Decode returns a nil []byte (and a | ||||
| 	// non-nil error), so the overrun will be ignored. | ||||
| 	// | ||||
| 	// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or | ||||
| 	// 16-byte loads and stores. This technique probably wouldn't be as | ||||
| 	// effective on architectures that are fussier about alignment. | ||||
| 	MOVOU 0(SI), X0 | ||||
| 	MOVOU X0, 0(DI) | ||||
|  | ||||
| 	// d += length | ||||
| 	// s += length | ||||
| 	ADDQ CX, DI | ||||
| 	ADDQ CX, SI | ||||
| 	JMP  loop | ||||
|  | ||||
| callMemmove: | ||||
| 	// if length > len(dst)-d || length > len(src)-s { etc } | ||||
| 	CMPQ CX, AX | ||||
| 	JGT  errCorrupt | ||||
| 	CMPQ CX, BX | ||||
| 	JGT  errCorrupt | ||||
|  | ||||
| 	// copy(dst[d:], src[s:s+length]) | ||||
| 	// | ||||
| 	// This means calling runtime·memmove(&dst[d], &src[s], length), so we push | ||||
| 	// DI, SI and CX as arguments. Coincidentally, we also need to spill those | ||||
| 	// three registers to the stack, to save local variables across the CALL. | ||||
| 	MOVQ DI, 0(SP) | ||||
| 	MOVQ SI, 8(SP) | ||||
| 	MOVQ CX, 16(SP) | ||||
| 	MOVQ DI, 24(SP) | ||||
| 	MOVQ SI, 32(SP) | ||||
| 	MOVQ CX, 40(SP) | ||||
| 	CALL runtime·memmove(SB) | ||||
|  | ||||
| 	// Restore local variables: unspill registers from the stack and | ||||
| 	// re-calculate R8-R13. | ||||
| 	MOVQ 24(SP), DI | ||||
| 	MOVQ 32(SP), SI | ||||
| 	MOVQ 40(SP), CX | ||||
| 	MOVQ dst_base+0(FP), R8 | ||||
| 	MOVQ dst_len+8(FP), R9 | ||||
| 	MOVQ R8, R10 | ||||
| 	ADDQ R9, R10 | ||||
| 	MOVQ src_base+24(FP), R11 | ||||
| 	MOVQ src_len+32(FP), R12 | ||||
| 	MOVQ R11, R13 | ||||
| 	ADDQ R12, R13 | ||||
|  | ||||
| 	// d += length | ||||
| 	// s += length | ||||
| 	ADDQ CX, DI | ||||
| 	ADDQ CX, SI | ||||
| 	JMP  loop | ||||
|  | ||||
| tagLit60Plus: | ||||
| 	// !!! This fragment does the | ||||
| 	// | ||||
| 	// s += x - 58; if uint(s) > uint(len(src)) { etc } | ||||
| 	// | ||||
| 	// checks. In the asm version, we code it once instead of once per switch case. | ||||
| 	ADDQ CX, SI | ||||
| 	SUBQ $58, SI | ||||
| 	MOVQ SI, BX | ||||
| 	SUBQ R11, BX | ||||
| 	CMPQ BX, R12 | ||||
| 	JA   errCorrupt | ||||
|  | ||||
| 	// case x == 60: | ||||
| 	CMPL CX, $61 | ||||
| 	JEQ  tagLit61 | ||||
| 	JA   tagLit62Plus | ||||
|  | ||||
| 	// x = uint32(src[s-1]) | ||||
| 	MOVBLZX -1(SI), CX | ||||
| 	JMP     doLit | ||||
|  | ||||
| tagLit61: | ||||
| 	// case x == 61: | ||||
| 	// x = uint32(src[s-2]) | uint32(src[s-1])<<8 | ||||
| 	MOVWLZX -2(SI), CX | ||||
| 	JMP     doLit | ||||
|  | ||||
| tagLit62Plus: | ||||
| 	CMPL CX, $62 | ||||
| 	JA   tagLit63 | ||||
|  | ||||
| 	// case x == 62: | ||||
| 	// x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 | ||||
| 	MOVWLZX -3(SI), CX | ||||
| 	MOVBLZX -1(SI), BX | ||||
| 	SHLL    $16, BX | ||||
| 	ORL     BX, CX | ||||
| 	JMP     doLit | ||||
|  | ||||
| tagLit63: | ||||
| 	// case x == 63: | ||||
| 	// x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 | ||||
| 	MOVL -4(SI), CX | ||||
| 	JMP  doLit | ||||
|  | ||||
| // The code above handles literal tags. | ||||
| // ---------------------------------------- | ||||
| // The code below handles copy tags. | ||||
|  | ||||
| tagCopy2: | ||||
| 	// case tagCopy2: | ||||
| 	// s += 3 | ||||
| 	ADDQ $3, SI | ||||
|  | ||||
| 	// if uint(s) > uint(len(src)) { etc } | ||||
| 	MOVQ SI, BX | ||||
| 	SUBQ R11, BX | ||||
| 	CMPQ BX, R12 | ||||
| 	JA   errCorrupt | ||||
|  | ||||
| 	// length = 1 + int(src[s-3])>>2 | ||||
| 	SHRQ $2, CX | ||||
| 	INCQ CX | ||||
|  | ||||
| 	// offset = int(src[s-2]) | int(src[s-1])<<8 | ||||
| 	MOVWQZX -2(SI), DX | ||||
| 	JMP     doCopy | ||||
|  | ||||
| tagCopy: | ||||
| 	// We have a copy tag. We assume that: | ||||
| 	//	- BX == src[s] & 0x03 | ||||
| 	//	- CX == src[s] | ||||
| 	CMPQ BX, $2 | ||||
| 	JEQ  tagCopy2 | ||||
| 	JA   errUC4T | ||||
|  | ||||
| 	// case tagCopy1: | ||||
| 	// s += 2 | ||||
| 	ADDQ $2, SI | ||||
|  | ||||
| 	// if uint(s) > uint(len(src)) { etc } | ||||
| 	MOVQ SI, BX | ||||
| 	SUBQ R11, BX | ||||
| 	CMPQ BX, R12 | ||||
| 	JA   errCorrupt | ||||
|  | ||||
| 	// offset = int(src[s-2])&0xe0<<3 | int(src[s-1]) | ||||
| 	MOVQ    CX, DX | ||||
| 	ANDQ    $0xe0, DX | ||||
| 	SHLQ    $3, DX | ||||
| 	MOVBQZX -1(SI), BX | ||||
| 	ORQ     BX, DX | ||||
|  | ||||
| 	// length = 4 + int(src[s-2])>>2&0x7 | ||||
| 	SHRQ $2, CX | ||||
| 	ANDQ $7, CX | ||||
| 	ADDQ $4, CX | ||||
|  | ||||
| doCopy: | ||||
| 	// This is the end of the outer "switch", when we have a copy tag. | ||||
| 	// | ||||
| 	// We assume that: | ||||
| 	//	- CX == length && CX > 0 | ||||
| 	//	- DX == offset | ||||
|  | ||||
| 	// if offset <= 0 { etc } | ||||
| 	CMPQ DX, $0 | ||||
| 	JLE  errCorrupt | ||||
|  | ||||
| 	// if d < offset { etc } | ||||
| 	MOVQ DI, BX | ||||
| 	SUBQ R8, BX | ||||
| 	CMPQ BX, DX | ||||
| 	JLT  errCorrupt | ||||
|  | ||||
| 	// if length > len(dst)-d { etc } | ||||
| 	MOVQ R10, BX | ||||
| 	SUBQ DI, BX | ||||
| 	CMPQ CX, BX | ||||
| 	JGT  errCorrupt | ||||
|  | ||||
| 	// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length | ||||
| 	// | ||||
| 	// Set: | ||||
| 	//	- R14 = len(dst)-d | ||||
| 	//	- R15 = &dst[d-offset] | ||||
| 	MOVQ R10, R14 | ||||
| 	SUBQ DI, R14 | ||||
| 	MOVQ DI, R15 | ||||
| 	SUBQ DX, R15 | ||||
|  | ||||
| 	// !!! Try a faster technique for short (16 or fewer bytes) forward copies. | ||||
| 	// | ||||
| 	// First, try using two 8-byte load/stores, similar to the doLit technique | ||||
| 	// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is | ||||
| 	// still OK if offset >= 8. Note that this has to be two 8-byte load/stores | ||||
| 	// and not one 16-byte load/store, and the first store has to be before the | ||||
| 	// second load, due to the overlap if offset is in the range [8, 16). | ||||
| 	// | ||||
| 	// if length > 16 || offset < 8 || len(dst)-d < 16 { | ||||
| 	//   goto slowForwardCopy | ||||
| 	// } | ||||
| 	// copy 16 bytes | ||||
| 	// d += length | ||||
| 	CMPQ CX, $16 | ||||
| 	JGT  slowForwardCopy | ||||
| 	CMPQ DX, $8 | ||||
| 	JLT  slowForwardCopy | ||||
| 	CMPQ R14, $16 | ||||
| 	JLT  slowForwardCopy | ||||
| 	MOVQ 0(R15), AX | ||||
| 	MOVQ AX, 0(DI) | ||||
| 	MOVQ 8(R15), BX | ||||
| 	MOVQ BX, 8(DI) | ||||
| 	ADDQ CX, DI | ||||
| 	JMP  loop | ||||
|  | ||||
| slowForwardCopy: | ||||
| 	// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we | ||||
| 	// can still try 8-byte load stores, provided we can overrun up to 10 extra | ||||
| 	// bytes. As above, the overrun will be fixed up by subsequent iterations | ||||
| 	// of the outermost loop. | ||||
| 	// | ||||
| 	// The C++ snappy code calls this technique IncrementalCopyFastPath. Its | ||||
| 	// commentary says: | ||||
| 	// | ||||
| 	// ---- | ||||
| 	// | ||||
| 	// The main part of this loop is a simple copy of eight bytes at a time | ||||
| 	// until we've copied (at least) the requested amount of bytes.  However, | ||||
| 	// if d and d-offset are less than eight bytes apart (indicating a | ||||
| 	// repeating pattern of length < 8), we first need to expand the pattern in | ||||
| 	// order to get the correct results. For instance, if the buffer looks like | ||||
| 	// this, with the eight-byte <d-offset> and <d> patterns marked as | ||||
| 	// intervals: | ||||
| 	// | ||||
| 	//    abxxxxxxxxxxxx | ||||
| 	//    [------]           d-offset | ||||
| 	//      [------]         d | ||||
| 	// | ||||
| 	// a single eight-byte copy from <d-offset> to <d> will repeat the pattern | ||||
| 	// once, after which we can move <d> two bytes without moving <d-offset>: | ||||
| 	// | ||||
| 	//    ababxxxxxxxxxx | ||||
| 	//    [------]           d-offset | ||||
| 	//        [------]       d | ||||
| 	// | ||||
| 	// and repeat the exercise until the two no longer overlap. | ||||
| 	// | ||||
| 	// This allows us to do very well in the special case of one single byte | ||||
| 	// repeated many times, without taking a big hit for more general cases. | ||||
| 	// | ||||
| 	// The worst case of extra writing past the end of the match occurs when | ||||
| 	// offset == 1 and length == 1; the last copy will read from byte positions | ||||
| 	// [0..7] and write to [4..11], whereas it was only supposed to write to | ||||
| 	// position 1. Thus, ten excess bytes. | ||||
| 	// | ||||
| 	// ---- | ||||
| 	// | ||||
| 	// That "10 byte overrun" worst case is confirmed by Go's | ||||
| 	// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy | ||||
| 	// and finishSlowForwardCopy algorithm. | ||||
| 	// | ||||
| 	// if length > len(dst)-d-10 { | ||||
| 	//   goto verySlowForwardCopy | ||||
| 	// } | ||||
| 	SUBQ $10, R14 | ||||
| 	CMPQ CX, R14 | ||||
| 	JGT  verySlowForwardCopy | ||||
|  | ||||
| makeOffsetAtLeast8: | ||||
| 	// !!! As above, expand the pattern so that offset >= 8 and we can use | ||||
| 	// 8-byte load/stores. | ||||
| 	// | ||||
| 	// for offset < 8 { | ||||
| 	//   copy 8 bytes from dst[d-offset:] to dst[d:] | ||||
| 	//   length -= offset | ||||
| 	//   d      += offset | ||||
| 	//   offset += offset | ||||
| 	//   // The two previous lines together means that d-offset, and therefore | ||||
| 	//   // R15, is unchanged. | ||||
| 	// } | ||||
| 	CMPQ DX, $8 | ||||
| 	JGE  fixUpSlowForwardCopy | ||||
| 	MOVQ (R15), BX | ||||
| 	MOVQ BX, (DI) | ||||
| 	SUBQ DX, CX | ||||
| 	ADDQ DX, DI | ||||
| 	ADDQ DX, DX | ||||
| 	JMP  makeOffsetAtLeast8 | ||||
|  | ||||
| fixUpSlowForwardCopy: | ||||
| 	// !!! Add length (which might be negative now) to d (implied by DI being | ||||
| 	// &dst[d]) so that d ends up at the right place when we jump back to the | ||||
| 	// top of the loop. Before we do that, though, we save DI to AX so that, if | ||||
| 	// length is positive, copying the remaining length bytes will write to the | ||||
| 	// right place. | ||||
| 	MOVQ DI, AX | ||||
| 	ADDQ CX, DI | ||||
|  | ||||
| finishSlowForwardCopy: | ||||
| 	// !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative | ||||
| 	// length means that we overrun, but as above, that will be fixed up by | ||||
| 	// subsequent iterations of the outermost loop. | ||||
| 	CMPQ CX, $0 | ||||
| 	JLE  loop | ||||
| 	MOVQ (R15), BX | ||||
| 	MOVQ BX, (AX) | ||||
| 	ADDQ $8, R15 | ||||
| 	ADDQ $8, AX | ||||
| 	SUBQ $8, CX | ||||
| 	JMP  finishSlowForwardCopy | ||||
|  | ||||
| verySlowForwardCopy: | ||||
| 	// verySlowForwardCopy is a simple implementation of forward copy. In C | ||||
| 	// parlance, this is a do/while loop instead of a while loop, since we know | ||||
| 	// that length > 0. In Go syntax: | ||||
| 	// | ||||
| 	// for { | ||||
| 	//   dst[d] = dst[d - offset] | ||||
| 	//   d++ | ||||
| 	//   length-- | ||||
| 	//   if length == 0 { | ||||
| 	//     break | ||||
| 	//   } | ||||
| 	// } | ||||
| 	MOVB (R15), BX | ||||
| 	MOVB BX, (DI) | ||||
| 	INCQ R15 | ||||
| 	INCQ DI | ||||
| 	DECQ CX | ||||
| 	JNZ  verySlowForwardCopy | ||||
| 	JMP  loop | ||||
|  | ||||
| // The code above handles copy tags. | ||||
| // ---------------------------------------- | ||||
|  | ||||
| end: | ||||
| 	// This is the end of the "for s < len(src)". | ||||
| 	// | ||||
| 	// if d != len(dst) { etc } | ||||
| 	CMPQ DI, R10 | ||||
| 	JNE  errCorrupt | ||||
|  | ||||
| 	// return 0 | ||||
| 	MOVQ $0, ret+48(FP) | ||||
| 	RET | ||||
|  | ||||
| errCorrupt: | ||||
| 	// return decodeErrCodeCorrupt | ||||
| 	MOVQ $1, ret+48(FP) | ||||
| 	RET | ||||
|  | ||||
| errUC4T: | ||||
| 	// return decodeErrCodeUnsupportedCopy4Tag | ||||
| 	MOVQ $3, ret+48(FP) | ||||
| 	RET | ||||
							
								
								
									
96  vendor/github.com/golang/snappy/decode_other.go  generated  vendored  Normal file
							| @@ -0,0 +1,96 @@ | ||||
| // Copyright 2016 The Snappy-Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| // +build !amd64 | ||||
|  | ||||
| package snappy | ||||
|  | ||||
| // decode writes the decoding of src to dst. It assumes that the varint-encoded | ||||
| // length of the decompressed bytes has already been read, and that len(dst) | ||||
| // equals that length. | ||||
| // | ||||
| // It returns 0 on success or a decodeErrCodeXxx error code on failure. | ||||
| func decode(dst, src []byte) int { | ||||
| 	var d, s, offset, length int | ||||
| 	for s < len(src) { | ||||
| 		switch src[s] & 0x03 { | ||||
| 		case tagLiteral: | ||||
| 			x := uint32(src[s] >> 2) | ||||
| 			switch { | ||||
| 			case x < 60: | ||||
| 				s++ | ||||
| 			case x == 60: | ||||
| 				s += 2 | ||||
| 				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. | ||||
| 					return decodeErrCodeCorrupt | ||||
| 				} | ||||
| 				x = uint32(src[s-1]) | ||||
| 			case x == 61: | ||||
| 				s += 3 | ||||
| 				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. | ||||
| 					return decodeErrCodeCorrupt | ||||
| 				} | ||||
| 				x = uint32(src[s-2]) | uint32(src[s-1])<<8 | ||||
| 			case x == 62: | ||||
| 				s += 4 | ||||
| 				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. | ||||
| 					return decodeErrCodeCorrupt | ||||
| 				} | ||||
| 				x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 | ||||
| 			case x == 63: | ||||
| 				s += 5 | ||||
| 				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. | ||||
| 					return decodeErrCodeCorrupt | ||||
| 				} | ||||
| 				x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 | ||||
| 			} | ||||
| 			length = int(x) + 1 | ||||
| 			if length <= 0 { | ||||
| 				return decodeErrCodeUnsupportedLiteralLength | ||||
| 			} | ||||
| 			if length > len(dst)-d || length > len(src)-s { | ||||
| 				return decodeErrCodeCorrupt | ||||
| 			} | ||||
| 			copy(dst[d:], src[s:s+length]) | ||||
| 			d += length | ||||
| 			s += length | ||||
| 			continue | ||||
|  | ||||
| 		case tagCopy1: | ||||
| 			s += 2 | ||||
| 			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. | ||||
| 				return decodeErrCodeCorrupt | ||||
| 			} | ||||
| 			length = 4 + int(src[s-2])>>2&0x7 | ||||
| 			offset = int(src[s-2])&0xe0<<3 | int(src[s-1]) | ||||
|  | ||||
| 		case tagCopy2: | ||||
| 			s += 3 | ||||
| 			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. | ||||
| 				return decodeErrCodeCorrupt | ||||
| 			} | ||||
| 			length = 1 + int(src[s-3])>>2 | ||||
| 			offset = int(src[s-2]) | int(src[s-1])<<8 | ||||
|  | ||||
| 		case tagCopy4: | ||||
| 			return decodeErrCodeUnsupportedCopy4Tag | ||||
| 		} | ||||
|  | ||||
| 		if offset <= 0 || d < offset || length > len(dst)-d { | ||||
| 			return decodeErrCodeCorrupt | ||||
| 		} | ||||
| 		// Copy from an earlier sub-slice of dst to a later sub-slice. Unlike | ||||
| 		// the built-in copy function, this byte-by-byte copy always runs | ||||
| 		// forwards, even if the slices overlap. Conceptually, this is: | ||||
| 		// | ||||
| 		// d += forwardCopy(dst[d:d+length], dst[d-offset:]) | ||||
| 		for end := d + length; d != end; d++ { | ||||
| 			dst[d] = dst[d-offset] | ||||
| 		} | ||||
| 	} | ||||
| 	if d != len(dst) { | ||||
| 		return decodeErrCodeCorrupt | ||||
| 	} | ||||
| 	return 0 | ||||
| } | ||||
							
								
								
									
403  vendor/github.com/golang/snappy/encode.go  generated  vendored  Normal file
							| @@ -0,0 +1,403 @@ | ||||
| // Copyright 2011 The Snappy-Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package snappy | ||||
|  | ||||
| import ( | ||||
| 	"encoding/binary" | ||||
| 	"errors" | ||||
| 	"io" | ||||
| ) | ||||
|  | ||||
| // maxOffset limits how far copy back-references can go, the same as the C++ | ||||
| // code. | ||||
| const maxOffset = 1 << 15 | ||||
|  | ||||
| // emitLiteral writes a literal chunk and returns the number of bytes written. | ||||
| func emitLiteral(dst, lit []byte) int { | ||||
| 	i, n := 0, uint(len(lit)-1) | ||||
| 	switch { | ||||
| 	case n < 60: | ||||
| 		dst[0] = uint8(n)<<2 | tagLiteral | ||||
| 		i = 1 | ||||
| 	case n < 1<<8: | ||||
| 		dst[0] = 60<<2 | tagLiteral | ||||
| 		dst[1] = uint8(n) | ||||
| 		i = 2 | ||||
| 	case n < 1<<16: | ||||
| 		dst[0] = 61<<2 | tagLiteral | ||||
| 		dst[1] = uint8(n) | ||||
| 		dst[2] = uint8(n >> 8) | ||||
| 		i = 3 | ||||
| 	case n < 1<<24: | ||||
| 		dst[0] = 62<<2 | tagLiteral | ||||
| 		dst[1] = uint8(n) | ||||
| 		dst[2] = uint8(n >> 8) | ||||
| 		dst[3] = uint8(n >> 16) | ||||
| 		i = 4 | ||||
| 	case int64(n) < 1<<32: | ||||
| 		dst[0] = 63<<2 | tagLiteral | ||||
| 		dst[1] = uint8(n) | ||||
| 		dst[2] = uint8(n >> 8) | ||||
| 		dst[3] = uint8(n >> 16) | ||||
| 		dst[4] = uint8(n >> 24) | ||||
| 		i = 5 | ||||
| 	default: | ||||
| 		panic("snappy: source buffer is too long") | ||||
| 	} | ||||
| 	if copy(dst[i:], lit) != len(lit) { | ||||
| 		panic("snappy: destination buffer is too short") | ||||
| 	} | ||||
| 	return i + len(lit) | ||||
| } | ||||
|  | ||||
| // emitCopy writes a copy chunk and returns the number of bytes written. | ||||
| func emitCopy(dst []byte, offset, length int32) int { | ||||
| 	i := 0 | ||||
| 	for length > 0 { | ||||
| 		x := length - 4 | ||||
| 		if 0 <= x && x < 1<<3 && offset < 1<<11 { | ||||
| 			dst[i+0] = uint8(offset>>8)&0x07<<5 | uint8(x)<<2 | tagCopy1 | ||||
| 			dst[i+1] = uint8(offset) | ||||
| 			i += 2 | ||||
| 			break | ||||
| 		} | ||||
|  | ||||
| 		x = length | ||||
| 		if x > 1<<6 { | ||||
| 			x = 1 << 6 | ||||
| 		} | ||||
| 		dst[i+0] = uint8(x-1)<<2 | tagCopy2 | ||||
| 		dst[i+1] = uint8(offset) | ||||
| 		dst[i+2] = uint8(offset >> 8) | ||||
| 		i += 3 | ||||
| 		length -= x | ||||
| 	} | ||||
| 	return i | ||||
| } | ||||
|  | ||||
| // Encode returns the encoded form of src. The returned slice may be a sub- | ||||
| // slice of dst if dst was large enough to hold the entire encoded block. | ||||
| // Otherwise, a newly allocated slice will be returned. | ||||
| // | ||||
| // It is valid to pass a nil dst. | ||||
| func Encode(dst, src []byte) []byte { | ||||
| 	if n := MaxEncodedLen(len(src)); n < 0 { | ||||
| 		panic(ErrTooLarge) | ||||
| 	} else if len(dst) < n { | ||||
| 		dst = make([]byte, n) | ||||
| 	} | ||||
|  | ||||
| 	// The block starts with the varint-encoded length of the decompressed bytes. | ||||
| 	d := binary.PutUvarint(dst, uint64(len(src))) | ||||
|  | ||||
| 	for len(src) > 0 { | ||||
| 		p := src | ||||
| 		src = nil | ||||
| 		if len(p) > maxBlockSize { | ||||
| 			p, src = p[:maxBlockSize], p[maxBlockSize:] | ||||
| 		} | ||||
| 		d += encodeBlock(dst[d:], p) | ||||
| 	} | ||||
| 	return dst[:d] | ||||
| } | ||||
|  | ||||
| // encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It | ||||
| // assumes that the varint-encoded length of the decompressed bytes has already | ||||
| // been written. | ||||
| // | ||||
| // It also assumes that: | ||||
| //	len(dst) >= MaxEncodedLen(len(src)) && | ||||
| // 	0 < len(src) && len(src) <= maxBlockSize | ||||
| func encodeBlock(dst, src []byte) (d int) { | ||||
| 	// Return early if src is short. | ||||
| 	if len(src) <= 4 { | ||||
| 		return emitLiteral(dst, src) | ||||
| 	} | ||||
|  | ||||
| 	// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. | ||||
| 	const maxTableSize = 1 << 14 | ||||
| 	shift, tableSize := uint(32-8), 1<<8 | ||||
| 	for tableSize < maxTableSize && tableSize < len(src) { | ||||
| 		shift-- | ||||
| 		tableSize *= 2 | ||||
| 	} | ||||
| 	var table [maxTableSize]int32 | ||||
|  | ||||
| 	// Iterate over the source bytes. | ||||
| 	var ( | ||||
| 		s   int32 // The iterator position. | ||||
| 		t   int32 // The last position with the same hash as s. | ||||
| 		lit int32 // The start position of any pending literal bytes. | ||||
|  | ||||
| 		// Copied from the C++ snappy implementation: | ||||
| 		// | ||||
| 		// Heuristic match skipping: If 32 bytes are scanned with no matches | ||||
| 		// found, start looking only at every other byte. If 32 more bytes are | ||||
| 		// scanned, look at every third byte, etc.. When a match is found, | ||||
| 		// immediately go back to looking at every byte. This is a small loss | ||||
| 		// (~5% performance, ~0.1% density) for compressible data due to more | ||||
| 		// bookkeeping, but for non-compressible data (such as JPEG) it's a | ||||
| 		// huge win since the compressor quickly "realizes" the data is | ||||
| 		// incompressible and doesn't bother looking for matches everywhere. | ||||
| 		// | ||||
| 		// The "skip" variable keeps track of how many bytes there are since | ||||
| 		// the last match; dividing it by 32 (ie. right-shifting by five) gives | ||||
| 		// the number of bytes to move ahead for each iteration. | ||||
| 		skip uint32 = 32 | ||||
| 	) | ||||
| 	for uint32(s+3) < uint32(len(src)) { // The uint32 conversions catch overflow from the +3. | ||||
| 		// Update the hash table. | ||||
| 		b0, b1, b2, b3 := src[s], src[s+1], src[s+2], src[s+3] | ||||
| 		h := uint32(b0) | uint32(b1)<<8 | uint32(b2)<<16 | uint32(b3)<<24 | ||||
| 		p := &table[(h*0x1e35a7bd)>>shift] | ||||
| 		// We need to store values in [-1, inf) in table. To save | ||||
| 		// some initialization time, (re)use the table's zero value | ||||
| 		// and shift the values against this zero: add 1 on writes, | ||||
| 		// subtract 1 on reads. | ||||
| 		t, *p = *p-1, s+1 | ||||
| 		// If t is invalid or src[s:s+4] differs from src[t:t+4], accumulate a literal byte. | ||||
| 		if t < 0 || s-t >= maxOffset || b0 != src[t] || b1 != src[t+1] || b2 != src[t+2] || b3 != src[t+3] { | ||||
| 			s += int32(skip >> 5) | ||||
| 			skip++ | ||||
| 			continue | ||||
| 		} | ||||
| 		skip = 32 | ||||
| 		// Otherwise, we have a match. First, emit any pending literal bytes. | ||||
| 		if lit != s { | ||||
| 			d += emitLiteral(dst[d:], src[lit:s]) | ||||
| 		} | ||||
| 		// Extend the match to be as long as possible. | ||||
| 		s0 := s | ||||
| 		s, t = s+4, t+4 | ||||
| 		for int(s) < len(src) && src[s] == src[t] { | ||||
| 			s++ | ||||
| 			t++ | ||||
| 		} | ||||
| 		// Emit the copied bytes. | ||||
| 		d += emitCopy(dst[d:], s-t, s-s0) | ||||
| 		lit = s | ||||
| 	} | ||||
|  | ||||
| 	// Emit any final pending literal bytes and return. | ||||
| 	if int(lit) != len(src) { | ||||
| 		d += emitLiteral(dst[d:], src[lit:]) | ||||
| 	} | ||||
| 	return d | ||||
| } | ||||
|  | ||||
| // MaxEncodedLen returns the maximum length of a snappy block, given its | ||||
| // uncompressed length. | ||||
| // | ||||
| // It will return a negative value if srcLen is too large to encode. | ||||
| func MaxEncodedLen(srcLen int) int { | ||||
| 	n := uint64(srcLen) | ||||
| 	if n > 0xffffffff { | ||||
| 		return -1 | ||||
| 	} | ||||
| 	// Compressed data can be defined as: | ||||
| 	//    compressed := item* literal* | ||||
| 	//    item       := literal* copy | ||||
| 	// | ||||
| 	// The trailing literal sequence has a space blowup of at most 62/60 | ||||
| 	// since a literal of length 60 needs one tag byte + one extra byte | ||||
| 	// for length information. | ||||
| 	// | ||||
| 	// Item blowup is trickier to measure. Suppose the "copy" op copies | ||||
| 	// 4 bytes of data. Because of a special check in the encoding code, | ||||
| 	// we produce a 4-byte copy only if the offset is < 65536. Therefore | ||||
| 	// the copy op takes 3 bytes to encode, and this type of item leads | ||||
| 	// to at most the 62/60 blowup for representing literals. | ||||
| 	// | ||||
| 	// Suppose the "copy" op copies 5 bytes of data. If the offset is big | ||||
| 	// enough, it will take 5 bytes to encode the copy op. Therefore the | ||||
| 	// worst case here is a one-byte literal followed by a five-byte copy. | ||||
| 	// That is, 6 bytes of input turn into 7 bytes of "compressed" data. | ||||
| 	// | ||||
| 	// This last factor dominates the blowup, so the final estimate is: | ||||
| 	n = 32 + n + n/6 | ||||
| 	if n > 0xffffffff { | ||||
| 		return -1 | ||||
| 	} | ||||
| 	return int(n) | ||||
| } | ||||
|  | ||||
| var errClosed = errors.New("snappy: Writer is closed") | ||||
|  | ||||
| // NewWriter returns a new Writer that compresses to w. | ||||
| // | ||||
| // The Writer returned does not buffer writes. There is no need to Flush or | ||||
| // Close such a Writer. | ||||
| // | ||||
| // Deprecated: the Writer returned is not suitable for many small writes, only | ||||
| // for few large writes. Use NewBufferedWriter instead, which is efficient | ||||
| // regardless of the frequency and shape of the writes, and remember to Close | ||||
| // that Writer when done. | ||||
| func NewWriter(w io.Writer) *Writer { | ||||
| 	return &Writer{ | ||||
| 		w:    w, | ||||
| 		obuf: make([]byte, obufLen), | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // NewBufferedWriter returns a new Writer that compresses to w, using the | ||||
| // framing format described at | ||||
| // https://github.com/google/snappy/blob/master/framing_format.txt | ||||
| // | ||||
| // The Writer returned buffers writes. Users must call Close to guarantee all | ||||
| // data has been forwarded to the underlying io.Writer. They may also call | ||||
| // Flush zero or more times before calling Close. | ||||
| func NewBufferedWriter(w io.Writer) *Writer { | ||||
| 	return &Writer{ | ||||
| 		w:    w, | ||||
| 		ibuf: make([]byte, 0, maxBlockSize), | ||||
| 		obuf: make([]byte, obufLen), | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Writer is an io.Writer that can write Snappy-compressed bytes. | ||||
| type Writer struct { | ||||
| 	w   io.Writer | ||||
| 	err error | ||||
|  | ||||
| 	// ibuf is a buffer for the incoming (uncompressed) bytes. | ||||
| 	// | ||||
| 	// Its use is optional. For backwards compatibility, Writers created by the | ||||
| 	// NewWriter function have ibuf == nil, do not buffer incoming bytes, and | ||||
| 	// therefore do not need to be Flush'ed or Close'd. | ||||
| 	ibuf []byte | ||||
|  | ||||
| 	// obuf is a buffer for the outgoing (compressed) bytes. | ||||
| 	obuf []byte | ||||
|  | ||||
| 	// wroteStreamHeader is whether we have written the stream header. | ||||
| 	wroteStreamHeader bool | ||||
| } | ||||
|  | ||||
| // Reset discards the writer's state and switches the Snappy writer to write to | ||||
| // w. This permits reusing a Writer rather than allocating a new one. | ||||
| func (w *Writer) Reset(writer io.Writer) { | ||||
| 	w.w = writer | ||||
| 	w.err = nil | ||||
| 	if w.ibuf != nil { | ||||
| 		w.ibuf = w.ibuf[:0] | ||||
| 	} | ||||
| 	w.wroteStreamHeader = false | ||||
| } | ||||
|  | ||||
| // Write satisfies the io.Writer interface. | ||||
| func (w *Writer) Write(p []byte) (nRet int, errRet error) { | ||||
| 	if w.ibuf == nil { | ||||
| 		// Do not buffer incoming bytes. This does not perform or compress well | ||||
| 		// if the caller of Writer.Write writes many small slices. This | ||||
| 		// behavior is therefore deprecated, but still supported for backwards | ||||
| 		// compatibility with code that doesn't explicitly Flush or Close. | ||||
| 		return w.write(p) | ||||
| 	} | ||||
|  | ||||
| 	// The remainder of this method is based on bufio.Writer.Write from the | ||||
| 	// standard library. | ||||
|  | ||||
| 	for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { | ||||
| 		var n int | ||||
| 		if len(w.ibuf) == 0 { | ||||
| 			// Large write, empty buffer. | ||||
| 			// Write directly from p to avoid copy. | ||||
| 			n, _ = w.write(p) | ||||
| 		} else { | ||||
| 			n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) | ||||
| 			w.ibuf = w.ibuf[:len(w.ibuf)+n] | ||||
| 			w.Flush() | ||||
| 		} | ||||
| 		nRet += n | ||||
| 		p = p[n:] | ||||
| 	} | ||||
| 	if w.err != nil { | ||||
| 		return nRet, w.err | ||||
| 	} | ||||
| 	n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) | ||||
| 	w.ibuf = w.ibuf[:len(w.ibuf)+n] | ||||
| 	nRet += n | ||||
| 	return nRet, nil | ||||
| } | ||||
|  | ||||
| func (w *Writer) write(p []byte) (nRet int, errRet error) { | ||||
| 	if w.err != nil { | ||||
| 		return 0, w.err | ||||
| 	} | ||||
| 	for len(p) > 0 { | ||||
| 		obufStart := len(magicChunk) | ||||
| 		if !w.wroteStreamHeader { | ||||
| 			w.wroteStreamHeader = true | ||||
| 			copy(w.obuf, magicChunk) | ||||
| 			obufStart = 0 | ||||
| 		} | ||||
|  | ||||
| 		var uncompressed []byte | ||||
| 		if len(p) > maxBlockSize { | ||||
| 			uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] | ||||
| 		} else { | ||||
| 			uncompressed, p = p, nil | ||||
| 		} | ||||
| 		checksum := crc(uncompressed) | ||||
|  | ||||
| 		// Compress the buffer, discarding the result if the improvement | ||||
| 		// isn't at least 12.5%. | ||||
| 		compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) | ||||
| 		chunkType := uint8(chunkTypeCompressedData) | ||||
| 		chunkLen := 4 + len(compressed) | ||||
| 		obufEnd := obufHeaderLen + len(compressed) | ||||
| 		if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { | ||||
| 			chunkType = chunkTypeUncompressedData | ||||
| 			chunkLen = 4 + len(uncompressed) | ||||
| 			obufEnd = obufHeaderLen | ||||
| 		} | ||||
|  | ||||
| 		// Fill in the per-chunk header that comes before the body. | ||||
| 		w.obuf[len(magicChunk)+0] = chunkType | ||||
| 		w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) | ||||
| 		w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) | ||||
| 		w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) | ||||
| 		w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) | ||||
| 		w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) | ||||
| 		w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) | ||||
| 		w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) | ||||
|  | ||||
| 		if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { | ||||
| 			w.err = err | ||||
| 			return nRet, err | ||||
| 		} | ||||
| 		if chunkType == chunkTypeUncompressedData { | ||||
| 			if _, err := w.w.Write(uncompressed); err != nil { | ||||
| 				w.err = err | ||||
| 				return nRet, err | ||||
| 			} | ||||
| 		} | ||||
| 		nRet += len(uncompressed) | ||||
| 	} | ||||
| 	return nRet, nil | ||||
| } | ||||
|  | ||||
| // Flush flushes the Writer to its underlying io.Writer. | ||||
| func (w *Writer) Flush() error { | ||||
| 	if w.err != nil { | ||||
| 		return w.err | ||||
| 	} | ||||
| 	if len(w.ibuf) == 0 { | ||||
| 		return nil | ||||
| 	} | ||||
| 	w.write(w.ibuf) | ||||
| 	w.ibuf = w.ibuf[:0] | ||||
| 	return w.err | ||||
| } | ||||
|  | ||||
| // Close calls Flush and then closes the Writer. | ||||
| func (w *Writer) Close() error { | ||||
| 	w.Flush() | ||||
| 	ret := w.err | ||||
| 	if w.err == nil { | ||||
| 		w.err = errClosed | ||||
| 	} | ||||
| 	return ret | ||||
| } | ||||
							
								
								
									
84  vendor/github.com/golang/snappy/snappy.go  generated  vendored  Normal file
							| @@ -0,0 +1,84 @@ | ||||
| // Copyright 2011 The Snappy-Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| // Package snappy implements the snappy block-based compression format. | ||||
| // It aims for very high speeds and reasonable compression. | ||||
| // | ||||
| // The C++ snappy implementation is at https://github.com/google/snappy | ||||
| package snappy // import "github.com/golang/snappy" | ||||
|  | ||||
| import ( | ||||
| 	"hash/crc32" | ||||
| ) | ||||
|  | ||||
| /* | ||||
| Each encoded block begins with the varint-encoded length of the decoded data, | ||||
| followed by a sequence of chunks. Chunks begin and end on byte boundaries. The | ||||
| first byte of each chunk is broken into its 2 least and 6 most significant bits | ||||
| called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. | ||||
| Zero means a literal tag. All other values mean a copy tag. | ||||
|  | ||||
| For literal tags: | ||||
|   - If m < 60, the next 1 + m bytes are literal bytes. | ||||
|   - Otherwise, let n be the little-endian unsigned integer denoted by the next | ||||
|     m - 59 bytes. The next 1 + n bytes after that are literal bytes. | ||||
|  | ||||
| For copy tags, length bytes are copied from offset bytes ago, in the style of | ||||
| Lempel-Ziv compression algorithms. In particular: | ||||
|   - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). | ||||
|     The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 | ||||
|     of the offset. The next byte is bits 0-7 of the offset. | ||||
|   - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). | ||||
|     The length is 1 + m. The offset is the little-endian unsigned integer | ||||
|     denoted by the next 2 bytes. | ||||
|   - For l == 3, this tag is a legacy format that is no longer supported. | ||||
| */ | ||||
| const ( | ||||
| 	tagLiteral = 0x00 | ||||
| 	tagCopy1   = 0x01 | ||||
| 	tagCopy2   = 0x02 | ||||
| 	tagCopy4   = 0x03 | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| 	checksumSize    = 4 | ||||
| 	chunkHeaderSize = 4 | ||||
| 	magicChunk      = "\xff\x06\x00\x00" + magicBody | ||||
| 	magicBody       = "sNaPpY" | ||||
|  | ||||
| 	// maxBlockSize is the maximum size of the input to encodeBlock. It is not | ||||
| 	// part of the wire format per se, but some parts of the encoder assume | ||||
| 	// that an offset fits into a uint16. | ||||
| 	// | ||||
| 	// Also, for the framing format (Writer type instead of Encode function), | ||||
| 	// https://github.com/google/snappy/blob/master/framing_format.txt says | ||||
| 	// that "the uncompressed data in a chunk must be no longer than 65536 | ||||
| 	// bytes". | ||||
| 	maxBlockSize = 65536 | ||||
|  | ||||
| 	// maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is | ||||
| 	// hard coded to be a const instead of a variable, so that obufLen can also | ||||
| 	// be a const. Their equivalence is confirmed by | ||||
| 	// TestMaxEncodedLenOfMaxBlockSize. | ||||
| 	maxEncodedLenOfMaxBlockSize = 76490 | ||||
|  | ||||
| 	obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize | ||||
| 	obufLen       = obufHeaderLen + maxEncodedLenOfMaxBlockSize | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| 	chunkTypeCompressedData   = 0x00 | ||||
| 	chunkTypeUncompressedData = 0x01 | ||||
| 	chunkTypePadding          = 0xfe | ||||
| 	chunkTypeStreamIdentifier = 0xff | ||||
| ) | ||||
|  | ||||
| var crcTable = crc32.MakeTable(crc32.Castagnoli) | ||||
|  | ||||
| // crc implements the checksum specified in section 3 of | ||||
| // https://github.com/google/snappy/blob/master/framing_format.txt | ||||
| func crc(b []byte) uint32 { | ||||
| 	c := crc32.Update(0, crcTable, b) | ||||
| 	return uint32(c>>15|c<<17) + 0xa282ead8 | ||||
| } | ||||
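For reference, the block-level API vendored above (Encode and Decode, whose signatures appear in encode.go and decode.go in this diff) can be exercised with a small round-trip program. This is only an illustrative usage sketch and is not part of the commit.

package main

import (
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	src := []byte("hello hello hello hello snappy")

	// Passing a nil dst is valid; Encode allocates a buffer of up to
	// MaxEncodedLen(len(src)) bytes itself.
	compressed := snappy.Encode(nil, src)

	// Decode reverses the block encoding and reports corrupt input.
	decompressed, err := snappy.Decode(nil, compressed)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d -> %d bytes, round-trip ok: %v\n",
		len(src), len(compressed), string(decompressed) == string(src))
}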