diff --git a/Binary/ReadMe.txt b/Binary/ReadMe.txt index 9cef6740..6118199f 100644 --- a/Binary/ReadMe.txt +++ b/Binary/ReadMe.txt @@ -48,7 +48,7 @@ ROM Firmware Update Images (_.upd) The files with a ".upd" extension are binary images identical to the .rom files, but they only have the first 128K bytes. The first 128K is the system image without the ROM disk contents. These files can be -used to update the system image without modifyuing the ROM disk +used to update the system image without modifying the ROM disk contents. Refer to the Getting Started document for more information. ROM Executable Images (_.com) diff --git a/Source/HBIOS/Build.ps1 b/Source/HBIOS/Build.ps1 index 2610b25e..f2f5ee3f 100644 --- a/Source/HBIOS/Build.ps1 +++ b/Source/HBIOS/Build.ps1 @@ -234,29 +234,20 @@ cpmchattr -f $RomFmt $RomDiskFile r 0:*.* # Finally, the individual binary components are concatenated together to produce # the final images. # -$SystemFileList = "hbios_rom.bin", "osimg.bin", "osimg1.bin", "osimg.bin" if ($Platform -eq "UNA") { Copy-Item 'osimg.bin' ${OutDir}\UNA_WBW_SYS.bin Copy-Item $RomDiskFile ${OutDir}\UNA_WBW_ROM${ROMSize}.bin Concat '..\UBIOS\UNA-BIOS.BIN','osimg.bin','..\UBIOS\FSFAT.BIN',$RomDiskFile $RomFile - Remove-Item $RomDiskFile } else { Concat 'hbios_rom.bin','osimg.bin','osimg1.bin','osimg.bin',$RomDiskFile $RomFile + Concat 'hbios_rom.bin','osimg.bin','osimg1.bin','osimg.bin' $UpdFile Concat 'hbios_app.bin','osimg_small.bin' $ComFile # Concat 'hbios_img.bin','osimg_small.bin' $ImgFile - Remove-Item $RomDiskFile - Set-Content $UpdFile -Value $null - foreach ($InputFile in $SystemFileList) - { - Copy-Item $InputFile $RomDiskFile - Add-Content $UpdFile -Value ([System.IO.File]::ReadAllBytes($RomDiskFile)) -Encoding byte - Remove-Item $RomDiskFile - } } # Remove the temporary working ROM disk file -#Remove-Item $RomDiskFile +Remove-Item $RomDiskFile diff --git a/Source/HBIOS/updater.asm b/Source/HBIOS/updater.asm index 6d19c394..b402d0ba 100644 --- a/Source/HBIOS/updater.asm +++ b/Source/HBIOS/updater.asm @@ -58,7 +58,7 @@ ; IN THE CASE OF A FAILED UPDATE THIS OPTION COULD BE USED TO ATTEMPT TO ; LOAD CP/M AND PERFORM THE NORMAL XMODEM / FLASH PROCESS TO RECOVER. ; -; OPTION (H) - DEBUG OPTION - SWITCH ON CPU CLCOK DIVIDER ON SBC-V2-004+ +; OPTION (H) - DEBUG OPTION - SWITCH ON CPU CLOCK DIVIDER ON SBC-V2-004+ ; OPTION (T) - DEBUG OPTION - TEST TIMER FOR 20 SECONDS. 
* = START AND FINISH ; ; @@ -100,6 +100,8 @@ HBX_BNKSEL .EQU $FE2B HBX_START .EQU $FE00 ; +BIOSBID .EQU $00 ; BIOS BANK ID NUMBER +; #DEFINE HB_DI DI #DEFINE HB_EI EI ; @@ -130,6 +132,23 @@ BSPC: .EQU 'H'-40h ; ^H = Backspace LD DE,HBX_START-MD_CSIZ ; ROUTINES TO LD BC,MD_CSIZ ; HIGH MEMORY LDIR +; + LD BC,$F8F2 ; LOOKUP + RST 08 ; CURRENT + LD B,$FA ; CONSOLE + LD HL,$112 ; DEVICE + RST 08 ; TO USE AS + LD A,E ; DEFAULT + LD (CONDEV),A +; + LD BC,$F8F2 ; LOOKUP + RST 08 ; CURRENT + LD B,$FA ; SERIAL + LD HL,$110 ; DEVICE + RST 08 ; TO USE AS + LD A,E ; DEFAULT + LD (SERDEV),A +; RESTART: LD DE,$0000 ; SET UP LD HL,$0000 ; START @@ -159,7 +178,7 @@ MENULP: CALL MENU ; DISPLAY MENU ; CP 'X' ; CHECK FOR JP Z,FAILUX ; USER EXIT - +; CP 'R' ; CHECK FOR JP Z,REBOOT ; COLD REBOOT REQUEST ; @@ -169,6 +188,9 @@ MENULP: CALL MENU ; DISPLAY MENU CP 'S' ; CHECK FOR CALL Z,OPTIONS ; SERIAL CHANGE ; +; CP 'D' ; DUPLICATE +; CALL Z,OPTIOND ; FLASH +; #IF (XFUDBG) CP 'T' ; TEST TIMEOUT CALL Z,OPTIONT ; LOOP @@ -285,7 +307,7 @@ csloop: add A,(HL) ; JUST ADD UP THE BYTES LD HL,pktNo ; UPDATE THE PACKET COUNTERS INC (HL) INC HL - dec (HL) + DEC (HL) ; LD C,ACK ; TELL UPLOADER THAT WE'RE HAPPY WITH WITH CALL SEROUT ; PACKET AND GO BACK AND FETCH SOME MORE @@ -355,21 +377,19 @@ WSEC: PUSH HL ; WRITE A OR A ; IF TRANSFERRING JR Z,WSEC1 ; OVER CONSOLE ; - LD HL,MD_SECT ; IF SECTOR IS 0 - LD A,(HL) ; THEN DISPLAY - OR A ; BANK # PREFIX - JR NZ,NXTS1 + LD A,(MD_SECT) ; IF SECTOR IS 0 + OR A ; THEN DISPLAY + JR NZ,NXTS1 ; BANK # PREFIX LD HL,msgBank CALL PRTSTR0 - LD HL,MD_BANK - LD A,(HL) + LD A,(MD_BANK) + CALL PRTHEXB ; NXTS1: LD C,' ' ; DISPLAY CALL CONOUT ; CURRENT LD C,'S' ; SECTOR CALL CONOUT - LD HL,MD_SECT - LD A,(HL) + LD A,(MD_SECT) RRCA RRCA RRCA @@ -511,6 +531,9 @@ MENU1: CALL PRTSTR0 CALL PRTSTR0 ; MENU OPTIONS RET ; +OPTIOND: + RET +; OPTIONV:LD A,(WRTVER) ; TOGGLE CPL ; VERIFY LD (WRTVER),A ; FLAG @@ -1031,6 +1054,7 @@ msgWriteV: .DB CR,LF,"(V) Toggle Write Verify : ",0 msgBegin: .DB CR,LF,"(R) Reboot" .DB CR,LF,"(U) Begin Update" .DB CR,LF,"(X) Exit to Rom Loader" +; .DB CR,LF,"(D) Duplicate ROM #1 to #2" #IF (XFUDBG) .DB CR,LF,"(H) Select half speed" .DB CR,LF,"(T) Test 20s timeout" diff --git a/Tools/unix/lzsa/BlockFormat_LZSA2.md b/Tools/unix/lzsa/BlockFormat_LZSA2.md index 15ec3781..2a9b6c3f 100644 --- a/Tools/unix/lzsa/BlockFormat_LZSA2.md +++ b/Tools/unix/lzsa/BlockFormat_LZSA2.md @@ -44,7 +44,7 @@ The match offset is decoded according to the XYZ bits in the token XYZ 00Z 5-bit offset: read a nibble for offset bits 1-4 and use the inverted bit Z of the token as bit 0 of the offset. set bits 5-15 of the offset to 1. 01Z 9-bit offset: read a byte for offset bits 0-7 and use the inverted bit Z for bit 8 of the offset. set bits 9-15 of the offset to 1. - 10Z 13-bit offset: read a nibble for offset bits 9-12 and use the inverted bit Z for bit 8 of the offset, then read a byte for offset bits 0-7. set bits 13-15 of the offset to 1. + 10Z 13-bit offset: read a nibble for offset bits 9-12 and use the inverted bit Z for bit 8 of the offset, then read a byte for offset bits 0-7. set bits 13-15 of the offset to 1. substract 512 from the offset to get the final value. 110 16-bit offset: read a byte for offset bits 8-15, then another byte for offset bits 0-7. 111 repeat offset: reuse the offset value of the previous match command. 
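As a reading aid for the offset table above, here is a minimal C sketch (not part of the lzsa sources) of how an LZSA2 match offset is decoded from the token's X/Y/Z bits, including the subtract-512 step this hunk adds for 13-bit offsets. The src/nib_pending/nib_cache state and the read_byte/read_nibble helpers are illustrative stand-ins for the real block reader; the nibble ordering (high nibble of a stream byte first, low nibble cached for the next request) follows the GETNIBBLE routines that appear later in this diff.

    #include <stdint.h>

    /* Illustrative input state -- an assumption of this sketch, not lzsa API. */
    static const uint8_t *src;          /* compressed stream read pointer          */
    static int            nib_pending;  /* 1 when a cached low nibble is available */
    static uint8_t        nib_cache;

    static uint8_t read_byte(void) { return *src++; }

    /* Nibbles are packed two per stream byte, high nibble first; the low
     * nibble is kept for the next request (same scheme as GETNIBBLE). */
    static uint8_t read_nibble(void)
    {
        if (nib_pending) { nib_pending = 0; return nib_cache & 0x0F; }
        nib_cache = read_byte();
        nib_pending = 1;
        return nib_cache >> 4;
    }

    /* Decode a match offset from the token's XYZ bits per the table above.
     * The result is a negative displacement added to the write pointer. */
    static int16_t lzsa2_decode_offset(uint8_t token, int16_t prev_offset)
    {
        unsigned x = (token >> 7) & 1, y = (token >> 6) & 1, z = (token >> 5) & 1;
        uint16_t offs;

        if (!x && !y) {                 /* 00Z: 5-bit offset                    */
            offs = 0xFFE0 | (read_nibble() << 1) | (z ^ 1);
        } else if (!x) {                /* 01Z: 9-bit offset                    */
            offs = 0xFE00 | ((z ^ 1) << 8) | read_byte();
        } else if (!y) {                /* 10Z: 13-bit offset                   */
            offs  = 0xE000 | (read_nibble() << 9) | ((z ^ 1) << 8);
            offs |= read_byte();
            offs -= 512;                /* final adjustment called out above    */
        } else if (!z) {                /* 110: 16-bit offset                   */
            offs  = (uint16_t)(read_byte() << 8);
            offs |= read_byte();
        } else {                        /* 111: reuse the previous match offset */
            return prev_offset;
        }
        return (int16_t)offs;
    }

Because the unused high bits are forced to 1, every decoded offset comes out as a negative 16-bit value, which is why the assembly decompressors in this diff simply add it to the destination pointer to find the match source.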
diff --git a/Tools/unix/lzsa/README.md b/Tools/unix/lzsa/README.md index 6430ee5c..4646c8fb 100644 --- a/Tools/unix/lzsa/README.md +++ b/Tools/unix/lzsa/README.md @@ -7,6 +7,16 @@ Check out [The Hollow](https://www.pouet.net/prod.php?which=81909) by Darklite a [Gabba](https://www.pouet.net/prod.php?which=83539) by Stardust ranked 2nd in the ZX Spectrum demo compo at CAFe demoparty 2019 and also used LZSA on Z80. +[Myst Demake](http://www.deater.net/weave/vmwprod/mist/) for the Apple II by Vince Weaver, uses LZSA on 6502. + +The 8 bit guy's [Commander X16 ROM](https://github.com/commanderx16/x16-rom) uses LZSA on 6502 as well. + +[RomWBW](https://github.com/wwarthen/RomWBW) uses LZSA on Z80 for a variety of hobbyist computers. + +The popular [rasm](https://github.com/EdouardBERGE/rasm) assembler for Z80 features LZSA-compressed data sections. + +The [desolate](https://github.com/nzeemin/spectrum-desolate) game port to the ZX Spectrum uses LZSA compression on Z80. + The LZSA compression tool uses an aggressive optimal packing strategy to try to find the sequence of commands that gives the smallest packed file that decompresses to the original while maintaining the maximum possible decompression speed. The compression formats give the user choices that range from decompressing faster than LZ4 on 8-bit systems with better compression, to compressing as well as ZX7 with much better decompression speed. LZSA1 is designed to replace LZ4 and LZSA2 to replace ZX7, in 8-bit scenarios. @@ -55,7 +65,7 @@ Inspirations: * [LZ5/Lizard](https://github.com/inikep/lizard) by Przemyslaw Skibinski and Yann Collet. * The suffix array intervals in [Wimlib](https://wimlib.net/git/?p=wimlib;a=tree) by Eric Biggers. * ZX7 by Einar Saukas -* [apc](https://github.com/svendahl/cap) by Sven-Åke Dahl +* [apc](https://github.com/svendahl/cap) by Sven-Ã…ke Dahl * [Charles Bloom](http://cbloomrants.blogspot.com/)'s compression blog License: @@ -65,14 +75,19 @@ License: 8-bit assembly code: -* Z80 decompressors (size- and speed-optimized) written by [introspec](https://github.com/specke) +* Z80 decompressors (size- and speed-optimized) written by [introspec](https://github.com/specke) with optimizations by [uniabis](https://github.com/uniabis) * 6502 and 8088 size-optimized improvements by [Peter Ferrie](https://github.com/peterferrie) +* 6502 speed-optimized decompressor by [John Brandwood](https://github.com/jbrandwood) * 8088 speed-optimized decompressor by [Jim Leonard](https://github.com/mobygamer) +* 6809 decompressors (Tandy CoCo, Thomson MO/TO, Dragon 32/64..) 
optimized by [Doug Masten](https://github.com/dougmasten) +* Hitachi 6309 decompressors (Tandy CoCo 3) also contributed by [Doug Masten](https://github.com/dougmasten) External links: * [i8080 decompressors](https://gitlab.com/ivagor/lzsa8080/tree/master) by Ivan Gorodetsky * [PDP-11 decompressors](https://gitlab.com/ivagor/lzsa8080/tree/master/PDP11) also by Ivan Gorodetsky +* [MC68000 decompressors](https://github.com/tattlemuss/lz4-m68k/blob/master/src/lzsa.s) by Steven Tattersall +* [Gameboy decompressors](https://github.com/meltycode) by Meltycode, based on the Z80 code by introspec * LZSA's page on [Pouet](https://www.pouet.net/prod.php?which=81573) # Compressed format diff --git a/Tools/unix/lzsa/StreamFormat.md b/Tools/unix/lzsa/StreamFormat.md index 3f37f868..8eebb7e3 100644 --- a/Tools/unix/lzsa/StreamFormat.md +++ b/Tools/unix/lzsa/StreamFormat.md @@ -17,7 +17,7 @@ The 3-bytes LZSA header contains a signature and a traits byte: Trait bits: -* V: 3 bit code that indicates which block data encoding is used. 0 is LZSA1 and 2 is LZSA2. +* V: 3 bit code that indicates which block data encoding is used. 0 is LZSA1 and 1 is LZSA2. * Z: these bits in the traits are set to 0 for LZSA1 and LZSA2. # Frame format diff --git a/Tools/unix/lzsa/asm/6502/decompress_fast_v1.asm b/Tools/unix/lzsa/asm/6502/decompress_fast_v1.asm index b36cc176..7aa651d4 100644 --- a/Tools/unix/lzsa/asm/6502/decompress_fast_v1.asm +++ b/Tools/unix/lzsa/asm/6502/decompress_fast_v1.asm @@ -68,7 +68,7 @@ LARGE_VARLEN_LITERALS ; handle 16 bits literals count JSR GETLARGESRC ; grab low 8 bits in X, high 8 bits in A TAY ; put high 8 bits in Y TXA - JMP PREPARE_COPY_LARGE_LITERALS + BCS PREPARE_COPY_LARGE_LITERALS ; (*like JMP PREPARE_COPY_LITERALS_DIRECT but shorter) PREPARE_COPY_LITERALS TAX diff --git a/Tools/unix/lzsa/asm/6502/decompress_fast_v2.asm b/Tools/unix/lzsa/asm/6502/decompress_fast_v2.asm index 681d42d9..1e49a75d 100644 --- a/Tools/unix/lzsa/asm/6502/decompress_fast_v2.asm +++ b/Tools/unix/lzsa/asm/6502/decompress_fast_v2.asm @@ -114,11 +114,9 @@ NO_LITERALS BNE GOT_OFFSET_LO ; go store low byte of match offset and prepare match OFFSET_9_BIT ; 01Z: 9 bit offset - ;;ASL ; shift Z (offset bit 8) in place - ROL - ROL - AND #$01 - EOR #$FF ; set offset bits 15-9 to 1 + ROL ; carry: Z bit; A: xxxxxxx1 (carry known set from BCS OFFSET_9_BIT) + ADC #$00 ; if Z bit is set, add 1 to A (bit 0 of A is now 0), otherwise bit 0 is 1 + ORA #$FE ; set offset bits 15-9 to 1. reversed Z is already in bit 0 BNE GOT_OFFSET_HI ; go store high byte, read low byte of match offset and prepare match ; (*same as JMP GOT_OFFSET_HI but shorter) @@ -134,7 +132,6 @@ REPMATCH_OR_LARGE_OFFSET ; (*same as JMP GOT_OFFSET_HI but shorter) REPMATCH_OR_16_BIT ; rep-match or 16 bit offset - ;;ASL ; XYZ=111? 
BMI REP_MATCH ; reuse previous offset if so (rep-match) ; 110: handle 16 bit offset @@ -259,7 +256,6 @@ GETCOMBINEDBITS JSR GETNIBBLE ; get nibble into bits 0-3 (for offset bits 1-4) PLP ; merge Z bit as the carry bit (for offset bit 0) -COMBINEDBITZ ROL ; nibble -> bits 1-4; carry(!Z bit) -> bit 0 ; carry cleared DECOMPRESSION_DONE RTS diff --git a/Tools/unix/lzsa/asm/6502/decompress_faster_v1.asm b/Tools/unix/lzsa/asm/6502/decompress_faster_v1.asm index 93619070..1f65a40c 100644 --- a/Tools/unix/lzsa/asm/6502/decompress_faster_v1.asm +++ b/Tools/unix/lzsa/asm/6502/decompress_faster_v1.asm @@ -29,15 +29,6 @@ ; Decompression Options & Macros ; - ; - ; Save 6 bytes of code and 21 cycles by swapping the order - ; of bytes in the 16-bit length encoding? - ; - ; N.B. Setting this breaks compatibility with LZSA v1.2 - ; - -LZSA_SWAP_LEN16 = 0 - ; ; Choose size over space (within sane limits)? ; @@ -80,14 +71,6 @@ LZSA_SHORT_LZ = 1 LZSA_SHORT_LZ = 0 } - ; - ; Assume that we're decompessing from a large multi-bank - ; compressed data file, and that the next bank may need to - ; paged in when a page-boundary is crossed. - ; - -LZSA_FROM_BANK = 0 - ; ; Macro to increment the source pointer to the next page. ; @@ -95,14 +78,8 @@ LZSA_FROM_BANK = 0 ; has been crossed, and a new bank should be paged in. ; - !if LZSA_FROM_BANK { - !macro LZSA_INC_PAGE { - jsr lzsa1_next_page - } - } else { - !macro LZSA_INC_PAGE { + !macro LZSA_INC_PAGE { inc bits 1-4; carry(!Z bit) -> bit 0 ; carry cleared DECOMPRESSION_DONE RTS diff --git a/Tools/unix/lzsa/asm/65816/decompress_v1.asm b/Tools/unix/lzsa/asm/65816/decompress_v1.asm new file mode 100644 index 00000000..4754e553 --- /dev/null +++ b/Tools/unix/lzsa/asm/65816/decompress_v1.asm @@ -0,0 +1,281 @@ +; ----------------------------------------------------------------------------- +; Decompress raw LZSA1 block. Create one with lzsa -r +; +; in: +; * LZSA_SRC_LO/LZSA_SRC_HI/LZSA_SRC_BANK contain the compressed raw block address +; * LZSA_DST_LO/LZSA_DST_HI/LZSA_DST_BANK contain the destination buffer address +; +; out: +; * LZSA_DST_LO/LZSA_DST_HI/LZSA_DST_BANK contain the last decompressed byte address, +1 +; +; ----------------------------------------------------------------------------- +; Backward decompression is also supported, use lzsa -r -b +; To use it, also define BACKWARD_DECOMPRESS=1 before including this code! +; +; in: +; * LZSA_SRC_LO/LZSA_SRC_HI/LZSA_SRC_BANK must contain the address of the last byte of compressed data +; * LZSA_DST_LO/LZSA_DST_HI/LZSA_DST_BANK must contain the address of the last byte of the destination buffer +; +; out: +; * LZSA_DST_LO/LZSA_DST_HI/BANK contain the last decompressed byte address, -1 +; +; ----------------------------------------------------------------------------- +; +; Copyright (C) 2019-2020 Emmanuel Marty, Peter Ferrie +; +; This software is provided 'as-is', without any express or implied +; warranty. In no event will the authors be held liable for any damages +; arising from the use of this software. +; +; Permission is granted to anyone to use this software for any purpose, +; including commercial applications, and to alter it and redistribute it +; freely, subject to the following restrictions: +; +; 1. The origin of this software must not be misrepresented; you must not +; claim that you wrote the original software. If you use this software +; in a product, an acknowledgment in the product documentation would be +; appreciated but is not required. +; 2. 
Altered source versions must be plainly marked as such, and must not be +; misrepresented as being the original software. +; 3. This notice may not be removed or altered from any source distribution. +; ----------------------------------------------------------------------------- + +!cpu 65816 +DECOMPRESS_LZSA1 + SEP #$30 +!as +!rs + LDY #$00 + +DECODE_TOKEN + JSR GETSRC ; read token byte: O|LLL|MMMM + PHA ; preserve token on stack + + AND #$70 ; isolate literals count + BEQ NO_LITERALS ; skip if no literals to copy + CMP #$70 ; LITERALS_RUN_LEN? + BNE PREPARE_COPY_LITERALS ; if not, count is directly embedded in token + + JSR GETSRC ; get extra byte of variable literals count + ; the carry is always set by the CMP above + ; GETSRC doesn't change it + SBC #$F9 ; (LITERALS_RUN_LEN) + BCC PREPARE_COPY_LITERALS_DIRECT + BEQ LARGE_VARLEN_LITERALS ; if adding up to zero, go grab 16-bit count + + JSR GETSRC ; get single extended byte of variable literals count + INY ; add 256 to literals count + BCS PREPARE_COPY_LITERALS_DIRECT ; (*like JMP PREPARE_COPY_LITERALS_DIRECT but shorter) + +LARGE_VARLEN_LITERALS ; handle 16 bits literals count + ; literals count = directly these 16 bits + JSR GETLARGESRC ; grab low 8 bits in X, high 8 bits in A + TAY ; put high 8 bits in Y + TXA + BCS PREPARE_COPY_LARGE_LITERALS ; (*like JMP PREPARE_COPY_LITERALS_DIRECT but shorter) + +PREPARE_COPY_LITERALS + TAX + LDA SHIFT_TABLE-1,X ; shift literals length into place + ; -1 because position 00 is reserved +PREPARE_COPY_LITERALS_DIRECT + TAX + +PREPARE_COPY_LARGE_LITERALS + BEQ COPY_LITERALS + INY + +COPY_LITERALS + JSR GETPUT ; copy one byte of literals + DEX + BNE COPY_LITERALS + DEY + BNE COPY_LITERALS + +NO_LITERALS + PLA ; retrieve token from stack + PHA ; preserve token again + BMI GET_LONG_OFFSET ; $80: 16 bit offset + + JSR GETSRC ; get 8 bit offset from stream in A + TAX ; save for later + LDA #$FF ; high 8 bits + BNE GOT_OFFSET ; go prepare match + ; (*like JMP GOT_OFFSET but shorter) + +SHORT_VARLEN_MATCHLEN + JSR GETSRC ; get single extended byte of variable match len + INY ; add 256 to match length + +PREPARE_COPY_MATCH + TAX +PREPARE_COPY_MATCH_Y + TXA + BEQ COPY_MATCH_LOOP + INY + +COPY_MATCH_LOOP + LDA $AAAAAA ; get one byte of backreference + JSR PUTDST ; copy to destination + + REP #$20 +!ifdef BACKWARD_DECOMPRESS { + + ; Backward decompression -- put backreference bytes backward + + DEC COPY_MATCH_LOOP+1 + +} else { + + ; Forward decompression -- put backreference bytes forward + + INC COPY_MATCH_LOOP+1 + +} + SEP #$20 + + DEX + BNE COPY_MATCH_LOOP + DEY + BNE COPY_MATCH_LOOP + BEQ DECODE_TOKEN ; (*like JMP DECODE_TOKEN but shorter) + +GET_LONG_OFFSET ; handle 16 bit offset: + JSR GETLARGESRC ; grab low 8 bits in X, high 8 bits in A + +GOT_OFFSET + +!ifdef BACKWARD_DECOMPRESS { + + ; Backward decompression - substract match offset + + STA OFFSHI ; store high 8 bits of offset + STX OFFSLO + + SEC ; substract dest - match offset + REP #$20 +!al + LDA PUTDST+1 +OFFSLO = *+1 +OFFSHI = *+2 + SBC #$AAAA ; 16 bits + STA COPY_MATCH_LOOP+1 ; store back reference address + SEP #$20 +!as + SEC + +} else { + + ; Forward decompression - add match offset + + STA OFFSHI ; store high 8 bits of offset + TXA + + CLC ; add dest + match offset + ADC PUTDST+1 ; low 8 bits + STA COPY_MATCH_LOOP+1 ; store back reference address +OFFSHI = *+1 + LDA #$AA ; high 8 bits + + ADC PUTDST+2 + STA COPY_MATCH_LOOP+2 ; store high 8 bits of address + +} + + LDA PUTDST+3 ; bank + STA COPY_MATCH_LOOP+3 ; store back reference address + 
+ PLA ; retrieve token from stack again + AND #$0F ; isolate match len (MMMM) + ADC #$02 ; plus carry which is always set by the high ADC + CMP #$12 ; MATCH_RUN_LEN? + BCC PREPARE_COPY_MATCH ; if not, count is directly embedded in token + + JSR GETSRC ; get extra byte of variable match length + ; the carry is always set by the CMP above + ; GETSRC doesn't change it + SBC #$EE ; add MATCH_RUN_LEN and MIN_MATCH_SIZE to match length + BCC PREPARE_COPY_MATCH + BNE SHORT_VARLEN_MATCHLEN + + ; Handle 16 bits match length + JSR GETLARGESRC ; grab low 8 bits in X, high 8 bits in A + TAY ; put high 8 bits in Y + ; large match length with zero high byte? + BNE PREPARE_COPY_MATCH_Y ; if not, continue + +DECOMPRESSION_DONE + RTS + +SHIFT_TABLE + !BYTE $00,$00,$00,$00,$00,$00,$00,$00,$00,$00,$00,$00,$00,$00,$00 + !BYTE $01,$01,$01,$01,$01,$01,$01,$01,$01,$01,$01,$01,$01,$01,$01,$01 + !BYTE $02,$02,$02,$02,$02,$02,$02,$02,$02,$02,$02,$02,$02,$02,$02,$02 + !BYTE $03,$03,$03,$03,$03,$03,$03,$03,$03,$03,$03,$03,$03,$03,$03,$03 + !BYTE $04,$04,$04,$04,$04,$04,$04,$04,$04,$04,$04,$04,$04,$04,$04,$04 + !BYTE $05,$05,$05,$05,$05,$05,$05,$05,$05,$05,$05,$05,$05,$05,$05,$05 + !BYTE $06,$06,$06,$06,$06,$06,$06,$06,$06,$06,$06,$06,$06,$06,$06,$06 + !BYTE $07,$07,$07,$07,$07,$07,$07,$07,$07,$07,$07,$07,$07,$07,$07,$07 + +!ifdef BACKWARD_DECOMPRESS { + + ; Backward decompression -- get and put bytes backward + +GETPUT + JSR GETSRC +PUTDST +LZSA_DST_LO = *+1 +LZSA_DST_HI = *+2 +LZSA_DST_BANK = *+3 + STA $AAAAAA + REP #$20 + DEC PUTDST+1 + SEP #$20 + RTS + +GETLARGESRC + JSR GETSRC ; grab low 8 bits + TAX ; move to X + ; fall through grab high 8 bits + +GETSRC +LZSA_SRC_LO = *+1 +LZSA_SRC_HI = *+2 +LZSA_SRC_BANK = *+3 + LDA $AAAAAA + REP #$20 + DEC GETSRC+1 + SEP #$20 + RTS + +} else { + + ; Forward decompression -- get and put bytes forward + +GETPUT + JSR GETSRC +PUTDST +LZSA_DST_LO = *+1 +LZSA_DST_HI = *+2 +LZSA_DST_BANK = *+3 + STA $AAAAAA + REP #$20 + INC PUTDST+1 + SEP #$20 + RTS + +GETLARGESRC + JSR GETSRC ; grab low 8 bits + TAX ; move to X + ; fall through grab high 8 bits + +GETSRC +LZSA_SRC_LO = *+1 +LZSA_SRC_HI = *+2 +LZSA_SRC_BANK = *+3 + LDA $AAAAAA + REP #$20 + INC GETSRC+1 + SEP #$20 + RTS +} diff --git a/Tools/unix/lzsa/asm/65816/decompress_v2.asm b/Tools/unix/lzsa/asm/65816/decompress_v2.asm new file mode 100644 index 00000000..08c2ac8f --- /dev/null +++ b/Tools/unix/lzsa/asm/65816/decompress_v2.asm @@ -0,0 +1,338 @@ +; ----------------------------------------------------------------------------- +; Decompress raw LZSA2 block. +; Create one with lzsa -r -f2 +; +; in: +; * LZSA_SRC_LO/LZSA_SRC_HI/LZSA_SRC_BANK contain the compressed raw block address +; * LZSA_DST_LO/LZSA_DST_HI/LZSA_DST_BANK contain the destination buffer address +; +; out: +; * LZSA_DST_LO/LZSA_DST_HI/LZSA_DST_BANK contain the last decompressed byte address, +1 +; +; ----------------------------------------------------------------------------- +; Backward decompression is also supported, use lzsa -r -b -f2 +; To use it, also define BACKWARD_DECOMPRESS=1 before including this code! 
+; +; in: +; * LZSA_SRC_LO/LZSA_SRC_HI/LZSA_SRC_BANK must contain the address of the last byte of compressed data +; * LZSA_DST_LO/LZSA_DST_HI/LZSA_DST_BANK must contain the address of the last byte of the destination buffer +; +; out: +; * LZSA_DST_LO/LZSA_DST_HI/BANK contain the last decompressed byte address, -1 +; +; ----------------------------------------------------------------------------- +; +; Copyright (C) 2019-2020 Emmanuel Marty, Peter Ferrie +; +; This software is provided 'as-is', without any express or implied +; warranty. In no event will the authors be held liable for any damages +; arising from the use of this software. +; +; Permission is granted to anyone to use this software for any purpose, +; including commercial applications, and to alter it and redistribute it +; freely, subject to the following restrictions: +; +; 1. The origin of this software must not be misrepresented; you must not +; claim that you wrote the original software. If you use this software +; in a product, an acknowledgment in the product documentation would be +; appreciated but is not required. +; 2. Altered source versions must be plainly marked as such, and must not be +; misrepresented as being the original software. +; 3. This notice may not be removed or altered from any source distribution. +; ----------------------------------------------------------------------------- + +!cpu 65816 +NIBCOUNT = $FC ; zero-page location for temp offset + +DECOMPRESS_LZSA2 + SEP #$30 +!as +!rs + LDY #$00 + STY NIBCOUNT + +DECODE_TOKEN + JSR GETSRC ; read token byte: XYZ|LL|MMM + PHA ; preserve token on stack + + AND #$18 ; isolate literals count (LL) + BEQ NO_LITERALS ; skip if no literals to copy + CMP #$18 ; LITERALS_RUN_LEN_V2? + BCC PREPARE_COPY_LITERALS ; if less, count is directly embedded in token + + JSR GETNIBBLE ; get extra literals length nibble + ; add nibble to len from token + ADC #$02 ; (LITERALS_RUN_LEN_V2) minus carry + CMP #$12 ; LITERALS_RUN_LEN_V2 + 15 ? + BCC PREPARE_COPY_LITERALS_DIRECT ; if less, literals count is complete + + JSR GETSRC ; get extra byte of variable literals count + ; the carry is always set by the CMP above + ; GETSRC doesn't change it + SBC #$EE ; overflow? 
+ BRA PREPARE_COPY_LITERALS_DIRECT + +PREPARE_COPY_LITERALS_LARGE + ; handle 16 bits literals count + ; literals count = directly these 16 bits + JSR GETLARGESRC ; grab low 8 bits in X, high 8 bits in A + TAY ; put high 8 bits in Y + BCS PREPARE_COPY_LITERALS_HIGH ; (*same as JMP PREPARE_COPY_LITERALS_HIGH but shorter) + +PREPARE_COPY_LITERALS + LSR ; shift literals count into place + LSR + LSR + +PREPARE_COPY_LITERALS_DIRECT + TAX + BCS PREPARE_COPY_LITERALS_LARGE ; if so, literals count is large + +PREPARE_COPY_LITERALS_HIGH + TXA + BEQ COPY_LITERALS + INY + +COPY_LITERALS + JSR GETPUT ; copy one byte of literals + DEX + BNE COPY_LITERALS + DEY + BNE COPY_LITERALS + +NO_LITERALS + PLA ; retrieve token from stack + PHA ; preserve token again + ASL + BCS REPMATCH_OR_LARGE_OFFSET ; 1YZ: rep-match or 13/16 bit offset + + ASL ; 0YZ: 5 or 9 bit offset + BCS OFFSET_9_BIT + + ; 00Z: 5 bit offset + + LDX #$FF ; set offset bits 15-8 to 1 + + JSR GETCOMBINEDBITS ; rotate Z bit into bit 0, read nibble for bits 4-1 + ORA #$E0 ; set bits 7-5 to 1 + BNE GOT_OFFSET_LO ; go store low byte of match offset and prepare match + +OFFSET_9_BIT ; 01Z: 9 bit offset + ;;ASL ; shift Z (offset bit 8) in place + ROL + ROL + AND #$01 + EOR #$FF ; set offset bits 15-9 to 1 + BNE GOT_OFFSET_HI ; go store high byte, read low byte of match offset and prepare match + ; (*same as JMP GOT_OFFSET_HI but shorter) + +REPMATCH_OR_LARGE_OFFSET + ASL ; 13 bit offset? + BCS REPMATCH_OR_16_BIT ; handle rep-match or 16-bit offset if not + + ; 10Z: 13 bit offset + + JSR GETCOMBINEDBITS ; rotate Z bit into bit 8, read nibble for bits 12-9 + ADC #$DE ; set bits 15-13 to 1 and substract 2 (to substract 512) + BNE GOT_OFFSET_HI ; go store high byte, read low byte of match offset and prepare match + ; (*same as JMP GOT_OFFSET_HI but shorter) + +REPMATCH_OR_16_BIT ; rep-match or 16 bit offset + ;;ASL ; XYZ=111? + BMI REP_MATCH ; reuse previous offset if so (rep-match) + + ; 110: handle 16 bit offset + JSR GETSRC ; grab high 8 bits +GOT_OFFSET_HI + TAX + JSR GETSRC ; grab low 8 bits +GOT_OFFSET_LO + STA OFFSLO ; store low byte of match offset + STX OFFSHI ; store high byte of match offset + +REP_MATCH +!ifdef BACKWARD_DECOMPRESS { + + ; Backward decompression - substract match offset + + SEC ; add dest + match offset + REP #$20 +!al + LDA PUTDST+1 ; 16 bits +OFFSLO = *+1 +OFFSHI = *+2 + SBC #$AAAA + STA COPY_MATCH_LOOP+1 ; store back reference address + SEP #$20 +!as + SEC + +} else { + + ; Forward decompression - add match offset + + CLC ; add dest + match offset + REP #$20 +!al + LDA PUTDST+1 ; 16 bits +OFFSLO = *+1 +OFFSHI = *+2 + ADC #$AAAA + STA COPY_MATCH_LOOP+1 ; store back reference address + SEP #$20 +!as +} + + LDA PUTDST+3 ; bank + STA COPY_MATCH_LOOP+3 ; store back reference address + + PLA ; retrieve token from stack again + AND #$07 ; isolate match len (MMM) + ADC #$01 ; add MIN_MATCH_SIZE_V2 and carry + CMP #$09 ; MIN_MATCH_SIZE_V2 + MATCH_RUN_LEN_V2? + BCC PREPARE_COPY_MATCH ; if less, length is directly embedded in token + + JSR GETNIBBLE ; get extra match length nibble + ; add nibble to len from token + ADC #$08 ; (MIN_MATCH_SIZE_V2 + MATCH_RUN_LEN_V2) minus carry + CMP #$18 ; MIN_MATCH_SIZE_V2 + MATCH_RUN_LEN_V2 + 15? + BCC PREPARE_COPY_MATCH ; if less, match length is complete + + JSR GETSRC ; get extra byte of variable match length + ; the carry is always set by the CMP above + ; GETSRC doesn't change it + SBC #$E8 ; overflow? 
+ +PREPARE_COPY_MATCH + TAX + BCC PREPARE_COPY_MATCH_Y ; if not, the match length is complete + BEQ DECOMPRESSION_DONE ; if EOD code, bail + + ; Handle 16 bits match length + JSR GETLARGESRC ; grab low 8 bits in X, high 8 bits in A + TAY ; put high 8 bits in Y + +PREPARE_COPY_MATCH_Y + TXA + BEQ COPY_MATCH_LOOP + INY + +COPY_MATCH_LOOP + LDA $AAAAAA ; get one byte of backreference + JSR PUTDST ; copy to destination + + REP #$20 +!ifdef BACKWARD_DECOMPRESS { + + ; Backward decompression -- put backreference bytes backward + + DEC COPY_MATCH_LOOP+1 + +} else { + + ; Forward decompression -- put backreference bytes forward + + INC COPY_MATCH_LOOP+1 + +} + SEP #$20 + + DEX + BNE COPY_MATCH_LOOP + DEY + BNE COPY_MATCH_LOOP + JMP DECODE_TOKEN + +GETCOMBINEDBITS + EOR #$80 + ASL + PHP + + JSR GETNIBBLE ; get nibble into bits 0-3 (for offset bits 1-4) + PLP ; merge Z bit as the carry bit (for offset bit 0) +COMBINEDBITZ + ROL ; nibble -> bits 1-4; carry(!Z bit) -> bit 0 ; carry cleared +DECOMPRESSION_DONE + RTS + +GETNIBBLE +NIBBLES = *+1 + LDA #$AA + LSR NIBCOUNT + BCC NEED_NIBBLES + AND #$0F ; isolate low 4 bits of nibble + RTS + +NEED_NIBBLES + INC NIBCOUNT + JSR GETSRC ; get 2 nibbles + STA NIBBLES + LSR + LSR + LSR + LSR + SEC + RTS + +!ifdef BACKWARD_DECOMPRESS { + + ; Backward decompression -- get and put bytes backward + +GETPUT + JSR GETSRC +PUTDST +LZSA_DST_LO = *+1 +LZSA_DST_HI = *+2 +LZSA_DST_BANK = *+3 + STA $AAAAAA + REP #$20 + DEC PUTDST+1 + SEP #$20 + RTS + +GETLARGESRC + JSR GETSRC ; grab low 8 bits + TAX ; move to X + ; fall through grab high 8 bits + +GETSRC +LZSA_SRC_LO = *+1 +LZSA_SRC_HI = *+2 +LZSA_SRC_BANK = *+3 + LDA $AAAAAA + REP #$20 + DEC GETSRC+1 + SEP #$20 + RTS + +} else { + + ; Forward decompression -- get and put bytes forward + +GETPUT + JSR GETSRC +PUTDST +LZSA_DST_LO = *+1 +LZSA_DST_HI = *+2 +LZSA_DST_BANK = *+3 + STA $AAAAAA + REP #$20 + INC PUTDST+1 + SEP #$20 + RTS + +GETLARGESRC + JSR GETSRC ; grab low 8 bits + TAX ; move to X + ; fall through grab high 8 bits + +GETSRC +LZSA_SRC_LO = *+1 +LZSA_SRC_HI = *+2 +LZSA_SRC_BANK = *+3 + LDA $AAAAAA + REP #$20 + INC GETSRC+1 + SEP #$20 + RTS +} diff --git a/Tools/unix/lzsa/asm/6809/unlzsa1-6309.s b/Tools/unix/lzsa/asm/6809/unlzsa1-6309.s new file mode 100644 index 00000000..5866e8db --- /dev/null +++ b/Tools/unix/lzsa/asm/6809/unlzsa1-6309.s @@ -0,0 +1,90 @@ +; unlzsa1-6309.s - Hitachi 6309 decompression routine for raw LZSA1 - 92 bytes +; compress with lzsa -f1 -r +; +; in: x = start of compressed data +; y = start of decompression buffer +; out: y = end of decompression buffer + 1 +; +; Copyright (C) 2020 Emmanuel Marty, Doug Masten +; +; This software is provided 'as-is', without any express or implied +; warranty. In no event will the authors be held liable for any damages +; arising from the use of this software. +; +; Permission is granted to anyone to use this software for any purpose, +; including commercial applications, and to alter it and redistribute it +; freely, subject to the following restrictions: +; +; 1. The origin of this software must not be misrepresented; you must not +; claim that you wrote the original software. If you use this software +; in a product, an acknowledgment in the product documentation would be +; appreciated but is not required. +; 2. Altered source versions must be plainly marked as such, and must not be +; misrepresented as being the original software. +; 3. This notice may not be removed or altered from any source distribution. 
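For reference while reading the 6809/6309 LZSA1 routines that follow: raw LZSA1 blocks use a one-byte token laid out as O|LLL|MMMM, as the comments in the code note. The C sketch below is an illustration rather than anything from the lzsa sources, and it only shows the fields embedded directly in the token; the extended length bytes that follow when LLL = 7 or MMMM = 15 are omitted.

    #include <stdint.h>

    typedef struct {
        int      long_offset;  /* O bit: 0 = one offset byte, 1 = two (little endian)     */
        unsigned literals;     /* LLL: 0-6 used directly, 7 = extra length data follows   */
        unsigned match_len;    /* MMMM + 3 (MIN_MATCH_SIZE); 18 = extra length data follows */
    } lzsa1_token;

    static lzsa1_token lzsa1_parse_token(uint8_t t)
    {
        lzsa1_token tok;
        tok.long_offset = (t & 0x80) != 0;
        tok.literals    = (t >> 4) & 0x07;
        tok.match_len   = (t & 0x0F) + 3;
        return tok;
    }

The offset itself is stored as a negative displacement (the routines sign-extend a single offset byte with $FF), so it is simply added to the write pointer to locate the match source, exactly as lz1gotof does below.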
+ +decompress_lzsa1 equ lz1token + +lz1bigof lda ,x+ ; O set: load MSB 16-bit (negative, signed) offest +lz1gotof leau d,y ; put backreference start address in U (dst+offset) + + ldd #$000f ; clear MSB match length and set mask for MMMM + andb ,s+ ; isolate MMMM (embedded match length) in token + addb #$03 ; add MIN_MATCH_SIZE + cmpb #$12 ; MATCH_RUN_LEN? + bne lz1gotln ; no, we have the full match length, go copy + + addb ,x+ ; add extra match length byte + MIN_MATCH_SIZE + MATCH_RUN_LEN + bcc lz1gotln ; if no overflow, we have the full length + bne lz1midln + + ldb ,x+ ; load 16-bit len in D (low part in B, high in A) + lda ,x+ ; (little endian) + bne lz1gotln ; check if we hit EOD (16-bit length = 0) + tstb + bne lz1gotln ; go copy matched bytes if not + + rts ; done, bail + +lz1midln tfr b,a ; copy high part of len into A + ldb ,x+ ; grab low 8 bits of len in B + +lz1gotln tfr d,w ; set W with match length for TFM instruction + tfm u+,y+ ; copy match bytes + +lz1token ldb ,x+ ; load next token into B: O|LLL|MMMM + pshs b ; save it + + andb #$70 ; isolate LLL (embedded literals count) in B + beq lz1nolt ; skip if no literals + cmpb #$70 ; LITERALS_RUN_LEN? + bne lz1declt ; if not, we have the complete count, go unshift + + ldb ,x+ ; load extra literals count byte + addb #$07 ; add LITERALS_RUN_LEN + bcc lz1gotla ; if no overflow, we got the complete count, copy + bne lz1midlt + + ldb ,x+ ; load low 8 bits of little-endian literals count + lda ,x+ ; load high 8 bits of literal count + bra lz1gotlt ; we now have the complete count, go copy + +lz1midlt tfr b,a ; copy high part of literals count into A + ldb ,x+ ; load low 8 bits of literals count + bra lz1gotlt ; we now have the complete count, go copy + +lz1declt lsrb ; shift literals count into place + lsrb + lsrb + lsrb + +lz1gotla clra ; clear A (high part of literals count) +lz1gotlt tfr d,w ; set W with literals count for TFM instruction + tfm x+,y+ ; copy literal bytes + +lz1nolt ldb ,x+ ; load either 8-bit or LSB 16-bit offset (negative, signed) + lda ,s ; get token again, don't pop it from the stack + bmi lz1bigof ; test O bit (small or large offset) + + lda #$ff ; set high 8 bits + bra lz1gotof diff --git a/Tools/unix/lzsa/asm/6809/unlzsa1.s b/Tools/unix/lzsa/asm/6809/unlzsa1.s new file mode 100644 index 00000000..559a3032 --- /dev/null +++ b/Tools/unix/lzsa/asm/6809/unlzsa1.s @@ -0,0 +1,102 @@ +; unlzsa1.s - 6809 decompression routine for raw LZSA1 - 110 bytes +; compress with lzsa -r +; +; in: x = start of compressed data +; y = start of decompression buffer +; out: y = end of decompression buffer + 1 +; +; Copyright (C) 2020 Emmanuel Marty +; +; This software is provided 'as-is', without any express or implied +; warranty. In no event will the authors be held liable for any damages +; arising from the use of this software. +; +; Permission is granted to anyone to use this software for any purpose, +; including commercial applications, and to alter it and redistribute it +; freely, subject to the following restrictions: +; +; 1. The origin of this software must not be misrepresented; you must not +; claim that you wrote the original software. If you use this software +; in a product, an acknowledgment in the product documentation would be +; appreciated but is not required. +; 2. Altered source versions must be plainly marked as such, and must not be +; misrepresented as being the original software. +; 3. This notice may not be removed or altered from any source distribution. 
+ +decompress_lzsa1 equ lz1token + +lz1bigof lda ,x+ ; O set: load MSB 16-bit (negative, signed) offest +lz1gotof leau d,y ; put backreference start address in U (dst+offset) + + ldd #$000f ; clear MSB match length and set mask for MMMM + andb ,s+ ; isolate MMMM (embedded match length) in token + addb #$03 ; add MIN_MATCH_SIZE + cmpb #$12 ; MATCH_RUN_LEN? + bne lz1gotln ; no, we have the full match length, go copy + + addb ,x+ ; add extra match length byte + MIN_MATCH_SIZE + MATCH_RUN_LEN + bcc lz1gotln ; if no overflow, we have the full length + bne lz1midln + + ldb ,x+ ; load 16-bit len in D (low part in B, high in A) + lda ,x+ ; (little endian) + bne lz1gotln ; check if we hit EOD (16-bit length = 0) + tstb + bne lz1gotln ; go copy matched bytes if not + + rts ; done, bail + +lz1midln tfr b,a ; copy high part of len into A + ldb ,x+ ; grab low 8 bits of len in B + +lz1gotln pshs x ; save source compressed data pointer + tfr d,x ; copy match length to X + +lz1cpymt lda ,u+ ; copy matched byte + sta ,y+ + leax -1,x ; decrement X + bne lz1cpymt ; loop until all matched bytes are copied + + puls x ; restore source compressed data pointer + +lz1token ldb ,x+ ; load next token into B: O|LLL|MMMM + pshs b ; save it + + andb #$70 ; isolate LLL (embedded literals count) in B + beq lz1nolt ; skip if no literals + cmpb #$70 ; LITERALS_RUN_LEN? + bne lz1declt ; if not, we have the complete count, go unshift + + ldb ,x+ ; load extra literals count byte + addb #$07 ; add LITERALS_RUN_LEN + bcc lz1gotla ; if no overflow, we got the complete count, copy + bne lz1midlt + + ldb ,x+ ; load low 8 bits of little-endian literals count + lda ,x+ ; load high 8 bits of literal count + bra lz1gotlt ; we now have the complete count, go copy + +lz1midlt tfr b,a ; copy high part of literals count into A + ldb ,x+ ; load low 8 bits of literals count + bra lz1gotlt ; we now have the complete count, go copy + +lz1declt lsrb ; shift literals count into place + lsrb + lsrb + lsrb +lz1gotla clra ; clear A (high part of literals count) + +lz1gotlt leau ,x + tfr d,x ; transfer 16-bit count into X +lz1cpylt lda ,u+ ; copy literal byte + sta ,y+ + leax -1,x ; decrement X and update Z flag + bne lz1cpylt ; loop until all literal bytes are copied + leax ,u + +lz1nolt ldb ,x+ ; load either 8-bit or LSB 16-bit offset (negative, signed) + lda ,s ; get token again, don't pop it from the stack + bmi lz1bigof ; test O bit (small or large offset) + + lda #$ff ; set high 8 bits + bra lz1gotof diff --git a/Tools/unix/lzsa/asm/6809/unlzsa1b-6309.s b/Tools/unix/lzsa/asm/6809/unlzsa1b-6309.s new file mode 100644 index 00000000..60780856 --- /dev/null +++ b/Tools/unix/lzsa/asm/6809/unlzsa1b-6309.s @@ -0,0 +1,92 @@ +; unlzsa1-6309.s - H6309 backward decompressor for raw LZSA1 - 97 bytes +; compress with lzsa -f1 -r -b +; +; in: x = last byte of compressed data +; y = last byte of decompression buffer +; out: y = first byte of decompressed data +; +; Copyright (C) 2020 Emmanuel Marty, Doug Masten +; +; This software is provided 'as-is', without any express or implied +; warranty. In no event will the authors be held liable for any damages +; arising from the use of this software. +; +; Permission is granted to anyone to use this software for any purpose, +; including commercial applications, and to alter it and redistribute it +; freely, subject to the following restrictions: +; +; 1. The origin of this software must not be misrepresented; you must not +; claim that you wrote the original software. 
If you use this software +; in a product, an acknowledgment in the product documentation would be +; appreciated but is not required. +; 2. Altered source versions must be plainly marked as such, and must not be +; misrepresented as being the original software. +; 3. This notice may not be removed or altered from any source distribution. + +decompress_lzsa1 + leax 1,x + bra lz1token + +lz1bigof ldd ,--x ; O set: load long 16-bit (negative, signed) offest +lz1gotof negd ; reverse sign of offset in D + leau d,y ; put backreference start address in U (dst+offset) + + ldd #$000f ; clear MSB match length and set mask for MMMM + andb ,s+ ; isolate MMMM (embedded match length) in token + addb #$03 ; add MIN_MATCH_SIZE + cmpb #$12 ; MATCH_RUN_LEN? + bne lz1gotln ; no, we have the full match length, go copy + + addb ,-x ; add extra match length byte + MIN_MATCH_SIZE + MATCH_RUN_LEN + bcc lz1gotln ; if no overflow, we have the full length + bne lz1midln + + ldd ,--x ; load 16-bit len in D (low part in B, high in A) + bne lz1gotln ; check if we hit EOD (16-bit length = 0) + + leay 1,y ; adjust pointer to first byte of decompressed data + rts ; done, bail + +lz1midln tfr b,a ; copy high part of len into A + ldb ,-x ; grab low 8 bits of len in B + +lz1gotln tfr d,w ; set W with match length for TFM instruction + tfm u-,y- ; copy match bytes + +lz1token ldb ,-x ; load next token into B: O|LLL|MMMM + pshs b ; save it + + andb #$70 ; isolate LLL (embedded literals count) in B + beq lz1nolt ; skip if no literals + cmpb #$70 ; LITERALS_RUN_LEN? + bne lz1declt ; if not, we have the complete count, go unshift + + ldb ,-x ; load extra literals count byte + addb #$07 ; add LITERALS_RUN_LEN + bcc lz1gotla ; if no overflow, we got the complete count, copy + bne lz1midlt + + ldd ,--x ; load 16 bit count in D (low part in B, high in A) + bra lz1gotlt ; we now have the complete count, go copy + +lz1midlt tfr b,a ; copy high part of literals count into A + ldb ,-x ; load low 8 bits of literals count + bra lz1gotlt ; we now have the complete count, go copy + +lz1declt lsrb ; shift literals count into place + lsrb + lsrb + lsrb + +lz1gotla clra ; clear A (high part of literals count) +lz1gotlt tfr d,w ; set W with literals count for TFM instruction + leax -1,x ; tfm is post-decrement + tfm x-,y- ; copy literal bytes + leax 1,x + +lz1nolt ldb ,s ; get token again, don't pop it from the stack + bmi lz1bigof ; test O bit (small or large offset) + + ldb ,-x ; load either 8-bit or LSB 16-bit offset (negative, signed) + lda #$ff ; set high 8 bits + bra lz1gotof diff --git a/Tools/unix/lzsa/asm/6809/unlzsa1b.s b/Tools/unix/lzsa/asm/6809/unlzsa1b.s new file mode 100644 index 00000000..ada6dcc9 --- /dev/null +++ b/Tools/unix/lzsa/asm/6809/unlzsa1b.s @@ -0,0 +1,105 @@ +; unlzsa1b.s - 6809 backward decompression routine for raw LZSA1 - 113 bytes +; compress with lzsa -r -b +; +; in: x = last byte of compressed data +; y = last byte of decompression buffer +; out: y = first byte of decompressed data +; +; Copyright (C) 2020 Emmanuel Marty +; +; This software is provided 'as-is', without any express or implied +; warranty. In no event will the authors be held liable for any damages +; arising from the use of this software. +; +; Permission is granted to anyone to use this software for any purpose, +; including commercial applications, and to alter it and redistribute it +; freely, subject to the following restrictions: +; +; 1. 
The origin of this software must not be misrepresented; you must not +; claim that you wrote the original software. If you use this software +; in a product, an acknowledgment in the product documentation would be +; appreciated but is not required. +; 2. Altered source versions must be plainly marked as such, and must not be +; misrepresented as being the original software. +; 3. This notice may not be removed or altered from any source distribution. + +decompress_lzsa1 + leax 1,x + leay 1,y + bra lz1token + +lz1bigof ldd ,--x ; O set: load long 16 bit (negative, signed) offset +lz1gotof nega ; reverse sign of offset in D + negb + sbca #0 + leau d,y ; put backreference start address in U (dst+offset) + + ldd #$000f ; clear MSB match length and set mask for MMMM + andb ,s+ ; isolate MMMM (embedded match length) in token + + addb #$03 ; add MIN_MATCH_SIZE + cmpb #$12 ; MATCH_RUN_LEN? + bne lz1gotln ; no, we have the full match length, go copy + + addb ,-x ; add extra match length byte + MIN_MATCH_SIZE + MATCH_RUN_LEN + bcc lz1gotln ; if no overflow, we have the full length + bne lz1midln + + ldd ,--x ; load 16-bit len in D (low part in B, high in A) + bne lz1gotln ; check if we hit EOD (16-bit length = 0) + + rts ; done, bail + +lz1midln tfr b,a ; copy high part of len into A + ldb ,-x ; grab low 8 bits of len in B + +lz1gotln pshs x ; save source compressed data pointer + tfr d,x ; copy match length to X + +lz1cpymt lda ,-u ; copy matched byte + sta ,-y + leax -1,x ; decrement X + bne lz1cpymt ; loop until all matched bytes are copied + + puls x ; restore source compressed data pointer + +lz1token ldb ,-x ; load next token into B: O|LLL|MMMM + pshs b ; save it + + andb #$70 ; isolate LLL (embedded literals count) in B + beq lz1nolt ; skip if no literals + cmpb #$70 ; LITERALS_RUN_LEN? + bne lz1declt ; if not, we have the complete count, go unshift + + ldb ,-x ; load extra literals count byte + addb #$07 ; add LITERALS_RUN_LEN + bcc lz1gotla ; if no overflow, we got the complete count, copy + bne lz1midlt + + ldd ,--x ; load 16 bit count in D (low part in B, high in A) + bra lz1gotlt ; we now have the complete count, go copy + +lz1midlt tfr b,a ; copy high part of literals count into A + ldb ,-x ; load low 8 bits of literals count + bra lz1gotlt ; we now have the complete count, go copy + +lz1declt lsrb ; shift literals count into place + lsrb + lsrb + lsrb + +lz1gotla clra ; clear A (high part of literals count) +lz1gotlt leau ,x + tfr d,x ; transfer 16-bit count into X +lz1cpylt lda ,-u ; copy literal byte + sta ,-y + leax -1,x ; decrement X and update Z flag + bne lz1cpylt ; loop until all literal bytes are copied + leax ,u + +lz1nolt ldb ,s ; get token again, don't pop it from the stack + bmi lz1bigof ; test O bit (small or large offset) + + ldb ,-x ; O clear: load 8 bit (negative, signed) offset + lda #$ff ; set high 8 bits + bra lz1gotof diff --git a/Tools/unix/lzsa/asm/6809/unlzsa2-6309.s b/Tools/unix/lzsa/asm/6809/unlzsa2-6309.s new file mode 100644 index 00000000..17970d8b --- /dev/null +++ b/Tools/unix/lzsa/asm/6809/unlzsa2-6309.s @@ -0,0 +1,129 @@ +; unlzsa2-6309.s - Hitachi 6309 decompression routine for raw LZSA2 - 150 bytes +; compress with lzsa -f2 -r +; +; in: x = start of compressed data +; y = start of decompression buffer +; out: y = end of decompression buffer + 1 +; +; Copyright (C) 2020 Emmanuel Marty, Doug Masten +; +; This software is provided 'as-is', without any express or implied +; warranty. 
In no event will the authors be held liable for any damages +; arising from the use of this software. +; +; Permission is granted to anyone to use this software for any purpose, +; including commercial applications, and to alter it and redistribute it +; freely, subject to the following restrictions: +; +; 1. The origin of this software must not be misrepresented; you must not +; claim that you wrote the original software. If you use this software +; in a product, an acknowledgment in the product documentation would be +; appreciated but is not required. +; 2. Altered source versions must be plainly marked as such, and must not be +; misrepresented as being the original software. +; 3. This notice may not be removed or altered from any source distribution. + +decompress_lzsa2 + clr lz2nibct ; reset nibble available flag + bra lz2token + +lz2nibct fcb 0 ; nibble ready flag + +lz2replg lslb ; push token's Y flag bit into carry + bcs lz2rep16 ; if token's Y bit is set, rep or 16 bit offset + + sex ; push token's Z flag bit into reg A + bsr lz2nibl ; get offset nibble in B + lsla ; push token's Z flag bit into carry + rolb ; shift Z flag from carry into bit 0 of B + eorb #$e1 ; set bits 13-15 of offset, reverse bit 8 + tfr b,a ; copy bits 8-15 of offset into A + suba #$02 ; substract 512 from offset + ldb ,x+ ; load low 8 bits of (negative, signed) offset + bra lz2gotof + +lz2rep16 bmi lz2repof ; if token's Z flag bit is set, rep match + ldd ,x++ ; load high then low 8 bits of offset + +lz2gotof std lz2moff+2 ; store match offset + +lz2repof ldd #$0007 ; clear MSB match length and set mask for MMM + andb ,u ; isolate MMM (embedded match length) in token +lz2moff leau $aaaa,y ; put backreference start address in U (dst+offset) + addb #$02 ; add MIN_MATCH_SIZE_V2 + cmpb #$09 ; MIN_MATCH_SIZE_V2 + MATCH_RUN_LEN_V2? + bne lz2gotln ; no, we have the full match length, go copy + + bsr lz2nibl ; get offset nibble in B + addb #$09 ; add MIN_MATCH_SIZE_V2 + MATCH_RUN_LEN_V2 + cmpb #$18 ; MIN_MATCH_SIZE_V2 + MATCH_RUN_LEN_V2 + 15? + bne lz2gotln ; if not, we have the full match length, go copy + + addb ,x+ ; add extra length byte + MIN_MATCH_SIZE_V2 + MATCH_RUN_LEN_V2 + 15 + bcc lz2gotln ; if no overflow, we have the full length + beq lz2done ; detect EOD code + + ldb ,x+ ; load 16-bit len in D (low part in B, high in A) + lda ,x+ ; (little endian) + +lz2gotln tfr d,w ; set W with match count for TFM instruction + tfm u+,y+ ; copy match bytes + +lz2token tfr x,u ; save token address + ldb ,x+ ; load next token into B: XYZ|LL|MMM + andb #$18 ; isolate LL (embedded literals count) in B + beq lz2nolt ; skip if no literals + cmpb #$18 ; LITERALS_RUN_LEN_V2? + bne lz2declt ; if not, we have the complete count, go unshift + + bsr lz2nibl ; get extra literals length nibble in B + addb #$03 ; add LITERALS_RUN_LEN_V2 + cmpb #$12 ; LITERALS_RUN_LEN_V2 + 15 ? 
+ bne lz2gotla ; if not, we have the full literals count, go copy + + addb ,x+ ; add extra literals count byte + LITERALS_RUN_LEN + 15 + bcc lz2gotla ; if no overflow, we got the complete count, copy + + ldb ,x+ ; load low 8 bits of little-endian literals count + lda ,x+ ; load high 8 bits of literal count + bra lz2gotlt ; we now have the complete count, go copy + +lz2declt lsrb ; shift literals count into place + lsrb + lsrb +lz2gotla clra ; clear A (high part of literals count) + +lz2gotlt tfr d,w ; set W with literals count for TFM instruction + tfm x+,y+ ; copy literal bytes + +lz2nolt ldb ,u ; get token again + lslb ; push token's X flag bit into carry + bcs lz2replg ; if token's X bit is set, rep or large offset + + lslb ; push token's Y flag bit into carry + sex ; push token's Z flag bit into reg A (carry flag is not effected) + bcs lz2offs9 ; if token's Y bit is set, 9 bits offset + + bsr lz2nibl ; get offset nibble in B + lsla ; retrieve token's Z flag bit and push into carry + rolb ; shift Z flag from carry into bit 0 of B + eorb #$e1 ; set bits 5-7 of offset, reverse bit 0 + sex ; set bits 8-15 of offset to $FF + bra lz2gotof + +lz2offs9 deca ; set bits 9-15 of offset, reverse bit 8 + ldb ,x+ ; load low 8 bits of (negative, signed) offset + bra lz2gotof + +lz2nibl ldb #$aa + com lz2nibct ; nibble ready? + bpl lz2gotnb + + ldb ,x+ ; load two nibbles + stb lz2nibl+1 ; store nibble for next time (low 4 bits) + lsrb ; shift 4 high bits of nibble down + lsrb + lsrb + lsrb +lz2gotnb andb #$0f ; only keep low 4 bits +lz2done rts diff --git a/Tools/unix/lzsa/asm/6809/unlzsa2.s b/Tools/unix/lzsa/asm/6809/unlzsa2.s new file mode 100644 index 00000000..a620cadb --- /dev/null +++ b/Tools/unix/lzsa/asm/6809/unlzsa2.s @@ -0,0 +1,146 @@ +; unlzsa2.s - 6809 decompression routine for raw LZSA2 - 169 bytes +; compress with lzsa -f2 -r +; +; in: x = start of compressed data +; y = start of decompression buffer +; out: y = end of decompression buffer + 1 +; +; Copyright (C) 2020 Emmanuel Marty +; +; This software is provided 'as-is', without any express or implied +; warranty. In no event will the authors be held liable for any damages +; arising from the use of this software. +; +; Permission is granted to anyone to use this software for any purpose, +; including commercial applications, and to alter it and redistribute it +; freely, subject to the following restrictions: +; +; 1. The origin of this software must not be misrepresented; you must not +; claim that you wrote the original software. If you use this software +; in a product, an acknowledgment in the product documentation would be +; appreciated but is not required. +; 2. Altered source versions must be plainly marked as such, and must not be +; misrepresented as being the original software. +; 3. This notice may not be removed or altered from any source distribution. + +decompress_lzsa2 + clr +; +; in: x = last byte of compressed data +; y = last byte of decompression buffer +; out: y = first byte of decompressed data +; +; Copyright (C) 2020 Emmanuel Marty, Doug Masten +; +; This software is provided 'as-is', without any express or implied +; warranty. In no event will the authors be held liable for any damages +; arising from the use of this software. +; +; Permission is granted to anyone to use this software for any purpose, +; including commercial applications, and to alter it and redistribute it +; freely, subject to the following restrictions: +; +; 1. 
The origin of this software must not be misrepresented; you must not +; claim that you wrote the original software. If you use this software +; in a product, an acknowledgment in the product documentation would be +; appreciated but is not required. +; 2. Altered source versions must be plainly marked as such, and must not be +; misrepresented as being the original software. +; 3. This notice may not be removed or altered from any source distribution. + +decompress_lzsa2 + clr lz2nibct ; reset nibble available flag + leax 1,x ; adjust compressed data pointer + bra lz2token + +lz2nibct fcb 0 ; nibble ready flag + +lz2replg lslb ; push token's Y flag bit into carry + bcs lz2rep16 ; if token's Y bit is set, rep or 16 bit offset + + sex ; push token's Z flag bit into reg A + bsr lz2nibl ; get offset nibble in B + lsla ; push token's Z flag bit into carry + rolb ; shift Z flag from carry into bit 0 of B + eorb #$e1 ; set bits 13-15 of offset, reverse bit 8 + tfr b,a ; copy bits 8-15 of offset into A + suba #$02 ; substract 512 from offset + bra lz2lowof + +lz2rep16 bmi lz2repof ; if token's Z flag bit is set, rep match + lda ,-x ; load high 8 bits of (negative, signed) offset +lz2lowof ldb ,-x ; load low 8 bits of offset + +lz2gotof negd ; reverse sign of offset in D + std lz2moff+2 ; store match offset + +lz2repof ldd #$0007 ; clear MSB match length and set mask for MMM + andb ,u ; isolate MMM (embedded match length) in token +lz2moff leau $aaaa,y ; put backreference start address in U (dst+offset) + addb #$02 ; add MIN_MATCH_SIZE_V2 + cmpb #$09 ; MIN_MATCH_SIZE_V2 + MATCH_RUN_LEN_V2? + bne lz2gotln ; no, we have the full match length, go copy + + bsr lz2nibl ; get offset nibble in B + addb #$09 ; add MIN_MATCH_SIZE_V2 + MATCH_RUN_LEN_V2 + cmpb #$18 ; MIN_MATCH_SIZE_V2 + MATCH_RUN_LEN_V2 + 15? + bne lz2gotln ; if not, we have the full match length, go copy + + addb ,-x ; add extra length byte + MIN_MATCH_SIZE_V2 + MATCH_RUN_LEN_V2 + 15 + bcc lz2gotln ; if no overflow, we have the full length + beq lz2done ; detect EOD code + + ldd ,--x ; load 16-bit len in D (low part in B, high in A) + +lz2gotln tfr d,w ; set W with match count for TFM instruction + tfm u-,y- ; copy match bytes + +lz2token ldb ,-x ; load next token into B: XYZ|LL|MMM + tfr x,u ; save token address + andb #$18 ; isolate LL (embedded literals count) in B + beq lz2nolt ; skip if no literals + cmpb #$18 ; LITERALS_RUN_LEN_V2? + bne lz2declt ; if not, we have the complete count, go unshift + + bsr lz2nibl ; get extra literals length nibble in B + addb #$03 ; add LITERALS_RUN_LEN_V2 + cmpb #$12 ; LITERALS_RUN_LEN_V2 + 15 ? + bne lz2gotla ; if not, we have the full literals count, go copy + + addb ,-x ; add extra literals count byte + LITERALS_RUN_LEN + 15 + bcc lz2gotla ; if no overflow, we got the complete count, copy + + ldd ,--x ; load 16 bit count in D (low part in B, high in A) + bra lz2gotlt ; we now have the complete count, go copy + +lz2nibl com lz2nibct ; nibble ready? 
+ bpl lz2gotnb + + ldb ,-x ; load two nibbles + stb lz2gotnb+1 ; store nibble for next time (low 4 bits) + lsrb ; shift 4 high bits of nibble down + lsrb + lsrb + lsrb + rts + +lz2declt lsrb ; shift literals count into place + lsrb + lsrb +lz2gotla clra ; clear A (high part of literals count) + +lz2gotlt tfr d,w ; set W with literals count for TFM instruction + leax -1,x ; tfm is post-decrement + tfm x-,y- ; copy literal bytes + leax 1,x + +lz2nolt ldb ,u ; get token again + lslb ; push token's X flag bit into carry + bcs lz2replg ; if token's X bit is set, rep or large offset + + lslb ; push token's Y flag bit into carry + sex ; push token's Z flag bit into reg A (carry flag is not effected) + bcs lz2offs9 ; if token's Y bit is set, 9 bits offset + + bsr lz2nibl ; get offset nibble in B + lsla ; retrieve token's Z flag bit and push into carry + rolb ; shift Z flag from carry into bit 0 of B + eorb #$e1 ; set bits 5-7 of offset, reverse bit 0 + sex ; set bits 8-15 of offset to $FF + bra lz2gotof + +lz2offs9 deca ; set bits 9-15 of offset, reverse bit 8 + bra lz2lowof + +lz2done leay 1,y ; adjust pointer to first byte of decompressed data and then exit +lz2gotnb ldb #$aa ; load nibble + andb #$0f ; only keep low 4 bits + rts diff --git a/Tools/unix/lzsa/asm/6809/unlzsa2b.s b/Tools/unix/lzsa/asm/6809/unlzsa2b.s new file mode 100644 index 00000000..b538cacb --- /dev/null +++ b/Tools/unix/lzsa/asm/6809/unlzsa2b.s @@ -0,0 +1,152 @@ +; unlzsa2b.s - 6809 backward decompression routine for raw LZSA2 - 171 bytes +; compress with lzsa -f2 -r -b +; +; in: x = last byte of compressed data +; y = last byte of decompression buffer +; out: y = first byte of decompressed data +; +; Copyright (C) 2020 Emmanuel Marty +; +; This software is provided 'as-is', without any express or implied +; warranty. In no event will the authors be held liable for any damages +; arising from the use of this software. +; +; Permission is granted to anyone to use this software for any purpose, +; including commercial applications, and to alter it and redistribute it +; freely, subject to the following restrictions: +; +; 1. The origin of this software must not be misrepresented; you must not +; claim that you wrote the original software. If you use this software +; in a product, an acknowledgment in the product documentation would be +; appreciated but is not required. +; 2. Altered source versions must be plainly marked as such, and must not be +; misrepresented as being the original software. +; 3. This notice may not be removed or altered from any source distribution. + +decompress_lzsa2 + clr ed shuttle 86765 alice 46864 robotron 303895 +++! 
+; baseline new test harness shuttle 83925 alice 37948 robotron 269002 *** +; Pavel optimizations shuttle 82225 alice 36798 robotron 261226 +++ +; OPTIMIZE_LONG_RLE 1 shuttle 82242 alice 36787 robotron 261392 **- +; +;------ +; +;Pavel's optimization history: +; shuttle alice robotron time in 1.193 MHz timer clocks +;baseline 19109 D9A6 570F6 +;adc cl,0->adc cl,cl 19035 D9A6 56FAB +;rep movsb->shr cx,1;jnc 18FD4 D998 56F14 +;cmp bp,-2->inc bp;inc bp 18F07 D999 56EA3 +;jz;jc->jc 18D81 D973 56B2F +;add al,3->movsb x3 18B1E D777 56197 +;more lit_len_mat tables 18A83 D341 54ACC diff --git a/Tools/unix/lzsa/src/lib.h b/Tools/unix/lzsa/src/lib.h index 5556d2a9..2520b13b 100644 --- a/Tools/unix/lzsa/src/lib.h +++ b/Tools/unix/lzsa/src/lib.h @@ -63,7 +63,7 @@ typedef enum _lzsa_status_t { /* Decompression-specific status codes */ LZSA_ERROR_FORMAT, /**< Invalid input format or magic number when decompressing */ - LZSA_ERROR_DECOMPRESSION, /**< Internal decompression error */ + LZSA_ERROR_DECOMPRESSION /**< Internal decompression error */ } lzsa_status_t; /* Compression flags */ diff --git a/Tools/unix/lzsa/src/lzsa.c b/Tools/unix/lzsa/src/lzsa.c index 4cce4041..3f6e3572 100644 --- a/Tools/unix/lzsa/src/lzsa.c +++ b/Tools/unix/lzsa/src/lzsa.c @@ -31,7 +31,6 @@ */ #include -#include #include #include #ifdef _WIN32 @@ -48,7 +47,7 @@ #define OPT_RAW_BACKWARD 8 #define OPT_STATS 16 -#define TOOL_VERSION "1.2.0" +#define TOOL_VERSION "1.3.6" /*---------------------------------------------------------------------------*/ @@ -295,7 +294,7 @@ int comparestream_open(lzsa_stream_t *stream, const char *pszCompareFilename, co pCompareStream->pCompareDataBuf = NULL; pCompareStream->nCompareDataSize = 0; - pCompareStream->f = (void*)fopen(pszCompareFilename, pszMode); + pCompareStream->f = (FILE*)fopen(pszCompareFilename, pszMode); if (pCompareStream->f) { stream->obj = pCompareStream; @@ -866,11 +865,11 @@ int main(int argc, char **argv) { const char *pszInFilename = NULL; const char *pszOutFilename = NULL; const char *pszDictionaryFilename = NULL; - bool bArgsError = false; - bool bCommandDefined = false; - bool bVerifyCompression = false; - bool bMinMatchDefined = false; - bool bFormatVersionDefined = false; + int nArgsError = 0; + int nCommandDefined = 0; + int nVerifyCompression = 0; + int nMinMatchDefined = 0; + int nFormatVersionDefined = 0; char cCommand = 'z'; int nMinMatchSize = 0; unsigned int nOptions = OPT_FAVOR_RATIO; @@ -878,51 +877,51 @@ int main(int argc, char **argv) { for (i = 1; i < argc; i++) { if (!strcmp(argv[i], "-d")) { - if (!bCommandDefined) { - bCommandDefined = true; + if (!nCommandDefined) { + nCommandDefined = 1; cCommand = 'd'; } else - bArgsError = true; + nArgsError = 1; } else if (!strcmp(argv[i], "-z")) { - if (!bCommandDefined) { - bCommandDefined = true; + if (!nCommandDefined) { + nCommandDefined = 1; cCommand = 'z'; } else - bArgsError = true; + nArgsError = 1; } else if (!strcmp(argv[i], "-c")) { - if (!bVerifyCompression) { - bVerifyCompression = true; + if (!nVerifyCompression) { + nVerifyCompression = 1; } else - bArgsError = true; + nArgsError = 1; } else if (!strcmp(argv[i], "-cbench")) { - if (!bCommandDefined) { - bCommandDefined = true; + if (!nCommandDefined) { + nCommandDefined = 1; cCommand = 'B'; } else - bArgsError = true; + nArgsError = 1; } else if (!strcmp(argv[i], "-dbench")) { - if (!bCommandDefined) { - bCommandDefined = true; + if (!nCommandDefined) { + nCommandDefined = 1; cCommand = 'b'; } else - bArgsError = true; + nArgsError = 1; } else if 
(!strcmp(argv[i], "-test")) { - if (!bCommandDefined) { - bCommandDefined = true; + if (!nCommandDefined) { + nCommandDefined = 1; cCommand = 't'; } else - bArgsError = true; + nArgsError = 1; } else if (!strcmp(argv[i], "-D")) { if (!pszDictionaryFilename && (i + 1) < argc) { @@ -930,119 +929,119 @@ int main(int argc, char **argv) { i++; } else - bArgsError = true; + nArgsError = 1; } else if (!strncmp(argv[i], "-D", 2)) { if (!pszDictionaryFilename) { pszDictionaryFilename = argv[i] + 2; } else - bArgsError = true; + nArgsError = 1; } else if (!strcmp(argv[i], "-m")) { - if (!bMinMatchDefined && (i + 1) < argc) { + if (!nMinMatchDefined && (i + 1) < argc) { char *pEnd = NULL; nMinMatchSize = (int)strtol(argv[i + 1], &pEnd, 10); if (pEnd && pEnd != argv[i + 1] && (nMinMatchSize >= 2 && nMinMatchSize <= 5)) { i++; - bMinMatchDefined = true; + nMinMatchDefined = 1; nOptions &= (~OPT_FAVOR_RATIO); } else { - bArgsError = true; + nArgsError = 1; } } else - bArgsError = true; + nArgsError = 1; } else if (!strncmp(argv[i], "-m", 2)) { - if (!bMinMatchDefined) { + if (!nMinMatchDefined) { char *pEnd = NULL; nMinMatchSize = (int)strtol(argv[i] + 2, &pEnd, 10); if (pEnd && pEnd != (argv[i]+2) && (nMinMatchSize >= 2 && nMinMatchSize <= 5)) { - bMinMatchDefined = true; + nMinMatchDefined = 1; nOptions &= (~OPT_FAVOR_RATIO); } else { - bArgsError = true; + nArgsError = 1; } } else - bArgsError = true; + nArgsError = 1; } else if (!strcmp(argv[i], "--prefer-ratio")) { - if (!bMinMatchDefined) { + if (!nMinMatchDefined) { nMinMatchSize = 0; - bMinMatchDefined = true; + nMinMatchDefined = 1; } else - bArgsError = true; + nArgsError = 1; } else if (!strcmp(argv[i], "--prefer-speed")) { - if (!bMinMatchDefined) { + if (!nMinMatchDefined) { nMinMatchSize = 3; nOptions &= (~OPT_FAVOR_RATIO); - bMinMatchDefined = true; + nMinMatchDefined = 1; } else - bArgsError = true; + nArgsError = 1; } else if (!strcmp(argv[i], "-f")) { - if (!bFormatVersionDefined && (i + 1) < argc) { + if (!nFormatVersionDefined && (i + 1) < argc) { char *pEnd = NULL; nFormatVersion = (int)strtol(argv[i + 1], &pEnd, 10); if (pEnd && pEnd != argv[i + 1] && (nFormatVersion >= 1 && nFormatVersion <= 2)) { i++; - bFormatVersionDefined = true; + nFormatVersionDefined = 1; } else { - bArgsError = true; + nArgsError = 1; } } else - bArgsError = true; + nArgsError = 1; } else if (!strncmp(argv[i], "-f", 2)) { - if (!bFormatVersionDefined) { + if (!nFormatVersionDefined) { char *pEnd = NULL; nFormatVersion = (int)strtol(argv[i] + 2, &pEnd, 10); if (pEnd && pEnd != (argv[i] + 2) && (nFormatVersion >= 1 && nFormatVersion <= 2)) { - bFormatVersionDefined = true; + nFormatVersionDefined = 1; } else { - bArgsError = true; + nArgsError = 1; } } else - bArgsError = true; + nArgsError = 1; } else if (!strcmp(argv[i], "-v")) { if ((nOptions & OPT_VERBOSE) == 0) { nOptions |= OPT_VERBOSE; } else - bArgsError = true; + nArgsError = 1; } else if (!strcmp(argv[i], "-r")) { if ((nOptions & OPT_RAW) == 0) { nOptions |= OPT_RAW; } else - bArgsError = true; + nArgsError = 1; } else if (!strcmp(argv[i], "-b")) { if ((nOptions & OPT_RAW_BACKWARD) == 0) { nOptions |= OPT_RAW_BACKWARD; } else - bArgsError = true; + nArgsError = 1; } else if (!strcmp(argv[i], "-stats")) { if ((nOptions & OPT_STATS) == 0) { nOptions |= OPT_STATS; } else - bArgsError = true; + nArgsError = 1; } else { if (!pszInFilename) @@ -1051,21 +1050,21 @@ int main(int argc, char **argv) { if (!pszOutFilename) pszOutFilename = argv[i]; else - bArgsError = true; + nArgsError = 1; } } } - if 
(!bArgsError && (nOptions & OPT_RAW_BACKWARD) && !(nOptions & OPT_RAW)) { + if (!nArgsError && (nOptions & OPT_RAW_BACKWARD) && !(nOptions & OPT_RAW)) { fprintf(stderr, "error: -b (compress backwards) requires -r (raw block format)\n"); return 100; } - if (!bArgsError && cCommand == 't') { + if (!nArgsError && cCommand == 't') { return do_self_test(nOptions, nMinMatchSize, nFormatVersion); } - if (bArgsError || !pszInFilename || !pszOutFilename) { + if (nArgsError || !pszInFilename || !pszOutFilename) { fprintf(stderr, "lzsa command-line tool v" TOOL_VERSION " by Emmanuel Marty and spke\n"); fprintf(stderr, "usage: %s [-c] [-d] [-v] [-r] \n", argv[0]); fprintf(stderr, " -c: check resulting stream after compressing\n"); @@ -1089,7 +1088,7 @@ int main(int argc, char **argv) { if (cCommand == 'z') { int nResult = do_compress(pszInFilename, pszOutFilename, pszDictionaryFilename, nOptions, nMinMatchSize, nFormatVersion); - if (nResult == 0 && bVerifyCompression) { + if (nResult == 0 && nVerifyCompression) { return do_compare(pszOutFilename, pszInFilename, pszDictionaryFilename, nOptions, nFormatVersion); } else { return nResult; diff --git a/Tools/unix/lzsa/src/matchfinder.c b/Tools/unix/lzsa/src/matchfinder.c index fbdc5ca3..3de2cfa6 100644 --- a/Tools/unix/lzsa/src/matchfinder.c +++ b/Tools/unix/lzsa/src/matchfinder.c @@ -66,7 +66,7 @@ int lzsa_build_suffix_array(lzsa_compressor *pCompressor, const unsigned char *p int *PLCP = (int*)pCompressor->pos_data; /* Use temporarily */ int *Phi = PLCP; int nCurLen = 0; - int i; + int i, r; /* Compute the permuted LCP first (Kärkkäinen method) */ Phi[intervals[0]] = -1; @@ -132,7 +132,7 @@ int lzsa_build_suffix_array(lzsa_compressor *pCompressor, const unsigned char *p intervals[0] = 0; next_interval_idx = 1; - for (int r = 1; r < nInWindowSize; r++) { + for (r = 1; r < nInWindowSize; r++) { const unsigned int next_pos = SA_and_LCP[r] & POS_MASK; const unsigned int next_lcp = SA_and_LCP[r] & LCP_MASK; const unsigned int top_lcp = *top & LCP_MASK; diff --git a/Tools/unix/lzsa/src/shrink_block_v1.c b/Tools/unix/lzsa/src/shrink_block_v1.c index c30e4a9f..32c5c385 100644 --- a/Tools/unix/lzsa/src/shrink_block_v1.c +++ b/Tools/unix/lzsa/src/shrink_block_v1.c @@ -157,66 +157,69 @@ static inline int lzsa_get_offset_cost_v1(const unsigned int nMatchOffset) { * @param nEndOffset offset to end finding matches at (typically the size of the total input window in bytes */ static void lzsa_optimize_forward_v1(lzsa_compressor *pCompressor, lzsa_match *pBestMatch, const int nStartOffset, const int nEndOffset, const int nReduce) { - lzsa_arrival *arrival = pCompressor->arrival - (nStartOffset << MATCHES_PER_ARRIVAL_SHIFT); + lzsa_arrival *arrival = pCompressor->arrival - (nStartOffset << ARRIVALS_PER_POSITION_SHIFT); const int nMinMatchSize = pCompressor->min_match_size; const int nFavorRatio = (pCompressor->flags & LZSA_FLAG_FAVOR_RATIO) ? 1 : 0; + const int nModeSwitchPenalty = nFavorRatio ? 0 : MODESWITCH_PENALTY; const int nDisableScore = nReduce ? 
0 : (2 * BLOCK_SIZE); int i, j, n; if ((nEndOffset - nStartOffset) > BLOCK_SIZE) return; - memset(arrival + (nStartOffset << MATCHES_PER_ARRIVAL_SHIFT), 0, sizeof(lzsa_arrival) * ((nEndOffset - nStartOffset + 1) << MATCHES_PER_ARRIVAL_SHIFT)); + memset(arrival + (nStartOffset << ARRIVALS_PER_POSITION_SHIFT), 0, sizeof(lzsa_arrival) * ((nEndOffset - nStartOffset + 1) << ARRIVALS_PER_POSITION_SHIFT)); - arrival[nStartOffset << MATCHES_PER_ARRIVAL_SHIFT].from_slot = -1; + arrival[nStartOffset << ARRIVALS_PER_POSITION_SHIFT].from_slot = -1; for (i = nStartOffset; i != nEndOffset; i++) { + lzsa_arrival* cur_arrival = &arrival[i << ARRIVALS_PER_POSITION_SHIFT]; int m; - for (j = 0; j < NMATCHES_PER_ARRIVAL_V1 && arrival[(i << MATCHES_PER_ARRIVAL_SHIFT) + j].from_slot; j++) { - int nPrevCost = arrival[(i << MATCHES_PER_ARRIVAL_SHIFT) + j].cost; + for (j = 0; j < NARRIVALS_PER_POSITION_V1 && cur_arrival[j].from_slot; j++) { + int nPrevCost = cur_arrival[j].cost; int nCodingChoiceCost = nPrevCost + 8 /* literal */; - int nScore = arrival[(i << MATCHES_PER_ARRIVAL_SHIFT) + j].score + 1; - int nNumLiterals = arrival[(i << MATCHES_PER_ARRIVAL_SHIFT) + j].num_literals + 1; + int nScore = cur_arrival[j].score + 1; + int nNumLiterals = cur_arrival[j].num_literals + 1; if (nNumLiterals == LITERALS_RUN_LEN_V1 || nNumLiterals == 256 || nNumLiterals == 512) { nCodingChoiceCost += 8; } - if (!nFavorRatio && nNumLiterals == 1) - nCodingChoiceCost += MODESWITCH_PENALTY; + if (nNumLiterals == 1) + nCodingChoiceCost += nModeSwitchPenalty; - for (n = 0; n < NMATCHES_PER_ARRIVAL_V1 /* we only need the literals + short match cost + long match cost cases */; n++) { - lzsa_arrival *pDestArrival = &arrival[((i + 1) << MATCHES_PER_ARRIVAL_SHIFT) + n]; + lzsa_arrival *pDestSlots = &arrival[(i + 1) << ARRIVALS_PER_POSITION_SHIFT]; + for (n = 0; n < NARRIVALS_PER_POSITION_V1 /* we only need the literals + short match cost + long match cost cases */; n++) { + lzsa_arrival *pDestArrival = &pDestSlots[n]; if (pDestArrival->from_slot == 0 || nCodingChoiceCost < pDestArrival->cost || (nCodingChoiceCost == pDestArrival->cost && nScore < (pDestArrival->score + nDisableScore))) { - memmove(&arrival[((i + 1) << MATCHES_PER_ARRIVAL_SHIFT) + n + 1], - &arrival[((i + 1) << MATCHES_PER_ARRIVAL_SHIFT) + n], - sizeof(lzsa_arrival) * (NMATCHES_PER_ARRIVAL_V1 - n - 1)); + memmove(&arrival[((i + 1) << ARRIVALS_PER_POSITION_SHIFT) + n + 1], + &arrival[((i + 1) << ARRIVALS_PER_POSITION_SHIFT) + n], + sizeof(lzsa_arrival) * (NARRIVALS_PER_POSITION_V1 - n - 1)); pDestArrival->cost = nCodingChoiceCost; pDestArrival->from_pos = i; pDestArrival->from_slot = j + 1; - pDestArrival->match_offset = 0; pDestArrival->match_len = 0; pDestArrival->num_literals = nNumLiterals; pDestArrival->score = nScore; - pDestArrival->rep_offset = arrival[(i << MATCHES_PER_ARRIVAL_SHIFT) + j].rep_offset; + pDestArrival->rep_offset = cur_arrival[j].rep_offset; break; } } } const lzsa_match *match = pCompressor->match + ((i - nStartOffset) << MATCHES_PER_INDEX_SHIFT_V1); + int nNumArrivalsForThisPos = j; for (m = 0; m < NMATCHES_PER_INDEX_V1 && match[m].length; m++) { int nMatchLen = match[m].length; int nMatchOffsetCost = lzsa_get_offset_cost_v1(match[m].offset); int nStartingMatchLen, k; - if ((i + nMatchLen) > (nEndOffset - LAST_LITERALS)) - nMatchLen = nEndOffset - LAST_LITERALS - i; + if ((i + nMatchLen) > nEndOffset) + nMatchLen = nEndOffset - i; if (nMatchLen >= LEAVE_ALONE_MATCH_SIZE) nStartingMatchLen = nMatchLen; @@ -225,43 +228,48 @@ static void 
lzsa_optimize_forward_v1(lzsa_compressor *pCompressor, lzsa_match *p for (k = nStartingMatchLen; k <= nMatchLen; k++) { int nMatchLenCost = lzsa_get_match_varlen_size_v1(k - MIN_MATCH_SIZE_V1); - for (j = 0; j < NMATCHES_PER_ARRIVAL_V1 && arrival[(i << MATCHES_PER_ARRIVAL_SHIFT) + j].from_slot; j++) { - int nPrevCost = arrival[(i << MATCHES_PER_ARRIVAL_SHIFT) + j].cost; + lzsa_arrival *pDestSlots = &arrival[(i + k) << ARRIVALS_PER_POSITION_SHIFT]; + + for (j = 0; j < nNumArrivalsForThisPos; j++) { + int nPrevCost = cur_arrival[j].cost; int nCodingChoiceCost = nPrevCost + 8 /* token */ /* the actual cost of the literals themselves accumulates up the chain */ + nMatchOffsetCost + nMatchLenCost; - int nScore = arrival[(i << MATCHES_PER_ARRIVAL_SHIFT) + j].score + 5; int exists = 0; - if (!nFavorRatio && !arrival[(i << MATCHES_PER_ARRIVAL_SHIFT) + j].num_literals) - nCodingChoiceCost += MODESWITCH_PENALTY; + if (!cur_arrival[j].num_literals) + nCodingChoiceCost += nModeSwitchPenalty; for (n = 0; - n < NMATCHES_PER_ARRIVAL_V1 && arrival[((i + k) << MATCHES_PER_ARRIVAL_SHIFT) + n].from_slot && arrival[((i + k) << MATCHES_PER_ARRIVAL_SHIFT) + n].cost <= nCodingChoiceCost; + n < NARRIVALS_PER_POSITION_V1 && pDestSlots[n].from_slot && pDestSlots[n].cost <= nCodingChoiceCost; n++) { - if (lzsa_get_offset_cost_v1(arrival[((i + k) << MATCHES_PER_ARRIVAL_SHIFT) + n].rep_offset) == lzsa_get_offset_cost_v1(match[m].offset)) { + if (lzsa_get_offset_cost_v1(pDestSlots[n].rep_offset) == nMatchOffsetCost) { exists = 1; break; } } - for (n = 0; !exists && n < NMATCHES_PER_ARRIVAL_V1 /* we only need the literals + short match cost + long match cost cases */; n++) { - lzsa_arrival *pDestArrival = &arrival[((i + k) << MATCHES_PER_ARRIVAL_SHIFT) + n]; - - if (pDestArrival->from_slot == 0 || - nCodingChoiceCost < pDestArrival->cost || - (nCodingChoiceCost == pDestArrival->cost && nScore < (pDestArrival->score + nDisableScore))) { - memmove(&arrival[((i + k) << MATCHES_PER_ARRIVAL_SHIFT) + n + 1], - &arrival[((i + k) << MATCHES_PER_ARRIVAL_SHIFT) + n], - sizeof(lzsa_arrival) * (NMATCHES_PER_ARRIVAL_V1 - n - 1)); - - pDestArrival->cost = nCodingChoiceCost; - pDestArrival->from_pos = i; - pDestArrival->from_slot = j + 1; - pDestArrival->match_offset = match[m].offset; - pDestArrival->match_len = k; - pDestArrival->num_literals = 0; - pDestArrival->score = nScore; - pDestArrival->rep_offset = match[m].offset; - break; + if (!exists) { + int nScore = cur_arrival[j].score + 5; + + for (n = 0; n < NARRIVALS_PER_POSITION_V1 /* we only need the literals + short match cost + long match cost cases */; n++) { + lzsa_arrival *pDestArrival = &pDestSlots[n]; + + if (pDestArrival->from_slot == 0 || + nCodingChoiceCost < pDestArrival->cost || + (nCodingChoiceCost == pDestArrival->cost && nScore < (pDestArrival->score + nDisableScore))) { + memmove(&pDestSlots[n + 1], + &pDestSlots[n], + sizeof(lzsa_arrival) * (NARRIVALS_PER_POSITION_V1 - n - 1)); + + pDestArrival->cost = nCodingChoiceCost; + pDestArrival->from_pos = i; + pDestArrival->from_slot = j + 1; + pDestArrival->match_len = k; + pDestArrival->num_literals = 0; + pDestArrival->score = nScore; + pDestArrival->rep_offset = match[m].offset; + j = NARRIVALS_PER_POSITION_V1; + break; + } } } } @@ -269,14 +277,17 @@ static void lzsa_optimize_forward_v1(lzsa_compressor *pCompressor, lzsa_match *p } } - lzsa_arrival *end_arrival = &arrival[(i << MATCHES_PER_ARRIVAL_SHIFT) + 0]; + lzsa_arrival *end_arrival = &arrival[(i << ARRIVALS_PER_POSITION_SHIFT) + 0]; while 
(end_arrival->from_slot > 0 && end_arrival->from_pos >= 0) { if (end_arrival->from_pos >= nEndOffset) return; pBestMatch[end_arrival->from_pos].length = end_arrival->match_len; - pBestMatch[end_arrival->from_pos].offset = end_arrival->match_offset; + if (end_arrival->match_len) + pBestMatch[end_arrival->from_pos].offset = end_arrival->rep_offset; + else + pBestMatch[end_arrival->from_pos].offset = 0; - end_arrival = &arrival[(end_arrival->from_pos << MATCHES_PER_ARRIVAL_SHIFT) + (end_arrival->from_slot - 1)]; + end_arrival = &arrival[(end_arrival->from_pos << ARRIVALS_PER_POSITION_SHIFT) + (end_arrival->from_slot - 1)]; } } @@ -301,12 +312,12 @@ static int lzsa_optimize_command_count_v1(lzsa_compressor *pCompressor, const un lzsa_match *pMatch = pBestMatch + i; if (pMatch->length == 0 && - (i + 1) < (nEndOffset - LAST_LITERALS) && + (i + 1) < nEndOffset && pBestMatch[i + 1].length >= MIN_MATCH_SIZE_V1 && pBestMatch[i + 1].length < MAX_VARLEN && pBestMatch[i + 1].offset && i >= pBestMatch[i + 1].offset && - (i + pBestMatch[i + 1].length + 1) <= (nEndOffset - LAST_LITERALS) && + (i + pBestMatch[i + 1].length + 1) <= nEndOffset && !memcmp(pInWindow + i - (pBestMatch[i + 1].offset), pInWindow + i, pBestMatch[i + 1].length + 1)) { int nCurLenSize = lzsa_get_match_varlen_size_v1(pBestMatch[i + 1].length - MIN_MATCH_SIZE_V1); int nReducedLenSize = lzsa_get_match_varlen_size_v1(pBestMatch[i + 1].length + 1 - MIN_MATCH_SIZE_V1); @@ -413,8 +424,6 @@ static int lzsa_get_compressed_size_v1(lzsa_compressor *pCompressor, lzsa_match int nMatchOffset = pMatch->offset; int nMatchLen = pMatch->length; int nEncodedMatchLen = nMatchLen - MIN_MATCH_SIZE_V1; - int nTokenLiteralsLen = (nNumLiterals >= LITERALS_RUN_LEN_V1) ? LITERALS_RUN_LEN_V1 : nNumLiterals; - int nTokenMatchLen = (nEncodedMatchLen >= MATCH_RUN_LEN_V1) ? MATCH_RUN_LEN_V1 : nEncodedMatchLen; int nTokenLongOffset = (nMatchOffset <= 256) ? 0x00 : 0x80; int nCommandSize = 8 /* token */ + lzsa_get_literals_varlen_size_v1(nNumLiterals) + (nNumLiterals << 3) + (nTokenLongOffset ? 16 : 8) /* match offset */ + lzsa_get_match_varlen_size_v1(nEncodedMatchLen); @@ -429,7 +438,6 @@ static int lzsa_get_compressed_size_v1(lzsa_compressor *pCompressor, lzsa_match } { - int nTokenLiteralsLen = (nNumLiterals >= LITERALS_RUN_LEN_V1) ? 
LITERALS_RUN_LEN_V1 : nNumLiterals; int nCommandSize = 8 /* token */ + lzsa_get_literals_varlen_size_v1(nNumLiterals) + (nNumLiterals << 3); nCompressedSize += nCommandSize; diff --git a/Tools/unix/lzsa/src/shrink_block_v2.c b/Tools/unix/lzsa/src/shrink_block_v2.c index eb4a16ed..fc6d2324 100644 --- a/Tools/unix/lzsa/src/shrink_block_v2.c +++ b/Tools/unix/lzsa/src/shrink_block_v2.c @@ -43,22 +43,18 @@ * @param nOutOffset current write index into output buffer * @param nMaxOutDataSize maximum size of output buffer, in bytes * @param nCurNibbleOffset write index into output buffer, of current byte being filled with nibbles - * @param nCurFreeNibbles current number of free nibbles in byte * @param nNibbleValue value to write (0..15) */ -static int lzsa_write_nibble_v2(unsigned char *pOutData, int nOutOffset, const int nMaxOutDataSize, int *nCurNibbleOffset, int *nCurFreeNibbles, int nNibbleValue) { +static int lzsa_write_nibble_v2(unsigned char *pOutData, int nOutOffset, const int nMaxOutDataSize, int *nCurNibbleOffset, int nNibbleValue) { if (nOutOffset < 0) return -1; if ((*nCurNibbleOffset) == -1) { if (nOutOffset >= nMaxOutDataSize) return -1; (*nCurNibbleOffset) = nOutOffset; - (*nCurFreeNibbles) = 2; - pOutData[nOutOffset++] = 0; + pOutData[nOutOffset++] = nNibbleValue << 4; } - - pOutData[*nCurNibbleOffset] = (pOutData[*nCurNibbleOffset] << 4) | (nNibbleValue & 0x0f); - (*nCurFreeNibbles)--; - if ((*nCurFreeNibbles) == 0) { + else { + pOutData[*nCurNibbleOffset] = (pOutData[*nCurNibbleOffset]) | (nNibbleValue & 0x0f); (*nCurNibbleOffset) = -1; } @@ -96,15 +92,17 @@ static inline int lzsa_get_literals_varlen_size_v2(const int nLength) { * * @param pOutData pointer to output buffer * @param nOutOffset current write index into output buffer + * @param nMaxOutDataSize maximum size of output buffer, in bytes + * @param nCurNibbleOffset write index into output buffer, of current byte being filled with nibbles * @param nLength literals length */ -static inline int lzsa_write_literals_varlen_v2(unsigned char *pOutData, int nOutOffset, const int nMaxOutDataSize, int *nCurNibbleOffset, int *nCurFreeNibbles, int nLength) { +static inline int lzsa_write_literals_varlen_v2(unsigned char *pOutData, int nOutOffset, const int nMaxOutDataSize, int *nCurNibbleOffset, int nLength) { if (nLength >= LITERALS_RUN_LEN_V2) { if (nLength < (LITERALS_RUN_LEN_V2 + 15)) { - nOutOffset = lzsa_write_nibble_v2(pOutData, nOutOffset, nMaxOutDataSize, nCurNibbleOffset, nCurFreeNibbles, nLength - LITERALS_RUN_LEN_V2); + nOutOffset = lzsa_write_nibble_v2(pOutData, nOutOffset, nMaxOutDataSize, nCurNibbleOffset, nLength - LITERALS_RUN_LEN_V2); } else { - nOutOffset = lzsa_write_nibble_v2(pOutData, nOutOffset, nMaxOutDataSize, nCurNibbleOffset, nCurFreeNibbles, 15); + nOutOffset = lzsa_write_nibble_v2(pOutData, nOutOffset, nMaxOutDataSize, nCurNibbleOffset, 15); if (nOutOffset < 0) return -1; if (nLength < 256) @@ -150,15 +148,17 @@ static inline int lzsa_get_match_varlen_size_v2(const int nLength) { * * @param pOutData pointer to output buffer * @param nOutOffset current write index into output buffer + * @param nMaxOutDataSize maximum size of output buffer, in bytes + * @param nCurNibbleOffset write index into output buffer, of current byte being filled with nibbles * @param nLength encoded match length (actual match length - MIN_MATCH_SIZE_V2) */ -static inline int lzsa_write_match_varlen_v2(unsigned char *pOutData, int nOutOffset, const int nMaxOutDataSize, int *nCurNibbleOffset, int *nCurFreeNibbles, int nLength) { 
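
Editorial aside on the nibble-writer change above: the rewritten lzsa_write_nibble_v2 no longer tracks a separate free-nibble count. When no nibble byte is pending, the value is stored straight into the high 4 bits of a freshly emitted byte and that byte's offset is remembered in nCurNibbleOffset; the next nibble is OR'd into the low 4 bits of the remembered byte and the pending offset is reset to -1. The sketch below is a minimal, self-contained C illustration of that packing idea only; emit_nibble, pending and the demo values are illustrative names, not part of the lzsa API, and unlike the real encoder it packs the two halves back to back (the real stream can emit literal or offset bytes between them, which is exactly why the byte's position is remembered instead of the byte being completed on the spot).

    /* Minimal sketch (not lzsa code): pack 4-bit values the way the rewritten
       lzsa_write_nibble_v2 does, i.e. the first nibble of a pair goes into the
       high half of a fresh byte whose position is remembered, and the second
       nibble is OR'd into the low half of that same byte. */
    #include <stdio.h>

    static int emit_nibble(unsigned char *out, int pos, int *pending, int nibble) {
        if (*pending == -1) {
            *pending = pos;                                  /* remember the byte being filled */
            out[pos++] = (unsigned char)(nibble << 4);       /* high half written first */
        }
        else {
            out[*pending] |= (unsigned char)(nibble & 0x0f); /* low half completes the byte */
            *pending = -1;                                   /* nothing pending any more */
        }
        return pos;
    }

    int main(void) {
        unsigned char buf[8];
        int pos = 0, pending = -1, i;
        const int demo[5] = { 0x3, 0xa, 0xf, 0x1, 0x7 };     /* five nibbles -> three bytes */

        for (i = 0; i < 5; i++)
            pos = emit_nibble(buf, pos, &pending, demo[i]);
        for (i = 0; i < pos; i++)
            printf("%02x ", buf[i]);                         /* prints: 3a f1 70 */
        printf("\n");
        return 0;
    }

An odd trailing nibble simply leaves the low half of its byte at zero, which lines up with the final lzsa_write_nibble_v2(..., 0) flush that the block writer performs when nCurNibbleOffset is still set at the end of a block.
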
+static inline int lzsa_write_match_varlen_v2(unsigned char *pOutData, int nOutOffset, const int nMaxOutDataSize, int *nCurNibbleOffset, int nLength) { if (nLength >= MATCH_RUN_LEN_V2) { if (nLength < (MATCH_RUN_LEN_V2 + 15)) { - nOutOffset = lzsa_write_nibble_v2(pOutData, nOutOffset, nMaxOutDataSize, nCurNibbleOffset, nCurFreeNibbles, nLength - MATCH_RUN_LEN_V2); + nOutOffset = lzsa_write_nibble_v2(pOutData, nOutOffset, nMaxOutDataSize, nCurNibbleOffset, nLength - MATCH_RUN_LEN_V2); } else { - nOutOffset = lzsa_write_nibble_v2(pOutData, nOutOffset, nMaxOutDataSize, nCurNibbleOffset, nCurFreeNibbles, 15); + nOutOffset = lzsa_write_nibble_v2(pOutData, nOutOffset, nMaxOutDataSize, nCurNibbleOffset, 15); if (nOutOffset < 0) return -1; if ((nLength + MIN_MATCH_SIZE_V2) < 256) @@ -183,58 +183,72 @@ static inline int lzsa_write_match_varlen_v2(unsigned char *pOutData, int nOutOf * @param nMatchOffset match offset to use as rep candidate * @param nStartOffset current offset in input window (typically the number of previously compressed bytes) * @param nEndOffset offset to end finding matches at (typically the size of the total input window in bytes - * @param nMatchesPerArrival number of arrivals to record per input buffer position * @param nDepth current insertion depth */ -static void lzsa_insert_forward_match_v2(lzsa_compressor *pCompressor, const unsigned char *pInWindow, const int i, const int nMatchOffset, const int nStartOffset, const int nEndOffset, const int nMatchesPerArrival, int nDepth) { - lzsa_arrival *arrival = pCompressor->arrival - (nStartOffset << MATCHES_PER_ARRIVAL_SHIFT); +static void lzsa_insert_forward_match_v2(lzsa_compressor *pCompressor, const unsigned char *pInWindow, const int i, const int nMatchOffset, const int nStartOffset, const int nEndOffset, int nDepth) { + lzsa_arrival *arrival = pCompressor->arrival + ((i - nStartOffset) << ARRIVALS_PER_POSITION_SHIFT); + const int *rle_len = (int*)pCompressor->intervals /* reuse */; + lzsa_match* visited = ((lzsa_match*)pCompressor->pos_data) - nStartOffset /* reuse */; int j; - if (nDepth >= 10) return; + for (j = 0; j < NARRIVALS_PER_POSITION_V2_BIG && arrival[j].from_slot; j++) { + int nRepOffset = arrival[j].rep_offset; - for (j = 0; j < nMatchesPerArrival && arrival[(i << MATCHES_PER_ARRIVAL_SHIFT) + j].from_slot; j++) { - int nRepOffset = arrival[(i << MATCHES_PER_ARRIVAL_SHIFT) + j].rep_offset; - - if (nMatchOffset != nRepOffset && nRepOffset && arrival[(i << MATCHES_PER_ARRIVAL_SHIFT) + j].rep_len >= MIN_MATCH_SIZE_V2) { - int nRepPos = arrival[(i << MATCHES_PER_ARRIVAL_SHIFT) + j].rep_pos; - int nRepLen = arrival[(i << MATCHES_PER_ARRIVAL_SHIFT) + j].rep_len; + if (nMatchOffset != nRepOffset && nRepOffset && arrival[j].rep_len >= MIN_MATCH_SIZE_V2) { + int nRepPos = arrival[j].rep_pos; + int nRepLen = arrival[j].rep_len; if (nRepPos > nMatchOffset && - (nRepPos - nMatchOffset + nRepLen) <= (nEndOffset - LAST_LITERALS) && - !memcmp(pInWindow + nRepPos - nRepOffset, pInWindow + nRepPos - nMatchOffset, nRepLen)) { - int nCurRepLen = nRepLen; - - int nMaxRepLen = nEndOffset - nRepPos; - if (nMaxRepLen > LCP_MAX) - nMaxRepLen = LCP_MAX; - while ((nCurRepLen + 8) < nMaxRepLen && !memcmp(pInWindow + nRepPos + nCurRepLen, pInWindow + nRepPos - nMatchOffset + nCurRepLen, 8)) - nCurRepLen += 8; - while ((nCurRepLen + 4) < nMaxRepLen && !memcmp(pInWindow + nRepPos + nCurRepLen, pInWindow + nRepPos - nMatchOffset + nCurRepLen, 4)) - nCurRepLen += 4; - while (nCurRepLen < nMaxRepLen && pInWindow[nRepPos + nCurRepLen] == 
pInWindow[nRepPos - nMatchOffset + nCurRepLen]) - nCurRepLen++; - - lzsa_match *fwd_match = pCompressor->match + ((nRepPos - nStartOffset) << MATCHES_PER_INDEX_SHIFT_V2); - int exists = 0; - int r; + (nRepPos + nRepLen) <= nEndOffset && + pCompressor->match[((nRepPos - nStartOffset) << MATCHES_PER_INDEX_SHIFT_V2) + NMATCHES_PER_INDEX_V2 - 1].length == 0) { - for (r = 0; r < NMATCHES_PER_INDEX_V2 && fwd_match[r].length >= MIN_MATCH_SIZE_V2; r++) { - if (fwd_match[r].offset == nMatchOffset) { - exists = 1; + if (visited[nRepPos].offset != nMatchOffset || visited[nRepPos].length > nRepLen) { + visited[nRepPos].offset = nMatchOffset; + visited[nRepPos].length = nRepLen; - if (fwd_match[r].length < nCurRepLen) { - fwd_match[r].length = nCurRepLen; - lzsa_insert_forward_match_v2(pCompressor, pInWindow, nRepPos, nMatchOffset, nStartOffset, nEndOffset, nMatchesPerArrival, nDepth + 1); - } - break; - } - } + if (pInWindow[nRepPos] == pInWindow[nRepPos - nMatchOffset]) { + int nLen0 = rle_len[nRepPos - nMatchOffset]; + int nLen1 = rle_len[nRepPos]; + int nMinLen = (nLen0 < nLen1) ? nLen0 : nLen1; + + if (nMinLen >= nRepLen || !memcmp(pInWindow + nRepPos + nMinLen, pInWindow + nRepPos + nMinLen - nMatchOffset, nRepLen - nMinLen)) { + visited[nRepPos].length = 0; - if (!exists && r < NMATCHES_PER_INDEX_V2) { - fwd_match[r].offset = nMatchOffset; - fwd_match[r].length = nCurRepLen; + lzsa_match* fwd_match = pCompressor->match + ((nRepPos - nStartOffset) << MATCHES_PER_INDEX_SHIFT_V2); + int r; + + for (r = 0; r < NMATCHES_PER_INDEX_V2 && fwd_match[r].length >= MIN_MATCH_SIZE_V2; r++) { + if (fwd_match[r].offset == nMatchOffset) { + r = NMATCHES_PER_INDEX_V2; + break; + } + } - lzsa_insert_forward_match_v2(pCompressor, pInWindow, nRepPos, nMatchOffset, nStartOffset, nEndOffset, nMatchesPerArrival, nDepth + 1); + if (r < NMATCHES_PER_INDEX_V2) { + int nMaxRepLen = nEndOffset - nRepPos; + if (nMaxRepLen > LCP_MAX) + nMaxRepLen = LCP_MAX; + int nCurRepLen = (nMinLen > nRepLen) ? 
nMinLen : nRepLen; + if (nCurRepLen > nMaxRepLen) + nCurRepLen = nMaxRepLen; + const unsigned char* pInWindowMax = pInWindow + nRepPos + nMaxRepLen; + const unsigned char* pInWindowAtRepPos = pInWindow + nRepPos + nCurRepLen; + while ((pInWindowAtRepPos + 8) < pInWindowMax && !memcmp(pInWindowAtRepPos, pInWindowAtRepPos - nMatchOffset, 8)) + pInWindowAtRepPos += 8; + while ((pInWindowAtRepPos + 4) < pInWindowMax && !memcmp(pInWindowAtRepPos, pInWindowAtRepPos - nMatchOffset, 4)) + pInWindowAtRepPos += 4; + while (pInWindowAtRepPos < pInWindowMax && pInWindowAtRepPos[0] == pInWindowAtRepPos[-nMatchOffset]) + pInWindowAtRepPos++; + + nCurRepLen = (int)(pInWindowAtRepPos - (pInWindow + nRepPos)); + fwd_match[r].offset = nMatchOffset; + fwd_match[r].length = nCurRepLen; + + if (nDepth < 9) + lzsa_insert_forward_match_v2(pCompressor, pInWindow, nRepPos, nMatchOffset, nStartOffset, nEndOffset, nDepth + 1); + } + } + } } } } @@ -251,33 +265,44 @@ static void lzsa_insert_forward_match_v2(lzsa_compressor *pCompressor, const uns * @param nEndOffset offset to end finding matches at (typically the size of the total input window in bytes * @param nReduce non-zero to reduce the number of tokens when the path costs are equal, zero not to * @param nInsertForwardReps non-zero to insert forward repmatch candidates, zero to use the previously inserted candidates - * @param nMatchesPerArrival number of arrivals to record per input buffer position + * @param nArrivalsPerPosition number of arrivals to record per input buffer position */ -static void lzsa_optimize_forward_v2(lzsa_compressor *pCompressor, const unsigned char *pInWindow, lzsa_match *pBestMatch, const int nStartOffset, const int nEndOffset, const int nReduce, const int nInsertForwardReps, const int nMatchesPerArrival) { - lzsa_arrival *arrival = pCompressor->arrival - (nStartOffset << MATCHES_PER_ARRIVAL_SHIFT); - const int nFavorRatio = (pCompressor->flags & LZSA_FLAG_FAVOR_RATIO) ? 1 : 0; +static void lzsa_optimize_forward_v2(lzsa_compressor *pCompressor, const unsigned char *pInWindow, lzsa_match *pBestMatch, const int nStartOffset, const int nEndOffset, const int nReduce, const int nInsertForwardReps, const int nArrivalsPerPosition) { + lzsa_arrival *arrival = pCompressor->arrival - (nStartOffset << ARRIVALS_PER_POSITION_SHIFT); + const int *rle_len = (int*)pCompressor->intervals /* reuse */; + lzsa_match *visited = ((lzsa_match*)pCompressor->pos_data) - nStartOffset /* reuse */; + char *nRepSlotHandledMask = pCompressor->rep_slot_handled_mask; + char *nRepLenHandledMask = pCompressor->rep_len_handled_mask; + const int nModeSwitchPenalty = (pCompressor->flags & LZSA_FLAG_FAVOR_RATIO) ? 0 : MODESWITCH_PENALTY; const int nMinMatchSize = pCompressor->min_match_size; const int nDisableScore = nReduce ? 0 : (2 * BLOCK_SIZE); - const int nLeaveAloneMatchSize = (nMatchesPerArrival == NMATCHES_PER_ARRIVAL_V2_SMALL) ? LEAVE_ALONE_MATCH_SIZE_SMALL : LEAVE_ALONE_MATCH_SIZE; + const int nMaxRepInsertedLen = nReduce ? LEAVE_ALONE_MATCH_SIZE : 0; + const int nLeaveAloneMatchSize = (nArrivalsPerPosition == NARRIVALS_PER_POSITION_V2_SMALL) ? 
LEAVE_ALONE_MATCH_SIZE_SMALL : LEAVE_ALONE_MATCH_SIZE; int i, j, n; if ((nEndOffset - nStartOffset) > BLOCK_SIZE) return; - memset(arrival + (nStartOffset << MATCHES_PER_ARRIVAL_SHIFT), 0, sizeof(lzsa_arrival) * ((nEndOffset - nStartOffset + 1) << MATCHES_PER_ARRIVAL_SHIFT)); + memset(arrival + (nStartOffset << ARRIVALS_PER_POSITION_SHIFT), 0, sizeof(lzsa_arrival) * ((nEndOffset - nStartOffset + 1) << ARRIVALS_PER_POSITION_SHIFT)); - for (i = (nStartOffset << MATCHES_PER_ARRIVAL_SHIFT); i != ((nEndOffset + 1) << MATCHES_PER_ARRIVAL_SHIFT); i++) { + for (i = (nStartOffset << ARRIVALS_PER_POSITION_SHIFT); i != ((nEndOffset + 1) << ARRIVALS_PER_POSITION_SHIFT); i++) { arrival[i].cost = 0x40000000; } - arrival[nStartOffset << MATCHES_PER_ARRIVAL_SHIFT].from_slot = -1; + arrival[nStartOffset << ARRIVALS_PER_POSITION_SHIFT].from_slot = -1; + + if (nInsertForwardReps) { + memset(visited + nStartOffset, 0, (nEndOffset - nStartOffset) * sizeof(lzsa_match)); + } for (i = nStartOffset; i != nEndOffset; i++) { + lzsa_arrival *cur_arrival = &arrival[i << ARRIVALS_PER_POSITION_SHIFT]; int m; - for (j = 0; j < nMatchesPerArrival && arrival[(i << MATCHES_PER_ARRIVAL_SHIFT) + j].from_slot; j++) { - const int nPrevCost = arrival[(i << MATCHES_PER_ARRIVAL_SHIFT) + j].cost & 0x3fffffff; + for (j = 0; j < nArrivalsPerPosition && cur_arrival[j].from_slot; j++) { + const int nPrevCost = cur_arrival[j].cost & 0x3fffffff; int nCodingChoiceCost = nPrevCost + 8 /* literal */; - int nNumLiterals = arrival[(i << MATCHES_PER_ARRIVAL_SHIFT) + j].num_literals + 1; + int nScore = cur_arrival[j].score + 1; + int nNumLiterals = cur_arrival[j].num_literals + 1; if (nNumLiterals == LITERALS_RUN_LEN_V2) { nCodingChoiceCost += 4; @@ -289,52 +314,70 @@ static void lzsa_optimize_forward_v2(lzsa_compressor *pCompressor, const unsigne nCodingChoiceCost += 16; } - if (!nFavorRatio && nNumLiterals == 1) - nCodingChoiceCost += MODESWITCH_PENALTY; + if (nNumLiterals == 1) + nCodingChoiceCost += nModeSwitchPenalty; - lzsa_arrival *pDestSlots = &arrival[(i + 1) << MATCHES_PER_ARRIVAL_SHIFT]; - if (nCodingChoiceCost <= pDestSlots[nMatchesPerArrival - 1].cost) { + lzsa_arrival *pDestSlots = &cur_arrival[1 << ARRIVALS_PER_POSITION_SHIFT]; + if (nCodingChoiceCost < pDestSlots[nArrivalsPerPosition - 1].cost || + (nCodingChoiceCost == pDestSlots[nArrivalsPerPosition - 1].cost && nScore < (pDestSlots[nArrivalsPerPosition - 1].score + nDisableScore))) { + int nRepOffset = cur_arrival[j].rep_offset; int exists = 0; + for (n = 0; - n < nMatchesPerArrival && pDestSlots[n].cost <= nCodingChoiceCost; + n < nArrivalsPerPosition && pDestSlots[n].cost < nCodingChoiceCost; n++) { - if (pDestSlots[n].rep_offset == arrival[(i << MATCHES_PER_ARRIVAL_SHIFT) + j].rep_offset) { + if (pDestSlots[n].rep_offset == nRepOffset) { exists = 1; break; } } if (!exists) { - int nScore = arrival[(i << MATCHES_PER_ARRIVAL_SHIFT) + j].score + 1; - for (n = 0; n < nMatchesPerArrival; n++) { - lzsa_arrival *pDestArrival = &pDestSlots[n]; - if (nCodingChoiceCost < pDestArrival->cost || - (nCodingChoiceCost == pDestArrival->cost && nScore < (pDestArrival->score + nDisableScore))) { + for (; + n < nArrivalsPerPosition && pDestSlots[n].cost == nCodingChoiceCost && nScore >= (pDestSlots[n].score + nDisableScore); + n++) { + if (pDestSlots[n].rep_offset == nRepOffset) { + exists = 1; + break; + } + } + + if (!exists) { + if (n < nArrivalsPerPosition) { + int nn; + + for (nn = n; + nn < nArrivalsPerPosition && pDestSlots[nn].cost == nCodingChoiceCost; + nn++) { + if 
(pDestSlots[nn].rep_offset == nRepOffset) { + exists = 1; + break; + } + } - if (pDestArrival->from_slot) { + if (!exists) { int z; - for (z = n; z < nMatchesPerArrival - 1; z++) { - if (pDestSlots[z].rep_offset == arrival[(i << MATCHES_PER_ARRIVAL_SHIFT) + j].rep_offset) + for (z = n; z < nArrivalsPerPosition - 1 && pDestSlots[z].from_slot; z++) { + if (pDestSlots[z].rep_offset == nRepOffset) break; } memmove(&pDestSlots[n + 1], &pDestSlots[n], sizeof(lzsa_arrival) * (z - n)); - } - pDestArrival->cost = nCodingChoiceCost; - pDestArrival->from_pos = i; - pDestArrival->from_slot = j + 1; - pDestArrival->match_offset = 0; - pDestArrival->match_len = 0; - pDestArrival->num_literals = nNumLiterals; - pDestArrival->score = nScore; - pDestArrival->rep_offset = arrival[(i << MATCHES_PER_ARRIVAL_SHIFT) + j].rep_offset; - pDestArrival->rep_pos = arrival[(i << MATCHES_PER_ARRIVAL_SHIFT) + j].rep_pos; - pDestArrival->rep_len = arrival[(i << MATCHES_PER_ARRIVAL_SHIFT) + j].rep_len; - break; + lzsa_arrival* pDestArrival = &pDestSlots[n]; + pDestArrival->cost = nCodingChoiceCost; + pDestArrival->from_pos = i; + pDestArrival->from_slot = j + 1; + pDestArrival->match_len = 0; + pDestArrival->num_literals = nNumLiterals; + pDestArrival->score = nScore; + pDestArrival->rep_offset = nRepOffset; + pDestArrival->rep_pos = cur_arrival[j].rep_pos; + pDestArrival->rep_len = cur_arrival[j].rep_len; + } } } } @@ -342,203 +385,276 @@ static void lzsa_optimize_forward_v2(lzsa_compressor *pCompressor, const unsigne } lzsa_match *match = pCompressor->match + ((i - nStartOffset) << MATCHES_PER_INDEX_SHIFT_V2); + int nNumArrivalsForThisPos = j, nMinOverallRepLen = 0, nMaxOverallRepLen = 0; + + int nRepMatchArrivalIdxAndLen[(NARRIVALS_PER_POSITION_V2_BIG * 2) + 1]; + int nNumRepMatchArrivals = 0; + + int nMaxRepLenForPos = nEndOffset - i; + if (nMaxRepLenForPos > LCP_MAX) + nMaxRepLenForPos = LCP_MAX; + const unsigned char* pInWindowStart = pInWindow + i; + const unsigned char* pInWindowMax = pInWindowStart + nMaxRepLenForPos; + + for (j = 0; j < nNumArrivalsForThisPos && (i + MIN_MATCH_SIZE_V2) <= nEndOffset; j++) { + int nRepOffset = cur_arrival[j].rep_offset; + + if (nRepOffset) { + if (i > nRepOffset) { + if (pInWindow[i] == pInWindow[i - nRepOffset]) { + const unsigned char* pInWindowAtPos; + + int nLen0 = rle_len[i - nRepOffset]; + int nLen1 = rle_len[i]; + int nMinLen = (nLen0 < nLen1) ? 
nLen0 : nLen1; + + if (nMinLen > nMaxRepLenForPos) + nMinLen = nMaxRepLenForPos; + pInWindowAtPos = pInWindowStart + nMinLen; + + while ((pInWindowAtPos + 8) < pInWindowMax && !memcmp(pInWindowAtPos - nRepOffset, pInWindowAtPos, 8)) + pInWindowAtPos += 8; + while ((pInWindowAtPos + 4) < pInWindowMax && !memcmp(pInWindowAtPos - nRepOffset, pInWindowAtPos, 4)) + pInWindowAtPos += 4; + while (pInWindowAtPos < pInWindowMax && pInWindowAtPos[-nRepOffset] == pInWindowAtPos[0]) + pInWindowAtPos++; + int nCurRepLen = (int)(pInWindowAtPos - pInWindowStart); + + if (nCurRepLen >= MIN_MATCH_SIZE_V2) { + if (nMaxOverallRepLen < nCurRepLen) + nMaxOverallRepLen = nCurRepLen; + nRepMatchArrivalIdxAndLen[nNumRepMatchArrivals++] = j; + nRepMatchArrivalIdxAndLen[nNumRepMatchArrivals++] = nCurRepLen; + } + } + } + } + } + nRepMatchArrivalIdxAndLen[nNumRepMatchArrivals] = -1; - int nMinRepLen[NMATCHES_PER_ARRIVAL_V2_BIG]; - memset(nMinRepLen, 0, nMatchesPerArrival * sizeof(int)); + if (!nReduce) { + memset(nRepSlotHandledMask, 0, nArrivalsPerPosition * ((LCP_MAX + 1) / 8) * sizeof(char)); + } + memset(nRepLenHandledMask, 0, ((LCP_MAX + 1) / 8) * sizeof(char)); for (m = 0; m < NMATCHES_PER_INDEX_V2 && match[m].length; m++) { int nMatchLen = match[m].length & 0x7fff; int nMatchOffset = match[m].offset; - int nScorePenalty = ((match[m].length & 0x8000) >> 15); + int nScorePenalty = 3 + ((match[m].length & 0x8000) >> 15); int nNoRepmatchOffsetCost = (nMatchOffset <= 32) ? 4 : ((nMatchOffset <= 512) ? 8 : ((nMatchOffset <= (8192 + 512)) ? 12 : 16)); int nStartingMatchLen, k; - int nMaxRepLen[NMATCHES_PER_ARRIVAL_V2_BIG]; - if ((i + nMatchLen) > (nEndOffset - LAST_LITERALS)) - nMatchLen = nEndOffset - LAST_LITERALS - i; + if ((i + nMatchLen) > nEndOffset) + nMatchLen = nEndOffset - i; - for (j = 0; j < nMatchesPerArrival && arrival[(i << MATCHES_PER_ARRIVAL_SHIFT) + j].from_slot; j++) { - int nRepOffset = arrival[(i << MATCHES_PER_ARRIVAL_SHIFT) + j].rep_offset; - int nCurMaxRepLen = 0; + if (nInsertForwardReps) + lzsa_insert_forward_match_v2(pCompressor, pInWindow, i, nMatchOffset, nStartOffset, nEndOffset, 0); - if (nRepOffset) { - if (nMatchOffset == nRepOffset) - nCurMaxRepLen = nMatchLen; - else { - if (i > nRepOffset && - (i - nRepOffset + nMatchLen) <= (nEndOffset - LAST_LITERALS)) { - nCurMaxRepLen = nMinRepLen[j]; - while ((nCurMaxRepLen + 8) < nMatchLen && !memcmp(pInWindow + i - nRepOffset + nCurMaxRepLen, pInWindow + i + nCurMaxRepLen, 8)) - nCurMaxRepLen += 8; - while ((nCurMaxRepLen + 4) < nMatchLen && !memcmp(pInWindow + i - nRepOffset + nCurMaxRepLen, pInWindow + i + nCurMaxRepLen, 4)) - nCurMaxRepLen += 4; - while (nCurMaxRepLen < nMatchLen && pInWindow[i - nRepOffset + nCurMaxRepLen] == pInWindow[i + nCurMaxRepLen]) - nCurMaxRepLen++; - nMinRepLen[j] = nCurMaxRepLen; - } - } - } + int nNonRepMatchArrivalIdx = -1; + for (j = 0; j < nNumArrivalsForThisPos; j++) { + int nRepOffset = cur_arrival[j].rep_offset; - nMaxRepLen[j] = nCurMaxRepLen; + if (nMatchOffset != nRepOffset) { + nNonRepMatchArrivalIdx = j; + break; + } } - while (j < nMatchesPerArrival) - nMaxRepLen[j++] = 0; - if (nInsertForwardReps) - lzsa_insert_forward_match_v2(pCompressor, pInWindow, i, nMatchOffset, nStartOffset, nEndOffset, nMatchesPerArrival, 0); - - int nMatchLenCost = 0; + int nMatchLenCost; if (nMatchLen >= nLeaveAloneMatchSize) { nStartingMatchLen = nMatchLen; - nMatchLenCost = 4 + 24; + nMatchLenCost = 4 + 24 + 8 /* token */; } else { nStartingMatchLen = nMinMatchSize; - nMatchLenCost = 0; + nMatchLenCost = 0 + 8 /* 
token */; } for (k = nStartingMatchLen; k <= nMatchLen; k++) { if (k == (MATCH_RUN_LEN_V2 + MIN_MATCH_SIZE_V2)) { - nMatchLenCost = 4; + nMatchLenCost = 4 + 8 /* token */; } else { if (k == (MATCH_RUN_LEN_V2 + 15 + MIN_MATCH_SIZE_V2)) - nMatchLenCost = 4 + 8; + nMatchLenCost = 4 + 8 + 8 /* token */; else { if (k == 256) - nMatchLenCost = 4 + 24; + nMatchLenCost = 4 + 24 + 8 /* token */; } } - lzsa_arrival *pDestSlots = &arrival[(i + k) << MATCHES_PER_ARRIVAL_SHIFT]; - int nInsertedNoRepMatchCandidate = 0; + lzsa_arrival *pDestSlots = &cur_arrival[k << ARRIVALS_PER_POSITION_SHIFT]; - for (j = 0; j < nMatchesPerArrival && arrival[(i << MATCHES_PER_ARRIVAL_SHIFT) + j].from_slot; j++) { - const int nPrevCost = arrival[(i << MATCHES_PER_ARRIVAL_SHIFT) + j].cost & 0x3fffffff; - int nRepCodingChoiceCost = nPrevCost + 8 /* token */ /* the actual cost of the literals themselves accumulates up the chain */ + nMatchLenCost; + /* Insert non-repmatch candidate */ - if (nRepCodingChoiceCost <= pDestSlots[nMatchesPerArrival - 1].cost) { - int nRepOffset = arrival[(i << MATCHES_PER_ARRIVAL_SHIFT) + j].rep_offset; + if (nNonRepMatchArrivalIdx >= 0) { + const int nPrevCost = cur_arrival[nNonRepMatchArrivalIdx].cost & 0x3fffffff; + int nCodingChoiceCost = nPrevCost /* the actual cost of the literals themselves accumulates up the chain */ + nMatchLenCost + nNoRepmatchOffsetCost; - if (nMatchOffset != nRepOffset && !nInsertedNoRepMatchCandidate) { - int nCodingChoiceCost = nRepCodingChoiceCost + nNoRepmatchOffsetCost; + if (!cur_arrival[nNonRepMatchArrivalIdx].num_literals) + nCodingChoiceCost += nModeSwitchPenalty; - if (!nFavorRatio && !arrival[(i << MATCHES_PER_ARRIVAL_SHIFT) + j].num_literals) - nCodingChoiceCost += MODESWITCH_PENALTY; + int nScore = cur_arrival[nNonRepMatchArrivalIdx].score + nScorePenalty; + if (nCodingChoiceCost < pDestSlots[nArrivalsPerPosition - 2].cost || + (nCodingChoiceCost == pDestSlots[nArrivalsPerPosition - 2].cost && nScore < (pDestSlots[nArrivalsPerPosition - 2].score + nDisableScore))) { + int exists = 0; - if (nCodingChoiceCost <= pDestSlots[nMatchesPerArrival - 1].cost) { - int exists = 0; - int nScore = arrival[(i << MATCHES_PER_ARRIVAL_SHIFT) + j].score + 3 + nScorePenalty; + for (n = 0; + n < nArrivalsPerPosition && pDestSlots[n].cost < nCodingChoiceCost; + n++) { + if (pDestSlots[n].rep_offset == nMatchOffset) { + exists = 1; + break; + } + } - for (n = 0; - n < nMatchesPerArrival && pDestSlots[n].cost <= nCodingChoiceCost; - n++) { - if (pDestSlots[n].rep_offset == nMatchOffset && - (!nInsertForwardReps || pDestSlots[n].cost != nCodingChoiceCost || pDestSlots[n].rep_pos >= i || nScore >= (pDestSlots[n].score + nDisableScore) || - pDestSlots[nMatchesPerArrival - 1].from_slot)) { - exists = 1; - break; - } + if (!exists) { + for (; + n < nArrivalsPerPosition && pDestSlots[n].cost == nCodingChoiceCost && nScore >= (pDestSlots[n].score + nDisableScore); + n++) { + if (pDestSlots[n].rep_offset == nMatchOffset) { + exists = 1; + break; } + } - if (!exists) { - for (n = 0; n < nMatchesPerArrival - 1; n++) { - lzsa_arrival *pDestArrival = &pDestSlots[n]; + if (!exists) { + if (n < nArrivalsPerPosition - 1) { + int nn; + + for (nn = n; + nn < nArrivalsPerPosition && pDestSlots[nn].cost == nCodingChoiceCost; + nn++) { + if (pDestSlots[nn].rep_offset == nMatchOffset && + (!nInsertForwardReps || pDestSlots[nn].rep_pos >= i || + pDestSlots[nArrivalsPerPosition - 1].from_slot)) { + exists = 1; + break; + } + } - if (nCodingChoiceCost < pDestArrival->cost || - (nCodingChoiceCost == 
pDestArrival->cost && nScore < (pDestArrival->score + nDisableScore))) { - if (pDestArrival->from_slot) { - int z; + if (!exists) { + int z; - for (z = n; z < nMatchesPerArrival - 1; z++) { - if (pDestSlots[z].rep_offset == nMatchOffset) - break; - } + for (z = n; z < nArrivalsPerPosition - 1 && pDestSlots[z].from_slot; z++) { + if (pDestSlots[z].rep_offset == nMatchOffset) + break; + } - if (z == (nMatchesPerArrival - 1) && pDestSlots[z].from_slot && pDestSlots[z].match_len < MIN_MATCH_SIZE_V2) - z--; + if (z == (nArrivalsPerPosition - 1) && pDestSlots[z].from_slot && pDestSlots[z].match_len < MIN_MATCH_SIZE_V2) + z--; - memmove(&pDestSlots[n + 1], - &pDestSlots[n], - sizeof(lzsa_arrival) * (z - n)); - } + memmove(&pDestSlots[n + 1], + &pDestSlots[n], + sizeof(lzsa_arrival) * (z - n)); - pDestArrival->cost = nCodingChoiceCost; - pDestArrival->from_pos = i; - pDestArrival->from_slot = j + 1; - pDestArrival->match_offset = nMatchOffset; - pDestArrival->match_len = k; - pDestArrival->num_literals = 0; - pDestArrival->score = nScore; - pDestArrival->rep_offset = nMatchOffset; - pDestArrival->rep_pos = i; - pDestArrival->rep_len = k; - nInsertedNoRepMatchCandidate = 1; - break; - } + lzsa_arrival* pDestArrival = &pDestSlots[n]; + pDestArrival->cost = nCodingChoiceCost; + pDestArrival->from_pos = i; + pDestArrival->from_slot = nNonRepMatchArrivalIdx + 1; + pDestArrival->match_len = k; + pDestArrival->num_literals = 0; + pDestArrival->score = nScore; + pDestArrival->rep_offset = nMatchOffset; + pDestArrival->rep_pos = i; + pDestArrival->rep_len = k; + nRepLenHandledMask[k >> 3] &= ~(1 << (k & 7)); } } } } + } + } - /* If this coding choice doesn't rep-match, see if we still get a match by using the current repmatch offset for this arrival. This can occur (and not have the - * matchfinder offer the offset in the first place, or have too many choices with the same cost to retain the repmatchable offset) when compressing regions - * of identical bytes, for instance. Checking for this provides a big compression win on some files. */ + /* Insert repmatch candidates */ - if (nMaxRepLen[j] >= k) { - int exists = 0; + if (k > nMinOverallRepLen && k <= nMaxOverallRepLen && (nRepLenHandledMask[k >> 3] & (1 << (k & 7))) == 0) { + int nCurRepMatchArrival; - /* A match is possible at the rep offset; insert the extra coding choice. 
*/ + nRepLenHandledMask[k >> 3] |= 1 << (k & 7); - for (n = 0; - n < nMatchesPerArrival && pDestSlots[n].cost <= nRepCodingChoiceCost; - n++) { - if (pDestSlots[n].rep_offset == nRepOffset) { - exists = 1; - break; - } - } + for (nCurRepMatchArrival = 0; (j = nRepMatchArrivalIdxAndLen[nCurRepMatchArrival]) >= 0; nCurRepMatchArrival += 2) { + int nMaskOffset = (j << 7) + (k >> 3); + if (nRepMatchArrivalIdxAndLen[nCurRepMatchArrival + 1] >= k && (nReduce || !(nRepSlotHandledMask[nMaskOffset] & (1 << (k & 7))))) { + const int nPrevCost = cur_arrival[j].cost & 0x3fffffff; + int nRepCodingChoiceCost = nPrevCost /* the actual cost of the literals themselves accumulates up the chain */ + nMatchLenCost; + int nScore = cur_arrival[j].score + 2; - if (!exists) { - int nScore = arrival[(i << MATCHES_PER_ARRIVAL_SHIFT) + j].score + 2; + if (nRepCodingChoiceCost < pDestSlots[nArrivalsPerPosition - 1].cost || + (nRepCodingChoiceCost == pDestSlots[nArrivalsPerPosition - 1].cost && nScore < (pDestSlots[nArrivalsPerPosition - 1].score + nDisableScore))) { + int nRepOffset = cur_arrival[j].rep_offset; + int exists = 0; - for (n = 0; n < nMatchesPerArrival; n++) { - lzsa_arrival *pDestArrival = &pDestSlots[n]; + for (n = 0; + n < nArrivalsPerPosition && pDestSlots[n].cost < nRepCodingChoiceCost; + n++) { + if (pDestSlots[n].rep_offset == nRepOffset) { + exists = 1; + if (!nReduce) + nRepSlotHandledMask[nMaskOffset] |= 1 << (k & 7); + break; + } + } - if (nRepCodingChoiceCost < pDestArrival->cost || - (nRepCodingChoiceCost == pDestArrival->cost && nScore < (pDestArrival->score + nDisableScore))) { - if (pDestArrival->from_slot) { - int z; + if (!exists) { + for (; + n < nArrivalsPerPosition && pDestSlots[n].cost == nRepCodingChoiceCost && nScore >= (pDestSlots[n].score + nDisableScore); + n++) { + if (pDestSlots[n].rep_offset == nRepOffset) { + exists = 1; + break; + } + } - for (z = n; z < nMatchesPerArrival - 1; z++) { - if (pDestSlots[z].rep_offset == nRepOffset) + if (!exists) { + if (n < nArrivalsPerPosition) { + int nn; + + for (nn = n; + nn < nArrivalsPerPosition && pDestSlots[nn].cost == nRepCodingChoiceCost; + nn++) { + if (pDestSlots[nn].rep_offset == nRepOffset) { + exists = 1; break; + } } - memmove(&pDestSlots[n + 1], - &pDestSlots[n], - sizeof(lzsa_arrival) * (z - n)); - } + if (!exists) { + int z; - pDestArrival->cost = nRepCodingChoiceCost; - pDestArrival->from_pos = i; - pDestArrival->from_slot = j + 1; - pDestArrival->match_offset = nRepOffset; - pDestArrival->match_len = k; - pDestArrival->num_literals = 0; - pDestArrival->score = nScore; - pDestArrival->rep_offset = nRepOffset; - pDestArrival->rep_pos = i; - pDestArrival->rep_len = k; - break; + for (z = n; z < nArrivalsPerPosition - 1 && pDestSlots[z].from_slot; z++) { + if (pDestSlots[z].rep_offset == nRepOffset) + break; + } + + memmove(&pDestSlots[n + 1], + &pDestSlots[n], + sizeof(lzsa_arrival) * (z - n)); + + lzsa_arrival* pDestArrival = &pDestSlots[n]; + pDestArrival->cost = nRepCodingChoiceCost; + pDestArrival->from_pos = i; + pDestArrival->from_slot = j + 1; + pDestArrival->match_len = k; + pDestArrival->num_literals = 0; + pDestArrival->score = nScore; + pDestArrival->rep_offset = nRepOffset; + pDestArrival->rep_pos = i; + pDestArrival->rep_len = k; + nRepLenHandledMask[k >> 3] &= ~(1 << (k & 7)); + } + } } } } + else { + break; + } } } - else { - break; - } + + if (k < nMaxRepInsertedLen) + nMinOverallRepLen = k; } } @@ -547,13 +663,16 @@ static void lzsa_optimize_forward_v2(lzsa_compressor *pCompressor, const unsigne } } - 
lzsa_arrival *end_arrival = &arrival[(i << MATCHES_PER_ARRIVAL_SHIFT) + 0]; + lzsa_arrival *end_arrival = &arrival[(i << ARRIVALS_PER_POSITION_SHIFT) + 0]; while (end_arrival->from_slot > 0 && end_arrival->from_pos >= 0) { if (end_arrival->from_pos >= nEndOffset) return; pBestMatch[end_arrival->from_pos].length = end_arrival->match_len; - pBestMatch[end_arrival->from_pos].offset = end_arrival->match_offset; - end_arrival = &arrival[(end_arrival->from_pos << MATCHES_PER_ARRIVAL_SHIFT) + (end_arrival->from_slot - 1)]; + if (end_arrival->match_len) + pBestMatch[end_arrival->from_pos].offset = end_arrival->rep_offset; + else + pBestMatch[end_arrival->from_pos].offset = 0; + end_arrival = &arrival[(end_arrival->from_pos << ARRIVALS_PER_POSITION_SHIFT) + (end_arrival->from_slot - 1)]; } } @@ -582,12 +701,12 @@ static int lzsa_optimize_command_count_v2(lzsa_compressor *pCompressor, const un lzsa_match *pMatch = pBestMatch + i; if (pMatch->length == 0 && - (i + 1) < (nEndOffset - LAST_LITERALS) && + (i + 1) < nEndOffset && pBestMatch[i + 1].length >= MIN_MATCH_SIZE_V2 && pBestMatch[i + 1].length < MAX_VARLEN && pBestMatch[i + 1].offset && i >= pBestMatch[i + 1].offset && - (i + pBestMatch[i + 1].length + 1) <= (nEndOffset - LAST_LITERALS) && + (i + pBestMatch[i + 1].length + 1) <= nEndOffset && !memcmp(pInWindow + i - (pBestMatch[i + 1].offset), pInWindow + i, pBestMatch[i + 1].length + 1)) { int nCurLenSize = lzsa_get_match_varlen_size_v2(pBestMatch[i + 1].length - MIN_MATCH_SIZE_V2); int nReducedLenSize = lzsa_get_match_varlen_size_v2(pBestMatch[i + 1].length + 1 - MIN_MATCH_SIZE_V2); @@ -623,7 +742,7 @@ static int lzsa_optimize_command_count_v2(lzsa_compressor *pCompressor, const un * matching large regions of identical bytes for instance, where there are too many offsets to be considered by the parser, and when not compressing to favor the * ratio (the forward arrivals parser already has this covered). */ if (i > nRepMatchOffset && - (i - nRepMatchOffset + pMatch->length) <= (nEndOffset - LAST_LITERALS) && + (i - nRepMatchOffset + pMatch->length) <= nEndOffset && !memcmp(pInWindow + i - nRepMatchOffset, pInWindow + i - pMatch->offset, pMatch->length)) { pMatch->offset = nRepMatchOffset; nDidReduce = 1; @@ -632,7 +751,7 @@ static int lzsa_optimize_command_count_v2(lzsa_compressor *pCompressor, const un if (pBestMatch[nNextIndex].offset && pMatch->offset != pBestMatch[nNextIndex].offset && nRepMatchOffset != pBestMatch[nNextIndex].offset) { /* Otherwise, try to gain a match forward as well */ - if (i > pBestMatch[nNextIndex].offset && (i - pBestMatch[nNextIndex].offset + pMatch->length) <= (nEndOffset - LAST_LITERALS)) { + if (i > pBestMatch[nNextIndex].offset && (i - pBestMatch[nNextIndex].offset + pMatch->length) <= nEndOffset) { int nMaxLen = 0; while (nMaxLen < pMatch->length && pInWindow[i - pBestMatch[nNextIndex].offset + nMaxLen] == pInWindow[i - pMatch->offset + nMaxLen]) nMaxLen++; @@ -675,20 +794,20 @@ static int lzsa_optimize_command_count_v2(lzsa_compressor *pCompressor, const un nCurCommandSize += (pMatch->offset <= 32) ? 4 : ((pMatch->offset <= 512) ? 8 : ((pMatch->offset <= (8192 + 512)) ? 
12 : 16));

             /* Calculate the next command's current cost */
-            int nNextCommandSize = 8 /* token */ + lzsa_get_literals_varlen_size_v2(nNextLiterals) + (nNextLiterals << 3) + lzsa_get_match_varlen_size_v2(pBestMatch[nNextIndex].length - MIN_MATCH_SIZE_V2);
+            int nNextCommandSize = 8 /* token */ + lzsa_get_literals_varlen_size_v2(nNextLiterals) + /* (nNextLiterals << 3) + */ lzsa_get_match_varlen_size_v2(pBestMatch[nNextIndex].length - MIN_MATCH_SIZE_V2);
             if (pBestMatch[nNextIndex].offset != pMatch->offset)
                nNextCommandSize += (pBestMatch[nNextIndex].offset <= 32) ? 4 : ((pBestMatch[nNextIndex].offset <= 512) ? 8 : ((pBestMatch[nNextIndex].offset <= (8192 + 512)) ? 12 : 16));

             int nOriginalCombinedCommandSize = nCurCommandSize + nNextCommandSize;

             /* Calculate the cost of replacing this match command by literals + the next command with the cost of encoding these literals (excluding 'nNumLiterals' bytes) */
-            int nReducedCommandSize = (pMatch->length << 3) + 8 /* token */ + lzsa_get_literals_varlen_size_v2(nNumLiterals + pMatch->length + nNextLiterals) + (nNextLiterals << 3) + lzsa_get_match_varlen_size_v2(pBestMatch[nNextIndex].length - MIN_MATCH_SIZE_V2);
+            int nReducedCommandSize = (pMatch->length << 3) + 8 /* token */ + lzsa_get_literals_varlen_size_v2(nNumLiterals + pMatch->length + nNextLiterals) + /* (nNextLiterals << 3) + */ lzsa_get_match_varlen_size_v2(pBestMatch[nNextIndex].length - MIN_MATCH_SIZE_V2);
             if (pBestMatch[nNextIndex].offset != nRepMatchOffset)
                nReducedCommandSize += (pBestMatch[nNextIndex].offset <= 32) ? 4 : ((pBestMatch[nNextIndex].offset <= 512) ? 8 : ((pBestMatch[nNextIndex].offset <= (8192 + 512)) ? 12 : 16));

             int nReplaceRepOffset = 0;
             if (nRepMatchOffset && nRepMatchOffset != nPrevRepMatchOffset && nRepMatchLen >= MIN_MATCH_SIZE_V2 && nRepMatchOffset != pBestMatch[nNextIndex].offset && nRepIndex > pBestMatch[nNextIndex].offset &&
-               (nRepIndex - pBestMatch[nNextIndex].offset + nRepMatchLen) <= (nEndOffset - LAST_LITERALS) &&
+               (nRepIndex - pBestMatch[nNextIndex].offset + nRepMatchLen) <= nEndOffset &&
                !memcmp(pInWindow + nRepIndex - nRepMatchOffset, pInWindow + nRepIndex - pBestMatch[nNextIndex].offset, nRepMatchLen)) {
                /* Replacing this match command by literals would let us create a repmatch */
                nReplaceRepOffset = 1;
@@ -729,26 +848,30 @@ static int lzsa_optimize_command_count_v2(lzsa_compressor *pCompressor, const un
             pBestMatch[i + pMatch->length].length)) {
                int nNextIndex = i + pMatch->length;
-               int nNextLiterals = 0;
                while (nNextIndex < nEndOffset && pBestMatch[nNextIndex].length < MIN_MATCH_SIZE_V2) {
-                  nNextLiterals++;
                   nNextIndex++;
                }
+               int nNextOffset;
+               if (nNextIndex < nEndOffset)
+                  nNextOffset = pBestMatch[nNextIndex].offset;
+               else
+                  nNextOffset = 0;
+
                int nCurPartialSize = lzsa_get_match_varlen_size_v2(pMatch->length - MIN_MATCH_SIZE_V2);
-               nCurPartialSize += 8 /* token */ + lzsa_get_literals_varlen_size_v2(0) + lzsa_get_match_varlen_size_v2(pBestMatch[i + pMatch->length].length - MIN_MATCH_SIZE_V2);
+               nCurPartialSize += 8 /* token */ + /* lzsa_get_literals_varlen_size_v2(0) + */ lzsa_get_match_varlen_size_v2(pBestMatch[i + pMatch->length].length - MIN_MATCH_SIZE_V2);
                if (pBestMatch[i + pMatch->length].offset != pMatch->offset)
                   nCurPartialSize += (pBestMatch[i + pMatch->length].offset <= 32) ? 4 : ((pBestMatch[i + pMatch->length].offset <= 512) ? 8 : ((pBestMatch[i + pMatch->length].offset <= (8192 + 512)) ? 12 : 16));
-               if (pBestMatch[nNextIndex].offset != pBestMatch[i + pMatch->length].offset)
-                  nCurPartialSize += (pBestMatch[nNextIndex].offset <= 32) ? 4 : ((pBestMatch[nNextIndex].offset <= 512) ? 8 : ((pBestMatch[nNextIndex].offset <= (8192 + 512)) ? 12 : 16));
+               if (nNextOffset != pBestMatch[i + pMatch->length].offset)
+                  nCurPartialSize += (nNextOffset <= 32) ? 4 : ((nNextOffset <= 512) ? 8 : ((nNextOffset <= (8192 + 512)) ? 12 : 16));

                int nReducedPartialSize = lzsa_get_match_varlen_size_v2(pMatch->length + pBestMatch[i + pMatch->length].length - MIN_MATCH_SIZE_V2);
-               if (pBestMatch[nNextIndex].offset != pMatch->offset)
-                  nReducedPartialSize += (pBestMatch[nNextIndex].offset <= 32) ? 4 : ((pBestMatch[nNextIndex].offset <= 512) ? 8 : ((pBestMatch[nNextIndex].offset <= (8192 + 512)) ? 12 : 16));
+               if (nNextOffset != pMatch->offset)
+                  nReducedPartialSize += (nNextOffset <= 32) ? 4 : ((nNextOffset <= 512) ? 8 : ((nNextOffset <= (8192 + 512)) ? 12 : 16));

                if (nCurPartialSize >= nReducedPartialSize) {
                   int nMatchLen = pMatch->length;
@@ -793,7 +916,6 @@ static int lzsa_optimize_command_count_v2(lzsa_compressor *pCompressor, const un
 static int lzsa_get_compressed_size_v2(lzsa_compressor *pCompressor, lzsa_match *pBestMatch, const int nStartOffset, const int nEndOffset) {
    int i;
    int nNumLiterals = 0;
-   int nOutOffset = 0;
    int nRepMatchOffset = 0;
    int nCompressedSize = 0;
@@ -838,7 +960,6 @@ static int lzsa_get_compressed_size_v2(lzsa_compressor *pCompressor, lzsa_match
    }

    {
-      int nTokenLiteralsLen = (nNumLiterals >= LITERALS_RUN_LEN_V2) ? LITERALS_RUN_LEN_V2 : nNumLiterals;
       int nCommandSize = 8 /* token */ + lzsa_get_literals_varlen_size_v2(nNumLiterals) + (nNumLiterals << 3);

       nCompressedSize += nCommandSize;
@@ -846,7 +967,7 @@ static int lzsa_get_compressed_size_v2(lzsa_compressor *pCompressor, lzsa_match
    }

    if (pCompressor->flags & LZSA_FLAG_RAW_BLOCK) {
-      nCompressedSize += (8 + 4 + 8);
+      nCompressedSize += (8 + 4);
    }

    return nCompressedSize;
@@ -870,7 +991,7 @@ static int lzsa_write_block_v2(lzsa_compressor *pCompressor, lzsa_match *pBestMa
    int nNumLiterals = 0;
    int nInFirstLiteralOffset = 0;
    int nOutOffset = 0;
-   int nCurNibbleOffset = -1, nCurFreeNibbles = 0;
+   int nCurNibbleOffset = -1;
    int nRepMatchOffset = 0;

    for (i = nStartOffset; i < nEndOffset; ) {
@@ -916,7 +1037,7 @@ static int lzsa_write_block_v2(lzsa_compressor *pCompressor, lzsa_match *pBestMa
             return -1;
          pOutData[nOutOffset++] = nTokenOffsetMode | (nTokenLiteralsLen << 3) | nTokenMatchLen;
-         nOutOffset = lzsa_write_literals_varlen_v2(pOutData, nOutOffset, nMaxOutDataSize, &nCurNibbleOffset, &nCurFreeNibbles, nNumLiterals);
+         nOutOffset = lzsa_write_literals_varlen_v2(pOutData, nOutOffset, nMaxOutDataSize, &nCurNibbleOffset, nNumLiterals);
          if (nOutOffset < 0) return -1;

          if (nNumLiterals < pCompressor->stats.min_literals || pCompressor->stats.min_literals == -1)
@@ -933,14 +1054,14 @@ static int lzsa_write_block_v2(lzsa_compressor *pCompressor, lzsa_match *pBestMa
          }

          if (nTokenOffsetMode == 0x00 || nTokenOffsetMode == 0x20) {
-            nOutOffset = lzsa_write_nibble_v2(pOutData, nOutOffset, nMaxOutDataSize, &nCurNibbleOffset, &nCurFreeNibbles, ((-nMatchOffset) & 0x1e) >> 1);
+            nOutOffset = lzsa_write_nibble_v2(pOutData, nOutOffset, nMaxOutDataSize, &nCurNibbleOffset, ((-nMatchOffset) & 0x1e) >> 1);
             if (nOutOffset < 0) return -1;
          }
          else if (nTokenOffsetMode == 0x40 || nTokenOffsetMode == 0x60) {
             pOutData[nOutOffset++] = (-nMatchOffset) & 0xff;
          }
          else if (nTokenOffsetMode == 0x80 || nTokenOffsetMode == 0xa0) {
-            nOutOffset = lzsa_write_nibble_v2(pOutData, nOutOffset, nMaxOutDataSize, &nCurNibbleOffset, &nCurFreeNibbles, ((-(nMatchOffset - 512)) >> 9) & 0x0f);
+            nOutOffset = lzsa_write_nibble_v2(pOutData, nOutOffset, nMaxOutDataSize, &nCurNibbleOffset, ((-(nMatchOffset - 512)) >> 9) & 0x0f);
             if (nOutOffset < 0) return -1;
             pOutData[nOutOffset++] = (-(nMatchOffset - 512)) & 0xff;
          }
@@ -954,7 +1075,7 @@ static int lzsa_write_block_v2(lzsa_compressor *pCompressor, lzsa_match *pBestMa

          nRepMatchOffset = nMatchOffset;

-         nOutOffset = lzsa_write_match_varlen_v2(pOutData, nOutOffset, nMaxOutDataSize, &nCurNibbleOffset, &nCurFreeNibbles, nEncodedMatchLen);
+         nOutOffset = lzsa_write_match_varlen_v2(pOutData, nOutOffset, nMaxOutDataSize, &nCurNibbleOffset, nEncodedMatchLen);
          if (nOutOffset < 0) return -1;

          if (nMatchOffset < pCompressor->stats.min_offset || pCompressor->stats.min_offset == -1)
@@ -1013,10 +1134,10 @@ static int lzsa_write_block_v2(lzsa_compressor *pCompressor, lzsa_match *pBestMa
          return -1;

       if (pCompressor->flags & LZSA_FLAG_RAW_BLOCK)
-         pOutData[nOutOffset++] = (nTokenLiteralsLen << 3) | 0x47;
+         pOutData[nOutOffset++] = (nTokenLiteralsLen << 3) | 0xe7;
       else
          pOutData[nOutOffset++] = (nTokenLiteralsLen << 3) | 0x00;
-      nOutOffset = lzsa_write_literals_varlen_v2(pOutData, nOutOffset, nMaxOutDataSize, &nCurNibbleOffset, &nCurFreeNibbles, nNumLiterals);
+      nOutOffset = lzsa_write_literals_varlen_v2(pOutData, nOutOffset, nMaxOutDataSize, &nCurNibbleOffset, nNumLiterals);
       if (nOutOffset < 0) return -1;

       if (nNumLiterals < pCompressor->stats.min_literals || pCompressor->stats.min_literals == -1)
@@ -1046,9 +1167,8 @@ static int lzsa_write_block_v2(lzsa_compressor *pCompressor, lzsa_match *pBestMa
       if (nOutOffset >= nMaxOutDataSize)
          return -1;
-      pOutData[nOutOffset++] = 0; /* Match offset */
-      nOutOffset = lzsa_write_nibble_v2(pOutData, nOutOffset, nMaxOutDataSize, &nCurNibbleOffset, &nCurFreeNibbles, 15); /* Extended match length nibble */
+      nOutOffset = lzsa_write_nibble_v2(pOutData, nOutOffset, nMaxOutDataSize, &nCurNibbleOffset, 15); /* Extended match length nibble */
       if (nOutOffset < 0) return -1;

       if ((nOutOffset + 1) > nMaxOutDataSize)
@@ -1058,7 +1178,7 @@ static int lzsa_write_block_v2(lzsa_compressor *pCompressor, lzsa_match *pBestMa
    }

    if (nCurNibbleOffset != -1) {
-      nOutOffset = lzsa_write_nibble_v2(pOutData, nOutOffset, nMaxOutDataSize, &nCurNibbleOffset, &nCurFreeNibbles, 0);
+      nOutOffset = lzsa_write_nibble_v2(pOutData, nOutOffset, nMaxOutDataSize, &nCurNibbleOffset, 0);
       if (nOutOffset < 0 || nCurNibbleOffset != -1)
          return -1;
    }
@@ -1079,19 +1199,19 @@ static int lzsa_write_block_v2(lzsa_compressor *pCompressor, lzsa_match *pBestMa
  * @return size of compressed data in output buffer, or -1 if the data is uncompressible
  */
 static int lzsa_write_raw_uncompressed_block_v2(lzsa_compressor *pCompressor, const unsigned char *pInWindow, const int nStartOffset, const int nEndOffset, unsigned char *pOutData, const int nMaxOutDataSize) {
-   int nCurNibbleOffset = -1, nCurFreeNibbles = 0;
+   int nCurNibbleOffset = -1;
    int nNumLiterals = nEndOffset - nStartOffset;
    int nTokenLiteralsLen = (nNumLiterals >= LITERALS_RUN_LEN_V2) ? LITERALS_RUN_LEN_V2 : nNumLiterals;
    int nOutOffset = 0;
-   int nCommandSize = 8 /* token */ + lzsa_get_literals_varlen_size_v2(nNumLiterals) + (nNumLiterals << 3) + 8 + 4 + 8;
+   int nCommandSize = 8 /* token */ + lzsa_get_literals_varlen_size_v2(nNumLiterals) + (nNumLiterals << 3) + 4 + 8;

    if ((nOutOffset + ((nCommandSize + 7) >> 3)) > nMaxOutDataSize)
       return -1;

    pCompressor->num_commands = 0;
-   pOutData[nOutOffset++] = (nTokenLiteralsLen << 3) | 0x47;
+   pOutData[nOutOffset++] = (nTokenLiteralsLen << 3) | 0xe7;

-   nOutOffset = lzsa_write_literals_varlen_v2(pOutData, nOutOffset, nMaxOutDataSize, &nCurNibbleOffset, &nCurFreeNibbles, nNumLiterals);
+   nOutOffset = lzsa_write_literals_varlen_v2(pOutData, nOutOffset, nMaxOutDataSize, &nCurNibbleOffset, nNumLiterals);
    if (nOutOffset < 0) return -1;

    if (nNumLiterals != 0) {
@@ -1102,9 +1222,7 @@ static int lzsa_write_raw_uncompressed_block_v2(lzsa_compressor *pCompressor, co
    /* Emit EOD marker for raw block */
-   pOutData[nOutOffset++] = 0; /* Match offset */
-
-   nOutOffset = lzsa_write_nibble_v2(pOutData, nOutOffset, nMaxOutDataSize, &nCurNibbleOffset, &nCurFreeNibbles, 15); /* Extended match length nibble */
+   nOutOffset = lzsa_write_nibble_v2(pOutData, nOutOffset, nMaxOutDataSize, &nCurNibbleOffset, 15); /* Extended match length nibble */
    if (nOutOffset < 0) return -1;

    if ((nOutOffset + 1) > nMaxOutDataSize)
@@ -1115,7 +1233,7 @@ static int lzsa_write_raw_uncompressed_block_v2(lzsa_compressor *pCompressor, co
    pCompressor->num_commands++;

    if (nCurNibbleOffset != -1) {
-      nOutOffset = lzsa_write_nibble_v2(pOutData, nOutOffset, nMaxOutDataSize, &nCurNibbleOffset, &nCurFreeNibbles, 0);
+      nOutOffset = lzsa_write_nibble_v2(pOutData, nOutOffset, nMaxOutDataSize, &nCurNibbleOffset, 0);
       if (nOutOffset < 0 || nCurNibbleOffset != -1)
          return -1;
    }
@@ -1137,12 +1255,27 @@ static int lzsa_write_raw_uncompressed_block_v2(lzsa_compressor *pCompressor, co
  */
 int lzsa_optimize_and_write_block_v2(lzsa_compressor *pCompressor, const unsigned char *pInWindow, const int nPreviousBlockSize, const int nInDataSize, unsigned char *pOutData, const int nMaxOutDataSize) {
    int nResult, nBaseCompressedSize;
-   int nMatchesPerArrival = (nInDataSize < 65536) ? NMATCHES_PER_ARRIVAL_V2_BIG : NMATCHES_PER_ARRIVAL_V2_SMALL;
+   int nArrivalsPerPosition = (nInDataSize < 65536) ? NARRIVALS_PER_POSITION_V2_BIG : NARRIVALS_PER_POSITION_V2_SMALL;
+   int *rle_len = (int*)pCompressor->intervals /* reuse */;
+   int i;
+
+   i = 0;
+   while (i < (nPreviousBlockSize + nInDataSize)) {
+      int nRangeStartIdx = i;
+      unsigned char c = pInWindow[nRangeStartIdx];
+      do {
+         i++;
+      } while (i < (nPreviousBlockSize + nInDataSize) && pInWindow[i] == c);
+      while (nRangeStartIdx < i) {
+         rle_len[nRangeStartIdx] = i - nRangeStartIdx;
+         nRangeStartIdx++;
+      }
+   }

    /* Compress optimally without breaking ties in favor of less tokens */
    memset(pCompressor->best_match, 0, BLOCK_SIZE * sizeof(lzsa_match));
-   lzsa_optimize_forward_v2(pCompressor, pInWindow, pCompressor->best_match - nPreviousBlockSize, nPreviousBlockSize, nPreviousBlockSize + nInDataSize, 0 /* reduce */, (nInDataSize < 65536) ? 1 : 0 /* insert forward reps */, nMatchesPerArrival);
+   lzsa_optimize_forward_v2(pCompressor, pInWindow, pCompressor->best_match - nPreviousBlockSize, nPreviousBlockSize, nPreviousBlockSize + nInDataSize, 0 /* reduce */, (nInDataSize < 65536) ? 1 : 0 /* insert forward reps */, nArrivalsPerPosition);

    int nDidReduce;
    int nPasses = 0;
@@ -1159,7 +1292,7 @@ int lzsa_optimize_and_write_block_v2(lzsa_compressor *pCompressor, const unsigne
    /* Compress optimally and do break ties in favor of less tokens */
    memset(pCompressor->improved_match, 0, BLOCK_SIZE * sizeof(lzsa_match));
-   lzsa_optimize_forward_v2(pCompressor, pInWindow, pCompressor->improved_match - nPreviousBlockSize, nPreviousBlockSize, nPreviousBlockSize + nInDataSize, 1 /* reduce */, 0 /* use forward reps */, nMatchesPerArrival);
+   lzsa_optimize_forward_v2(pCompressor, pInWindow, pCompressor->improved_match - nPreviousBlockSize, nPreviousBlockSize, nPreviousBlockSize + nInDataSize, 1 /* reduce */, 0 /* use forward reps */, nArrivalsPerPosition);

    nPasses = 0;
    do {
@@ -1169,8 +1302,77 @@ int lzsa_optimize_and_write_block_v2(lzsa_compressor *pCompressor, const unsigne
       nReducedCompressedSize = lzsa_get_compressed_size_v2(pCompressor, pCompressor->improved_match - nPreviousBlockSize, nPreviousBlockSize, nPreviousBlockSize + nInDataSize);
       if (nReducedCompressedSize > 0 && nReducedCompressedSize <= nBaseCompressedSize) {
+         const int nEndOffset = nPreviousBlockSize + nInDataSize;
+         int nSupplementedCompressedSize;
+
         /* Pick the parse with the reduced number of tokens as it didn't negatively affect the size */
         pBestMatch = pCompressor->improved_match - nPreviousBlockSize;
+
+         int* first_offset_for_byte = pCompressor->first_offset_for_byte;
+         int* next_offset_for_pos = pCompressor->next_offset_for_pos;
+         int nPosition;
+
+         /* Supplement small matches */
+
+         memset(first_offset_for_byte, 0xff, sizeof(int) * 65536);
+         memset(next_offset_for_pos, 0xff, sizeof(int) * nInDataSize);
+
+         for (nPosition = nPreviousBlockSize; nPosition < nEndOffset - 1; nPosition++) {
+            next_offset_for_pos[nPosition - nPreviousBlockSize] = first_offset_for_byte[((unsigned int)pInWindow[nPosition]) | (((unsigned int)pInWindow[nPosition + 1]) << 8)];
+            first_offset_for_byte[((unsigned int)pInWindow[nPosition]) | (((unsigned int)pInWindow[nPosition + 1]) << 8)] = nPosition;
+         }
+
+         for (nPosition = nPreviousBlockSize + 1; nPosition < (nEndOffset - 1); nPosition++) {
+            lzsa_match* match = pCompressor->match + ((nPosition - nPreviousBlockSize) << MATCHES_PER_INDEX_SHIFT_V2);
+            int m = 0, nInserted = 0;
+            int nMatchPos;
+
+            while (m < 15 && match[m].length)
+               m++;
+
+            for (nMatchPos = next_offset_for_pos[nPosition - nPreviousBlockSize]; m < 15 && nMatchPos >= 0; nMatchPos = next_offset_for_pos[nMatchPos - nPreviousBlockSize]) {
+               int nMatchOffset = nPosition - nMatchPos;
+               int nExistingMatchIdx;
+               int nAlreadyExists = 0;
+
+               for (nExistingMatchIdx = 0; nExistingMatchIdx < m; nExistingMatchIdx++) {
+                  if (match[nExistingMatchIdx].offset == nMatchOffset) {
+                     nAlreadyExists = 1;
+                     break;
+                  }
+               }
+
+               if (!nAlreadyExists) {
+                  int nMatchLen = 2;
+                  while (nMatchLen < 16 && (nPosition + nMatchLen + 4) < nEndOffset && !memcmp(pInWindow + nMatchPos + nMatchLen, pInWindow + nPosition + nMatchLen, 4))
+                     nMatchLen += 4;
+                  while (nMatchLen < 16 && (nPosition + nMatchLen) < nEndOffset && pInWindow[nMatchPos + nMatchLen] == pInWindow[nPosition + nMatchLen])
+                     nMatchLen++;
+                  match[m].length = nMatchLen;
+                  match[m].offset = nMatchOffset;
+                  m++;
+                  nInserted++;
+                  if (nInserted >= 15)
+                     break;
+               }
+            }
+         }
+
+         /* Compress optimally with the extra matches */
+         memset(pCompressor->best_match, 0, BLOCK_SIZE * sizeof(lzsa_match));
+         lzsa_optimize_forward_v2(pCompressor, pInWindow, pCompressor->best_match - nPreviousBlockSize, nPreviousBlockSize, nPreviousBlockSize + nInDataSize, 1 /* reduce */, 0 /* use forward reps */, nArrivalsPerPosition);
+
+         nPasses = 0;
+         do {
+            nDidReduce = lzsa_optimize_command_count_v2(pCompressor, pInWindow, pCompressor->best_match - nPreviousBlockSize, nPreviousBlockSize, nPreviousBlockSize + nInDataSize);
+            nPasses++;
+         } while (nDidReduce && nPasses < 20);
+
+         nSupplementedCompressedSize = lzsa_get_compressed_size_v2(pCompressor, pCompressor->best_match - nPreviousBlockSize, nPreviousBlockSize, nPreviousBlockSize + nInDataSize);
+         if (nSupplementedCompressedSize > 0 && nSupplementedCompressedSize < nReducedCompressedSize) {
+            /* Pick the parse with the extra matches as it didn't negatively affect the size */
+            pBestMatch = pCompressor->best_match - nPreviousBlockSize;
+         }
       }
    }
diff --git a/Tools/unix/lzsa/src/shrink_context.c b/Tools/unix/lzsa/src/shrink_context.c
index c1e7ab3d..9e6900f7 100644
--- a/Tools/unix/lzsa/src/shrink_context.c
+++ b/Tools/unix/lzsa/src/shrink_context.c
@@ -62,6 +62,10 @@ int lzsa_compressor_init(lzsa_compressor *pCompressor, const int nMaxWindowSize,
    pCompressor->best_match = NULL;
    pCompressor->improved_match = NULL;
    pCompressor->arrival = NULL;
+   pCompressor->rep_slot_handled_mask = NULL;
+   pCompressor->rep_len_handled_mask = NULL;
+   pCompressor->first_offset_for_byte = NULL;
+   pCompressor->next_offset_for_pos = NULL;
    pCompressor->min_match_size = nMinMatchSize;
    if (pCompressor->min_match_size < nMinMatchSizeForFormat)
       pCompressor->min_match_size = nMinMatchSizeForFormat;
@@ -89,7 +93,7 @@ int lzsa_compressor_init(lzsa_compressor *pCompressor, const int nMaxWindowSize,
    pCompressor->open_intervals = (unsigned int *)malloc((LCP_AND_TAG_MAX + 1) * sizeof(unsigned int));

    if (pCompressor->open_intervals) {
-      pCompressor->arrival = (lzsa_arrival *)malloc(((BLOCK_SIZE + 1) << MATCHES_PER_ARRIVAL_SHIFT) * sizeof(lzsa_arrival));
+      pCompressor->arrival = (lzsa_arrival *)malloc(((BLOCK_SIZE + 1) << ARRIVALS_PER_POSITION_SHIFT) * sizeof(lzsa_arrival));

       if (pCompressor->arrival) {
          pCompressor->best_match = (lzsa_match *)malloc(BLOCK_SIZE * sizeof(lzsa_match));
@@ -102,8 +106,26 @@ int lzsa_compressor_init(lzsa_compressor *pCompressor, const int nMaxWindowSize,
                   pCompressor->match = (lzsa_match *)malloc(BLOCK_SIZE * NMATCHES_PER_INDEX_V2 * sizeof(lzsa_match));
                else
                   pCompressor->match = (lzsa_match *)malloc(BLOCK_SIZE * NMATCHES_PER_INDEX_V1 * sizeof(lzsa_match));
-               if (pCompressor->match)
-                  return 0;
+               if (pCompressor->match) {
+                  if (pCompressor->format_version == 2) {
+                     pCompressor->rep_slot_handled_mask = (char*)malloc(NARRIVALS_PER_POSITION_V2_BIG * ((LCP_MAX + 1) / 8) * sizeof(char));
+                     if (pCompressor->rep_slot_handled_mask) {
+                        pCompressor->rep_len_handled_mask = (char*)malloc(((LCP_MAX + 1) / 8) * sizeof(char));
+                        if (pCompressor->rep_len_handled_mask) {
+                           pCompressor->first_offset_for_byte = (int*)malloc(65536 * sizeof(int));
+                           if (pCompressor->first_offset_for_byte) {
+                              pCompressor->next_offset_for_pos = (int*)malloc(BLOCK_SIZE * sizeof(int));
+                              if (pCompressor->next_offset_for_pos) {
+                                 return 0;
+                              }
+                           }
+                        }
+                     }
+                  }
+                  else {
+                     return 0;
+                  }
+               }
            }
         }
      }
@@ -124,6 +146,26 @@ int lzsa_compressor_init(lzsa_compressor *pCompressor, const int nMaxWindowSize,
 void lzsa_compressor_destroy(lzsa_compressor *pCompressor) {
    divsufsort_destroy(&pCompressor->divsufsort_context);

+   if (pCompressor->next_offset_for_pos) {
+      free(pCompressor->next_offset_for_pos);
+      pCompressor->next_offset_for_pos = NULL;
+   }
+
+   if (pCompressor->first_offset_for_byte) {
+      free(pCompressor->first_offset_for_byte);
+      pCompressor->first_offset_for_byte = NULL;
+   }
+
+   if (pCompressor->rep_len_handled_mask) {
+      free(pCompressor->rep_len_handled_mask);
+      pCompressor->rep_len_handled_mask = NULL;
+   }
+
+   if (pCompressor->rep_slot_handled_mask) {
+      free(pCompressor->rep_slot_handled_mask);
+      pCompressor->rep_slot_handled_mask = NULL;
+   }
+
    if (pCompressor->match) {
       free(pCompressor->match);
       pCompressor->match = NULL;
diff --git a/Tools/unix/lzsa/src/shrink_context.h b/Tools/unix/lzsa/src/shrink_context.h
index 70245cf6..ce80fbd2 100644
--- a/Tools/unix/lzsa/src/shrink_context.h
+++ b/Tools/unix/lzsa/src/shrink_context.h
@@ -49,10 +49,10 @@ extern "C" {
 #define VISITED_FLAG 0x80000000
 #define EXCL_VISITED_MASK 0x7fffffff

-#define NMATCHES_PER_ARRIVAL_V1 8
-#define NMATCHES_PER_ARRIVAL_V2_SMALL 9
-#define NMATCHES_PER_ARRIVAL_V2_BIG 32
-#define MATCHES_PER_ARRIVAL_SHIFT 5
+#define NARRIVALS_PER_POSITION_V1 8
+#define NARRIVALS_PER_POSITION_V2_SMALL 9
+#define NARRIVALS_PER_POSITION_V2_BIG 32
+#define ARRIVALS_PER_POSITION_SHIFT 5

 #define NMATCHES_PER_INDEX_V1 8
 #define MATCHES_PER_INDEX_SHIFT_V1 3
@@ -63,8 +63,6 @@ extern "C" {
 #define LEAVE_ALONE_MATCH_SIZE 300
 #define LEAVE_ALONE_MATCH_SIZE_SMALL 1000

-#define LAST_LITERALS 0
-
 #define MODESWITCH_PENALTY 3

 /** One match */
@@ -81,12 +79,10 @@ typedef struct {
    int from_pos;

    unsigned short rep_len;
+   unsigned short match_len;
    int rep_pos;
    int num_literals;
    int score;
-
-   unsigned short match_offset;
-   unsigned short match_len;
 } lzsa_arrival;

 /** Compression statistics */
@@ -128,6 +124,10 @@ typedef struct _lzsa_compressor {
    lzsa_match *best_match;
    lzsa_match *improved_match;
    lzsa_arrival *arrival;
+   char *rep_slot_handled_mask;
+   char *rep_len_handled_mask;
+   int *first_offset_for_byte;
+   int *next_offset_for_pos;
    int min_match_size;
    int format_version;
    int flags;
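
Background sketch (illustrative only; the helper names, array sizes and demo input below are
hypothetical and are not taken from the lzsa sources). The supplemental-match pass added to
lzsa_optimize_and_write_block_v2 above chains every block position by its leading two-byte
value (first_offset_for_byte / next_offset_for_pos), so that short 2..15 byte matches the main
match finder may have skipped can be offered to the final parse. A minimal standalone version
of that chaining idea could look like this:

   #include <stdio.h>
   #include <string.h>

   #define NO_POS (-1)

   static int head[65536];    /* most recent position starting with each byte pair */
   static int prev[1024];     /* previous position that starts with the same byte pair */

   /* Record, for every position, the closest earlier position sharing its 2-byte prefix. */
   static void chain_byte_pairs(const unsigned char *data, int len) {
      int pos;
      for (pos = 0; pos < 65536; pos++)
         head[pos] = NO_POS;
      for (pos = 0; pos + 1 < len; pos++) {
         unsigned int pair = data[pos] | ((unsigned int)data[pos + 1] << 8);
         prev[pos] = head[pair];   /* candidate short-match source for 'pos' */
         head[pair] = pos;
      }
   }

   int main(void) {
      const unsigned char data[] = "abcabcab";
      int len = (int)strlen((const char *)data);
      int pos;

      chain_byte_pairs(data, len);

      /* Each candidate is an earlier position with at least a 2-byte match; the patch
         then extends the candidate up to 15 bytes and adds it to the match slots for
         that position if its offset is not already present. */
      for (pos = 0; pos + 1 < len; pos++) {
         if (prev[pos] != NO_POS)
            printf("pos %d: candidate short match at offset %d\n", pos, pos - prev[pos]);
      }
      return 0;
   }

Walking the chain repeatedly, as the patch does via next_offset_for_pos, yields progressively
older candidate positions until the per-position match slots (15 of them in the patch) are full.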