CVE-2016-9297
Description
Compile
Download
虽然 Mitre 的 description 写是 4.0.6 版本的漏洞,但是 chall 的 description 说 4.0.4,那就用 4.0.4 吧。
git clone https://gitlab.com/libtiff/libtiff.git && cd libtiff
git checkout v4.0.4
Build
mkdir ../libtiff-build-fuzz
cd ../libtiff-build-fuzz
CC=afl-clang-lto \
CXX=afl-clang-lto++ \
../libtiff/configure \
    --prefix="$(realpath ../libtiff-fuzz)" \
    --disable-shared
make clean && make -j`nproc` && make install
mkdir ../libtiff-build-fuzz-asan
cd ../libtiff-build-fuzz-asan
AFL_USE_ASAN=1 \
CC=afl-clang-lto \
CXX=afl-clang-lto++ \
../libtiff/configure \
    --prefix="$(realpath ../libtiff-fuzz-asan)" \
    --disable-shared
make clean && AFL_USE_ASAN=1 make -j`nproc` && AFL_USE_ASAN=1 make install

make 的时候遇到两个定义不完整的报错,直接在 tif_predict.h 中加入下面两行即可:
#include "tiffio.h"
#include "tiffiop.h"

Samples
#!/usr/bin/env python
import os
import struct
import subprocess
def run_cmd(cmd):
    """Run an external command, reporting (but not raising on) failures.

    Corpus generation is best-effort: some tools may fail (e.g. tiffcp
    with an unsupported codec such as "zstd") or be missing entirely,
    and the rest of the samples should still be produced.

    Parameters:
        cmd: argv-style list, e.g. ["tiffcp", "-c", "lzw", src, dst].
    """
    try:
        subprocess.run(cmd, check=True, capture_output=True)
    except subprocess.CalledProcessError as e:
        # Non-zero exit status: report and keep going.
        print(f"Error running {' '.join(cmd)}: {e}")
    except FileNotFoundError as e:
        # Tool not installed: previously this exception propagated and
        # aborted the whole run; treat it like any other failed command.
        print(f"Error running {' '.join(cmd)}: {e}")
def create_minimal_tiff(filename, width=8, height=8):
    """Write a minimal little-endian bilevel (1 bit/pixel) TIFF to *filename*.

    Layout: 8-byte header, one IFD with 11 entries, the two X/Y resolution
    rationals, then the (all-zero) strip data.
    """
    NUM_ENTRIES = 11
    ifd_size = 2 + NUM_ENTRIES * 12 + 4           # count + entries + next-IFD ptr
    res_offset = 8 + ifd_size                     # rationals directly after the IFD
    strip_offset = res_offset + 16                # pixel data after both rationals
    strip_bytes = (width * height + 7) // 8       # 1 bpp, rounded up to whole bytes

    # (tag, type, count, value) — type 3 = SHORT, 4 = LONG, 5 = RATIONAL
    tags = [
        (256, 3, 1, width),            # ImageWidth
        (257, 3, 1, height),           # ImageLength
        (258, 3, 1, 1),                # BitsPerSample
        (259, 3, 1, 1),                # Compression (none)
        (262, 3, 1, 1),                # Photometric (MinIsBlack)
        (273, 4, 1, strip_offset),     # StripOffsets
        (277, 3, 1, 1),                # SamplesPerPixel
        (278, 3, 1, height),           # RowsPerStrip
        (279, 4, 1, strip_bytes),      # StripByteCounts
        (282, 5, 1, res_offset),       # XResolution (offset to rational)
        (283, 5, 1, res_offset + 8),   # YResolution (offset to rational)
    ]

    out = bytearray()
    out += b'II' + struct.pack('<HI', 42, 8)      # little-endian, magic 42, IFD at 8
    out += struct.pack('<H', NUM_ENTRIES)
    for entry in tags:
        out += struct.pack('<HHII', *entry)
    out += struct.pack('<I', 0)                   # no next IFD
    out += struct.pack('<IIII', 72, 1, 72, 1)     # X and Y resolution: 72/1
    out += b'\x00' * strip_bytes                  # all-black image data

    with open(filename, 'wb') as f:
        f.write(bytes(out))
def create_grayscale_tiff(filename, width=8, height=8, bpp=8):
    """Write an uncompressed little-endian grayscale TIFF.

    *bpp* is the BitsPerSample value; the pixel data assumes whole bytes
    per sample (bpp of 8 or 16). Every sample is mid-gray (0x80 bytes).
    """
    n = 11
    res_off = 8 + 2 + n * 12 + 4       # resolution rationals follow the IFD
    data_off = res_off + 16            # strip data after the two rationals
    nbytes = width * height * (bpp // 8)

    def entry(tag, typ, count, value):
        return struct.pack('<HHII', tag, typ, count, value)

    parts = [
        b'II', struct.pack('<HI', 42, 8),    # header: little-endian, IFD at 8
        struct.pack('<H', n),
        entry(256, 3, 1, width),             # ImageWidth
        entry(257, 3, 1, height),            # ImageLength
        entry(258, 3, 1, bpp),               # BitsPerSample
        entry(259, 3, 1, 1),                 # Compression: none
        entry(262, 3, 1, 1),                 # Photometric: MinIsBlack
        entry(273, 4, 1, data_off),          # StripOffsets
        entry(277, 3, 1, 1),                 # SamplesPerPixel
        entry(278, 3, 1, height),            # RowsPerStrip
        entry(279, 4, 1, nbytes),            # StripByteCounts
        entry(282, 5, 1, res_off),           # XResolution
        entry(283, 5, 1, res_off + 8),       # YResolution
        struct.pack('<I', 0),                # next IFD: none
        struct.pack('<IIII', 72, 1, 72, 1),  # 72/1 dpi for X and Y
        b'\x80' * nbytes,                    # mid-gray pixels
    ]
    with open(filename, 'wb') as f:
        f.write(b''.join(parts))
def create_rgb_tiff(filename, width=8, height=8):
    """Write an uncompressed 8-bit-per-channel RGB TIFF (solid red image)."""
    count = 12
    bps_off = 8 + 2 + count * 12 + 4   # three out-of-line SHORT BitsPerSample values
    res_off = bps_off + 6
    data_off = res_off + 16

    fields = (
        (256, 3, 1, width),             # ImageWidth
        (257, 3, 1, height),            # ImageLength
        (258, 3, 3, bps_off),           # BitsPerSample: 3 values, stored out-of-line
        (259, 3, 1, 1),                 # Compression: none
        (262, 3, 1, 2),                 # Photometric: RGB
        (273, 4, 1, data_off),          # StripOffsets
        (277, 3, 1, 3),                 # SamplesPerPixel
        (278, 3, 1, height),            # RowsPerStrip
        (279, 4, 1, width * height * 3),  # StripByteCounts
        (282, 5, 1, res_off),           # XResolution
        (283, 5, 1, res_off + 8),       # YResolution
        (284, 3, 1, 1),                 # PlanarConfig: contiguous
    )

    blob = b'II' + struct.pack('<HI', 42, 8)
    blob += struct.pack('<H', count)
    blob += b''.join(struct.pack('<HHII', *f) for f in fields)
    blob += struct.pack('<I', 0)                   # no next IFD
    blob += struct.pack('<HHH', 8, 8, 8)           # 8 bits per channel
    blob += struct.pack('<IIII', 72, 1, 72, 1)     # 72/1 dpi
    blob += b'\xff\x00\x00' * (width * height)     # every pixel pure red

    with open(filename, 'wb') as f:
        f.write(blob)
def create_palette_tiff(filename, width=8, height=8):
    """Write an 8-bit palette TIFF whose colormap is a gray ramp."""
    count = 12
    cmap_off = 8 + 2 + count * 12 + 4
    res_off = cmap_off + 256 * 3 * 2    # 256 entries x RGB x 2 bytes each
    data_off = res_off + 16

    def pack_entry(tag, typ, n, value):
        return struct.pack('<HHII', tag, typ, n, value)

    ifd = struct.pack('<H', count)
    ifd += pack_entry(256, 3, 1, width)            # ImageWidth
    ifd += pack_entry(257, 3, 1, height)           # ImageLength
    ifd += pack_entry(258, 3, 1, 8)                # 8 bits per palette index
    ifd += pack_entry(259, 3, 1, 1)                # Compression: none
    ifd += pack_entry(262, 3, 1, 3)                # Photometric: palette
    ifd += pack_entry(273, 4, 1, data_off)         # StripOffsets
    ifd += pack_entry(277, 3, 1, 1)                # SamplesPerPixel
    ifd += pack_entry(278, 3, 1, height)           # RowsPerStrip
    ifd += pack_entry(279, 4, 1, width * height)   # StripByteCounts
    ifd += pack_entry(282, 5, 1, res_off)          # XResolution
    ifd += pack_entry(283, 5, 1, res_off + 8)      # YResolution
    ifd += pack_entry(320, 3, 256 * 3, cmap_off)   # ColorMap
    ifd += struct.pack('<I', 0)                    # no next IFD

    # Colormap: identical 16-bit linear ramp for the R, G and B planes.
    ramp = b''.join(struct.pack('<H', i * 256) for i in range(256))
    cmap = ramp * 3

    with open(filename, 'wb') as f:
        f.write(b'II' + struct.pack('<HI', 42, 8))
        f.write(ifd)
        f.write(cmap)
        f.write(struct.pack('<IIII', 72, 1, 72, 1))
        f.write(bytes(i % 256 for i in range(width * height)))
def main():
    """Generate a seed corpus of TIFF files for fuzzing tiffinfo.

    Builds five hand-crafted base images, then uses external tools
    (tiffcp, tiffset, cp) to derive compressed/tiled/byte-swapped
    variants plus a few deliberately broken files. Tool failures are
    tolerated (see run_cmd).
    """
    corpus_dir = "corpus"
    # exist_ok avoids the racy exists()-then-makedirs() pattern.
    os.makedirs(corpus_dir, exist_ok=True)

    base_tiff = os.path.join(corpus_dir, "base_bw.tif")
    create_minimal_tiff(base_tiff)
    base_gray8 = os.path.join(corpus_dir, "base_gray8.tif")
    create_grayscale_tiff(base_gray8, bpp=8)
    base_gray16 = os.path.join(corpus_dir, "base_gray16.tif")
    create_grayscale_tiff(base_gray16, bpp=16)
    base_rgb = os.path.join(corpus_dir, "base_rgb.tif")
    create_rgb_tiff(base_rgb)
    base_palette = os.path.join(corpus_dir, "base_palette.tif")
    create_palette_tiff(base_palette)

    bases = [base_tiff, base_gray8, base_gray16, base_rgb, base_palette]

    # Use tiffcp to create compressed and tiled variations of every base.
    compressions = ["none", "lzw", "zip", "packbits", "zstd"]
    for base in bases:
        base_name = os.path.basename(base).split('.')[0]
        for comp in compressions:
            out = os.path.join(corpus_dir, f"{base_name}_{comp}.tif")
            run_cmd(["tiffcp", "-c", comp, base, out])
            out_tile = os.path.join(corpus_dir, f"{base_name}_{comp}_tile.tif")
            run_cmd(["tiffcp", "-c", comp, "-t", "-w", "8", "-l", "8",
                     base, out_tile])

    # Fax (G3/G4) compression only applies to the bilevel image.
    for comp in ["g3", "g4"]:
        run_cmd(["tiffcp", "-c", comp, base_tiff,
                 os.path.join(corpus_dir, f"bw_{comp}.tif")])

    # JPEG compression for the RGB image.
    run_cmd(["tiffcp", "-c", "jpeg", base_rgb,
             os.path.join(corpus_dir, "rgb_jpeg.tif")])

    # BigTIFF container.
    run_cmd(["tiffcp", "-8", base_rgb,
             os.path.join(corpus_dir, "bigtiff_rgb.tif")])

    # Big-endian byte order.
    run_cmd(["tiffcp", "-B", base_rgb,
             os.path.join(corpus_dir, "big_endian_rgb.tif")])

    # Separate (planar) sample layout.
    run_cmd(["tiffcp", "-p", "separate", base_rgb,
             os.path.join(corpus_dir, "separate_rgb.tif")])

    # Add an extra ASCII tag (315) to a copy of the RGB image.
    tagged_tiff = os.path.join(corpus_dir, "tagged_rgb.tif")
    run_cmd(["cp", base_rgb, tagged_tiff])
    run_cmd(["tiffset", "-s", "315", "Fuzzer", tagged_tiff])

    # Broken case: IFD whose next-IFD pointer loops back to itself.
    circular = os.path.join(corpus_dir, "circular.tif")
    with open(base_tiff, 'rb') as f:
        data = bytearray(f.read())
    # The IFD starts at offset 8: 2-byte count + 11 * 12-byte entries =
    # 134 bytes, so the 4-byte next-IFD offset lives at bytes 142..145.
    if len(data) >= 146:
        data[142:146] = struct.pack('<I', 8)   # point back at the first IFD
    with open(circular, 'wb') as f:
        f.write(data)

    # Broken case: absurdly large declared dimensions.
    huge_dim = os.path.join(corpus_dir, "huge_dim.tif")
    create_minimal_tiff(huge_dim)
    run_cmd(["tiffset", "-s", "256", "1000000", huge_dim])
    run_cmd(["tiffset", "-s", "257", "1000000", huge_dim])

    # Broken case: unknown compression scheme.
    invalid_comp = os.path.join(corpus_dir, "invalid_comp.tif")
    create_minimal_tiff(invalid_comp)
    run_cmd(["tiffset", "-s", "259", "65535", invalid_comp])

    print(f"Generated {len(os.listdir(corpus_dir))} samples in {corpus_dir}")
if __name__ == "__main__":
    main()

除了 AI 生成的这些样本外,test/images/ 中也有一些样本可以拿来用。
Fuzzing
编译出来发现有一堆程序,哪个才是我们的目标呢?
查看 Mitre 给出的 references,其中 Bug 2590 - CVE-2016-9297: segfault in _TIFFPrintField (tif_print.c:127) 写道:
Triggered in libtiff 4.0.6 with AFL and ASAN. Only crashes if I LD_PRELOAD AFL’s libdislocator (more info: https://github.com/mirrorer/afl/tree/master/libdislocator).
LD_PRELOAD=/root/afl-2.35b/libdislocator/libdislocator.so ./tiffinfo -i test000
由此可知我们 fuzz 的目标是 tiffinfo 。

我们将所有的输出参数都用上,以便提高覆盖率,这里只开了两个线程 fuzz:
mkdir -p /dev/shm/{normal,asan}
AFL_TMPDIR=/dev/shm/asan \
afl-fuzz -i corpus \
    -o outs \
    -m none \
    -M asan \
    -- ../libtiff-fuzz-asan/bin/tiffinfo -Dcjrsw @@
AFL_TMPDIR=/dev/shm/normal \
afl-fuzz -i corpus \
    -o outs \
    -m none \
    -S normal \
    -- ../libtiff-fuzz/bin/tiffinfo -Dcjrsw @@

刚跑了两分钟,ASAN 那个线程以每秒一百多 crashes 的速度产出……吓坏了(

Analysis
Coverage
这个 chall 主要是让我们使用 lcov 来生成覆盖率报告的,不过我选择 llvm-cov,而不是 lcov 。
先单独编译一个 cov 插桩的版本:
mkdir ../libtiff-build-cov
cd ../libtiff-build-cov
CC=clang \
CXX=clang++ \
CFLAGS="-fprofile-instr-generate -fcoverage-mapping" \
CXXFLAGS="$CFLAGS" \
../libtiff/configure \
    --prefix="$(realpath ../libtiff-cov)" \
    --disable-shared
make clean && AFL_USE_ASAN=1 make -j`nproc` && AFL_USE_ASAN=1 make install

然后可以使用如下脚本自动生成覆盖率报告:
#!/usr/bin/env python
import osimport subprocessimport globimport argparse
def run_cmd(cmd, env=None):
    """Run *cmd* through the shell and return (returncode, stdout, stderr).

    Never raises: if the process cannot even be launched, the failure is
    reported as return code 1 with the exception text in the stderr slot.
    """
    try:
        proc = subprocess.run(cmd, shell=True, env=env,
                              capture_output=True, text=True)
    except Exception as exc:  # deliberate best-effort: report, don't crash
        return 1, "", str(exc)
    return proc.returncode, proc.stdout, proc.stderr
def main():
    """Replay AFL++ queue samples against an llvm-cov-instrumented binary,
    merge the per-sample profiles, and emit HTML + summary coverage reports.
    """
    parser = argparse.ArgumentParser(
        description="Generate llvm-cov report from AFL++ samples.")
    parser.add_argument("--bin", required=True,
                        help="Path to the instrumented binary.")
    parser.add_argument("--samples", required=True,
                        help="Directory containing AFL++ samples (e.g., output/default/queue).")
    parser.add_argument("--out", default="coverage_report",
                        help="Directory to save the HTML report (default: coverage_report).")
    parser.add_argument("--profraw-dir", default="profraws",
                        help="Directory to store intermediate .profraw files.")
    parser.add_argument("--args", default="",
                        help="Additional arguments to pass to the binary (use {} as placeholder for sample path).")
    args = parser.parse_args()

    for directory in (args.profraw_dir, args.out):
        if not os.path.exists(directory):
            os.makedirs(directory)

    # Keep regular files only — AFL++ output dirs contain subdirectories.
    samples = [p for p in glob.glob(os.path.join(args.samples, "*"))
               if os.path.isfile(p)]
    if not samples:
        print(f"No samples found in {args.samples}")
        return

    print(f"Found {len(samples)} samples. Running coverage...")

    env = os.environ.copy()
    for idx, sample in enumerate(samples):
        # One .profraw per sample: LLVM_PROFILE_FILE tells the profiling
        # runtime where to dump its counters for this invocation.
        env["LLVM_PROFILE_FILE"] = os.path.abspath(
            os.path.join(args.profraw_dir, f"sample_{idx}.profraw"))
        if "{}" in args.args:
            cmd = f"{args.bin} {args.args.format(sample)}"
        else:
            cmd = f"{args.bin} {args.args} {sample}"
        print(f"[{idx+1}/{len(samples)}] Running: {cmd}", end="\r")
        run_cmd(cmd, env=env)

    print("\nMerging profile data...")
    profdata_file = "merged.profdata"
    profraw_pattern = os.path.join(args.profraw_dir, "*.profraw")
    rc, _, err = run_cmd(
        f"llvm-profdata merge -sparse {profraw_pattern} -o {profdata_file}")
    if rc != 0:
        print(f"Error merging data: {err}")
        return

    print(f"Generating HTML report in {args.out}...")
    rc, _, err = run_cmd(
        f"llvm-cov show {args.bin} -instr-profile={profdata_file} "
        f"-format=html -output-dir={args.out}")
    if rc != 0:
        print(f"Error generating report: {err}")
        return

    # Text summary to stdout in addition to the HTML report.
    print("\nCoverage Summary:")
    rc, summary, err = run_cmd(
        f"llvm-cov report {args.bin} -instr-profile={profdata_file}")
    print(summary)

    print(f"Report generated successfully in directory: {args.out}")
    print(f"You can view it by opening {os.path.join(args.out, 'index.html')} in a browser.")
if __name__ == "__main__":
    main()

./gen_coverage.py --bin ./bin/tiffinfo --args "-Dcjrsw {}" --samples ../libtiff-workshop/outs/asan/queue

最后的 web 报告如下:

出于进度优先的考虑,之后的(包括前面的 libtiff)chall 我都没有深入分析它们的漏洞成因,想着早点把 fuzz 基本要点都玩明白后去自己写一个结合了 AI 的 fuzzer 。还有一个原因是 Fuzzing 101 的可复现性不怎么高,能不能跑出和 CVE 描述对应的 crash 完全是看运气,所以意义不大,没必要浪费时间去磨一个一模一样的 crash 样本。