diff --git a/.rustfmt.toml b/.rustfmt.toml new file mode 100644 index 0000000..c1fc7b8 --- /dev/null +++ b/.rustfmt.toml @@ -0,0 +1 @@ +# reset to default diff --git a/src/alpha_blending.rs b/src/alpha_blending.rs index b2ea508..124ae35 100644 --- a/src/alpha_blending.rs +++ b/src/alpha_blending.rs @@ -1,8 +1,8 @@ //! Optimized alpha blending routines based on libwebp //! -//! https://github.com/webmproject/libwebp/blob/e4f7a9f0c7c9fbfae1568bc7fa5c94b989b50872/src/demux/anim_decode.c#L215-L267 +//! -fn channel_shift(i: u32) -> u32 { +const fn channel_shift(i: u32) -> u32 { i * 8 } @@ -18,8 +18,9 @@ fn blend_channel_nonpremult( ) -> u8 { let src_channel = ((src >> shift) & 0xff) as u8; let dst_channel = ((dst >> shift) & 0xff) as u8; - let blend_unscaled = (src_channel as u32 * src_a as u32) + (dst_channel as u32 * dst_a as u32); - debug_assert!(u64::from(blend_unscaled) < (1u64 << 32) / scale as u64); + let blend_unscaled = + (u32::from(src_channel) * u32::from(src_a)) + (u32::from(dst_channel) * u32::from(dst_a)); + debug_assert!(u64::from(blend_unscaled) < (1u64 << 32) / u64::from(scale)); ((blend_unscaled * scale) >> channel_shift(3)) as u8 } @@ -35,8 +36,8 @@ fn blend_pixel_nonpremult(src: u32, dst: u32) -> u32 { // libwebp used the following formula here: //let dst_factor_a = (dst_a as u32 * (256 - src_a as u32)) >> 8; // however, we've found that we can use a more precise approximation without losing performance: - let dst_factor_a = div_by_255(dst_a as u32 * (255 - src_a as u32)); - let blend_a = src_a as u32 + dst_factor_a; + let dst_factor_a = div_by_255(u32::from(dst_a) * (255 - u32::from(src_a))); + let blend_a = u32::from(src_a) + dst_factor_a; let scale = (1u32 << 24) / blend_a; let blend_r = @@ -45,11 +46,11 @@ fn blend_pixel_nonpremult(src: u32, dst: u32) -> u32 { blend_channel_nonpremult(src, src_a, dst, dst_factor_a as u8, scale, channel_shift(1)); let blend_b = blend_channel_nonpremult(src, src_a, dst, dst_factor_a as u8, scale, channel_shift(2)); - debug_assert!(src_a as u32 + dst_factor_a < 256); + debug_assert!(u32::from(src_a) + dst_factor_a < 256); - ((blend_r as u32) << channel_shift(0)) - | ((blend_g as u32) << channel_shift(1)) - | ((blend_b as u32) << channel_shift(2)) + (u32::from(blend_r) << channel_shift(0)) + | (u32::from(blend_g) << channel_shift(1)) + | (u32::from(blend_b) << channel_shift(2)) | (blend_a << channel_shift(3)) } } @@ -69,7 +70,7 @@ pub(crate) fn do_alpha_blending(buffer: [u8; 4], canvas: [u8; 4]) -> [u8; 4] { // https://arxiv.org/pdf/2202.02864 // https://github.com/image-rs/image-webp/issues/119#issuecomment-2544007820 #[inline] -fn div_by_255(v: u32) -> u32 { +const fn div_by_255(v: u32) -> u32 { (((v + 0x80) >> 8) + v + 0x80) >> 8 } @@ -116,9 +117,10 @@ mod tests { let slow = do_alpha_blending_reference([r1, 0, 0, a1], [r2, 0, 0, a2]); // libwebp doesn't do exact blending and so we don't either for (o, s) in opt.iter().zip(slow.iter()) { - if o.abs_diff(*s) > 3 { - panic!("Mismatch in results! opt: {opt:?}, slow: {slow:?}, blended values: [{r1}, 0, 0, {a1}], [{r2}, 0, 0, {a2}]"); - } + assert!( + o.abs_diff(*s) <= 3, + "Mismatch in results! 
opt: {opt:?}, slow: {slow:?}, blended values: [{r1}, 0, 0, {a1}], [{r2}, 0, 0, {a2}]" + ); } } } diff --git a/src/decoder.rs b/src/decoder.rs index 0e13857..357bed9 100644 --- a/src/decoder.rs +++ b/src/decoder.rs @@ -186,7 +186,7 @@ pub(crate) enum WebPRiffChunk { } impl WebPRiffChunk { - pub(crate) fn from_fourcc(chunk_fourcc: [u8; 4]) -> Self { + pub(crate) const fn from_fourcc(chunk_fourcc: [u8; 4]) -> Self { match &chunk_fourcc { b"RIFF" => Self::RIFF, b"WEBP" => Self::WEBP, @@ -203,7 +203,7 @@ impl WebPRiffChunk { } } - pub(crate) fn to_fourcc(self) -> [u8; 4] { + pub(crate) const fn to_fourcc(self) -> [u8; 4] { match self { Self::RIFF => *b"RIFF", Self::WEBP => *b"WEBP", @@ -220,7 +220,7 @@ impl WebPRiffChunk { } } - pub(crate) fn is_unknown(&self) -> bool { + pub(crate) const fn is_unknown(self) -> bool { matches!(self, Self::Unknown(_)) } } @@ -292,10 +292,10 @@ pub struct WebPDecoder { } impl WebPDecoder { - /// Create a new WebPDecoder from the reader `r`. The decoder performs many small reads, so the + /// Create a new `WebPDecoder` from the reader `r`. The decoder performs many small reads, so the /// reader should be buffered. - pub fn new(r: R) -> Result, DecodingError> { - let mut decoder = WebPDecoder { + pub fn new(r: R) -> Result { + let mut decoder = Self { r, width: 0, height: 0, @@ -346,8 +346,8 @@ impl WebPDecoder { let w = self.r.read_u16::()?; let h = self.r.read_u16::()?; - self.width = (w & 0x3FFF) as u32; - self.height = (h & 0x3FFF) as u32; + self.width = u32::from(w & 0x3FFF); + self.height = u32::from(h & 0x3FFF); if self.width == 0 || self.height == 0 { return Err(DecodingError::InconsistentImageSizes); } @@ -402,7 +402,7 @@ impl WebPDecoder { self.chunks.entry(chunk).or_insert(range); } - if let WebPRiffChunk::ANMF = chunk { + if chunk == WebPRiffChunk::ANMF { self.num_frames += 1; if chunk_size < 24 { return Err(DecodingError::InvalidChunkSize); @@ -603,7 +603,7 @@ impl WebPDecoder { } /// Returns the number of bytes required to store the image or a single frame, or None if that - /// would take more than usize::MAX bytes. + /// would take more than `usize::MAX` bytes. pub fn output_buffer_size(&self) -> Option { let bytes_per_pixel = if self.has_alpha() { 4 } else { 3 }; (self.width as usize) @@ -612,8 +612,12 @@ impl WebPDecoder { } /// Returns the raw bytes of the image. For animated images, this is the first frame. + /// + /// Fails with `ImageTooLarge` if `buf` has length different than `output_buffer_size()` pub fn read_image(&mut self, buf: &mut [u8]) -> Result<(), DecodingError> { - assert_eq!(Some(buf.len()), self.output_buffer_size()); + if Some(buf.len()) != self.output_buffer_size() { + return Err(DecodingError::ImageTooLarge); + } if self.is_animated() { let saved = std::mem::take(&mut self.animation); @@ -639,10 +643,8 @@ impl WebPDecoder { .chunks .get(&WebPRiffChunk::VP8) .ok_or(DecodingError::ChunkMissing)?; - // TODO: avoid cloning frame - let frame = Vp8Decoder::new(range_reader(&mut self.r, range.start..range.end)?) - .decode_frame()? - .clone(); + let reader = range_reader(&mut self.r, range.start..range.end)?; + let frame = Vp8Decoder::decode_frame(reader)?; if u32::from(frame.width) != self.width || u32::from(frame.height) != self.height { return Err(DecodingError::InconsistentImageSizes); } @@ -656,7 +658,7 @@ impl WebPDecoder { .ok_or(DecodingError::ChunkMissing)? 
.clone(); let alpha_chunk = read_alpha_chunk( - &mut range_reader(&mut self.r, range.start..range.end)?, + &mut range_reader(&mut self.r, range)?, self.width as u16, self.height as u16, )?; @@ -746,9 +748,9 @@ impl WebPDecoder { let (frame, frame_has_alpha): (Vec, bool) = match chunk { WebPRiffChunk::VP8 => { let reader = (&mut self.r).take(chunk_size); - let mut vp8_decoder = Vp8Decoder::new(reader); - let raw_frame = vp8_decoder.decode_frame()?; - if raw_frame.width as u32 != frame_width || raw_frame.height as u32 != frame_height + let raw_frame = Vp8Decoder::decode_frame(reader)?; + if u32::from(raw_frame.width) != frame_width + || u32::from(raw_frame.height) != frame_height { return Err(DecodingError::InconsistentImageSizes); } @@ -781,8 +783,7 @@ impl WebPDecoder { return Err(DecodingError::ChunkHeaderInvalid(next_chunk.to_fourcc())); } - let mut vp8_decoder = Vp8Decoder::new((&mut self.r).take(next_chunk_size)); - let frame = vp8_decoder.decode_frame()?; + let frame = Vp8Decoder::decode_frame((&mut self.r).take(next_chunk_size))?; let mut rgba_frame = vec![0; frame_width as usize * frame_height as usize * 4]; frame.fill_rgba(&mut rgba_frame); diff --git a/src/encoder.rs b/src/encoder.rs index 3b415e4..0300bd1 100644 --- a/src/encoder.rs +++ b/src/encoder.rs @@ -57,7 +57,7 @@ impl BitWriter { if self.nbits >= 64 { self.writer.write_all(&self.buffer.to_le_bytes())?; self.nbits -= 64; - self.buffer = bits.checked_shr((nbits - self.nbits) as u32).unwrap_or(0); + self.buffer = bits.checked_shr(u32::from(nbits - self.nbits)).unwrap_or(0); } debug_assert!(self.nbits < 64); Ok(()) @@ -82,10 +82,10 @@ fn write_single_entry_huffman_tree(w: &mut BitWriter, symbol: u8) - w.write_bits(1, 2)?; if symbol <= 1 { w.write_bits(0, 1)?; - w.write_bits(symbol as u64, 1)?; + w.write_bits(u64::from(symbol), 1)?; } else { w.write_bits(1, 1)?; - w.write_bits(symbol as u64, 8)?; + w.write_bits(u64::from(symbol), 8)?; } Ok(()) } @@ -188,7 +188,7 @@ fn build_huffman_tree( let mut len = length_limit; let mut indexes = frequencies.iter().copied().enumerate().collect::>(); indexes.sort_unstable_by_key(|&(_, frequency)| frequency); - for &(i, frequency) in indexes.iter() { + for &(i, frequency) in &indexes { if frequency > 0 { while counts[len as usize] == 0 { len -= 1; @@ -251,13 +251,13 @@ fn write_huffman_tree( w.write_bits(0, 1)?; // normal huffman tree w.write_bits(19 - 4, 4)?; // num_code_lengths - 4 - for &i in CODE_LENGTH_ORDER.iter() { + for i in CODE_LENGTH_ORDER { if i > 15 || code_length_frequencies[i] == 0 { w.write_bits(0, 3)?; } else if single_code_length_length { w.write_bits(1, 3)?; } else { - w.write_bits(code_length_lengths[i] as u64, 3)?; + w.write_bits(u64::from(code_length_lengths[i]), 3)?; } } @@ -275,7 +275,7 @@ fn write_huffman_tree( if !single_code_length_length { for &len in lengths.iter() { w.write_bits( - code_length_codes[len as usize] as u64, + u64::from(code_length_codes[len as usize]), code_length_lengths[len as usize], )?; } @@ -284,7 +284,7 @@ fn write_huffman_tree( Ok(()) } -fn length_to_symbol(len: u16) -> (u16, u8) { +const fn length_to_symbol(len: u16) -> (u16, u8) { let len = len - 1; let highest_bit = 15 - len.leading_zeros() as u16; // TODO: use ilog2 once MSRV >= 1.67 let second_highest_bit = (len >> (highest_bit - 1)) & 1; @@ -331,11 +331,11 @@ fn write_run( if run_length > 0 { if run_length <= 4 { let symbol = 256 + run_length - 1; - w.write_bits(codes1[symbol] as u64, lengths1[symbol])?; + w.write_bits(u64::from(codes1[symbol]), lengths1[symbol])?; } else { let (symbol, 
extra_bits) = length_to_symbol(run_length as u16); w.write_bits( - codes1[256 + symbol as usize] as u64, + u64::from(codes1[256 + symbol as usize]), lengths1[256 + symbol as usize], )?; w.write_bits( @@ -392,7 +392,7 @@ fn encode_frame( }; assert_eq!( - (width as u64 * height as u64).saturating_mul(bytes_per_pixel), + (u64::from(width) * u64::from(height)).saturating_mul(bytes_per_pixel), data.len() as u64 ); @@ -401,10 +401,10 @@ fn encode_frame( } w.write_bits(0x2f, 8)?; // signature - w.write_bits(width as u64 - 1, 14)?; - w.write_bits(height as u64 - 1, 14)?; + w.write_bits(u64::from(width) - 1, 14)?; + w.write_bits(u64::from(height) - 1, 14)?; - w.write_bits(is_alpha as u64, 1)?; // alpha used + w.write_bits(u64::from(is_alpha), 1)?; // alpha used w.write_bits(0x0, 3)?; // version // subtract green transform @@ -542,7 +542,7 @@ fn encode_frame( ColorType::L8 => { while let Some(pixel) = it.next() { w.write_bits( - codes1[pixel[1] as usize] as u64, + u64::from(codes1[pixel[1] as usize]), lengths1[pixel[1] as usize], )?; write_run(w, pixel, &mut it, &codes1, &lengths1)?; @@ -553,8 +553,8 @@ fn encode_frame( let len1 = lengths1[pixel[1] as usize]; let len3 = lengths3[pixel[3] as usize]; - let code = - codes1[pixel[1] as usize] as u64 | (codes3[pixel[3] as usize] as u64) << len1; + let code = u64::from(codes1[pixel[1] as usize]) + | (u64::from(codes3[pixel[3] as usize]) << len1); w.write_bits(code, len1 + len3)?; write_run(w, pixel, &mut it, &codes1, &lengths1)?; @@ -566,9 +566,9 @@ fn encode_frame( let len0 = lengths0[pixel[0] as usize]; let len2 = lengths2[pixel[2] as usize]; - let code = codes1[pixel[1] as usize] as u64 - | (codes0[pixel[0] as usize] as u64) << len1 - | (codes2[pixel[2] as usize] as u64) << (len1 + len0); + let code = u64::from(codes1[pixel[1] as usize]) + | (u64::from(codes0[pixel[0] as usize]) << len1) + | (u64::from(codes2[pixel[2] as usize]) << (len1 + len0)); w.write_bits(code, len1 + len0 + len2)?; write_run(w, pixel, &mut it, &codes1, &lengths1)?; @@ -581,10 +581,10 @@ fn encode_frame( let len2 = lengths2[pixel[2] as usize]; let len3 = lengths3[pixel[3] as usize]; - let code = codes1[pixel[1] as usize] as u64 - | (codes0[pixel[0] as usize] as u64) << len1 - | (codes2[pixel[2] as usize] as u64) << (len1 + len0) - | (codes3[pixel[3] as usize] as u64) << (len1 + len0 + len2); + let code = u64::from(codes1[pixel[1] as usize]) + | (u64::from(codes0[pixel[0] as usize]) << len1) + | (u64::from(codes2[pixel[2] as usize]) << (len1 + len0)) + | (u64::from(codes3[pixel[3] as usize]) << (len1 + len0 + len2)); w.write_bits(code, len1 + len0 + len2 + len3)?; write_run(w, pixel, &mut it, &codes1, &lengths1)?; @@ -596,7 +596,7 @@ fn encode_frame( Ok(()) } -fn chunk_size(inner_bytes: usize) -> u32 { +const fn chunk_size(inner_bytes: usize) -> u32 { if inner_bytes % 2 == 1 { (inner_bytes + 1) as u32 + 8 } else { @@ -809,7 +809,7 @@ mod tests { .encode(&img[..256 * 256 * 3], 256, 256, crate::ColorType::Rgb8) .unwrap(); let decoded = webp::Decoder::new(&output).decode().unwrap(); - assert!(&img[..256 * 256 * 3] == &*decoded); + assert_eq!(img[..256 * 256 * 3], *decoded); let mut output = Vec::new(); let mut encoder = WebPEncoder::new(&mut output); @@ -818,7 +818,7 @@ mod tests { .encode(&img, 256, 256, crate::ColorType::Rgba8) .unwrap(); let decoded = webp::Decoder::new(&output).decode().unwrap(); - assert!(&img == &*decoded); + assert_eq!(img, *decoded); let mut output = Vec::new(); let mut encoder = WebPEncoder::new(&mut output); @@ -828,7 +828,7 @@ mod tests { .encode(&img, 256, 
256, crate::ColorType::Rgba8) .unwrap(); let decoded = webp::Decoder::new(&output).decode().unwrap(); - assert!(&img == &*decoded); + assert_eq!(img, *decoded); let mut output = Vec::new(); let mut encoder = WebPEncoder::new(&mut output); @@ -838,11 +838,11 @@ mod tests { .encode(&img, 256, 256, crate::ColorType::Rgba8) .unwrap(); let decoded = webp::Decoder::new(&output).decode().unwrap(); - assert!(&img == &*decoded); + assert_eq!(img, *decoded); let mut output = Vec::new(); let mut encoder = WebPEncoder::new(&mut output); - encoder.set_params(params.clone()); + encoder.set_params(params); encoder.set_xmp_metadata(vec![0; 7]); encoder.set_icc_profile(vec![0; 8]); encoder.set_icc_profile(vec![0; 9]); @@ -850,6 +850,6 @@ mod tests { .encode(&img, 256, 256, crate::ColorType::Rgba8) .unwrap(); let decoded = webp::Decoder::new(&output).decode().unwrap(); - assert!(&img == &*decoded); + assert_eq!(img, *decoded); } } diff --git a/src/extended.rs b/src/extended.rs index ce8e761..f872ad1 100644 --- a/src/extended.rs +++ b/src/extended.rs @@ -301,7 +301,7 @@ pub(crate) fn read_alpha_chunk( let mut decoder = LosslessDecoder::new(reader); let mut data = vec![0; usize::from(width) * usize::from(height) * 4]; - decoder.decode_frame(width as u32, height as u32, true, &mut data)?; + decoder.decode_frame(u32::from(width), u32::from(height), true, &mut data)?; let mut green = vec![0; usize::from(width) * usize::from(height)]; for (rgba_val, green_val) in data.chunks_exact(4).zip(green.iter_mut()) { diff --git a/src/huffman.rs b/src/huffman.rs index 3a24244..45870fd 100644 --- a/src/huffman.rs +++ b/src/huffman.rs @@ -1,5 +1,5 @@ //! Rudimentary utility for reading Canonical Huffman Codes. -//! Based off https://github.com/webmproject/libwebp/blob/7f8472a610b61ec780ef0a8873cd954ac512a505/src/utils/huffman.c +//! 
Based off use std::io::BufRead; @@ -39,7 +39,7 @@ impl Default for HuffmanTree { impl HuffmanTree { /// Builds a tree implicitly, just from code lengths - pub(crate) fn build_implicit(code_lengths: Vec) -> Result { + pub(crate) fn build_implicit(code_lengths: Vec) -> Result { // Count symbols and build histogram let mut num_symbols = 0; let mut code_length_hist = [0; MAX_ALLOWED_CODE_LENGTH + 1]; @@ -60,7 +60,7 @@ impl HuffmanTree { let mut curr_code = 0; let mut next_codes = [0; MAX_ALLOWED_CODE_LENGTH + 1]; let max_code_length = code_length_hist.iter().rposition(|&x| x != 0).unwrap() as u16; - for code_len in 1..=usize::from(max_code_length) { + for code_len in 1..usize::from(max_code_length) + 1 { next_codes[code_len] = curr_code; curr_code = (curr_code + code_length_hist[code_len]) << 1; } @@ -71,7 +71,7 @@ impl HuffmanTree { } // Calculate table/tree parameters - let table_bits = max_code_length.min(MAX_TABLE_BITS as u16); + let table_bits = max_code_length.min(u16::from(MAX_TABLE_BITS)); let table_size = (1 << table_bits) as usize; let table_mask = table_size as u16 - 1; let tree_size = code_length_hist[table_bits as usize + 1..=max_code_length as usize] @@ -91,7 +91,7 @@ impl HuffmanTree { if length <= table_bits { let mut j = (u16::reverse_bits(code) >> (16 - length)) as usize; - let entry = ((length as u32) << 16) | symbol as u32; + let entry = (u32::from(length) << 16) | symbol as u32; while j < table_size { table[j] = entry; j += 1 << length as usize; @@ -134,7 +134,7 @@ impl HuffmanTree { match tree[node_index] { HuffmanTreeNode::Empty => { - tree[node_index] = HuffmanTreeNode::Leaf(symbol as u16) + tree[node_index] = HuffmanTreeNode::Leaf(symbol as u16); } HuffmanTreeNode::Leaf(_) => return Err(DecodingError::HuffmanError), HuffmanTreeNode::Branch(_offset) => return Err(DecodingError::HuffmanError), @@ -149,23 +149,23 @@ impl HuffmanTree { })) } - pub(crate) fn build_single_node(symbol: u16) -> HuffmanTree { + pub(crate) const fn build_single_node(symbol: u16) -> Self { Self(HuffmanTreeInner::Single(symbol)) } - pub(crate) fn build_two_node(zero: u16, one: u16) -> HuffmanTree { + pub(crate) fn build_two_node(zero: u16, one: u16) -> Self { Self(HuffmanTreeInner::Tree { tree: vec![ HuffmanTreeNode::Leaf(zero), HuffmanTreeNode::Leaf(one), HuffmanTreeNode::Empty, ], - table: vec![1 << 16 | zero as u32, 1 << 16 | one as u32], + table: vec![(1 << 16) | u32::from(zero), (1 << 16) | u32::from(one)], table_mask: 0x1, }) } - pub(crate) fn is_single_node(&self) -> bool { + pub(crate) const fn is_single_node(&self) -> bool { matches!(self.0, HuffmanTreeInner::Single(_)) } @@ -230,10 +230,7 @@ impl HuffmanTree { /// /// Returns a tuple of the codelength and symbol value. This function may return wrong /// information if there aren't enough bits in the bit reader to read the next symbol. - pub(crate) fn peek_symbol( - &self, - bit_reader: &mut BitReader, - ) -> Option<(u8, u16)> { + pub(crate) fn peek_symbol(&self, bit_reader: &BitReader) -> Option<(u8, u16)> { match &self.0 { HuffmanTreeInner::Tree { table, table_mask, .. diff --git a/src/loop_filter.rs b/src/loop_filter.rs index a0e5008..2893139 100644 --- a/src/loop_filter.rs +++ b/src/loop_filter.rs @@ -18,7 +18,7 @@ fn s2u(val: i32) -> u8 { } #[inline] -fn diff(val1: u8, val2: u8) -> u8 { +const fn diff(val1: u8, val2: u8) -> u8 { if val1 > val2 { val1 - val2 } else { diff --git a/src/lossless.rs b/src/lossless.rs index 99be480..14070f9 100644 --- a/src/lossless.rs +++ b/src/lossless.rs @@ -1,7 +1,6 @@ //! 
Decoding of lossless WebP images //! //! [Lossless spec](https://developers.google.com/speed/webp/docs/webp_lossless_bitstream_specification) -//! use std::io::BufRead; use std::mem; @@ -72,8 +71,8 @@ pub(crate) struct LosslessDecoder { impl LosslessDecoder { /// Create a new decoder - pub(crate) fn new(r: R) -> LosslessDecoder { - LosslessDecoder { + pub(crate) const fn new(r: R) -> Self { + Self { bit_reader: BitReader::new(r), transforms: [None, None, None, None], transform_order: Vec::new(), @@ -144,10 +143,15 @@ impl LosslessDecoder { size_bits, transform_data, } => { - apply_color_transform(&mut buf[..image_size], width, *size_bits, transform_data) + apply_color_transform( + &mut buf[..image_size], + width, + *size_bits, + transform_data, + ); } TransformType::SubtractGreen => { - apply_subtract_green_transform(&mut buf[..image_size]) + apply_subtract_green_transform(&mut buf[..image_size]); } TransformType::ColorIndexingTransform { table_size, @@ -155,7 +159,13 @@ impl LosslessDecoder { } => { width = self.width; image_size = usize::from(width) * usize::from(self.height) * 4; - apply_color_indexing_transform(buf, width, self.height, *table_size, table_data) + apply_color_indexing_transform( + buf, + width, + self.height, + *table_size, + table_data, + ); } } } @@ -448,13 +458,13 @@ impl LosslessDecoder { if symbol + repeat > num_symbols { return Err(DecodingError::BitStreamError); - } else { - let length = if use_prev { prev_code_len } else { 0 }; - while repeat > 0 { - repeat -= 1; - code_lengths[usize::from(symbol)] = length; - symbol += 1; - } + } + + let length = if use_prev { prev_code_len } else { 0 }; + while repeat > 0 { + repeat -= 1; + code_lengths[usize::from(symbol)] = length; + symbol += 1; } } } @@ -596,7 +606,7 @@ impl LosslessDecoder { index += 1; if index < next_block_start { - if let Some((bits, code)) = tree[GREEN].peek_symbol(&mut self.bit_reader) { + if let Some((bits, code)) = tree[GREEN].peek_symbol(&self.bit_reader) { if code >= 280 { self.bit_reader.consume(bits)?; data[index * 4..][..4] @@ -712,7 +722,7 @@ pub(crate) struct BitReader { } impl BitReader { - fn new(reader: R) -> Self { + const fn new(reader: R) -> Self { Self { reader, buffer: 0, @@ -746,12 +756,12 @@ impl BitReader { } /// Peeks at the next `num` bits in the buffer. - pub(crate) fn peek(&self, num: u8) -> u64 { + pub(crate) const fn peek(&self, num: u8) -> u64 { self.buffer & ((1 << num) - 1) } /// Peeks at the full buffer. 
- pub(crate) fn peek_full(&self) -> u64 { + pub(crate) const fn peek_full(&self) -> u64 { self.buffer } @@ -777,10 +787,10 @@ impl BitReader { let value = self.peek(num) as u32; self.consume(num)?; - match value.try_into() { - Ok(value) => Ok(value), - Err(_) => unreachable!("Value too large to fit in type"), - } + value.try_into().map_err(|_| { + debug_assert!(false, "Value too large to fit in type"); + DecodingError::BitStreamError + }) } } diff --git a/src/lossless_transform.rs b/src/lossless_transform.rs index 3291d8c..5efde37 100644 --- a/src/lossless_transform.rs +++ b/src/lossless_transform.rs @@ -216,20 +216,25 @@ pub fn apply_predictor_transform_11(image_data: &mut [u8], range: Range, let top = &old[range.start - width * 4..]; let mut l = [ - old[range.start - 4] as i16, - old[range.start - 3] as i16, - old[range.start - 2] as i16, - old[range.start - 1] as i16, + i16::from(old[range.start - 4]), + i16::from(old[range.start - 3]), + i16::from(old[range.start - 2]), + i16::from(old[range.start - 1]), ]; let mut tl = [ - old[range.start - width * 4 - 4] as i16, - old[range.start - width * 4 - 3] as i16, - old[range.start - width * 4 - 2] as i16, - old[range.start - width * 4 - 1] as i16, + i16::from(old[range.start - width * 4 - 4]), + i16::from(old[range.start - width * 4 - 3]), + i16::from(old[range.start - width * 4 - 2]), + i16::from(old[range.start - width * 4 - 1]), ]; for (chunk, top) in current.chunks_exact_mut(4).zip(top.chunks_exact(4)) { - let t = [top[0] as i16, top[1] as i16, top[2] as i16, top[3] as i16]; + let t = [ + i16::from(top[0]), + i16::from(top[1]), + i16::from(top[2]), + i16::from(top[3]), + ]; let mut predict_left = 0; let mut predict_top = 0; @@ -257,10 +262,10 @@ pub fn apply_predictor_transform_11(image_data: &mut [u8], range: Range, tl = t; l = [ - chunk[0] as i16, - chunk[1] as i16, - chunk[2] as i16, - chunk[3] as i16, + i16::from(chunk[0]), + i16::from(chunk[1]), + i16::from(chunk[2]), + i16::from(chunk[3]), ]; } } @@ -382,7 +387,7 @@ pub(crate) fn apply_color_indexing_transform( table_data: &[u8], ) { // TODO: Replace with built-in div_ceil when MSRV is 1.73+ - fn div_ceil(a: u16, b: u16) -> u16 { + const fn div_ceil(a: u16, b: u16) -> u16 { let d = a / b; let r = a % b; if r > 0 && b > 0 { @@ -416,7 +421,7 @@ pub(crate) fn apply_color_indexing_transform( .flat_map(|i| { let mut entry = Vec::new(); for j in 0..(1 << width_bits) { - let k = i >> (j * bits_per_entry) & mask; + let k = (i >> (j * bits_per_entry)) & mask; if k < table_size { entry.extend_from_slice(&table_data[usize::from(k) * 4..][..4]); } else { diff --git a/src/vp8.rs b/src/vp8.rs index a5f3ca7..c685d38 100644 --- a/src/vp8.rs +++ b/src/vp8.rs @@ -8,9 +8,7 @@ //! //! # Related Links //! * [rfc-6386](http://tools.ietf.org/html/rfc6386) - The VP8 Data Format and Decoding Guide -//! * [VP8.pdf](http://static.googleusercontent.com/media/research.google.com/en//pubs/archive/37073.pdf) - An overview of -//! of the VP8 format -//! +//! 
* [VP8.pdf](http://static.googleusercontent.com/media/research.google.com/en//pubs/archive/37073.pdf) - An overview of of the VP8 format use byteorder_lite::{LittleEndian, ReadBytesExt}; use std::cmp; @@ -19,10 +17,10 @@ use std::io::Read; use crate::decoder::DecodingError; -use super::loop_filter; -use super::transform; +use super::{loop_filter, transform}; use super::vp8_arithmetic_decoder::ArithmeticDecoder; + const MAX_SEGMENTS: usize = 4; const NUM_DCT_TOKENS: usize = 12; @@ -806,11 +804,11 @@ pub struct Frame { impl Frame { /// Chroma plane is half the size of the Luma plane - fn chroma_width(&self) -> u16 { + const fn chroma_width(&self) -> u16 { (self.width + 1) / 2 } - fn chroma_height(&self) -> u16 { + const fn chroma_height(&self) -> u16 { (self.height + 1) / 2 } @@ -827,7 +825,7 @@ impl Frame { let chroma_index = usize::from(self.chroma_width()) * (y / 2); let next_index = index + usize::from(self.width); - Frame::fill_rgb_row( + Self::fill_rgb_row( &self.ybuf[index..next_index], &self.ubuf[chroma_index..], &self.vbuf[chroma_index..], @@ -900,7 +898,7 @@ impl Frame { let chroma_index = usize::from(self.chroma_width()) * (y / 2); let next_index = index + usize::from(self.width); - Frame::fill_rgba_row( + Self::fill_rgba_row( &self.ybuf[index..next_index], &self.ubuf[chroma_index..], &self.vbuf[chroma_index..], @@ -965,6 +963,7 @@ impl Frame { } /// Gets the buffer size + #[must_use] pub fn get_buf_size(&self) -> usize { self.ybuf.len() * 3 } @@ -1059,12 +1058,12 @@ pub struct Vp8Decoder { impl Vp8Decoder { /// Create a new decoder. /// The reader must present a raw vp8 bitstream to the decoder - pub fn new(r: R) -> Vp8Decoder { + fn new(r: R) -> Self { let f = Frame::default(); let s = Segment::default(); let m = MacroBlock::default(); - Vp8Decoder { + Self { r, b: ArithmeticDecoder::new(), @@ -1180,10 +1179,10 @@ impl Vp8Decoder { }; for i in 0usize..n { let base = i32::from(if self.segments_enabled { - if !self.segment[i].delta_values { - i16::from(self.segment[i].quantizer_level) - } else { + if self.segment[i].delta_values { i16::from(self.segment[i].quantizer_level) + i16::from(yac_abs) + } else { + i16::from(self.segment[i].quantizer_level) } } else { i16::from(yac_abs) @@ -1293,7 +1292,7 @@ impl Vp8Decoder { self.top = init_top_macroblocks(self.frame.width as usize); // Almost always the first macro block, except when non exists (i.e. `width == 0`) - self.left = self.top.first().cloned().unwrap_or_default(); + self.left = self.top.first().copied().unwrap_or_default(); self.mbwidth = (self.frame.width + 15) / 16; self.mbheight = (self.frame.height + 15) / 16; @@ -1354,11 +1353,11 @@ impl Vp8Decoder { return Err(DecodingError::UnsupportedFeature( "Non-keyframe frames".to_owned(), )); - } else { - // Refresh entropy probs ????? - let _ = self.b.read_literal(1); } + // Refresh entropy probs ????? 
+ let _ = self.b.read_literal(1); + self.update_token_probabilities()?; let mut res = self.b.start_accumulated_result(); @@ -1670,13 +1669,13 @@ impl Vp8Decoder { break; } let b = decoder.read_bool(t).or_accumulate(&mut res); - extra = extra + extra + b as i16; + extra = extra + extra + i16::from(b); } i16::from(DCT_CAT_BASE[(category - DCT_CAT1) as usize]) + extra } - c => panic!("unknown token: {}", c), + c => panic!("unknown token: {c}"), }); skip = false; @@ -2055,7 +2054,7 @@ impl Vp8Decoder { //return values are the filter level, interior limit and hev threshold fn calculate_filter_parameters(&self, macroblock: &MacroBlock) -> (u8, u8, u8) { let segment = self.segment[macroblock.segmentid as usize]; - let mut filter_level = self.frame.filter_level as i32; + let mut filter_level = i32::from(self.frame.filter_level); if self.segments_enabled { if segment.delta_values { @@ -2112,7 +2111,12 @@ impl Vp8Decoder { } /// Decodes the current frame - pub fn decode_frame(&mut self) -> Result<&Frame, DecodingError> { + pub fn decode_frame(r: R) -> Result { + let decoder = Self::new(r); + decoder.decode_frame_() + } + + fn decode_frame_(mut self) -> Result { self.read_frame_header()?; for mby in 0..self.mbheight as usize { @@ -2154,58 +2158,58 @@ impl Vp8Decoder { } } - Ok(&self.frame) + Ok(self.frame) } } impl LumaMode { - fn from_i8(val: i8) -> Option { + const fn from_i8(val: i8) -> Option { Some(match val { - DC_PRED => LumaMode::DC, - V_PRED => LumaMode::V, - H_PRED => LumaMode::H, - TM_PRED => LumaMode::TM, - B_PRED => LumaMode::B, + DC_PRED => Self::DC, + V_PRED => Self::V, + H_PRED => Self::H, + TM_PRED => Self::TM, + B_PRED => Self::B, _ => return None, }) } - fn into_intra(self) -> Option { + const fn into_intra(self) -> Option { Some(match self { - LumaMode::DC => IntraMode::DC, - LumaMode::V => IntraMode::VE, - LumaMode::H => IntraMode::HE, - LumaMode::TM => IntraMode::TM, - LumaMode::B => return None, + Self::DC => IntraMode::DC, + Self::V => IntraMode::VE, + Self::H => IntraMode::HE, + Self::TM => IntraMode::TM, + Self::B => return None, }) } } impl ChromaMode { - fn from_i8(val: i8) -> Option { + const fn from_i8(val: i8) -> Option { Some(match val { - DC_PRED => ChromaMode::DC, - V_PRED => ChromaMode::V, - H_PRED => ChromaMode::H, - TM_PRED => ChromaMode::TM, + DC_PRED => Self::DC, + V_PRED => Self::V, + H_PRED => Self::H, + TM_PRED => Self::TM, _ => return None, }) } } impl IntraMode { - fn from_i8(val: i8) -> Option { + const fn from_i8(val: i8) -> Option { Some(match val { - B_DC_PRED => IntraMode::DC, - B_TM_PRED => IntraMode::TM, - B_VE_PRED => IntraMode::VE, - B_HE_PRED => IntraMode::HE, - B_LD_PRED => IntraMode::LD, - B_RD_PRED => IntraMode::RD, - B_VR_PRED => IntraMode::VR, - B_VL_PRED => IntraMode::VL, - B_HD_PRED => IntraMode::HD, - B_HU_PRED => IntraMode::HU, + B_DC_PRED => Self::DC, + B_TM_PRED => Self::TM, + B_VE_PRED => Self::VE, + B_HE_PRED => Self::HE, + B_LD_PRED => Self::LD, + B_RD_PRED => Self::RD, + B_VR_PRED => Self::VR, + B_VL_PRED => Self::VL, + B_HD_PRED => Self::HD, + B_HU_PRED => Self::HU, _ => return None, }) } @@ -2241,7 +2245,7 @@ fn create_border_luma(mbx: usize, mby: usize, mbw: usize, top: &[u8], left: &[u8 } if mbx == mbw - 1 { - for above in above[16..].iter_mut() { + for above in &mut above[16..] 
{ *above = top[mbx * 16 + 15]; } } else { @@ -2431,7 +2435,7 @@ fn predict_bdcpred(a: &mut [u8], x0: usize, y0: usize, stride: usize) { v >>= 3; for chunk in a.chunks_exact_mut(stride).skip(y0).take(4) { - for ch in chunk[x0..][..4].iter_mut() { + for ch in &mut chunk[x0..][..4] { *ch = v as u8; } } @@ -2488,7 +2492,7 @@ fn edge_pixels( fn predict_bvepred(a: &mut [u8], x0: usize, y0: usize, stride: usize) { let p = topleft_pixel(a, x0, y0, stride); - let (a0, a1, a2, a3, a4, _, _, _) = top_pixels(a, x0, y0, stride); + let (a0, a1, a2, a3, a4, ..) = top_pixels(a, x0, y0, stride); let avg_1 = avg3(p, a0, a1); let avg_2 = avg3(a0, a1, a2); let avg_3 = avg3(a1, a2, a3); @@ -2515,8 +2519,8 @@ fn predict_bhepred(a: &mut [u8], x0: usize, y0: usize, stride: usize) { ]; let mut pos = y0 * stride + x0; - for &avg in avgs.iter() { - for a_p in a[pos..=pos + 3].iter_mut() { + for avg in avgs { + for a_p in &mut a[pos..=pos + 3] { *a_p = avg; } pos += stride; @@ -2761,7 +2765,7 @@ mod tests { fn test_avg2() { for i in 0u8..=255 { for j in 0u8..=255 { - let ceil_avg = ((i as f32) + (j as f32)) / 2.0; + let ceil_avg = (f32::from(i) + f32::from(j)) / 2.0; let ceil_avg = ceil_avg.ceil() as u8; assert_eq!( ceil_avg, @@ -2793,7 +2797,8 @@ mod tests { for i in 0u8..=255 { for j in 0u8..=255 { for k in 0u8..=255 { - let floor_avg = ((i as f32) + 2.0 * (j as f32) + { k as f32 } + 2.0) / 4.0; + let floor_avg = + (2.0f32.mul_add(f32::from(j), f32::from(i)) + { f32::from(k) } + 2.0) / 4.0; let floor_avg = floor_avg.floor() as u8; assert_eq!( floor_avg, diff --git a/tests/decode.rs b/tests/decode.rs index cbeee40..ed37136 100644 --- a/tests/decode.rs +++ b/tests/decode.rs @@ -1,8 +1,6 @@ -use std::{ - fs::create_dir_all, - io::{Cursor, Write}, - path::PathBuf, -}; +use std::fs::create_dir_all; +use std::io::{Cursor, Write}; +use std::path::PathBuf; // Write images to `out/` directory on test failure - useful for diffing with reference images. const WRITE_IMAGES_ON_FAILURE: bool = false; @@ -128,12 +126,7 @@ fn reference_test(file: &str) { let mut data = vec![0; width as usize * height as usize * bytes_per_pixel]; decoder.read_frame(&mut data).unwrap(); - if !decoder.is_lossy() { - if data != reference_data { - save_image(&data, file, Some(i), decoder.has_alpha(), width, height); - panic!("Pixel mismatch") - } - } else { + if decoder.is_lossy() { let num_bytes_different = data .iter() .zip(reference_data.iter()) @@ -144,6 +137,9 @@ fn reference_test(file: &str) { save_image(&data, file, Some(i), decoder.has_alpha(), width, height); } assert!(percentage_different < 10, "More than 10% of pixels differ"); + } else if data != reference_data { + save_image(&data, file, Some(i), decoder.has_alpha(), width, height); + panic!("Pixel mismatch") } } }
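
A few standalone sketches to go with the patch. First, the exact division by 255 used in `do_alpha_blending`: the diff keeps the formula `(((v + 0x80) >> 8) + v + 0x80) >> 8`, and the comment in the hunk calls it a more precise approximation than libwebp's `>> 8` scaling (see the arxiv link there). The check below is my own illustration, not part of the patch — it copies that formula and verifies it against round-to-nearest division over the whole input range the blend feeds it (`dst_a * (255 - src_a)` is at most `255 * 255`).

```rust
// `div_by_255` copied verbatim from the diff; the exhaustive comparison against
// rounded floating-point division only illustrates why the formula is exact here.
const fn div_by_255(v: u32) -> u32 {
    (((v + 0x80) >> 8) + v + 0x80) >> 8
}

fn main() {
    for v in 0..=255u32 * 255 {
        let rounded = ((v as f64) / 255.0).round() as u32;
        assert_eq!(div_by_255(v), rounded, "mismatch at v = {v}");
    }
    println!("div_by_255(v) == round(v / 255) for every v in 0..=255*255");
}
```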
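
The `read_image` change replaces the `assert_eq!` on the buffer length with an `ImageTooLarge` error, and the added doc line spells out that contract. Below is a sketch of the caller side, assuming the crate's public API as it appears in this diff (`WebPDecoder::new` over a `BufRead + Seek` reader, `output_buffer_size`, `read_image`) and that `DecodingError` implements `std::error::Error`; the helper name is mine.

```rust
use std::io::Cursor;

use image_webp::WebPDecoder;

// Hypothetical helper: size the buffer from the decoder instead of guessing,
// so the new length check in `read_image` never trips.
fn decode_first_frame(webp_bytes: Vec<u8>) -> Result<Vec<u8>, Box<dyn std::error::Error>> {
    let mut decoder = WebPDecoder::new(Cursor::new(webp_bytes))?;

    // `None` means the output would not fit in `usize` at all.
    let len = decoder
        .output_buffer_size()
        .ok_or("output buffer size overflows usize")?;

    // With this patch a wrongly sized buffer yields Err(DecodingError::ImageTooLarge)
    // rather than a panic, so the failure is recoverable.
    let mut buf = vec![0u8; len];
    decoder.read_image(&mut buf)?;
    Ok(buf)
}
```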
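
In `BitWriter::write_bits`, the shift amount is now computed with `u32::from(nbits - self.nbits)` and still goes through `checked_shr(...).unwrap_or(0)`. The reason for the checked variant is that a plain `>>` panics in debug builds (and wraps the shift amount in release) when the shift equals the bit width, while the writer wants the remaining buffer to be zero in exactly that case. A small standalone illustration:

```rust
fn main() {
    let bits: u64 = 0xDEAD_BEEF;
    // Below the bit width, checked_shr behaves like an ordinary shift.
    assert_eq!(bits.checked_shr(4), Some(bits >> 4));
    // At (or beyond) 64 it returns None, which the encoder maps to 0
    // instead of panicking or silently masking the shift amount.
    assert_eq!(bits.checked_shr(64), None);
    assert_eq!(bits.checked_shr(64).unwrap_or(0), 0);
    println!("checked_shr(64).unwrap_or(0) gives the zero buffer the writer expects");
}
```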
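
The VP8 decoder now exposes a single consuming entry point: `Vp8Decoder::decode_frame(reader)` builds the decoder internally and returns the `Frame` by value, which is what lets decoder.rs drop its `// TODO: avoid cloning frame` workaround. Here is a generic analogue of that shape (names and types are mine, not the crate's, since the real types are crate-internal):

```rust
use std::io::Read;

struct Decoder<R> {
    reader: R,
    frame: Vec<u8>,
}

impl<R: Read> Decoder<R> {
    fn new(reader: R) -> Self {
        Self { reader, frame: Vec::new() }
    }

    // Consuming entry point, mirroring the decode_frame / decode_frame_ split
    // in the diff: construct, decode, hand the result back by value.
    fn decode_frame(reader: R) -> std::io::Result<Vec<u8>> {
        let mut decoder = Self::new(reader);
        decoder.decode_frame_()?;
        Ok(decoder.frame)
    }

    fn decode_frame_(&mut self) -> std::io::Result<()> {
        self.reader.read_to_end(&mut self.frame)?;
        Ok(())
    }
}

fn main() -> std::io::Result<()> {
    // No `.clone()` needed at the call site anymore.
    let frame = Decoder::decode_frame(&b"example"[..])?;
    assert_eq!(frame, b"example".to_vec());
    Ok(())
}
```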