On Mon, Sep 22, 2014 at 04:58:26PM +0800, Gui Hecheng wrote:
> So we check page alignment every time before fetching the next @len,
> i.e. after the previous piece of data has been decompressed.
> If the current page has fewer than 4 bytes left,
> the next @len is fetched at the start of the next page.
Thanks for the fix.
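
To restate the idea with numbers: with 4096-byte pages and a current input
offset of 4094, only 2 bytes remain in the page, which is less than LZO_LEN
(4), so the next length field has to be read from offset 4096 instead.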
> --- a/cmds-restore.c
> +++ b/cmds-restore.c
> @@ -57,6 +57,9 @@ static int dry_run = 0;
>
> #define LZO_LEN 4
> #define PAGE_CACHE_SIZE 4096
> +#define PAGE_CACHE_MASK (~(PAGE_CACHE_SIZE - 1))
> +#define PAGE_CACHE_ALIGN(addr) (((addr) + PAGE_CACHE_SIZE - 1) \
> + & PAGE_CACHE_MASK)
This is not type-safe; PAGE_CACHE_SIZE should be unsigned long so the mask
and the alignment arithmetic are not done in (signed) int.
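
For example (untested sketch), making the constant unsigned long would be
enough, the mask and PAGE_CACHE_ALIGN() then inherit the type:

#define PAGE_CACHE_SIZE 4096UL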
> #define lzo1x_worst_compress(x) ((x) + ((x) / 16) + 64 + 3)
>
> static int decompress_zlib(char *inbuf, char *outbuf, u64 compress_len,
> @@ -93,6 +96,28 @@ static inline size_t read_compress_length(unsigned char *buf)
> return le32_to_cpu(dlen);
> }
>
> +static void align_if_need(size_t *tot_in, size_t *in_len)
> +{
> + int tot_in_aligned;
> + int bytes_left;
> +
> + tot_in_aligned = PAGE_CACHE_ALIGN(*tot_in);
This truncates a size_t to int, on top of the implicit conversions that
happen inside the macro.
> + bytes_left = tot_in_aligned - *tot_in;
And here int = int - size_t, so the subtraction is done in size_t and the
result truncated. Both locals should be size_t; see the sketch at the end.
> +
> + if (bytes_left >= LZO_LEN)
> + return;
> +
> + /*
> + * The LZO_LEN bytes is guaranteed to be
> + * in one page as a whole, so if a page
> + * has fewer than LZO_LEN bytes left,
> + * the LZO_LEN bytes should be fetched
> + * at the start of the next page
> + */
Nitpick: the comment can use the full width of the line:
/*
 * The LZO_LEN bytes are guaranteed to be in one page as a whole,
 * so if a page has fewer than LZO_LEN bytes left, the LZO_LEN
 * bytes should be fetched at the start of the next page
 */
> + *in_len += bytes_left;
> + *tot_in = tot_in_aligned;
> +}
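
For reference, the whole helper with the types fixed could look like this
(untested sketch, assuming the unsigned long PAGE_CACHE_SIZE suggested
above):

static void align_if_need(size_t *tot_in, size_t *in_len)
{
	size_t tot_in_aligned;
	size_t bytes_left;

	tot_in_aligned = PAGE_CACHE_ALIGN(*tot_in);
	bytes_left = tot_in_aligned - *tot_in;

	if (bytes_left >= LZO_LEN)
		return;

	/*
	 * The LZO_LEN bytes are guaranteed to be in one page as a whole,
	 * so if a page has fewer than LZO_LEN bytes left, the LZO_LEN
	 * bytes should be fetched at the start of the next page.
	 */
	*in_len += bytes_left;
	*tot_in = tot_in_aligned;
}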