Add hibernation image area
[AGL/meta-agl.git] / meta-agl-bsp / meta-renesas / recipes-bsp / u-boot / u-boot / hibernation / 0002-Enable-swsusp-DMA-support.patch
diff --git a/meta-agl-bsp/meta-renesas/recipes-bsp/u-boot/u-boot/hibernation/0002-Enable-swsusp-DMA-support.patch b/meta-agl-bsp/meta-renesas/recipes-bsp/u-boot/u-boot/hibernation/0002-Enable-swsusp-DMA-support.patch
new file mode 100644 (file)
index 0000000..2a525d3
--- /dev/null
@@ -0,0 +1,95 @@
+From 33dfe19185b35fc61613070032836beee0f48c45 Mon Sep 17 00:00:00 2001
+From: Yuichi Kusakabe <yuichi.kusakabe@jp.fujitsu.com>
+Date: Fri, 9 Jun 2017 20:45:39 +0900
+Subject: [PATCH 2/3] Enable swsusp DMA support
+
+Signed-off-by: Yuichi Kusakabe <yuichi.kusakabe@jp.fujitsu.com>
+---
+ common/cmd_swsusp.c | 58 +++++++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 58 insertions(+)
+
+diff --git a/common/cmd_swsusp.c b/common/cmd_swsusp.c
+index ba05aa4..b1d6c22 100644
+--- a/common/cmd_swsusp.c
++++ b/common/cmd_swsusp.c
+@@ -226,6 +226,53 @@ static inline void *malloc_aligned(u32 size, u32 align)
+       return (void *)(((u32)malloc(size + align) + align - 1) & ~(align - 1));
+ }
+ 
++static int block_read(u32 page, void *addr, u32 count)
++{
++      __u32 cnt;
++      int blk_per_page;
++
++      blk_per_page = PAGE_SIZE / swap_dev->blksz;
++      cnt = swap_dev->block_read(swap_dev->dev,
++                              swap_info.start + (page * blk_per_page),
++                              count * blk_per_page, addr);
++
++      return cnt != count * blk_per_page;
++}
++
++static int get_block(unsigned char *buffer, u32 size)
++{
++      int need_num_pages = size / PAGE_SIZE;
++      int read_pages = 0;
++      int count;
++      u64 start;
++
++      do {
++              u64 prev;
++              count = 0;
++
++              if (!get_meta())
++                      goto exit;
++
++              prev = start = meta_map->entries[meta_idx];
++              do {
++                      count++;
++                      meta_idx++;
++                      if (meta_map->entries[meta_idx] - prev > 1)
++                              break;
++                      prev = meta_map->entries[meta_idx];
++              } while (read_pages + count < need_num_pages &&
++                      meta_idx < ARRAY_SIZE(meta_map->entries));
++
++              if (block_read(start, buffer, count))
++                      return -1;
++              read_pages += count;
++              buffer += count * PAGE_SIZE;
++      } while (read_pages < need_num_pages);
++
++exit:
++      return read_pages * PAGE_SIZE;
++}
++
+ #endif
+ 
+ static int page_read(u32 page, void *addr)
+@@ -465,12 +512,23 @@ static int image_page_get_next(void *buffer)
+                       cmp_len = *(size_t *) cmp_buf;
+                       cmp_avail = PAGE_SIZE;
+ 
++#ifdef CONFIG_SH_DMA
++                      while (cmp_avail < cmp_len + LZO_HEADER) {
++                              /* try to DMA-read whole block */
++                              ret = get_block(cmp_buf + cmp_avail,
++                                              cmp_len + LZO_HEADER);
++                              if (unlikely(ret <= 0))
++                                      return ret;
++                              cmp_avail += ret;
++                      }
++#else
+                       while (cmp_avail < cmp_len + LZO_HEADER) {
+                               ret = raw_page_get_next(cmp_buf + cmp_avail);
+                               if (unlikely(ret <= 0))
+                                       return ret;
+                               cmp_avail += PAGE_SIZE;
+                       }
++#endif
+                       unc_len = LZO_UNC_SIZE;
+                       ret = lzo1x_decompress_safe(cmp_buf + LZO_HEADER,
+                                               cmp_len, unc_buf, &unc_len);
+-- 
+1.8.3.1
+