linux-mm.kvack.org archive mirror
* [PATCH 1/3] zram: use u32 for entry ac_time tracking
@ 2025-12-15  5:47 Sergey Senozhatsky
  2025-12-15  5:47 ` [PATCH 2/3] zram: rename internal slot API Sergey Senozhatsky
                   ` (2 more replies)
  0 siblings, 3 replies; 6+ messages in thread
From: Sergey Senozhatsky @ 2025-12-15  5:47 UTC (permalink / raw)
  To: Andrew Morton
  Cc: Minchan Kim, Brian Geffon, David Stevens, linux-kernel, linux-mm,
	linux-block, Sergey Senozhatsky

We can reduce sizeof(zram_table_entry) on 64-bit systems
by converting flags and ac_time to u32.  Entry flags fit
into u32, and for ac_time a u32 gives us over a century of
entry lifespan (2^32 seconds is approx 136 years), which is
plenty given that zram records access time as system boot
time in seconds.

In struct zram_table_entry we alias the two u32 fields with
an unsigned long, because the bit-wait API (used for the slot
lock) requires a whole unsigned long word.
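
To illustrate, a minimal user-space sketch of the layout change
(stand-in types for the kernel's u32/ktime_t, lockdep map omitted;
not the actual driver structs):

	#include <stdio.h>

	struct entry_old {
		unsigned long handle;
		unsigned long flags;
		long long ac_time;	/* stand-in for ktime_t (s64) */
	};

	struct entry_new {
		unsigned long handle;
		union {
			/* bit-wait lock needs a full word */
			unsigned long __lock;
			struct {
				unsigned int flags;	/* u32 */
				unsigned int ac_time;	/* u32, boot seconds */
			} attr;
		};
	};

	int main(void)
	{
		/* on LP64 this prints "old: 24 new: 16" */
		printf("old: %zu new: %zu\n",
		       sizeof(struct entry_old), sizeof(struct entry_new));
		return 0;
	}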

Suggested-by: David Stevens <stevensd@google.com>
Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
---
 drivers/block/zram/zram_drv.c | 60 +++++++++++++++++------------------
 drivers/block/zram/zram_drv.h |  9 ++++--
 2 files changed, 37 insertions(+), 32 deletions(-)

diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 67a9e7c005c3..65f99ff3e2e5 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -81,7 +81,7 @@ static void zram_slot_lock_init(struct zram *zram, u32 index)
  */
 static __must_check bool zram_slot_trylock(struct zram *zram, u32 index)
 {
-	unsigned long *lock = &zram->table[index].flags;
+	unsigned long *lock = &zram->table[index].__lock;
 
 	if (!test_and_set_bit_lock(ZRAM_ENTRY_LOCK, lock)) {
 		mutex_acquire(slot_dep_map(zram, index), 0, 1, _RET_IP_);
@@ -94,7 +94,7 @@ static __must_check bool zram_slot_trylock(struct zram *zram, u32 index)
 
 static void zram_slot_lock(struct zram *zram, u32 index)
 {
-	unsigned long *lock = &zram->table[index].flags;
+	unsigned long *lock = &zram->table[index].__lock;
 
 	mutex_acquire(slot_dep_map(zram, index), 0, 0, _RET_IP_);
 	wait_on_bit_lock(lock, ZRAM_ENTRY_LOCK, TASK_UNINTERRUPTIBLE);
@@ -103,7 +103,7 @@ static void zram_slot_lock(struct zram *zram, u32 index)
 
 static void zram_slot_unlock(struct zram *zram, u32 index)
 {
-	unsigned long *lock = &zram->table[index].flags;
+	unsigned long *lock = &zram->table[index].__lock;
 
 	mutex_release(slot_dep_map(zram, index), _RET_IP_);
 	clear_and_wake_up_bit(ZRAM_ENTRY_LOCK, lock);
@@ -130,34 +130,33 @@ static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle)
 }
 
 static bool zram_test_flag(struct zram *zram, u32 index,
-			enum zram_pageflags flag)
+			   enum zram_pageflags flag)
 {
-	return zram->table[index].flags & BIT(flag);
+	return zram->table[index].attr.flags & BIT(flag);
 }
 
 static void zram_set_flag(struct zram *zram, u32 index,
-			enum zram_pageflags flag)
+			  enum zram_pageflags flag)
 {
-	zram->table[index].flags |= BIT(flag);
+	zram->table[index].attr.flags |= BIT(flag);
 }
 
 static void zram_clear_flag(struct zram *zram, u32 index,
-			enum zram_pageflags flag)
+			    enum zram_pageflags flag)
 {
-	zram->table[index].flags &= ~BIT(flag);
+	zram->table[index].attr.flags &= ~BIT(flag);
 }
 
 static size_t zram_get_obj_size(struct zram *zram, u32 index)
 {
-	return zram->table[index].flags & (BIT(ZRAM_FLAG_SHIFT) - 1);
+	return zram->table[index].attr.flags & (BIT(ZRAM_FLAG_SHIFT) - 1);
 }
 
-static void zram_set_obj_size(struct zram *zram,
-					u32 index, size_t size)
+static void zram_set_obj_size(struct zram *zram, u32 index, size_t size)
 {
-	unsigned long flags = zram->table[index].flags >> ZRAM_FLAG_SHIFT;
+	unsigned long flags = zram->table[index].attr.flags >> ZRAM_FLAG_SHIFT;
 
-	zram->table[index].flags = (flags << ZRAM_FLAG_SHIFT) | size;
+	zram->table[index].attr.flags = (flags << ZRAM_FLAG_SHIFT) | size;
 }
 
 static inline bool zram_allocated(struct zram *zram, u32 index)
@@ -208,14 +207,14 @@ static inline void zram_set_priority(struct zram *zram, u32 index, u32 prio)
 	 * Clear previous priority value first, in case if we recompress
 	 * further an already recompressed page
 	 */
-	zram->table[index].flags &= ~(ZRAM_COMP_PRIORITY_MASK <<
-				      ZRAM_COMP_PRIORITY_BIT1);
-	zram->table[index].flags |= (prio << ZRAM_COMP_PRIORITY_BIT1);
+	zram->table[index].attr.flags &= ~(ZRAM_COMP_PRIORITY_MASK <<
+					   ZRAM_COMP_PRIORITY_BIT1);
+	zram->table[index].attr.flags |= (prio << ZRAM_COMP_PRIORITY_BIT1);
 }
 
 static inline u32 zram_get_priority(struct zram *zram, u32 index)
 {
-	u32 prio = zram->table[index].flags >> ZRAM_COMP_PRIORITY_BIT1;
+	u32 prio = zram->table[index].attr.flags >> ZRAM_COMP_PRIORITY_BIT1;
 
 	return prio & ZRAM_COMP_PRIORITY_MASK;
 }
@@ -225,7 +224,7 @@ static void zram_accessed(struct zram *zram, u32 index)
 	zram_clear_flag(zram, index, ZRAM_IDLE);
 	zram_clear_flag(zram, index, ZRAM_PP_SLOT);
 #ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
-	zram->table[index].ac_time = ktime_get_boottime();
+	zram->table[index].attr.ac_time = (u32)ktime_get_boottime_seconds();
 #endif
 }
 
@@ -447,7 +446,7 @@ static void mark_idle(struct zram *zram, ktime_t cutoff)
 
 #ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
 		is_idle = !cutoff ||
-			ktime_after(cutoff, zram->table[index].ac_time);
+			ktime_after(cutoff, zram->table[index].attr.ac_time);
 #endif
 		if (is_idle)
 			zram_set_flag(zram, index, ZRAM_IDLE);
@@ -461,18 +460,19 @@ static ssize_t idle_store(struct device *dev, struct device_attribute *attr,
 			  const char *buf, size_t len)
 {
 	struct zram *zram = dev_to_zram(dev);
-	ktime_t cutoff_time = 0;
+	ktime_t cutoff = 0;
 
 	if (!sysfs_streq(buf, "all")) {
 		/*
 		 * If it did not parse as 'all' try to treat it as an integer
 		 * when we have memory tracking enabled.
 		 */
-		u64 age_sec;
+		u32 age_sec;
 
-		if (IS_ENABLED(CONFIG_ZRAM_TRACK_ENTRY_ACTIME) && !kstrtoull(buf, 0, &age_sec))
-			cutoff_time = ktime_sub(ktime_get_boottime(),
-					ns_to_ktime(age_sec * NSEC_PER_SEC));
+		if (IS_ENABLED(CONFIG_ZRAM_TRACK_ENTRY_ACTIME) &&
+		    !kstrtouint(buf, 0, &age_sec))
+			cutoff = ktime_sub((u32)ktime_get_boottime_seconds(),
+					   age_sec);
 		else
 			return -EINVAL;
 	}
@@ -482,10 +482,10 @@ static ssize_t idle_store(struct device *dev, struct device_attribute *attr,
 		return -EINVAL;
 
 	/*
-	 * A cutoff_time of 0 marks everything as idle, this is the
+	 * A cutoff of 0 marks everything as idle, this is the
 	 * "all" behavior.
 	 */
-	mark_idle(zram, cutoff_time);
+	mark_idle(zram, cutoff);
 	return len;
 }
 
@@ -1588,7 +1588,7 @@ static ssize_t read_block_state(struct file *file, char __user *buf,
 		if (!zram_allocated(zram, index))
 			goto next;
 
-		ts = ktime_to_timespec64(zram->table[index].ac_time);
+		ts = ktime_to_timespec64(zram->table[index].attr.ac_time);
 		copied = snprintf(kbuf + written, count,
 			"%12zd %12lld.%06lu %c%c%c%c%c%c\n",
 			index, (s64)ts.tv_sec,
@@ -2013,7 +2013,7 @@ static void zram_slot_free(struct zram *zram, u32 index)
 	unsigned long handle;
 
 #ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
-	zram->table[index].ac_time = 0;
+	zram->table[index].attr.ac_time = 0;
 #endif
 
 	zram_clear_flag(zram, index, ZRAM_IDLE);
@@ -3286,7 +3286,7 @@ static int __init zram_init(void)
 	struct zram_table_entry zram_te;
 	int ret;
 
-	BUILD_BUG_ON(__NR_ZRAM_PAGEFLAGS > sizeof(zram_te.flags) * 8);
+	BUILD_BUG_ON(__NR_ZRAM_PAGEFLAGS > sizeof(zram_te.attr.flags) * 8);
 
 	ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare",
 				      zcomp_cpu_up_prepare, zcomp_cpu_dead);
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 72fdf66c78ab..48d6861c6647 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -65,10 +65,15 @@ enum zram_pageflags {
  */
 struct zram_table_entry {
 	unsigned long handle;
-	unsigned long flags;
+	union {
+		unsigned long __lock;
+		struct attr {
+			u32 flags;
 #ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
-	ktime_t ac_time;
+			u32 ac_time;
 #endif
+		} attr;
+	};
 	struct lockdep_map dep_map;
 };
 
-- 
2.52.0.239.gd5f0c6e74e-goog




* [PATCH 2/3] zram: rename internal slot API
  2025-12-15  5:47 [PATCH 1/3] zram: use u32 for entry ac_time tracking Sergey Senozhatsky
@ 2025-12-15  5:47 ` Sergey Senozhatsky
  2025-12-15  5:47 ` [PATCH 3/3] zram: trivial fix of recompress_slot() coding styles Sergey Senozhatsky
  2025-12-15 22:31 ` [PATCH 1/3] zram: use u32 for entry ac_time tracking Brian Geffon
  2 siblings, 0 replies; 6+ messages in thread
From: Sergey Senozhatsky @ 2025-12-15  5:47 UTC (permalink / raw)
  To: Andrew Morton
  Cc: Minchan Kim, Brian Geffon, David Stevens, linux-kernel, linux-mm,
	linux-block, Sergey Senozhatsky

We have somewhat confusing internal API naming.  E.g. the
following code:

	zram_slot_lock()
	if (zram_allocated())
		zram_set_flag()
	zram_slot_unlock()

may look like it does something at the zram device level, but
in fact it tests and sets per-slot entry flags, not device ones.

Rename the API to explicitly distinguish functions that operate
at the slot level from functions that operate at the zram device
level.
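
With the new names (as introduced by the diff below) the same
sequence reads:

	slot_lock()
	if (slot_allocated())
		set_slot_flag()
	slot_unlock()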

While at it, fix up some coding style issues.

Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
---
 drivers/block/zram/zram_drv.c | 363 +++++++++++++++++-----------------
 1 file changed, 182 insertions(+), 181 deletions(-)

diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 65f99ff3e2e5..f00f3d22d5e3 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -56,10 +56,10 @@ static size_t huge_class_size;
 
 static const struct block_device_operations zram_devops;
 
-static void zram_slot_free(struct zram *zram, u32 index);
+static void slot_free(struct zram *zram, u32 index);
 #define slot_dep_map(zram, index) (&(zram)->table[(index)].dep_map)
 
-static void zram_slot_lock_init(struct zram *zram, u32 index)
+static void slot_lock_init(struct zram *zram, u32 index)
 {
 	static struct lock_class_key __key;
 
@@ -79,7 +79,7 @@ static void zram_slot_lock_init(struct zram *zram, u32 index)
  * 4) Use TRY lock variant when in atomic context
 *    - must check return value and handle locking failures
  */
-static __must_check bool zram_slot_trylock(struct zram *zram, u32 index)
+static __must_check bool slot_trylock(struct zram *zram, u32 index)
 {
 	unsigned long *lock = &zram->table[index].__lock;
 
@@ -92,7 +92,7 @@ static __must_check bool zram_slot_trylock(struct zram *zram, u32 index)
 	return false;
 }
 
-static void zram_slot_lock(struct zram *zram, u32 index)
+static void slot_lock(struct zram *zram, u32 index)
 {
 	unsigned long *lock = &zram->table[index].__lock;
 
@@ -101,7 +101,7 @@ static void zram_slot_lock(struct zram *zram, u32 index)
 	lock_acquired(slot_dep_map(zram, index), _RET_IP_);
 }
 
-static void zram_slot_unlock(struct zram *zram, u32 index)
+static void slot_unlock(struct zram *zram, u32 index)
 {
 	unsigned long *lock = &zram->table[index].__lock;
 
@@ -119,51 +119,80 @@ static inline struct zram *dev_to_zram(struct device *dev)
 	return (struct zram *)dev_to_disk(dev)->private_data;
 }
 
-static unsigned long zram_get_handle(struct zram *zram, u32 index)
+static unsigned long get_slot_handle(struct zram *zram, u32 index)
 {
 	return zram->table[index].handle;
 }
 
-static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle)
+static void set_slot_handle(struct zram *zram, u32 index, unsigned long handle)
 {
 	zram->table[index].handle = handle;
 }
 
-static bool zram_test_flag(struct zram *zram, u32 index,
+static bool test_slot_flag(struct zram *zram, u32 index,
 			   enum zram_pageflags flag)
 {
 	return zram->table[index].attr.flags & BIT(flag);
 }
 
-static void zram_set_flag(struct zram *zram, u32 index,
+static void set_slot_flag(struct zram *zram, u32 index,
 			  enum zram_pageflags flag)
 {
 	zram->table[index].attr.flags |= BIT(flag);
 }
 
-static void zram_clear_flag(struct zram *zram, u32 index,
+static void clear_slot_flag(struct zram *zram, u32 index,
 			    enum zram_pageflags flag)
 {
 	zram->table[index].attr.flags &= ~BIT(flag);
 }
 
-static size_t zram_get_obj_size(struct zram *zram, u32 index)
+static size_t get_slot_size(struct zram *zram, u32 index)
 {
 	return zram->table[index].attr.flags & (BIT(ZRAM_FLAG_SHIFT) - 1);
 }
 
-static void zram_set_obj_size(struct zram *zram, u32 index, size_t size)
+static void set_slot_size(struct zram *zram, u32 index, size_t size)
 {
 	unsigned long flags = zram->table[index].attr.flags >> ZRAM_FLAG_SHIFT;
 
 	zram->table[index].attr.flags = (flags << ZRAM_FLAG_SHIFT) | size;
 }
 
-static inline bool zram_allocated(struct zram *zram, u32 index)
+static inline bool slot_allocated(struct zram *zram, u32 index)
 {
-	return zram_get_obj_size(zram, index) ||
-			zram_test_flag(zram, index, ZRAM_SAME) ||
-			zram_test_flag(zram, index, ZRAM_WB);
+	return get_slot_size(zram, index) ||
+		test_slot_flag(zram, index, ZRAM_SAME) ||
+		test_slot_flag(zram, index, ZRAM_WB);
+}
+
+static inline void set_slot_comp_priority(struct zram *zram, u32 index,
+					  u32 prio)
+{
+	prio &= ZRAM_COMP_PRIORITY_MASK;
+	/*
+	 * Clear previous priority value first, in case if we recompress
+	 * further an already recompressed page
+	 */
+	zram->table[index].attr.flags &= ~(ZRAM_COMP_PRIORITY_MASK <<
+					   ZRAM_COMP_PRIORITY_BIT1);
+	zram->table[index].attr.flags |= (prio << ZRAM_COMP_PRIORITY_BIT1);
+}
+
+static inline u32 get_slot_comp_priority(struct zram *zram, u32 index)
+{
+	u32 prio = zram->table[index].attr.flags >> ZRAM_COMP_PRIORITY_BIT1;
+
+	return prio & ZRAM_COMP_PRIORITY_MASK;
+}
+
+static void mark_slot_accessed(struct zram *zram, u32 index)
+{
+	clear_slot_flag(zram, index, ZRAM_IDLE);
+	clear_slot_flag(zram, index, ZRAM_PP_SLOT);
+#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
+	zram->table[index].attr.ac_time = (u32)ktime_get_boottime_seconds();
+#endif
 }
 
 static inline void update_used_max(struct zram *zram, const unsigned long pages)
@@ -200,34 +229,6 @@ static inline bool is_partial_io(struct bio_vec *bvec)
 }
 #endif
 
-static inline void zram_set_priority(struct zram *zram, u32 index, u32 prio)
-{
-	prio &= ZRAM_COMP_PRIORITY_MASK;
-	/*
-	 * Clear previous priority value first, in case if we recompress
-	 * further an already recompressed page
-	 */
-	zram->table[index].attr.flags &= ~(ZRAM_COMP_PRIORITY_MASK <<
-					   ZRAM_COMP_PRIORITY_BIT1);
-	zram->table[index].attr.flags |= (prio << ZRAM_COMP_PRIORITY_BIT1);
-}
-
-static inline u32 zram_get_priority(struct zram *zram, u32 index)
-{
-	u32 prio = zram->table[index].attr.flags >> ZRAM_COMP_PRIORITY_BIT1;
-
-	return prio & ZRAM_COMP_PRIORITY_MASK;
-}
-
-static void zram_accessed(struct zram *zram, u32 index)
-{
-	zram_clear_flag(zram, index, ZRAM_IDLE);
-	zram_clear_flag(zram, index, ZRAM_PP_SLOT);
-#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
-	zram->table[index].attr.ac_time = (u32)ktime_get_boottime_seconds();
-#endif
-}
-
 #if defined CONFIG_ZRAM_WRITEBACK || defined CONFIG_ZRAM_MULTI_COMP
 struct zram_pp_slot {
 	unsigned long		index;
@@ -263,9 +264,9 @@ static void release_pp_slot(struct zram *zram, struct zram_pp_slot *pps)
 {
 	list_del_init(&pps->entry);
 
-	zram_slot_lock(zram, pps->index);
-	zram_clear_flag(zram, pps->index, ZRAM_PP_SLOT);
-	zram_slot_unlock(zram, pps->index);
+	slot_lock(zram, pps->index);
+	clear_slot_flag(zram, pps->index, ZRAM_PP_SLOT);
+	slot_unlock(zram, pps->index);
 
 	kfree(pps);
 }
@@ -304,10 +305,10 @@ static bool place_pp_slot(struct zram *zram, struct zram_pp_ctl *ctl,
 	INIT_LIST_HEAD(&pps->entry);
 	pps->index = index;
 
-	bid = zram_get_obj_size(zram, pps->index) / PP_BUCKET_SIZE_RANGE;
+	bid = get_slot_size(zram, pps->index) / PP_BUCKET_SIZE_RANGE;
 	list_add(&pps->entry, &ctl->pp_buckets[bid]);
 
-	zram_set_flag(zram, pps->index, ZRAM_PP_SLOT);
+	set_slot_flag(zram, pps->index, ZRAM_PP_SLOT);
 	return true;
 }
 
@@ -436,11 +437,11 @@ static void mark_idle(struct zram *zram, ktime_t cutoff)
 		 *
 		 * And ZRAM_WB slots simply cannot be ZRAM_IDLE.
 		 */
-		zram_slot_lock(zram, index);
-		if (!zram_allocated(zram, index) ||
-		    zram_test_flag(zram, index, ZRAM_WB) ||
-		    zram_test_flag(zram, index, ZRAM_SAME)) {
-			zram_slot_unlock(zram, index);
+		slot_lock(zram, index);
+		if (!slot_allocated(zram, index) ||
+		    test_slot_flag(zram, index, ZRAM_WB) ||
+		    test_slot_flag(zram, index, ZRAM_SAME)) {
+			slot_unlock(zram, index);
 			continue;
 		}
 
@@ -449,10 +450,10 @@ static void mark_idle(struct zram *zram, ktime_t cutoff)
 			ktime_after(cutoff, zram->table[index].attr.ac_time);
 #endif
 		if (is_idle)
-			zram_set_flag(zram, index, ZRAM_IDLE);
+			set_slot_flag(zram, index, ZRAM_IDLE);
 		else
-			zram_clear_flag(zram, index, ZRAM_IDLE);
-		zram_slot_unlock(zram, index);
+			clear_slot_flag(zram, index, ZRAM_IDLE);
+		slot_unlock(zram, index);
 	}
 }
 
@@ -933,7 +934,7 @@ static int zram_writeback_complete(struct zram *zram, struct zram_wb_req *req)
 	}
 
 	atomic64_inc(&zram->stats.bd_writes);
-	zram_slot_lock(zram, index);
+	slot_lock(zram, index);
 	/*
 	 * We release slot lock during writeback so slot can change under us:
 	 * slot_free() or slot_free() and zram_write_page(). In both cases
@@ -941,7 +942,7 @@ static int zram_writeback_complete(struct zram *zram, struct zram_wb_req *req)
 	 * set ZRAM_PP_SLOT on such slots until current post-processing
 	 * finishes.
 	 */
-	if (!zram_test_flag(zram, index, ZRAM_PP_SLOT)) {
+	if (!test_slot_flag(zram, index, ZRAM_PP_SLOT)) {
 		zram_release_bdev_block(zram, req->blk_idx);
 		goto out;
 	}
@@ -951,26 +952,26 @@ static int zram_writeback_complete(struct zram *zram, struct zram_wb_req *req)
 		 * ZRAM_WB slots get freed, we need to preserve data required
 		 * for read decompression.
 		 */
-		size = zram_get_obj_size(zram, index);
-		prio = zram_get_priority(zram, index);
-		huge = zram_test_flag(zram, index, ZRAM_HUGE);
+		size = get_slot_size(zram, index);
+		prio = get_slot_comp_priority(zram, index);
+		huge = test_slot_flag(zram, index, ZRAM_HUGE);
 	}
 
-	zram_slot_free(zram, index);
-	zram_set_flag(zram, index, ZRAM_WB);
-	zram_set_handle(zram, index, req->blk_idx);
+	slot_free(zram, index);
+	set_slot_flag(zram, index, ZRAM_WB);
+	set_slot_handle(zram, index, req->blk_idx);
 
 	if (zram->wb_compressed) {
 		if (huge)
-			zram_set_flag(zram, index, ZRAM_HUGE);
-		zram_set_obj_size(zram, index, size);
-		zram_set_priority(zram, index, prio);
+			set_slot_flag(zram, index, ZRAM_HUGE);
+		set_slot_size(zram, index, size);
+		set_slot_comp_priority(zram, index, prio);
 	}
 
 	atomic64_inc(&zram->stats.pages_stored);
 
 out:
-	zram_slot_unlock(zram, index);
+	slot_unlock(zram, index);
 	return 0;
 }
 
@@ -1091,14 +1092,14 @@ static int zram_writeback_slots(struct zram *zram,
 		}
 
 		index = pps->index;
-		zram_slot_lock(zram, index);
+		slot_lock(zram, index);
 		/*
 		 * scan_slots() sets ZRAM_PP_SLOT and releases slot lock, so
 		 * slots can change in the meantime. If slots are accessed or
 		 * freed they lose ZRAM_PP_SLOT flag and hence we don't
 		 * post-process them.
 		 */
-		if (!zram_test_flag(zram, index, ZRAM_PP_SLOT))
+		if (!test_slot_flag(zram, index, ZRAM_PP_SLOT))
 			goto next;
 		if (zram->wb_compressed)
 			err = read_from_zspool_raw(zram, req->page, index);
@@ -1106,7 +1107,7 @@ static int zram_writeback_slots(struct zram *zram,
 			err = read_from_zspool(zram, req->page, index);
 		if (err)
 			goto next;
-		zram_slot_unlock(zram, index);
+		slot_unlock(zram, index);
 
 		/*
 		 * From now on pp-slot is owned by the req, remove it from
@@ -1128,7 +1129,7 @@ static int zram_writeback_slots(struct zram *zram,
 		continue;
 
 next:
-		zram_slot_unlock(zram, index);
+		slot_unlock(zram, index);
 		release_pp_slot(zram, pps);
 	}
 
@@ -1221,27 +1222,27 @@ static int scan_slots_for_writeback(struct zram *zram, u32 mode,
 	while (index < hi) {
 		bool ok = true;
 
-		zram_slot_lock(zram, index);
-		if (!zram_allocated(zram, index))
+		slot_lock(zram, index);
+		if (!slot_allocated(zram, index))
 			goto next;
 
-		if (zram_test_flag(zram, index, ZRAM_WB) ||
-		    zram_test_flag(zram, index, ZRAM_SAME))
+		if (test_slot_flag(zram, index, ZRAM_WB) ||
+		    test_slot_flag(zram, index, ZRAM_SAME))
 			goto next;
 
 		if (mode & IDLE_WRITEBACK &&
-		    !zram_test_flag(zram, index, ZRAM_IDLE))
+		    !test_slot_flag(zram, index, ZRAM_IDLE))
 			goto next;
 		if (mode & HUGE_WRITEBACK &&
-		    !zram_test_flag(zram, index, ZRAM_HUGE))
+		    !test_slot_flag(zram, index, ZRAM_HUGE))
 			goto next;
 		if (mode & INCOMPRESSIBLE_WRITEBACK &&
-		    !zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
+		    !test_slot_flag(zram, index, ZRAM_INCOMPRESSIBLE))
 			goto next;
 
 		ok = place_pp_slot(zram, ctl, index);
 next:
-		zram_slot_unlock(zram, index);
+		slot_unlock(zram, index);
 		if (!ok)
 			break;
 		index++;
@@ -1369,22 +1370,22 @@ static int decompress_bdev_page(struct zram *zram, struct page *page, u32 index)
 	int ret, prio;
 	void *src;
 
-	zram_slot_lock(zram, index);
+	slot_lock(zram, index);
 	/* Since slot was unlocked we need to make sure it's still ZRAM_WB */
-	if (!zram_test_flag(zram, index, ZRAM_WB)) {
-		zram_slot_unlock(zram, index);
+	if (!test_slot_flag(zram, index, ZRAM_WB)) {
+		slot_unlock(zram, index);
 		/* We read some stale data, zero it out */
 		memset_page(page, 0, 0, PAGE_SIZE);
 		return -EIO;
 	}
 
-	if (zram_test_flag(zram, index, ZRAM_HUGE)) {
-		zram_slot_unlock(zram, index);
+	if (test_slot_flag(zram, index, ZRAM_HUGE)) {
+		slot_unlock(zram, index);
 		return 0;
 	}
 
-	size = zram_get_obj_size(zram, index);
-	prio = zram_get_priority(zram, index);
+	size = get_slot_size(zram, index);
+	prio = get_slot_comp_priority(zram, index);
 
 	zstrm = zcomp_stream_get(zram->comps[prio]);
 	src = kmap_local_page(page);
@@ -1394,7 +1395,7 @@ static int decompress_bdev_page(struct zram *zram, struct page *page, u32 index)
 		copy_page(src, zstrm->local_copy);
 	kunmap_local(src);
 	zcomp_stream_put(zstrm);
-	zram_slot_unlock(zram, index);
+	slot_unlock(zram, index);
 
 	return ret;
 }
@@ -1584,8 +1585,8 @@ static ssize_t read_block_state(struct file *file, char __user *buf,
 	for (index = *ppos; index < nr_pages; index++) {
 		int copied;
 
-		zram_slot_lock(zram, index);
-		if (!zram_allocated(zram, index))
+		slot_lock(zram, index);
+		if (!slot_allocated(zram, index))
 			goto next;
 
 		ts = ktime_to_timespec64(zram->table[index].attr.ac_time);
@@ -1593,22 +1594,22 @@ static ssize_t read_block_state(struct file *file, char __user *buf,
 			"%12zd %12lld.%06lu %c%c%c%c%c%c\n",
 			index, (s64)ts.tv_sec,
 			ts.tv_nsec / NSEC_PER_USEC,
-			zram_test_flag(zram, index, ZRAM_SAME) ? 's' : '.',
-			zram_test_flag(zram, index, ZRAM_WB) ? 'w' : '.',
-			zram_test_flag(zram, index, ZRAM_HUGE) ? 'h' : '.',
-			zram_test_flag(zram, index, ZRAM_IDLE) ? 'i' : '.',
-			zram_get_priority(zram, index) ? 'r' : '.',
-			zram_test_flag(zram, index,
+			test_slot_flag(zram, index, ZRAM_SAME) ? 's' : '.',
+			test_slot_flag(zram, index, ZRAM_WB) ? 'w' : '.',
+			test_slot_flag(zram, index, ZRAM_HUGE) ? 'h' : '.',
+			test_slot_flag(zram, index, ZRAM_IDLE) ? 'i' : '.',
+			get_slot_comp_priority(zram, index) ? 'r' : '.',
+			test_slot_flag(zram, index,
 				       ZRAM_INCOMPRESSIBLE) ? 'n' : '.');
 
 		if (count <= copied) {
-			zram_slot_unlock(zram, index);
+			slot_unlock(zram, index);
 			break;
 		}
 		written += copied;
 		count -= copied;
 next:
-		zram_slot_unlock(zram, index);
+		slot_unlock(zram, index);
 		*ppos += 1;
 	}
 
@@ -1976,7 +1977,7 @@ static void zram_meta_free(struct zram *zram, u64 disksize)
 
 	/* Free all pages that are still in this zram device */
 	for (index = 0; index < num_pages; index++)
-		zram_slot_free(zram, index);
+		slot_free(zram, index);
 
 	zs_destroy_pool(zram->mem_pool);
 	vfree(zram->table);
@@ -2003,12 +2004,12 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize)
 		huge_class_size = zs_huge_class_size(zram->mem_pool);
 
 	for (index = 0; index < num_pages; index++)
-		zram_slot_lock_init(zram, index);
+		slot_lock_init(zram, index);
 
 	return true;
 }
 
-static void zram_slot_free(struct zram *zram, u32 index)
+static void slot_free(struct zram *zram, u32 index)
 {
 	unsigned long handle;
 
@@ -2016,19 +2017,19 @@ static void zram_slot_free(struct zram *zram, u32 index)
 	zram->table[index].attr.ac_time = 0;
 #endif
 
-	zram_clear_flag(zram, index, ZRAM_IDLE);
-	zram_clear_flag(zram, index, ZRAM_INCOMPRESSIBLE);
-	zram_clear_flag(zram, index, ZRAM_PP_SLOT);
-	zram_set_priority(zram, index, 0);
+	clear_slot_flag(zram, index, ZRAM_IDLE);
+	clear_slot_flag(zram, index, ZRAM_INCOMPRESSIBLE);
+	clear_slot_flag(zram, index, ZRAM_PP_SLOT);
+	set_slot_comp_priority(zram, index, 0);
 
-	if (zram_test_flag(zram, index, ZRAM_HUGE)) {
-		zram_clear_flag(zram, index, ZRAM_HUGE);
+	if (test_slot_flag(zram, index, ZRAM_HUGE)) {
+		clear_slot_flag(zram, index, ZRAM_HUGE);
 		atomic64_dec(&zram->stats.huge_pages);
 	}
 
-	if (zram_test_flag(zram, index, ZRAM_WB)) {
-		zram_clear_flag(zram, index, ZRAM_WB);
-		zram_release_bdev_block(zram, zram_get_handle(zram, index));
+	if (test_slot_flag(zram, index, ZRAM_WB)) {
+		clear_slot_flag(zram, index, ZRAM_WB);
+		zram_release_bdev_block(zram, get_slot_handle(zram, index));
 		goto out;
 	}
 
@@ -2036,24 +2037,24 @@ static void zram_slot_free(struct zram *zram, u32 index)
 	 * No memory is allocated for same element filled pages.
 	 * Simply clear same page flag.
 	 */
-	if (zram_test_flag(zram, index, ZRAM_SAME)) {
-		zram_clear_flag(zram, index, ZRAM_SAME);
+	if (test_slot_flag(zram, index, ZRAM_SAME)) {
+		clear_slot_flag(zram, index, ZRAM_SAME);
 		atomic64_dec(&zram->stats.same_pages);
 		goto out;
 	}
 
-	handle = zram_get_handle(zram, index);
+	handle = get_slot_handle(zram, index);
 	if (!handle)
 		return;
 
 	zs_free(zram->mem_pool, handle);
 
-	atomic64_sub(zram_get_obj_size(zram, index),
+	atomic64_sub(get_slot_size(zram, index),
 		     &zram->stats.compr_data_size);
 out:
 	atomic64_dec(&zram->stats.pages_stored);
-	zram_set_handle(zram, index, 0);
-	zram_set_obj_size(zram, index, 0);
+	set_slot_handle(zram, index, 0);
+	set_slot_size(zram, index, 0);
 }
 
 static int read_same_filled_page(struct zram *zram, struct page *page,
@@ -2062,7 +2063,7 @@ static int read_same_filled_page(struct zram *zram, struct page *page,
 	void *mem;
 
 	mem = kmap_local_page(page);
-	zram_fill_page(mem, PAGE_SIZE, zram_get_handle(zram, index));
+	zram_fill_page(mem, PAGE_SIZE, get_slot_handle(zram, index));
 	kunmap_local(mem);
 	return 0;
 }
@@ -2073,7 +2074,7 @@ static int read_incompressible_page(struct zram *zram, struct page *page,
 	unsigned long handle;
 	void *src, *dst;
 
-	handle = zram_get_handle(zram, index);
+	handle = get_slot_handle(zram, index);
 	src = zs_obj_read_begin(zram->mem_pool, handle, NULL);
 	dst = kmap_local_page(page);
 	copy_page(dst, src);
@@ -2091,9 +2092,9 @@ static int read_compressed_page(struct zram *zram, struct page *page, u32 index)
 	void *src, *dst;
 	int ret, prio;
 
-	handle = zram_get_handle(zram, index);
-	size = zram_get_obj_size(zram, index);
-	prio = zram_get_priority(zram, index);
+	handle = get_slot_handle(zram, index);
+	size = get_slot_size(zram, index);
+	prio = get_slot_comp_priority(zram, index);
 
 	zstrm = zcomp_stream_get(zram->comps[prio]);
 	src = zs_obj_read_begin(zram->mem_pool, handle, zstrm->local_copy);
@@ -2114,8 +2115,8 @@ static int read_from_zspool_raw(struct zram *zram, struct page *page, u32 index)
 	unsigned int size;
 	void *src;
 
-	handle = zram_get_handle(zram, index);
-	size = zram_get_obj_size(zram, index);
+	handle = get_slot_handle(zram, index);
+	size = get_slot_size(zram, index);
 
 	/*
 	 * We need to get stream just for ->local_copy buffer, in
@@ -2138,11 +2139,11 @@ static int read_from_zspool_raw(struct zram *zram, struct page *page, u32 index)
  */
 static int read_from_zspool(struct zram *zram, struct page *page, u32 index)
 {
-	if (zram_test_flag(zram, index, ZRAM_SAME) ||
-	    !zram_get_handle(zram, index))
+	if (test_slot_flag(zram, index, ZRAM_SAME) ||
+	    !get_slot_handle(zram, index))
 		return read_same_filled_page(zram, page, index);
 
-	if (!zram_test_flag(zram, index, ZRAM_HUGE))
+	if (!test_slot_flag(zram, index, ZRAM_HUGE))
 		return read_compressed_page(zram, page, index);
 	else
 		return read_incompressible_page(zram, page, index);
@@ -2153,19 +2154,19 @@ static int zram_read_page(struct zram *zram, struct page *page, u32 index,
 {
 	int ret;
 
-	zram_slot_lock(zram, index);
-	if (!zram_test_flag(zram, index, ZRAM_WB)) {
+	slot_lock(zram, index);
+	if (!test_slot_flag(zram, index, ZRAM_WB)) {
 		/* Slot should be locked throughout the function call */
 		ret = read_from_zspool(zram, page, index);
-		zram_slot_unlock(zram, index);
+		slot_unlock(zram, index);
 	} else {
-		unsigned long blk_idx = zram_get_handle(zram, index);
+		unsigned long blk_idx = get_slot_handle(zram, index);
 
 		/*
 		 * The slot should be unlocked before reading from the backing
 		 * device.
 		 */
-		zram_slot_unlock(zram, index);
+		slot_unlock(zram, index);
 		ret = read_from_bdev(zram, page, index, blk_idx, parent);
 	}
 
@@ -2206,11 +2207,11 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
 static int write_same_filled_page(struct zram *zram, unsigned long fill,
 				  u32 index)
 {
-	zram_slot_lock(zram, index);
-	zram_slot_free(zram, index);
-	zram_set_flag(zram, index, ZRAM_SAME);
-	zram_set_handle(zram, index, fill);
-	zram_slot_unlock(zram, index);
+	slot_lock(zram, index);
+	slot_free(zram, index);
+	set_slot_flag(zram, index, ZRAM_SAME);
+	set_slot_handle(zram, index, fill);
+	slot_unlock(zram, index);
 
 	atomic64_inc(&zram->stats.same_pages);
 	atomic64_inc(&zram->stats.pages_stored);
@@ -2244,12 +2245,12 @@ static int write_incompressible_page(struct zram *zram, struct page *page,
 	zs_obj_write(zram->mem_pool, handle, src, PAGE_SIZE);
 	kunmap_local(src);
 
-	zram_slot_lock(zram, index);
-	zram_slot_free(zram, index);
-	zram_set_flag(zram, index, ZRAM_HUGE);
-	zram_set_handle(zram, index, handle);
-	zram_set_obj_size(zram, index, PAGE_SIZE);
-	zram_slot_unlock(zram, index);
+	slot_lock(zram, index);
+	slot_free(zram, index);
+	set_slot_flag(zram, index, ZRAM_HUGE);
+	set_slot_handle(zram, index, handle);
+	set_slot_size(zram, index, PAGE_SIZE);
+	slot_unlock(zram, index);
 
 	atomic64_add(PAGE_SIZE, &zram->stats.compr_data_size);
 	atomic64_inc(&zram->stats.huge_pages);
@@ -2309,11 +2310,11 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
 	zs_obj_write(zram->mem_pool, handle, zstrm->buffer, comp_len);
 	zcomp_stream_put(zstrm);
 
-	zram_slot_lock(zram, index);
-	zram_slot_free(zram, index);
-	zram_set_handle(zram, index, handle);
-	zram_set_obj_size(zram, index, comp_len);
-	zram_slot_unlock(zram, index);
+	slot_lock(zram, index);
+	slot_free(zram, index);
+	set_slot_handle(zram, index, handle);
+	set_slot_size(zram, index, comp_len);
+	slot_unlock(zram, index);
 
 	/* Update stats */
 	atomic64_inc(&zram->stats.pages_stored);
@@ -2364,30 +2365,30 @@ static int scan_slots_for_recompress(struct zram *zram, u32 mode, u32 prio_max,
 	for (index = 0; index < nr_pages; index++) {
 		bool ok = true;
 
-		zram_slot_lock(zram, index);
-		if (!zram_allocated(zram, index))
+		slot_lock(zram, index);
+		if (!slot_allocated(zram, index))
 			goto next;
 
 		if (mode & RECOMPRESS_IDLE &&
-		    !zram_test_flag(zram, index, ZRAM_IDLE))
+		    !test_slot_flag(zram, index, ZRAM_IDLE))
 			goto next;
 
 		if (mode & RECOMPRESS_HUGE &&
-		    !zram_test_flag(zram, index, ZRAM_HUGE))
+		    !test_slot_flag(zram, index, ZRAM_HUGE))
 			goto next;
 
-		if (zram_test_flag(zram, index, ZRAM_WB) ||
-		    zram_test_flag(zram, index, ZRAM_SAME) ||
-		    zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
+		if (test_slot_flag(zram, index, ZRAM_WB) ||
+		    test_slot_flag(zram, index, ZRAM_SAME) ||
+		    test_slot_flag(zram, index, ZRAM_INCOMPRESSIBLE))
 			goto next;
 
 		/* Already compressed with same or higher priority */
-		if (zram_get_priority(zram, index) + 1 >= prio_max)
+		if (get_slot_comp_priority(zram, index) + 1 >= prio_max)
 			goto next;
 
 		ok = place_pp_slot(zram, ctl, index);
 next:
-		zram_slot_unlock(zram, index);
+		slot_unlock(zram, index);
 		if (!ok)
 			break;
 	}
@@ -2416,11 +2417,11 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page,
 	void *src;
 	int ret = 0;
 
-	handle_old = zram_get_handle(zram, index);
+	handle_old = get_slot_handle(zram, index);
 	if (!handle_old)
 		return -EINVAL;
 
-	comp_len_old = zram_get_obj_size(zram, index);
+	comp_len_old = get_slot_size(zram, index);
 	/*
 	 * Do not recompress objects that are already "small enough".
 	 */
@@ -2436,11 +2437,11 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page,
 	 * we don't preserve IDLE flag and don't incorrectly pick this entry
 	 * for different post-processing type (e.g. writeback).
 	 */
-	zram_clear_flag(zram, index, ZRAM_IDLE);
+	clear_slot_flag(zram, index, ZRAM_IDLE);
 
 	class_index_old = zs_lookup_class_index(zram->mem_pool, comp_len_old);
 
-	prio = max(prio, zram_get_priority(zram, index) + 1);
+	prio = max(prio, get_slot_comp_priority(zram, index) + 1);
 	/*
 	 * Recompression slots scan should not select slots that are
 	 * already compressed with a higher priority algorithm, but
@@ -2507,7 +2508,7 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page,
 		 */
 		if (prio < zram->num_active_comps)
 			return 0;
-		zram_set_flag(zram, index, ZRAM_INCOMPRESSIBLE);
+		set_slot_flag(zram, index, ZRAM_INCOMPRESSIBLE);
 		return 0;
 	}
 
@@ -2532,10 +2533,10 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page,
 	zs_obj_write(zram->mem_pool, handle_new, zstrm->buffer, comp_len_new);
 	zcomp_stream_put(zstrm);
 
-	zram_slot_free(zram, index);
-	zram_set_handle(zram, index, handle_new);
-	zram_set_obj_size(zram, index, comp_len_new);
-	zram_set_priority(zram, index, prio);
+	slot_free(zram, index);
+	set_slot_handle(zram, index, handle_new);
+	set_slot_size(zram, index, comp_len_new);
+	set_slot_comp_priority(zram, index, prio);
 
 	atomic64_add(comp_len_new, &zram->stats.compr_data_size);
 	atomic64_inc(&zram->stats.pages_stored);
@@ -2675,15 +2676,15 @@ static ssize_t recompress_store(struct device *dev,
 		if (!num_recomp_pages)
 			break;
 
-		zram_slot_lock(zram, pps->index);
-		if (!zram_test_flag(zram, pps->index, ZRAM_PP_SLOT))
+		slot_lock(zram, pps->index);
+		if (!test_slot_flag(zram, pps->index, ZRAM_PP_SLOT))
 			goto next;
 
 		err = recompress_slot(zram, pps->index, page,
 				      &num_recomp_pages, threshold,
 				      prio, prio_max);
 next:
-		zram_slot_unlock(zram, pps->index);
+		slot_unlock(zram, pps->index);
 		release_pp_slot(zram, pps);
 
 		if (err) {
@@ -2729,9 +2730,9 @@ static void zram_bio_discard(struct zram *zram, struct bio *bio)
 	}
 
 	while (n >= PAGE_SIZE) {
-		zram_slot_lock(zram, index);
-		zram_slot_free(zram, index);
-		zram_slot_unlock(zram, index);
+		slot_lock(zram, index);
+		slot_free(zram, index);
+		slot_unlock(zram, index);
 		atomic64_inc(&zram->stats.notify_free);
 		index++;
 		n -= PAGE_SIZE;
@@ -2760,9 +2761,9 @@ static void zram_bio_read(struct zram *zram, struct bio *bio)
 		}
 		flush_dcache_page(bv.bv_page);
 
-		zram_slot_lock(zram, index);
-		zram_accessed(zram, index);
-		zram_slot_unlock(zram, index);
+		slot_lock(zram, index);
+		mark_slot_accessed(zram, index);
+		slot_unlock(zram, index);
 
 		bio_advance_iter_single(bio, &iter, bv.bv_len);
 	} while (iter.bi_size);
@@ -2790,9 +2791,9 @@ static void zram_bio_write(struct zram *zram, struct bio *bio)
 			break;
 		}
 
-		zram_slot_lock(zram, index);
-		zram_accessed(zram, index);
-		zram_slot_unlock(zram, index);
+		slot_lock(zram, index);
+		mark_slot_accessed(zram, index);
+		slot_unlock(zram, index);
 
 		bio_advance_iter_single(bio, &iter, bv.bv_len);
 	} while (iter.bi_size);
@@ -2833,13 +2834,13 @@ static void zram_slot_free_notify(struct block_device *bdev,
 	zram = bdev->bd_disk->private_data;
 
 	atomic64_inc(&zram->stats.notify_free);
-	if (!zram_slot_trylock(zram, index)) {
+	if (!slot_trylock(zram, index)) {
 		atomic64_inc(&zram->stats.miss_free);
 		return;
 	}
 
-	zram_slot_free(zram, index);
-	zram_slot_unlock(zram, index);
+	slot_free(zram, index);
+	slot_unlock(zram, index);
 }
 
 static void zram_comp_params_reset(struct zram *zram)
-- 
2.52.0.239.gd5f0c6e74e-goog




* [PATCH 3/3] zram: trivial fix of recompress_slot() coding styles
  2025-12-15  5:47 [PATCH 1/3] zram: use u32 for entry ac_time tracking Sergey Senozhatsky
  2025-12-15  5:47 ` [PATCH 2/3] zram: rename internal slot API Sergey Senozhatsky
@ 2025-12-15  5:47 ` Sergey Senozhatsky
  2025-12-15 22:31 ` [PATCH 1/3] zram: use u32 for entry ac_time tracking Brian Geffon
  2 siblings, 0 replies; 6+ messages in thread
From: Sergey Senozhatsky @ 2025-12-15  5:47 UTC (permalink / raw)
  To: Andrew Morton
  Cc: Minchan Kim, Brian Geffon, David Stevens, linux-kernel, linux-mm,
	linux-block, Sergey Senozhatsky

A minor fixup of 80-column limit violations in a recompress_slot()
comment and a zs_malloc() call.

Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
---
 drivers/block/zram/zram_drv.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index f00f3d22d5e3..634848f45e9b 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -2517,14 +2517,15 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page,
 	 * avoid direct reclaim.  Allocation error is not fatal since
 	 * we still have the old object in the mem_pool.
 	 *
-	 * XXX: technically, the node we really want here is the node that holds
-	 * the original compressed data. But that would require us to modify
-	 * zsmalloc API to return this information. For now, we will make do with
-	 * the node of the page allocated for recompression.
+	 * XXX: technically, the node we really want here is the node that
+	 * holds the original compressed data. But that would require us to
+	 * modify zsmalloc API to return this information. For now, we will
+	 * make do with the node of the page allocated for recompression.
 	 */
 	handle_new = zs_malloc(zram->mem_pool, comp_len_new,
 			       GFP_NOIO | __GFP_NOWARN |
-			       __GFP_HIGHMEM | __GFP_MOVABLE, page_to_nid(page));
+			       __GFP_HIGHMEM | __GFP_MOVABLE,
+			       page_to_nid(page));
 	if (IS_ERR_VALUE(handle_new)) {
 		zcomp_stream_put(zstrm);
 		return PTR_ERR((void *)handle_new);
-- 
2.52.0.239.gd5f0c6e74e-goog




* Re: [PATCH 1/3] zram: use u32 for entry ac_time tracking
  2025-12-15  5:47 [PATCH 1/3] zram: use u32 for entry ac_time tracking Sergey Senozhatsky
  2025-12-15  5:47 ` [PATCH 2/3] zram: rename internal slot API Sergey Senozhatsky
  2025-12-15  5:47 ` [PATCH 3/3] zram: trivial fix of recompress_slot() coding styles Sergey Senozhatsky
@ 2025-12-15 22:31 ` Brian Geffon
  2025-12-16  0:59   ` Sergey Senozhatsky
  2 siblings, 1 reply; 6+ messages in thread
From: Brian Geffon @ 2025-12-15 22:31 UTC (permalink / raw)
  To: Sergey Senozhatsky
  Cc: Andrew Morton, Minchan Kim, David Stevens, linux-kernel,
	linux-mm, linux-block

On Mon, Dec 15, 2025 at 12:47 AM Sergey Senozhatsky
<senozhatsky@chromium.org> wrote:
>
> We can reduce sizeof(zram_table_entry) on 64-bit systems
> by converting flags and ac_time to u32.  Entry flags fit
> into u32, and for ac_time a u32 gives us over a century of
> entry lifespan (2^32 seconds is approx 136 years), which is
> plenty given that zram records access time as system boot
> time in seconds.

Makes sense.

>
> In struct zram_table_entry we alias the two u32 fields with
> an unsigned long, because the bit-wait API (used for the slot
> lock) requires a whole unsigned long word.
>
> Suggested-by: David Stevens <stevensd@google.com>
> Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>

Reviewed-by: Brian Geffon <bgeffon@google.com>

> ---
>  drivers/block/zram/zram_drv.c | 60 +++++++++++++++++------------------
>  drivers/block/zram/zram_drv.h |  9 ++++--
>  2 files changed, 37 insertions(+), 32 deletions(-)
>
> diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
> index 67a9e7c005c3..65f99ff3e2e5 100644
> --- a/drivers/block/zram/zram_drv.c
> +++ b/drivers/block/zram/zram_drv.c
> @@ -81,7 +81,7 @@ static void zram_slot_lock_init(struct zram *zram, u32 index)
>   */
>  static __must_check bool zram_slot_trylock(struct zram *zram, u32 index)
>  {
> -       unsigned long *lock = &zram->table[index].flags;
> +       unsigned long *lock = &zram->table[index].__lock;
>
>         if (!test_and_set_bit_lock(ZRAM_ENTRY_LOCK, lock)) {
>                 mutex_acquire(slot_dep_map(zram, index), 0, 1, _RET_IP_);
> @@ -94,7 +94,7 @@ static __must_check bool zram_slot_trylock(struct zram *zram, u32 index)
>
>  static void zram_slot_lock(struct zram *zram, u32 index)
>  {
> -       unsigned long *lock = &zram->table[index].flags;
> +       unsigned long *lock = &zram->table[index].__lock;
>
>         mutex_acquire(slot_dep_map(zram, index), 0, 0, _RET_IP_);
>         wait_on_bit_lock(lock, ZRAM_ENTRY_LOCK, TASK_UNINTERRUPTIBLE);
> @@ -103,7 +103,7 @@ static void zram_slot_lock(struct zram *zram, u32 index)
>
>  static void zram_slot_unlock(struct zram *zram, u32 index)
>  {
> -       unsigned long *lock = &zram->table[index].flags;
> +       unsigned long *lock = &zram->table[index].__lock;
>
>         mutex_release(slot_dep_map(zram, index), _RET_IP_);
>         clear_and_wake_up_bit(ZRAM_ENTRY_LOCK, lock);
> @@ -130,34 +130,33 @@ static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle)
>  }
>
>  static bool zram_test_flag(struct zram *zram, u32 index,
> -                       enum zram_pageflags flag)
> +                          enum zram_pageflags flag)
>  {
> -       return zram->table[index].flags & BIT(flag);
> +       return zram->table[index].attr.flags & BIT(flag);
>  }
>
>  static void zram_set_flag(struct zram *zram, u32 index,
> -                       enum zram_pageflags flag)
> +                         enum zram_pageflags flag)
>  {
> -       zram->table[index].flags |= BIT(flag);
> +       zram->table[index].attr.flags |= BIT(flag);
>  }
>
>  static void zram_clear_flag(struct zram *zram, u32 index,
> -                       enum zram_pageflags flag)
> +                           enum zram_pageflags flag)
>  {
> -       zram->table[index].flags &= ~BIT(flag);
> +       zram->table[index].attr.flags &= ~BIT(flag);
>  }
>
>  static size_t zram_get_obj_size(struct zram *zram, u32 index)
>  {
> -       return zram->table[index].flags & (BIT(ZRAM_FLAG_SHIFT) - 1);
> +       return zram->table[index].attr.flags & (BIT(ZRAM_FLAG_SHIFT) - 1);
>  }
>
> -static void zram_set_obj_size(struct zram *zram,
> -                                       u32 index, size_t size)
> +static void zram_set_obj_size(struct zram *zram, u32 index, size_t size)
>  {
> -       unsigned long flags = zram->table[index].flags >> ZRAM_FLAG_SHIFT;
> +       unsigned long flags = zram->table[index].attr.flags >> ZRAM_FLAG_SHIFT;
>
> -       zram->table[index].flags = (flags << ZRAM_FLAG_SHIFT) | size;
> +       zram->table[index].attr.flags = (flags << ZRAM_FLAG_SHIFT) | size;
>  }
>
>  static inline bool zram_allocated(struct zram *zram, u32 index)
> @@ -208,14 +207,14 @@ static inline void zram_set_priority(struct zram *zram, u32 index, u32 prio)
>          * Clear previous priority value first, in case if we recompress
>          * further an already recompressed page
>          */
> -       zram->table[index].flags &= ~(ZRAM_COMP_PRIORITY_MASK <<
> -                                     ZRAM_COMP_PRIORITY_BIT1);
> -       zram->table[index].flags |= (prio << ZRAM_COMP_PRIORITY_BIT1);
> +       zram->table[index].attr.flags &= ~(ZRAM_COMP_PRIORITY_MASK <<
> +                                          ZRAM_COMP_PRIORITY_BIT1);
> +       zram->table[index].attr.flags |= (prio << ZRAM_COMP_PRIORITY_BIT1);
>  }
>
>  static inline u32 zram_get_priority(struct zram *zram, u32 index)
>  {
> -       u32 prio = zram->table[index].flags >> ZRAM_COMP_PRIORITY_BIT1;
> +       u32 prio = zram->table[index].attr.flags >> ZRAM_COMP_PRIORITY_BIT1;
>
>         return prio & ZRAM_COMP_PRIORITY_MASK;
>  }
> @@ -225,7 +224,7 @@ static void zram_accessed(struct zram *zram, u32 index)
>         zram_clear_flag(zram, index, ZRAM_IDLE);
>         zram_clear_flag(zram, index, ZRAM_PP_SLOT);
>  #ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
> -       zram->table[index].ac_time = ktime_get_boottime();
> +       zram->table[index].attr.ac_time = (u32)ktime_get_boottime_seconds();
>  #endif
>  }
>
> @@ -447,7 +446,7 @@ static void mark_idle(struct zram *zram, ktime_t cutoff)
>
>  #ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
>                 is_idle = !cutoff ||
> -                       ktime_after(cutoff, zram->table[index].ac_time);
> +                       ktime_after(cutoff, zram->table[index].attr.ac_time);
>  #endif
>                 if (is_idle)
>                         zram_set_flag(zram, index, ZRAM_IDLE);
> @@ -461,18 +460,19 @@ static ssize_t idle_store(struct device *dev, struct device_attribute *attr,
>                           const char *buf, size_t len)
>  {
>         struct zram *zram = dev_to_zram(dev);
> -       ktime_t cutoff_time = 0;
> +       ktime_t cutoff = 0;
>
>         if (!sysfs_streq(buf, "all")) {
>                 /*
>                  * If it did not parse as 'all' try to treat it as an integer
>                  * when we have memory tracking enabled.
>                  */
> -               u64 age_sec;
> +               u32 age_sec;
>
> -               if (IS_ENABLED(CONFIG_ZRAM_TRACK_ENTRY_ACTIME) && !kstrtoull(buf, 0, &age_sec))
> -                       cutoff_time = ktime_sub(ktime_get_boottime(),
> -                                       ns_to_ktime(age_sec * NSEC_PER_SEC));
> +               if (IS_ENABLED(CONFIG_ZRAM_TRACK_ENTRY_ACTIME) &&
> +                   !kstrtouint(buf, 0, &age_sec))
> +                       cutoff = ktime_sub((u32)ktime_get_boottime_seconds(),
> +                                          age_sec);
>                 else
>                         return -EINVAL;
>         }
> @@ -482,10 +482,10 @@ static ssize_t idle_store(struct device *dev, struct device_attribute *attr,
>                 return -EINVAL;
>
>         /*
> -        * A cutoff_time of 0 marks everything as idle, this is the
> +        * A cutoff of 0 marks everything as idle, this is the
>          * "all" behavior.
>          */
> -       mark_idle(zram, cutoff_time);
> +       mark_idle(zram, cutoff);
>         return len;
>  }
>
> @@ -1588,7 +1588,7 @@ static ssize_t read_block_state(struct file *file, char __user *buf,
>                 if (!zram_allocated(zram, index))
>                         goto next;
>
> -               ts = ktime_to_timespec64(zram->table[index].ac_time);
> +               ts = ktime_to_timespec64(zram->table[index].attr.ac_time);
>                 copied = snprintf(kbuf + written, count,
>                         "%12zd %12lld.%06lu %c%c%c%c%c%c\n",
>                         index, (s64)ts.tv_sec,
> @@ -2013,7 +2013,7 @@ static void zram_slot_free(struct zram *zram, u32 index)
>         unsigned long handle;
>
>  #ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
> -       zram->table[index].ac_time = 0;
> +       zram->table[index].attr.ac_time = 0;
>  #endif
>
>         zram_clear_flag(zram, index, ZRAM_IDLE);
> @@ -3286,7 +3286,7 @@ static int __init zram_init(void)
>         struct zram_table_entry zram_te;
>         int ret;
>
> -       BUILD_BUG_ON(__NR_ZRAM_PAGEFLAGS > sizeof(zram_te.flags) * 8);
> +       BUILD_BUG_ON(__NR_ZRAM_PAGEFLAGS > sizeof(zram_te.attr.flags) * 8);
>
>         ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare",
>                                       zcomp_cpu_up_prepare, zcomp_cpu_dead);
> diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
> index 72fdf66c78ab..48d6861c6647 100644
> --- a/drivers/block/zram/zram_drv.h
> +++ b/drivers/block/zram/zram_drv.h
> @@ -65,10 +65,15 @@ enum zram_pageflags {
>   */
>  struct zram_table_entry {
>         unsigned long handle;
> -       unsigned long flags;
> +       union {
> +               unsigned long __lock;
> +               struct attr {
> +                       u32 flags;
>  #ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
> -       ktime_t ac_time;
> +                       u32 ac_time;
>  #endif

Why not just always enable CONFIG_ZRAM_TRACK_ENTRY_ACTIME now that it
doesn't consume any additional space? Also, why can't we do this with
a single unsigned long flags as before and have a simple method that
isolates and casts the lower 32 bits as a u32?

static u32 *zram_get_actime_for_slot(struct zram *zram, u32 index)
{
	return ((u32 *)&zram->table[index].flags) + 1;
}

>
> --
> 2.52.0.239.gd5f0c6e74e-goog
>



* Re: [PATCH 1/3] zram: use u32 for entry ac_time tracking
  2025-12-15 22:31 ` [PATCH 1/3] zram: use u32 for entry ac_time tracking Brian Geffon
@ 2025-12-16  0:59   ` Sergey Senozhatsky
  2025-12-16  1:17     ` Brian Geffon
  0 siblings, 1 reply; 6+ messages in thread
From: Sergey Senozhatsky @ 2025-12-16  0:59 UTC (permalink / raw)
  To: Brian Geffon
  Cc: Sergey Senozhatsky, Andrew Morton, Minchan Kim, David Stevens,
	linux-kernel, linux-mm, linux-block

On (25/12/15 17:31), Brian Geffon wrote:
[..]
> >  struct zram_table_entry {
> >         unsigned long handle;
> > -       unsigned long flags;
> > +       union {
> > +               unsigned long __lock;
> > +               struct attr {
> > +                       u32 flags;
> >  #ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
> > -       ktime_t ac_time;
> > +                       u32 ac_time;
> >  #endif
> 
> Why not just always enable CONFIG_ZRAM_TRACK_ENTRY_ACTIME now that it
> doesn't consume any additional space?

It's "free" only on 64-bit.  On 32-bit systems the removal of
ZRAM_TRACK_ENTRY_ACTIME would unconditionally add 4 bytes
per zram_table_entry.

> Also, why can't we do this with a single unsigned long flags
> as before and have a simple method that isolates and casts the
> lower 32bits as a u32?

There are no upper and lower 32 bits in an unsigned long on 32-bit
systems.
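
To illustrate, a minimal user-space sketch (not proposed code):
on 32-bit, ((u32 *)&flags) + 1 points past the flags field
entirely, and even on 64-bit which half of the value it aliases
depends on endianness.

	#include <stdio.h>

	int main(void)
	{
		unsigned long flags = 0;

		if (sizeof(flags) == sizeof(unsigned int)) {
			/* 32-bit: there is no spare half to alias */
			printf("unsigned long is %zu bytes\n", sizeof(flags));
		} else {
			/* 64-bit: +1 is the upper half on little-endian,
			 * but the lower half on big-endian */
			unsigned int *half = (unsigned int *)&flags + 1;

			*half = 0xff;
			printf("flags = %#lx\n", flags); /* LE: 0xff00000000 */
		}
		return 0;
	}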



* Re: [PATCH 1/3] zram: use u32 for entry ac_time tracking
  2025-12-16  0:59   ` Sergey Senozhatsky
@ 2025-12-16  1:17     ` Brian Geffon
  0 siblings, 0 replies; 6+ messages in thread
From: Brian Geffon @ 2025-12-16  1:17 UTC (permalink / raw)
  To: Sergey Senozhatsky
  Cc: Andrew Morton, Minchan Kim, David Stevens, LKML, linux-mm, linux-block


On Mon, Dec 15, 2025, 19:59 Sergey Senozhatsky <senozhatsky@chromium.org>
wrote:

> On (25/12/15 17:31), Brian Geffon wrote:
> [..]
> > >  struct zram_table_entry {
> > >         unsigned long handle;
> > > -       unsigned long flags;
> > > +       union {
> > > +               unsigned long __lock;
> > > +               struct attr {
> > > +                       u32 flags;
> > >  #ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
> > > -       ktime_t ac_time;
> > > +                       u32 ac_time;
> > >  #endif
> >
> > Why not just always enable CONFIG_ZRAM_TRACK_ENTRY_ACTIME now that it
> > doesn't consume any additional space?
>
> It's "free" only on x64.  On 32bit systems the removal of
> ZRAM_TRACK_ENTRY_ACTIME will unconditionally add 4 bytes
> per zram_table_entry.
>

Yes, true, thanks. Sounds good to me.


> > Also, why can't we do this with a single unsigned long flags
> > as before and have a simple method that isolates and casts the
> > lower 32bits as a u32?
>
> There are no upper and lower 32 bits in unsigned long on 32bit systems.
>


