block: Introduce queue limits and sysfs for copy-offload support
Add device limits as sysfs entries:
	- copy_max_bytes (RW)
	- copy_max_hw_bytes (RO)

The above limits help to split the copy payload in the block layer.
copy_max_bytes: maximum total length of a copy in a single payload.
copy_max_hw_bytes: reflects the maximum limit supported by the device.

Signed-off-by: Nitesh Shetty <[email protected]>
Signed-off-by: Kanchan Joshi <[email protected]>
Signed-off-by: Anuj Gupta <[email protected]>
Reviewed-by: Hannes Reinecke <[email protected]>
nj-shetty authored and ixhamza committed Nov 19, 2024
1 parent ed8fc7f commit 3d934f0
Showing 4 changed files with 113 additions and 2 deletions.
23 changes: 23 additions & 0 deletions Documentation/ABI/stable/sysfs-block
@@ -218,6 +218,29 @@ Description:
last zone of the device which may be smaller.


What: /sys/block/<disk>/queue/copy_max_bytes
Date: May 2024
Contact: [email protected]
Description:
[RW] This is the maximum number of bytes that the block layer
will allow for a copy request. It is always smaller than or
equal to the maximum size allowed by the hardware, indicated by
'copy_max_hw_bytes'. An attempt to set a value higher than
'copy_max_hw_bytes' will truncate it to 'copy_max_hw_bytes'.
Writing '0' to this file disables copy offloading for this
device; copies are instead performed via emulation.


What: /sys/block/<disk>/queue/copy_max_hw_bytes
Date: May 2024
Contact: [email protected]
Description:
[RO] This is the maximum number of bytes that the hardware
will allow for a single data copy request.
A value of 0 means that the device does not support
copy offload.


What: /sys/block/<disk>/queue/crypto/
Date: February 2022
Contact: [email protected]
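For context, the two copy_max_* attributes documented above are regular queue sysfs files and can be driven from userspace. Below is a minimal C sketch of how they might be exercised; the device name 'sda' and the 1 MiB cap are illustrative assumptions, not part of this patch.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[32];
	ssize_t n;
	int fd;

	/* Read the hardware limit; a value of 0 means copy offload is unsupported. */
	fd = open("/sys/block/sda/queue/copy_max_hw_bytes", O_RDONLY);
	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);
	close(fd);
	if (n > 0)
		printf("copy_max_hw_bytes: %.*s", (int)n, buf);

	/* Cap the software limit to 1 MiB. Values above copy_max_hw_bytes are
	 * truncated, and writing "0" makes the block layer fall back to emulation. */
	fd = open("/sys/block/sda/queue/copy_max_bytes", O_WRONLY);
	if (fd < 0)
		return 1;
	if (write(fd, "1048576", 7) < 0)
		perror("write to copy_max_bytes");
	close(fd);
	return 0;
}

Note that the written value must also be a multiple of the device's logical block size; otherwise the store handler in blk-sysfs.c (shown further down) returns -EINVAL.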
34 changes: 32 additions & 2 deletions block/blk-settings.c
@@ -52,6 +52,9 @@ void blk_set_stacking_limits(struct queue_limits *lim)
lim->max_write_zeroes_sectors = UINT_MAX;
lim->max_zone_append_sectors = UINT_MAX;
lim->max_user_discard_sectors = UINT_MAX;
lim->max_copy_hw_sectors = UINT_MAX;
lim->max_copy_sectors = UINT_MAX;
lim->max_user_copy_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);

@@ -359,6 +362,9 @@ static int blk_validate_limits(struct queue_limits *lim)
if (!(lim->features & BLK_FEAT_WRITE_CACHE))
lim->features &= ~BLK_FEAT_FUA;

lim->max_copy_sectors =
min(lim->max_copy_hw_sectors, lim->max_user_copy_sectors);

blk_validate_atomic_write_limits(lim);

err = blk_validate_integrity_limits(lim);
@@ -376,10 +382,11 @@ int blk_set_default_limits(struct queue_limits *lim)
{
/*
* Most defaults are set by capping the bounds in blk_validate_limits,
* but max_user_discard_sectors is special and needs an explicit
* initialization to the max value here.
* but max_user_discard_sectors and max_user_copy_sectors are special
* and need an explicit initialization to the max value here.
*/
lim->max_user_discard_sectors = UINT_MAX;
lim->max_user_copy_sectors = UINT_MAX;
return blk_validate_limits(lim);
}

@@ -437,6 +444,25 @@ int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
}
EXPORT_SYMBOL_GPL(queue_limits_set);

/*
* blk_queue_max_copy_hw_sectors - set max sectors for a single copy payload
* @q: the request queue for the device
* @max_copy_sectors: maximum number of sectors to copy
*/
void blk_queue_max_copy_hw_sectors(struct request_queue *q,
unsigned int max_copy_sectors)
{
struct queue_limits *lim = &q->limits;

if (max_copy_sectors > (BLK_COPY_MAX_BYTES >> SECTOR_SHIFT))
max_copy_sectors = BLK_COPY_MAX_BYTES >> SECTOR_SHIFT;

lim->max_copy_hw_sectors = max_copy_sectors;
lim->max_copy_sectors =
min(max_copy_sectors, lim->max_user_copy_sectors);
}
EXPORT_SYMBOL_GPL(blk_queue_max_copy_hw_sectors);

static int queue_limit_alignment_offset(const struct queue_limits *lim,
sector_t sector)
{
@@ -544,6 +570,10 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
t->max_segment_size = min_not_zero(t->max_segment_size,
b->max_segment_size);

t->max_copy_sectors = min(t->max_copy_sectors, b->max_copy_sectors);
t->max_copy_hw_sectors = min(t->max_copy_hw_sectors,
b->max_copy_hw_sectors);

alignment = queue_limit_alignment_offset(b, start);

/* Bottom device has different alignment. Check that it is
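On the driver side, the new blk_queue_max_copy_hw_sectors() helper added above is what a driver would call to advertise its hardware copy limit. A minimal sketch follows, assuming a hypothetical driver function foo_init_copy_limit() and a 4 MiB device limit, neither of which is part of this patch.

#include <linux/blkdev.h>

/* Hypothetical driver setup (illustrative only): advertise that the hardware
 * can copy up to 4 MiB per request. blk_queue_max_copy_hw_sectors() clamps
 * the value to BLK_COPY_MAX_BYTES and derives max_copy_sectors from the
 * current user limit. */
static void foo_init_copy_limit(struct request_queue *q)
{
	blk_queue_max_copy_hw_sectors(q, (4U << 20) >> SECTOR_SHIFT);
}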
44 changes: 44 additions & 0 deletions block/blk-sysfs.c
@@ -178,6 +178,45 @@ static ssize_t queue_max_discard_sectors_store(struct gendisk *disk,
return ret;
}

static ssize_t queue_copy_hw_max_show(struct gendisk *disk, char *page)
{
return sprintf(page, "%llu\n", (unsigned long long)
disk->queue->limits.max_copy_hw_sectors << SECTOR_SHIFT);
}

static ssize_t queue_copy_max_show(struct gendisk *disk, char *page)
{
return sprintf(page, "%llu\n", (unsigned long long)
disk->queue->limits.max_copy_sectors << SECTOR_SHIFT);
}

static ssize_t queue_copy_max_store(struct gendisk *disk, const char *page,
size_t count)
{
unsigned long max_copy_bytes;
struct queue_limits lim;
ssize_t ret;
int err;
struct request_queue *q = disk->queue;

ret = queue_var_store(&max_copy_bytes, page, count);
if (ret < 0)
return ret;

if (max_copy_bytes & (queue_logical_block_size(q) - 1))
return -EINVAL;

blk_mq_freeze_queue(q);
lim = queue_limits_start_update(q);
lim.max_user_copy_sectors = max_copy_bytes >> SECTOR_SHIFT;
err = queue_limits_commit_update(q, &lim);
blk_mq_unfreeze_queue(q);

if (err)
return err;
return count;
}

/*
* For zone append queue_max_zone_append_sectors does not just return the
* underlying queue limits, but actually contains a calculation. Because of
@@ -459,6 +498,9 @@ QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");

QUEUE_RO_ENTRY(queue_copy_hw_max, "copy_max_hw_bytes");
QUEUE_RW_ENTRY(queue_copy_max, "copy_max_bytes");

QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
@@ -572,6 +614,8 @@ static struct attribute *queue_attrs[] = {
&queue_max_discard_sectors_entry.attr,
&queue_max_hw_discard_sectors_entry.attr,
&queue_discard_zeroes_data_entry.attr,
&queue_copy_hw_max_entry.attr,
&queue_copy_max_entry.attr,
&queue_atomic_write_max_sectors_entry.attr,
&queue_atomic_write_boundary_sectors_entry.attr,
&queue_atomic_write_unit_min_entry.attr,
14 changes: 14 additions & 0 deletions include/linux/blkdev.h
@@ -376,6 +376,10 @@ struct queue_limits {
unsigned int discard_alignment;
unsigned int zone_write_granularity;

unsigned int max_copy_hw_sectors;
unsigned int max_copy_sectors;
unsigned int max_user_copy_sectors;

/* atomic write limits */
unsigned int atomic_write_hw_max;
unsigned int atomic_write_max_sectors;
@@ -968,6 +972,8 @@ static inline void blk_queue_disable_write_zeroes(struct request_queue *q)
/*
* Access functions for manipulating queue properties
*/
extern void blk_queue_max_copy_hw_sectors(struct request_queue *q,
unsigned int max_copy_sectors);
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
@@ -1287,6 +1293,14 @@ static inline unsigned int bdev_discard_granularity(struct block_device *bdev)
return bdev_get_queue(bdev)->limits.discard_granularity;
}

/* Maximum copy offload length; set to 128 MB based on current testing */
#define BLK_COPY_MAX_BYTES (1 << 27)

static inline unsigned int bdev_max_copy_sectors(struct block_device *bdev)
{
return bdev_get_queue(bdev)->limits.max_copy_sectors;
}

static inline unsigned int
bdev_max_secure_erase_sectors(struct block_device *bdev)
{
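To round this out, a submitter could gate its I/O path on the new bdev_max_copy_sectors() accessor. The sketch below is illustrative and not part of the patch; foo_use_copy_offload() is an assumed name.

#include <linux/blkdev.h>

/* Illustrative check (not from this patch): offload only when the device
 * currently advertises a copy limit large enough for the request. */
static bool foo_use_copy_offload(struct block_device *bdev, sector_t nr_sectors)
{
	unsigned int max_sectors = bdev_max_copy_sectors(bdev);

	/* 0 means offload is disabled via sysfs or unsupported by the device;
	 * callers would fall back to emulated (read + write) copies. */
	return max_sectors && nr_sectors <= max_sectors;
}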
