From e149682f681e09cc639525b4b60619f613ba7225 Mon Sep 17 00:00:00 2001 From: Robert Baldyga Date: Tue, 10 Mar 2026 15:13:20 +0100 Subject: [PATCH 1/4] Remove ocf_cfg.h Metadata memory footprint reduction patches limited those values maximums to default, so there is no reason for them to be configurable anymore. Signed-off-by: Robert Baldyga --- inc/ocf_cfg.h | 36 ------------------------------------ inc/ocf_def.h | 12 +++++++++--- 2 files changed, 9 insertions(+), 39 deletions(-) delete mode 100644 inc/ocf_cfg.h diff --git a/inc/ocf_cfg.h b/inc/ocf_cfg.h deleted file mode 100644 index 2cf73e554..000000000 --- a/inc/ocf_cfg.h +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright(c) 2012-2021 Intel Corporation - * SPDX-License-Identifier: BSD-3-Clause - */ - - -#ifndef __OCF_CFG_H__ -#define __OCF_CFG_H__ - -/** - * @file - * @brief OCF configuration file - */ - -/** - * Configure maximum numbers of cores in cache instance - */ -#ifndef OCF_CONFIG_MAX_CORES -#define OCF_CONFIG_MAX_CORES 4096 -#endif - -/** Maximum number of IO classes that can be configured */ -#ifndef OCF_CONFIG_MAX_IO_CLASSES -#define OCF_CONFIG_MAX_IO_CLASSES 33 -#endif - -#if OCF_CONFIG_MAX_IO_CLASSES > 256 -#error "Limit of maximum number of IO classes exceeded" -#endif - -/** Enabling debug statistics */ -#ifndef OCF_CONFIG_DEBUG_STATS -#define OCF_CONFIG_DEBUG_STATS 0 -#endif - -#endif /* __OCF_CFG_H__ */ diff --git a/inc/ocf_def.h b/inc/ocf_def.h index cbc5c5b1d..9db47014c 100644 --- a/inc/ocf_def.h +++ b/inc/ocf_def.h @@ -10,7 +10,6 @@ #define __OCF_DEF_H__ #include "ocf_env.h" -#include "ocf_cfg.h" #define OCF_BITWIDTH(T) (sizeof(T) * __CHAR_BIT__) @@ -22,6 +21,13 @@ * @brief OCF definitions */ +/** + * Enabling debug statistics + */ +#ifndef OCF_CONFIG_DEBUG_STATS +#define OCF_CONFIG_DEBUG_STATS 0 +#endif + /** * @name OCF cache definitions */ @@ -87,7 +93,7 @@ * Must be smaller than (1 << OCF_CORE_ID_BITS) to leave space * for invalid OCF_CORE_ID_INVALID. 
*/ -#define OCF_CORE_NUM OCF_CONFIG_MAX_CORES +#define OCF_CORE_NUM 4096 _Static_assert(OCF_CORE_NUM < OCF_MAX_T(uint32_t, OCF_CORE_ID_BITS)); /** * Minimum value of a valid core ID */ @@ -340,7 +346,7 @@ typedef enum { /** * Maximum numbers of IO classes per cache instance */ -#define OCF_USER_IO_CLASS_MAX OCF_CONFIG_MAX_IO_CLASSES +#define OCF_USER_IO_CLASS_MAX 33 /** * Minimum value of a valid IO class ID */ From 7a524fe53f49dc2f1444834552cf9b170b73e0f8 Mon Sep 17 00:00:00 2001 From: Avi Halaf Date: Tue, 18 Oct 2022 15:47:17 +0300 Subject: [PATCH 2/4] metadata: Make collision updates atomic Allow accessing collision metadata for read without acquiring hash bucket lock. Signed-off-by: Avi Halaf Signed-off-by: Robert Baldyga --- src/metadata/metadata_collision.c | 49 +++++++++++++++++++++++++------ src/metadata/metadata_collision.h | 19 +++++++----- 2 files changed, 52 insertions(+), 16 deletions(-) diff --git a/src/metadata/metadata_collision.c b/src/metadata/metadata_collision.c index 7ef7ffd8f..dbdfef1ad 100644 --- a/src/metadata/metadata_collision.c +++ b/src/metadata/metadata_collision.c @@ -10,6 +10,37 @@ #include "metadata_internal.h" #include "../utils/utils_cache_line.h" +static inline void ocf_metadata_list_info_set( + struct ocf_metadata_list_info *info, + ocf_cache_line_t next, ocf_cache_line_t prev) +{ + struct ocf_metadata_list_info t = { }; + + t.next_col = next; + t.prev_col = prev; + info->entry = t.entry; +} + +static inline void ocf_metadata_list_info_set_next( + struct ocf_metadata_list_info *info, + ocf_cache_line_t next) +{ + struct ocf_metadata_list_info t = { .entry = info->entry }; + + t.next_col = next; + info->entry = t.entry; +} + +static inline void ocf_metadata_list_info_set_prev( + struct ocf_metadata_list_info *info, + ocf_cache_line_t prev) +{ + struct ocf_metadata_list_info t = { .entry = info->entry }; + + t.prev_col = prev; + info->entry = t.entry; +} + void ocf_metadata_set_collision_info(struct ocf_cache *cache, 
ocf_cache_line_t line, ocf_cache_line_t next, ocf_cache_line_t prev) @@ -21,12 +52,10 @@ void ocf_metadata_set_collision_info(struct ocf_cache *cache, info = ocf_metadata_raw_wr_access(cache, &(ctrl->raw_desc[metadata_segment_list_info]), line); - if (info) { - info->next_col = next; - info->prev_col = prev; - } else { + if (info) + ocf_metadata_list_info_set(info, next, prev); + else ocf_metadata_error(cache); - } } void ocf_metadata_set_collision_next(struct ocf_cache *cache, @@ -40,7 +69,7 @@ void ocf_metadata_set_collision_next(struct ocf_cache *cache, &(ctrl->raw_desc[metadata_segment_list_info]), line); if (info) - info->next_col = next; + ocf_metadata_list_info_set_next(info, next); else ocf_metadata_error(cache); } @@ -56,7 +85,7 @@ void ocf_metadata_set_collision_prev(struct ocf_cache *cache, &(ctrl->raw_desc[metadata_segment_list_info]), line); if (info) - info->prev_col = prev; + ocf_metadata_list_info_set_prev(info, prev); else ocf_metadata_error(cache); } @@ -74,10 +103,12 @@ void ocf_metadata_get_collision_info(struct ocf_cache *cache, info = ocf_metadata_raw_rd_access(cache, &(ctrl->raw_desc[metadata_segment_list_info]), line); if (info) { + /* Atomic read of the entry (aarch64/amd64) */ + struct ocf_metadata_list_info t = { .entry = info->entry }; if (next) - *next = info->next_col; + *next = t.next_col; if (prev) - *prev = info->prev_col; + *prev = t.prev_col; } else { ocf_metadata_error(cache); diff --git a/src/metadata/metadata_collision.h b/src/metadata/metadata_collision.h index 01d7a7ed2..0ba5028e3 100644 --- a/src/metadata/metadata_collision.h +++ b/src/metadata/metadata_collision.h @@ -15,13 +15,18 @@ */ struct ocf_metadata_list_info { - /* Previous cache line in collision list */ - ocf_cache_line_t prev_col : OCF_CACHE_LINE_BITS; - ocf_cache_line_t unused : 3; - /* Next cache line in collision list*/ - ocf_cache_line_t next_col : OCF_CACHE_LINE_BITS; - ocf_cache_line_t unused2 : 3; -} __attribute__((packed)); + union { + struct { + /* 
Previous cache line in collision list */ + ocf_cache_line_t prev_col : OCF_CACHE_LINE_BITS; + ocf_cache_line_t unused : 3; + /* Next cache line in collision list*/ + ocf_cache_line_t next_col : OCF_CACHE_LINE_BITS; + ocf_cache_line_t unused2 : 3; + } __attribute__((packed)); + uint64_t entry; + }; +}; /* Keep the struct ocf_metadata_list_info size of 8 bytes */ _Static_assert(sizeof(struct ocf_metadata_list_info) == sizeof(uint64_t)); From 9c8dea1590ca604035901c1bde31e61c11d0f0ce Mon Sep 17 00:00:00 2001 From: Robert Baldyga Date: Sat, 21 Mar 2026 22:11:28 +0100 Subject: [PATCH 3/4] metadata: Add ocf_metadata_is_hit_no_lock() Introduce a function for opportunistic lockless core line hit check. Signed-off-by: Robert Baldyga --- src/metadata/metadata.c | 25 +++++++++++++++++++++++++ src/metadata/metadata.h | 3 +++ 2 files changed, 28 insertions(+) diff --git a/src/metadata/metadata.c b/src/metadata/metadata.c index 8056e7817..ca9cdd299 100644 --- a/src/metadata/metadata.c +++ b/src/metadata/metadata.c @@ -1976,4 +1976,29 @@ void ocf_metadata_probe(ocf_ctx_t ctx, ocf_volume_t volume, OCF_CMPL_RET(priv, result, NULL); } +/* + * Fast lookup to see if the line is a hit without hash bucket lock. 
+ */ +bool ocf_metadata_is_hit_no_lock(ocf_cache_t cache, ocf_core_id_t core_id, + uint64_t core_line) +{ + ocf_cache_line_t hash = ocf_metadata_hash_func(cache, + core_line, core_id); + ocf_cache_line_t line = ocf_metadata_get_hash(cache, hash); + + while (line != cache->device->collision_table_entries) { + ocf_core_id_t curr_core_id; + uint64_t curr_core_line; + + ocf_metadata_get_core_info(cache, line, + &curr_core_id, &curr_core_line); + + if (curr_core_line == core_line && core_id == curr_core_id) + return true; + + line = ocf_metadata_get_collision_next(cache, line); + } + + return false; +} diff --git a/src/metadata/metadata.h b/src/metadata/metadata.h index d048e7177..12a290a61 100644 --- a/src/metadata/metadata.h +++ b/src/metadata/metadata.h @@ -248,4 +248,7 @@ void ocf_metadata_zero_superblock(ocf_cache_t cache, */ bool ocf_metadata_check(struct ocf_cache *cache, ocf_cache_line_t line); +bool ocf_metadata_is_hit_no_lock(ocf_cache_t cache, ocf_core_id_t core_id, + uint64_t core_line); + #endif /* METADATA_H_ */ From 2f975f8c66f7d8dc761aec3d269c0af9c9c920ce Mon Sep 17 00:00:00 2001 From: Robert Baldyga Date: Thu, 26 Feb 2026 22:57:06 +0100 Subject: [PATCH 4/4] prefetch: Introduce prefetch subsystem - Implement prefetch policy management - Implement prefetch engine - Implement basic reference prefetch policy - readahead NOTE: This is an example policy, not meant for production use - Introduce dedicated prefetch io class - Introduce new prefetch statistics - Update tests Signed-off-by: Michael Lyulko Signed-off-by: Shai Fultheim Signed-off-by: Ido Ben Tsion Signed-off-by: Robert Baldyga --- inc/ocf_cache.h | 3 + inc/ocf_def.h | 2 +- inc/ocf_err.h | 3 + inc/ocf_mngt.h | 26 +++ inc/ocf_prefetch.h | 28 ++++ inc/ocf_stats.h | 12 +- src/engine/engine_common.c | 4 +- src/engine/engine_common.h | 7 + src/engine/engine_io.c | 7 +- src/engine/engine_prefetch.c | 164 +++++++++++++++++++ src/engine/engine_prefetch.h | 15 ++ src/metadata/metadata_partition.h | 1 + 
src/metadata/metadata_superblock.h | 3 + src/mngt/ocf_mngt_cache.c | 49 +++++- src/mngt/ocf_mngt_io_class.c | 7 +- src/ocf_cache.c | 1 + src/ocf_core.c | 12 +- src/ocf_io.c | 3 + src/ocf_request.c | 2 + src/ocf_request.h | 5 + src/ocf_stats.c | 35 +++- src/ocf_stats_builder.c | 90 +++++++++- src/ocf_stats_priv.h | 20 ++- src/prefetch/ocf_prefetch.c | 131 +++++++++++++++ src/prefetch/ocf_prefetch_priv.h | 34 ++++ src/prefetch/ocf_prefetch_readahead.c | 26 +++ src/prefetch/ocf_prefetch_readahead.h | 16 ++ src/utils/utils_cleaner.c | 2 +- tests/functional/pyocf/types/cache.py | 43 +++++ tests/functional/pyocf/types/ioclass.py | 3 +- tests/functional/pyocf/types/shared.py | 1 + tests/functional/pyocf/types/stats/cache.py | 1 + tests/functional/pyocf/types/stats/shared.py | 5 + 33 files changed, 734 insertions(+), 27 deletions(-) create mode 100644 inc/ocf_prefetch.h create mode 100644 src/engine/engine_prefetch.c create mode 100644 src/engine/engine_prefetch.h create mode 100644 src/prefetch/ocf_prefetch.c create mode 100644 src/prefetch/ocf_prefetch_priv.h create mode 100644 src/prefetch/ocf_prefetch_readahead.c create mode 100644 src/prefetch/ocf_prefetch_readahead.h diff --git a/inc/ocf_cache.h b/inc/ocf_cache.h index 683ee7793..a38a61fa6 100644 --- a/inc/ocf_cache.h +++ b/inc/ocf_cache.h @@ -80,6 +80,9 @@ struct ocf_cache_info { ocf_promotion_t promotion_policy; /*!< Promotion policy selected */ + ocf_pf_mask_t prefetch_mask; + /*!< Prefetchers selected (bitmask) */ + ocf_cache_line_size_t cache_line_size; /*!< Cache line size in KiB */ diff --git a/inc/ocf_def.h b/inc/ocf_def.h index 9db47014c..5c2f956a1 100644 --- a/inc/ocf_def.h +++ b/inc/ocf_def.h @@ -346,7 +346,7 @@ typedef enum { /** * Maximum numbers of IO classes per cache instance */ -#define OCF_USER_IO_CLASS_MAX 33 +#define OCF_USER_IO_CLASS_MAX 34 /** * Minimum value of a valid IO class ID */ diff --git a/inc/ocf_err.h b/inc/ocf_err.h index 654327dd1..eec29e873 100644 --- a/inc/ocf_err.h +++ 
b/inc/ocf_err.h @@ -28,6 +28,9 @@ typedef enum { /** Operation interrupted */ OCF_ERR_INTR, + /** Busy */ + OCF_ERR_BUSY, + /** Operation not supported */ OCF_ERR_NOT_SUPP, diff --git a/inc/ocf_mngt.h b/inc/ocf_mngt.h index 530a6d6a6..308cac49c 100644 --- a/inc/ocf_mngt.h +++ b/inc/ocf_mngt.h @@ -10,6 +10,7 @@ #include "ocf_cache.h" #include "ocf_core.h" +#include "ocf_prefetch.h" /** * @file @@ -1005,6 +1006,31 @@ int ocf_mngt_cache_promotion_set_param(ocf_cache_t cache, ocf_promotion_t type, int ocf_mngt_cache_promotion_get_param(ocf_cache_t cache, ocf_promotion_t type, uint8_t param_id, uint32_t *param_value); +/** + * @brief Set prefetch policy in given cache + * + * @attention This changes only runtime state. To make changes persistent + * use function ocf_mngt_cache_save(). + * + * @param[in] cache Cache handle + * @param[in] mask Bitmask of prefetch policies to enable + * + * @retval 0 Policy has been set successfully + * @retval Non-zero Error occurred and policy has not been set + */ +int ocf_mngt_cache_prefetch_set_policy(ocf_cache_t cache, ocf_pf_mask_t mask); + +/** + * @brief Get prefetch policy in given cache + * + * @param[in] cache Cache handle + * @param[out] mask Bitmask of enabled prefetch policies + * + * @retval 0 success + * @retval Non-zero Error occurred and policy could not be retrieved + */ +int ocf_mngt_cache_prefetch_get_policy(ocf_cache_t cache, ocf_pf_mask_t *mask); + /** * @brief IO class configuration */ diff --git a/inc/ocf_prefetch.h b/inc/ocf_prefetch.h new file mode 100644 index 000000000..89fea8baf --- /dev/null +++ b/inc/ocf_prefetch.h @@ -0,0 +1,28 @@ +/* + * Copyright(c) 2021-2024 Huawei Technologies Co., Ltd. 
+ * Copyright(c) 2026 Unvertical + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef __OCF_PREFETCH_H__ +#define __OCF_PREFETCH_H__ + +#include "ocf_def.h" + +/* + * Prefetch policy id + */ +typedef enum { + ocf_pf_none = -1, + ocf_pf_readahead = 0, + ocf_pf_num, +} ocf_pf_id_t; + +typedef uint8_t ocf_pf_mask_t; + +/* The bitmask must fit all the values of ocf_pf_id_t */ +_Static_assert(OCF_BITWIDTH(ocf_pf_mask_t) >= ocf_pf_num); + +#define OCF_PF_MASK_DEFAULT 0 + +#endif /* __OCF_PREFETCH_H__ */ diff --git a/inc/ocf_stats.h b/inc/ocf_stats.h index 46790290a..a526caf53 100644 --- a/inc/ocf_stats.h +++ b/inc/ocf_stats.h @@ -15,6 +15,8 @@ #ifndef __OCF_STATS_H__ #define __OCF_STATS_H__ +#include "ocf_prefetch.h" + /** * Entire row of statistcs */ @@ -71,7 +73,11 @@ struct ocf_stats_usage { * ║ Pass-Through writes │ 0 │ 0.0 │ Requests ║ * ║ Serviced requests │ 222 │ 100.0 │ Requests ║ * ╟──────────────────────┼───────┼───────┼──────────╢ - * ║ Total requests │ 222 │ 100.0 │ Requests ║ + * ║ Prefetch: readahead │ 2 │ 0.0 │ Requests ║ + * ║ Prefetch total │ 2 │ 0.0 │ Requests ║ + * ╟──────────────────────┼───────┼───────┼──────────╢ + * ║ User requests │ 222 │ 100.0 │ Requests ║ + * ║ Total requests │ 224 │ 100.0 │ Requests ║ * ╚══════════════════════╧═══════╧═══════╧══════════╝ * */ @@ -89,6 +95,8 @@ struct ocf_stats_requests { struct ocf_stat rd_pt; struct ocf_stat wr_pt; struct ocf_stat serviced; + struct ocf_stat prefetch[ocf_pf_num]; + struct ocf_stat user; struct ocf_stat total; }; @@ -127,6 +135,8 @@ struct ocf_stats_blocks { struct ocf_stat pass_through_rd; struct ocf_stat pass_through_wr; struct ocf_stat pass_through_total; + struct ocf_stat prefetch_core_rd[ocf_pf_num]; + struct ocf_stat prefetch_cache_wr[ocf_pf_num]; }; /** diff --git a/src/engine/engine_common.c b/src/engine/engine_common.c index c3f5d4dc9..6868868c1 100644 --- a/src/engine/engine_common.c +++ b/src/engine/engine_common.c @@ -442,7 +442,7 @@ static int lock_clines(struct ocf_request 
*req) * Caller must assure that request map info is up to date (request * is traversed). */ -static inline void ocf_prepare_clines_miss(struct ocf_request *req) +void ocf_prepare_clines_miss(struct ocf_request *req) { bool part_has_space; @@ -599,7 +599,7 @@ void ocf_engine_update_request_stats(struct ocf_request *req) { ocf_core_stats_request_update(req->core, req->part_id, req->rw, req->info.hit_no, req->core_line_count, - req->is_deferred); + req->io.pf_id, req->is_deferred); } void inc_fallback_pt_error_counter(ocf_cache_t cache) diff --git a/src/engine/engine_common.h b/src/engine/engine_common.h index eeb8ce492..392900a0d 100644 --- a/src/engine/engine_common.h +++ b/src/engine/engine_common.h @@ -223,6 +223,13 @@ struct ocf_engine_callbacks ocf_req_async_lock_cb resume; }; +/** + * @brief Prepare cache lines for miss request + * + * @param req OCF request + */ +void ocf_prepare_clines_miss(struct ocf_request *req); + /** * @brief Map and lock cachelines * diff --git a/src/engine/engine_io.c b/src/engine/engine_io.c index 73433bcb8..dbdabfd96 100644 --- a/src/engine/engine_io.c +++ b/src/engine/engine_io.c @@ -1,5 +1,6 @@ /* * Copyright(c) 2024 Huawei Technologies + * Copyright(c) 2026 Unvertical * SPDX-License-Identifier: BSD-3-Clause */ @@ -131,7 +132,7 @@ void ocf_engine_forward_core_io_req(struct ocf_request *req, ocf_req_end_t callback) { ocf_core_stats_core_block_update(req->core, req->part_id, req->rw, - req->bytes); + req->bytes, req->io.pf_id); ocf_req_forward_core_init(req, callback); @@ -143,7 +144,7 @@ void ocf_engine_forward_core_flush_req(struct ocf_request *req, ocf_req_end_t callback) { ocf_core_stats_core_block_update(req->core, req->part_id, req->rw, - req->bytes); + req->bytes, req->io.pf_id); ocf_req_forward_core_init(req, callback); @@ -154,7 +155,7 @@ void ocf_engine_forward_core_discard_req(struct ocf_request *req, ocf_req_end_t callback) { ocf_core_stats_core_block_update(req->core, req->part_id, req->rw, - req->bytes); + req->bytes, 
req->io.pf_id); ocf_req_forward_core_init(req, callback); diff --git a/src/engine/engine_prefetch.c b/src/engine/engine_prefetch.c new file mode 100644 index 000000000..ceb4bf34c --- /dev/null +++ b/src/engine/engine_prefetch.c @@ -0,0 +1,164 @@ +/* + * Copyright(c) 2012-2022 Intel Corporation + * Copyright(c) 2022-2024 Huawei Technologies + * Copyright(c) 2026 Unvertical + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include "ocf/ocf.h" +#include "engine_prefetch.h" +#include "engine_inv.h" +#include "engine_bf.h" +#include "engine_common.h" +#include "engine_io.h" +#include "../ocf_cache_priv.h" +#include "../concurrency/ocf_concurrency.h" +#include "../ocf_request.h" +#include "../utils/utils_cache_line.h" +#include "../utils/utils_user_part.h" +#include "../metadata/metadata.h" +#include "../ocf_def_priv.h" + +#define OCF_ENGINE_DEBUG_IO_NAME "prefetch" +#include "engine_debug.h" + +static void _ocf_prefetch_read_complete(struct ocf_request *req, int error) +{ + OCF_DEBUG_RQ(req, "Prefetch read completion"); + + ocf_req_get(req); + + if (error) { + req->complete(req, error); + + ctx_data_free(req->cache->owner, req->data); + + ocf_core_stats_core_error_update(req->core, OCF_READ); + + /* Invalidate metadata */ + ocf_engine_invalidate(req); + + return; + } + + /* Pretend the data is the copy, so that it's used by the backfill */ + req->cp_data = req->data; + req->data = NULL; + + /* Complete request */ + req->complete(req, error); + + ocf_engine_backfill(req); +} + +static int _ocf_prefetch_read_do(struct ocf_request *req) +{ + struct ocf_alock *c = ocf_cache_line_concurrency(req->cache); + + if (unlikely(ocf_engine_is_hit(req))) { + ocf_req_unlock(c, req); + req->complete(req, 0); + return 0; + } + + if (req->info.dirty_any) { + ocf_hb_req_prot_lock_rd(req); + + /* Request is dirty need to clean request */ + ocf_engine_clean(req); + + ocf_hb_req_prot_unlock_rd(req); + return 0; + } + + req->data = ctx_data_alloc(ocf_cache_get_ctx(req->cache), + 
OCF_DIV_ROUND_UP(req->bytes, PAGE_SIZE)); + if (!req->data) { + ocf_req_unlock(c, req); + req->complete(req, 0); + return 0; + } + + ocf_hb_req_prot_lock_wr(req); + + /* Set valid status bits map */ + ocf_set_valid_map_info(req); + + if (ocf_engine_needs_repart(req)) { + OCF_DEBUG_RQ(req, "Re-Part"); + + /* Probably some cache lines are assigned into wrong + * partition. Need to move it to new one + */ + ocf_user_part_move(req); + } + + ocf_hb_req_prot_unlock_wr(req); + + OCF_DEBUG_RQ(req, "Submit"); + + ocf_req_get(req); + ocf_engine_forward_core_io_req(req, _ocf_prefetch_read_complete); + + /* Update statistics */ + ocf_engine_update_request_stats(req); + ocf_engine_update_block_stats(req); + + /* Put OCF request - decrease reference counter */ + ocf_req_put(req); + + return 0; +} + +int ocf_prefetch_read(struct ocf_request *req) +{ + struct ocf_user_part *user_part = &req->cache->user_parts[req->part_id]; + struct ocf_alock *c = ocf_cache_line_concurrency(req->cache); + ocf_cache_t cache = req->cache; + int lock; + + if (env_atomic_read(&cache->pending_read_misses_list_blocked)) { + req->complete(req, -OCF_ERR_BUSY); + return 0; + } + + req->engine_handler = _ocf_prefetch_read_do; + + if (!ocf_user_part_is_enabled(user_part)) { + req->complete(req, -OCF_ERR_BUSY); + return 0; + } + + ocf_req_hash(req); + + ocf_hb_req_prot_lock_wr(req); + + ocf_engine_lookup(req); + + if (unlikely(ocf_engine_is_hit(req))) { + ocf_hb_req_prot_unlock_wr(req); + req->complete(req, 0); + return 0; + } + + ocf_prepare_clines_miss(req); + if (unlikely(ocf_req_test_mapping_error(req))) { + ocf_hb_req_prot_unlock_wr(req); + req->complete(req, 0); + return 0; + } + + lock = ocf_req_async_lock_wr(c, req, ocf_engine_on_resume); + if (unlikely(lock < 0)) { + ocf_hb_req_prot_unlock_wr(req); + req->complete(req, 0); + return 0; + } + + ocf_hb_req_prot_unlock_wr(req); + + if (lock == OCF_LOCK_ACQUIRED) + _ocf_prefetch_read_do(req); + + return 0; +} diff --git a/src/engine/engine_prefetch.h 
b/src/engine/engine_prefetch.h new file mode 100644 index 000000000..36a9b6a8d --- /dev/null +++ b/src/engine/engine_prefetch.h @@ -0,0 +1,15 @@ +/* + * Copyright(c) 2012-2022 Intel Corporation + * Copyright(c) 2022-2024 Huawei Technologies + * Copyright(c) 2026 Unvertical + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef __ENGINE_PREFETCH_H__ +#define __ENGINE_PREFETCH_H__ + +#include "../ocf_request.h" + +int ocf_prefetch_read(struct ocf_request *req); + +#endif /* __ENGINE_PREFETCH_H__ */ diff --git a/src/metadata/metadata_partition.h b/src/metadata/metadata_partition.h index 6793bfaa5..3891322aa 100644 --- a/src/metadata/metadata_partition.h +++ b/src/metadata/metadata_partition.h @@ -11,6 +11,7 @@ #include "../ocf_cache_priv.h" #define PARTITION_DEFAULT 0 +#define PARTITION_PREFETCH 33 #define PARTITION_UNSPECIFIED ((ocf_part_id_t)-1) #define PARTITION_FREELIST (OCF_USER_IO_CLASS_MAX + 1) #define PARTITION_FREE_DETACHED (OCF_USER_IO_CLASS_MAX + 2) diff --git a/src/metadata/metadata_superblock.h b/src/metadata/metadata_superblock.h index 6c2934c82..78d347629 100644 --- a/src/metadata/metadata_superblock.h +++ b/src/metadata/metadata_superblock.h @@ -11,6 +11,7 @@ #include #include "metadata_segment.h" #include "../promotion/promotion.h" +#include "ocf/ocf_prefetch.h" #define CACHE_MAGIC_NUMBER 0x187E1CA6 @@ -54,6 +55,8 @@ struct ocf_superblock_config { ocf_promotion_t promotion_policy_type; struct promotion_policy_config promotion[PROMOTION_POLICY_TYPE_MAX]; + ocf_pf_mask_t prefetch_mask; + /* * Checksum for each metadata region. * This field has to be the last one! 
diff --git a/src/mngt/ocf_mngt_cache.c b/src/mngt/ocf_mngt_cache.c index 3d6039ec8..9bc2efa88 100644 --- a/src/mngt/ocf_mngt_cache.c +++ b/src/mngt/ocf_mngt_cache.c @@ -186,17 +186,26 @@ static void __init_partitions(ocf_cache_t cache) { ocf_part_id_t i_part; - /* Init default Partition */ + /* Init default partitions */ ENV_BUG_ON(ocf_mngt_add_partition_to_cache(cache, PARTITION_DEFAULT, "unclassified", 0, PARTITION_SIZE_MAX, OCF_IO_CLASS_PRIO_LOWEST, true)); + ENV_BUG_ON(ocf_mngt_add_partition_to_cache(cache, PARTITION_PREFETCH, + "prefetch", 0, PARTITION_SIZE_MAX, + OCF_IO_CLASS_PRIO_LOWEST, true)); + /* Add other partition to the cache and make it as dummy */ for (i_part = 0; i_part < OCF_USER_IO_CLASS_MAX; i_part++) { env_refcnt_freeze(&cache->user_parts[i_part].cleaning.counter); - if (i_part == PARTITION_DEFAULT) + switch (i_part) { + case PARTITION_DEFAULT: + case PARTITION_PREFETCH: continue; + default: + break; + } /* Init default Partition */ ENV_BUG_ON(ocf_mngt_add_partition_to_cache(cache, i_part, @@ -1572,6 +1581,7 @@ static void _ocf_mngt_cache_init(ocf_cache_t cache, */ cache->conf_meta->cache_mode = params->metadata.cache_mode; cache->conf_meta->promotion_policy_type = params->metadata.promotion_policy; + cache->conf_meta->prefetch_mask = OCF_PF_MASK_DEFAULT; __set_cleaning_policy(cache, ocf_cleaning_default); /* Init Partitions */ @@ -3734,6 +3744,41 @@ int ocf_mngt_cache_promotion_set_param(ocf_cache_t cache, ocf_promotion_t type, return result; } +int ocf_mngt_cache_prefetch_set_policy(ocf_cache_t cache, ocf_pf_mask_t mask) +{ + ocf_pf_mask_t valid_mask = (1 << ocf_pf_num) - 1; + + if (ocf_cache_is_standby(cache)) + return -OCF_ERR_CACHE_STANDBY; + + if (mask & ~valid_mask) + return -OCF_ERR_INVAL; + + ocf_metadata_start_exclusive_access(&cache->metadata.lock); + + cache->conf_meta->prefetch_mask = mask; + + ocf_metadata_end_exclusive_access(&cache->metadata.lock); + + return 0; +} + +int ocf_mngt_cache_prefetch_get_policy(ocf_cache_t cache, 
ocf_pf_mask_t *mask) +{ + OCF_CHECK_NULL(mask); + + if (ocf_cache_is_standby(cache)) + return -OCF_ERR_CACHE_STANDBY; + + ocf_metadata_start_shared_access(&cache->metadata.lock, 0); + + *mask = cache->conf_meta->prefetch_mask; + + ocf_metadata_end_shared_access(&cache->metadata.lock, 0); + + return 0; +} + int ocf_mngt_cache_reset_fallback_pt_error_counter(ocf_cache_t cache) { OCF_CHECK_NULL(cache); diff --git a/src/mngt/ocf_mngt_io_class.c b/src/mngt/ocf_mngt_io_class.c index b9241c63d..2e4c11b9e 100644 --- a/src/mngt/ocf_mngt_io_class.c +++ b/src/mngt/ocf_mngt_io_class.c @@ -1,5 +1,6 @@ /* * Copyright(c) 2012-2021 Intel Corporation + * Copyright(c) 2026 Unvertical * SPDX-License-Identifier: BSD-3-Clause */ @@ -133,8 +134,8 @@ static int _ocf_mngt_io_class_configure(ocf_cache_t cache, if (!name[0]) return -OCF_ERR_INVAL; - if (part_id == PARTITION_DEFAULT) { - /* Special behavior for default partition */ + if (part_id == PARTITION_DEFAULT || part_id == PARTITION_PREFETCH) { + /* Special behavior for default partitions */ if (env_strncmp(name, OCF_IO_CLASS_NAME_MAX, dest_part->config->name, OCF_IO_CLASS_NAME_MAX)) { @@ -155,7 +156,7 @@ static int _ocf_mngt_io_class_configure(ocf_cache_t cache, dest_part->config->cache_mode = cache_mode; ocf_cache_log(cache, log_info, - "Updating unclassified IO class, id: %u, name :'%s'," + "Updating default IO class, id: %u, name :'%s'," "max size: %u%% [ OK ]\n", part_id, dest_part->config->name, max); return 0; diff --git a/src/ocf_cache.c b/src/ocf_cache.c index 55f99e877..b8dd408a6 100644 --- a/src/ocf_cache.c +++ b/src/ocf_cache.c @@ -229,6 +229,7 @@ int ocf_cache_get_info(ocf_cache_t cache, struct ocf_cache_info *info) info->cleaning_policy = cache->cleaner.policy; info->promotion_policy = cache->conf_meta->promotion_policy_type; + info->prefetch_mask = cache->conf_meta->prefetch_mask; info->cache_line_size = ocf_line_size(cache); return 0; diff --git a/src/ocf_core.c b/src/ocf_core.c index d3618cdfa..f01db5a16 100644 --- 
a/src/ocf_core.c +++ b/src/ocf_core.c @@ -17,6 +17,7 @@ #include "ocf_request.h" #include "ocf_seq_detect.h" #include "ocf_seq_cutoff.h" +#include "prefetch/ocf_prefetch_priv.h" struct ocf_core_volume { ocf_core_t core; @@ -300,17 +301,15 @@ static void ocf_core_volume_submit_io(ocf_io_t io) ocf_core_update_stats(core, io); - /* In case of fastpath prevent completing the requets before updating - * sequential cutoff info */ + /* Prevent race condition with prefetch */ ocf_req_get(req); fastpath = ocf_core_submit_io_fast(req, cache); ocf_core_seq_detect_update(core, req); - ocf_req_put(req); if (fastpath == OCF_FAST_PATH_YES) - return; + goto prefetch; ocf_req_clear_map(req); @@ -320,6 +319,11 @@ static void ocf_core_volume_submit_io(ocf_io_t io) goto err; } +prefetch: + ocf_prefetch(req); + + ocf_req_put(req); + return; err: diff --git a/src/ocf_io.c b/src/ocf_io.c index 8b1929eb3..e91a30da3 100644 --- a/src/ocf_io.c +++ b/src/ocf_io.c @@ -11,6 +11,7 @@ #include "ocf_volume_priv.h" #include "ocf_core_priv.h" #include "utils/utils_io_allocator.h" +#include "prefetch/ocf_prefetch_priv.h" #include "ocf_env_refcnt.h" #ifdef OCF_DEBUG_STATS #include "ocf_stats_priv.h" @@ -97,6 +98,8 @@ ocf_io_t ocf_io_new(ocf_volume_t volume, ocf_queue_t queue, req->io.io_class = io_class; req->flags = flags; + req->io.pf_id = ocf_pf_none; + return req; } diff --git a/src/ocf_request.c b/src/ocf_request.c index d15a21732..034155a46 100644 --- a/src/ocf_request.c +++ b/src/ocf_request.c @@ -12,6 +12,7 @@ #include "engine/engine_common.h" #include "utils/utils_cache_line.h" #include "ocf_env_refcnt.h" +#include "ocf/ocf_prefetch.h" #define OCF_UTILS_RQ_DEBUG 0 @@ -97,6 +98,7 @@ static inline void ocf_req_init(struct ocf_request *req, ocf_cache_t cache, req->addr = addr; req->bytes = bytes; req->rw = rw; + req->io.pf_id = ocf_pf_none; } struct ocf_request *ocf_req_new_mngt(ocf_cache_t cache, ocf_queue_t queue) diff --git a/src/ocf_request.h b/src/ocf_request.h index 3add6248e..aadb6b918 
100644 --- a/src/ocf_request.h +++ b/src/ocf_request.h @@ -130,6 +130,11 @@ struct ocf_request_io { */ uint8_t io_class; + /** + * @brief Prefetcher id (ocf_pf_none for non-prefetch requests) + */ + ocf_pf_id_t pf_id; + /** * @brief Front volume handle */ diff --git a/src/ocf_stats.c b/src/ocf_stats.c index 78e01671a..aa4c19e0b 100644 --- a/src/ocf_stats.c +++ b/src/ocf_stats.c @@ -9,6 +9,7 @@ #include "ocf_priv.h" #include "metadata/metadata.h" #include "engine/cache_engine.h" +#include "prefetch/ocf_prefetch_priv.h" #include "utils/utils_user_part.h" #include "utils/utils_cache_line.h" @@ -46,8 +47,13 @@ static void ocf_stats_block_init(struct ocf_counters_block *stats) static void ocf_stats_part_init(struct ocf_counters_part *stats) { + ocf_pf_id_t pf_id; ocf_stats_req_init(&stats->read_reqs); ocf_stats_req_init(&stats->write_reqs); + for_each_pf(pf_id) { + ocf_stats_req_init(&stats->prefetch_reqs[pf_id]); + ocf_stats_block_init(&stats->prefetch_blocks[pf_id]); + } ocf_stats_block_init(&stats->blocks); ocf_stats_block_init(&stats->core_blocks); @@ -95,12 +101,17 @@ void ocf_core_stats_cache_block_update(ocf_core_t core, ocf_part_id_t part_id, } void ocf_core_stats_core_block_update(ocf_core_t core, ocf_part_id_t part_id, - int dir, uint64_t bytes) + int dir, uint64_t bytes, ocf_pf_id_t pf_id) { struct ocf_counters_block *counters = &core->counters->part_counters[part_id].core_blocks; _ocf_stats_block_update(counters, dir, bytes); + if (OCF_PF_ID_VALID(pf_id)) { + counters = &core->counters-> + part_counters[part_id].prefetch_blocks[pf_id]; + _ocf_stats_block_update(counters, dir, bytes); + } } void ocf_core_stats_pt_block_update(ocf_core_t core, ocf_part_id_t part_id, @@ -114,13 +125,17 @@ void ocf_core_stats_pt_block_update(ocf_core_t core, ocf_part_id_t part_id, void ocf_core_stats_request_update(ocf_core_t core, ocf_part_id_t part_id, uint8_t dir, uint64_t hit_no, uint64_t core_line_count, - uint8_t deferred) + ocf_pf_id_t pf_id, uint8_t deferred) { struct 
ocf_counters_req *counters; switch (dir) { case OCF_READ: counters = &core->counters->part_counters[part_id].read_reqs; + if (OCF_PF_ID_VALID(pf_id)) + counters = &core->counters-> + part_counters[part_id]. + prefetch_reqs[pf_id]; break; case OCF_WRITE: counters = &core->counters->part_counters[part_id].write_reqs; @@ -300,6 +315,7 @@ int ocf_core_io_class_get_stats(ocf_core_t core, ocf_part_id_t part_id, { ocf_cache_t cache; struct ocf_counters_part *part_stat; + ocf_pf_id_t pf_id; OCF_CHECK_NULL(core); OCF_CHECK_NULL(stats); @@ -327,6 +343,13 @@ int ocf_core_io_class_get_stats(ocf_core_t core, ocf_part_id_t part_id, copy_req_stats(&stats->read_reqs, &part_stat->read_reqs); copy_req_stats(&stats->write_reqs, &part_stat->write_reqs); + for_each_pf(pf_id) { + copy_req_stats(&stats->prefetch_reqs[pf_id], + &part_stat->prefetch_reqs[pf_id]); + copy_block_stats(&stats->prefetch_blocks[pf_id], + &part_stat->prefetch_blocks[pf_id]); + } + copy_block_stats(&stats->blocks, &part_stat->blocks); copy_block_stats(&stats->cache_blocks, &part_stat->cache_blocks); copy_block_stats(&stats->core_blocks, &part_stat->core_blocks); @@ -366,6 +389,7 @@ int ocf_core_get_stats(ocf_core_t core, struct ocf_stats_core *stats) #endif for (i = 0; i != OCF_USER_IO_CLASS_MAX; i++) { + ocf_pf_id_t pf_id; curr = &core_stats->part_counters[i]; accum_req_stats(&stats->read_reqs, @@ -373,6 +397,13 @@ int ocf_core_get_stats(ocf_core_t core, struct ocf_stats_core *stats) accum_req_stats(&stats->write_reqs, &curr->write_reqs); + for_each_pf(pf_id) { + accum_req_stats(&stats->prefetch_reqs[pf_id], + &curr->prefetch_reqs[pf_id]); + accum_block_stats(&stats->prefetch_blocks[pf_id], + &curr->prefetch_blocks[pf_id]); + } + accum_block_stats(&stats->core, &curr->blocks); accum_block_stats(&stats->core_volume, &curr->core_blocks); accum_block_stats(&stats->cache_volume, &curr->cache_blocks); diff --git a/src/ocf_stats_builder.c b/src/ocf_stats_builder.c index fa6dac305..54a64118f 100644 --- 
a/src/ocf_stats_builder.c +++ b/src/ocf_stats_builder.c @@ -10,6 +10,7 @@ #include "ocf_env.h" #include "metadata/metadata.h" #include "engine/cache_engine.h" +#include "prefetch/ocf_prefetch_priv.h" #include "utils/utils_user_part.h" #include "utils/utils_cache_line.h" #include "utils/utils_stats.h" @@ -20,6 +21,8 @@ static void _fill_req(struct ocf_stats_requests *req, struct ocf_stats_core *s) uint64_t total = serviced + s->read_reqs.pass_through + s->write_reqs.pass_through; uint64_t hit; + uint64_t prefetch_total = 0; + ocf_pf_id_t pf_id; /* Reads Section */ hit = s->read_reqs.total - (s->read_reqs.full_miss + @@ -42,10 +45,19 @@ static void _fill_req(struct ocf_stats_requests *req, struct ocf_stats_core *s) /* Pass-Through section */ _set(&req->rd_pt, s->read_reqs.pass_through, total); _set(&req->wr_pt, s->write_reqs.pass_through, total); + _set(&req->serviced, serviced, total); + + /* Prefetch Section */ + for_each_pf(pf_id) + prefetch_total += s->prefetch_reqs[pf_id].total; + for_each_pf(pf_id) { + _set(&req->prefetch[pf_id], s->prefetch_reqs[pf_id].total, + prefetch_total); + } /* Summary */ - _set(&req->serviced, serviced, total); - _set(&req->total, total, total); + _set(&req->user, total, total); + _set(&req->total, total + prefetch_total, total + prefetch_total); } static void _fill_req_part(struct ocf_stats_requests *req, @@ -55,6 +67,8 @@ static void _fill_req_part(struct ocf_stats_requests *req, uint64_t total = serviced + s->read_reqs.pass_through + s->write_reqs.pass_through; uint64_t hit; + uint64_t prefetch_total = 0; + ocf_pf_id_t pf_id; /* Reads Section */ hit = s->read_reqs.total - (s->read_reqs.full_miss + @@ -77,21 +91,36 @@ static void _fill_req_part(struct ocf_stats_requests *req, /* Pass-Through section */ _set(&req->rd_pt, s->read_reqs.pass_through, total); _set(&req->wr_pt, s->write_reqs.pass_through, total); + _set(&req->serviced, serviced, total); + + /* Prefetch Section */ + for_each_pf(pf_id) + prefetch_total += 
s->prefetch_reqs[pf_id].total; + for_each_pf(pf_id) { + _set(&req->prefetch[pf_id], s->prefetch_reqs[pf_id].total, + prefetch_total); + } /* Summary */ - _set(&req->serviced, serviced, total); _set(&req->total, total, total); + _set(&req->total, total + prefetch_total, total + prefetch_total); } static void _fill_blocks(struct ocf_stats_blocks *blocks, - struct ocf_stats_core *s) + const struct ocf_stats_core *s) { uint64_t rd, wr, total; + uint64_t pf_rd, pf_wr; + ocf_pf_id_t pf_id; /* Core volume */ rd = _bytes4k(s->core_volume.read); wr = _bytes4k(s->core_volume.write); total = rd + wr; + + for_each_pf(pf_id) + total += _bytes4k(s->prefetch_blocks[pf_id].read); + _set(&blocks->core_volume_rd, rd, total); _set(&blocks->core_volume_wr, wr, total); _set(&blocks->core_volume_total, total, total); @@ -100,6 +129,10 @@ static void _fill_blocks(struct ocf_stats_blocks *blocks, rd = _bytes4k(s->cache_volume.read); wr = _bytes4k(s->cache_volume.write); total = rd + wr; + + for_each_pf(pf_id) + total += _bytes4k(s->prefetch_blocks[pf_id].write); + _set(&blocks->cache_volume_rd, rd, total); _set(&blocks->cache_volume_wr, wr, total); _set(&blocks->cache_volume_total, total, total); @@ -119,17 +152,33 @@ static void _fill_blocks(struct ocf_stats_blocks *blocks, _set(&blocks->pass_through_rd, rd, total); _set(&blocks->pass_through_wr, wr, total); _set(&blocks->pass_through_total, total, total); + + /* Prefetch */ + for_each_pf(pf_id) { + pf_rd = _bytes4k(s->prefetch_blocks[pf_id].read); + _set(&blocks->prefetch_core_rd[pf_id], pf_rd, total); + } + for_each_pf(pf_id) { + pf_wr = _bytes4k(s->prefetch_blocks[pf_id].write); + _set(&blocks->prefetch_cache_wr[pf_id], pf_wr, total); + } } static void _fill_blocks_part(struct ocf_stats_blocks *blocks, - struct ocf_stats_io_class *s) + const struct ocf_stats_io_class *s) { uint64_t rd, wr, total; + uint64_t pf_rd, pf_wr; + ocf_pf_id_t pf_id; /* Core volume */ rd = _bytes4k(s->core_blocks.read); wr = _bytes4k(s->core_blocks.write); 
total = rd + wr; + + for_each_pf(pf_id) + total += _bytes4k(s->prefetch_blocks[pf_id].read); + _set(&blocks->core_volume_rd, rd, total); _set(&blocks->core_volume_wr, wr, total); _set(&blocks->core_volume_total, total, total); @@ -138,6 +187,10 @@ static void _fill_blocks_part(struct ocf_stats_blocks *blocks, rd = _bytes4k(s->cache_blocks.read); wr = _bytes4k(s->cache_blocks.write); total = rd + wr; + + for_each_pf(pf_id) + total += _bytes4k(s->prefetch_blocks[pf_id].write); + _set(&blocks->cache_volume_rd, rd, total); _set(&blocks->cache_volume_wr, wr, total); _set(&blocks->cache_volume_total, total, total); @@ -157,6 +210,16 @@ static void _fill_blocks_part(struct ocf_stats_blocks *blocks, _set(&blocks->pass_through_rd, rd, total); _set(&blocks->pass_through_wr, wr, total); _set(&blocks->pass_through_total, total, total); + + /* Prefetch */ + for_each_pf(pf_id) { + pf_rd = _bytes4k(s->prefetch_blocks[pf_id].read); + _set(&blocks->prefetch_core_rd[pf_id], pf_rd, total); + } + for_each_pf(pf_id) { + pf_wr = _bytes4k(s->prefetch_blocks[pf_id].write); + _set(&blocks->prefetch_cache_wr[pf_id], pf_wr, total); + } } static void _fill_errors(struct ocf_stats_errors *errors, @@ -220,6 +283,7 @@ static int _accumulate_io_class_stats(ocf_core_t core, void *cntx) struct ocf_stats_io_class *total = ((struct io_class_stats_context*)cntx)->stats; ocf_part_id_t part_id = ((struct io_class_stats_context*)cntx)->part_id; + ocf_pf_id_t pf_id; result = ocf_core_io_class_get_stats(core, part_id, &stats); if (result) @@ -237,6 +301,14 @@ static int _accumulate_io_class_stats(ocf_core_t core, void *cntx) _accumulate_reqs(&total->read_reqs, &stats.read_reqs); _accumulate_reqs(&total->write_reqs, &stats.write_reqs); + /* Prefetch section */ + for_each_pf(pf_id) { + _accumulate_reqs(&total->prefetch_reqs[pf_id], + &stats.prefetch_reqs[pf_id]); + _accumulate_block(&total->prefetch_blocks[pf_id], + &stats.prefetch_blocks[pf_id]); + } + return 0; } @@ -411,6 +483,7 @@ static int 
_accumulate_stats(ocf_core_t core, void *cntx) { struct ocf_stats_core stats, *total = cntx; int result; + ocf_pf_id_t pf_id; result = ocf_core_get_stats(core, &stats); if (result) @@ -427,6 +500,13 @@ static int _accumulate_stats(ocf_core_t core, void *cntx) _accumulate_errors(&total->cache_errors, &stats.cache_errors); _accumulate_errors(&total->core_errors, &stats.core_errors); + for_each_pf(pf_id) { + _accumulate_reqs(&total->prefetch_reqs[pf_id], + &stats.prefetch_reqs[pf_id]); + _accumulate_block(&total->prefetch_blocks[pf_id], + &stats.prefetch_blocks[pf_id]); + } + return 0; } diff --git a/src/ocf_stats_priv.h b/src/ocf_stats_priv.h index 42fb52230..0fb7ba787 100644 --- a/src/ocf_stats_priv.h +++ b/src/ocf_stats_priv.h @@ -8,6 +8,8 @@ #ifndef __OCF_STATS_PRIV_H__ #define __OCF_STATS_PRIV_H__ +#include "ocf/ocf_prefetch.h" + struct ocf_counters_block { env_atomic64 read_bytes; env_atomic64 write_bytes; @@ -90,6 +92,12 @@ struct ocf_stats_io_class { /** Writes requests statistics */ struct ocf_stats_req write_reqs; + /** Prefetch requests per perfetcher */ + struct ocf_stats_req prefetch_reqs[ocf_pf_num]; + + /** Prefetch block per prefetcher */ + struct ocf_stats_block prefetch_blocks[ocf_pf_num]; + /** Block requests for ocf volume statistics */ struct ocf_stats_block blocks; @@ -139,6 +147,9 @@ struct ocf_stats_core { /** Write requests statistics */ struct ocf_stats_req write_reqs; + /** Prefetch requests per Prefetch Algorithm */ + struct ocf_stats_req prefetch_reqs[ocf_pf_num]; + /** Block requests for cache volume statistics */ struct ocf_stats_block cache_volume; @@ -148,6 +159,9 @@ struct ocf_stats_core { /** Block requests submitted by user to this core */ struct ocf_stats_block core; + /** Block requests submitted by Prefetcher to this core */ + struct ocf_stats_block prefetch_blocks[ocf_pf_num]; + /** Pass Through block requests statistics */ struct ocf_stats_block pass_through_blocks; @@ -167,8 +181,10 @@ struct ocf_stats_core { struct 
ocf_counters_part { struct ocf_counters_req read_reqs; struct ocf_counters_req write_reqs; + struct ocf_counters_req prefetch_reqs[ocf_pf_num]; struct ocf_counters_block blocks; + struct ocf_counters_block prefetch_blocks[ocf_pf_num]; struct ocf_counters_block core_blocks; struct ocf_counters_block cache_blocks; @@ -197,7 +213,7 @@ struct ocf_counters_core { }; void ocf_core_stats_core_block_update(ocf_core_t core, ocf_part_id_t part_id, - int dir, uint64_t bytes); + int dir, uint64_t bytes, ocf_pf_id_t pf_id); void ocf_core_stats_cache_block_update(ocf_core_t core, ocf_part_id_t part_id, int dir, uint64_t bytes); void ocf_core_stats_vol_block_update(ocf_core_t core, ocf_part_id_t part_id, @@ -207,7 +223,7 @@ void ocf_core_stats_pt_block_update(ocf_core_t core, ocf_part_id_t part_id, void ocf_core_stats_request_update(ocf_core_t core, ocf_part_id_t part_id, uint8_t dir, uint64_t hit_no, uint64_t core_line_count, - uint8_t deferred); + ocf_pf_id_t pf_id, uint8_t deferred); void ocf_core_stats_request_pt_update(ocf_core_t core, ocf_part_id_t part_id, uint8_t dir, uint64_t hit_no, uint64_t core_line_count); diff --git a/src/prefetch/ocf_prefetch.c b/src/prefetch/ocf_prefetch.c new file mode 100644 index 000000000..93a048a5c --- /dev/null +++ b/src/prefetch/ocf_prefetch.c @@ -0,0 +1,131 @@ +/* + * Copyright(c) 2022-2024 Huawei Technologies + * Copyright(c) 2026 Unvertical + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include "../ocf_cache_priv.h" +#include "../ocf_core_priv.h" +#include "../ocf_def_priv.h" +#include "../ocf_request.h" +#include "../engine/engine_common.h" +#include "../engine/engine_prefetch.h" +#include "ocf_env.h" +#include "ocf_prefetch_priv.h" +#include "ocf_prefetch_readahead.h" + +struct ocf_pf_ops { + void (*get_range)(struct ocf_request *req, struct ocf_pf_range *range); +}; + +static struct ocf_pf_ops ocf_pf_ops[ocf_pf_num] = { + [ocf_pf_readahead] = { + .get_range = ocf_pf_readahead_get_range, + }, +}; + +static void 
_ocf_prefetch_complete(struct ocf_request *req, int error) +{ + ocf_req_put(req); +} + +static bool ocf_pf_next_sub_range_miss(struct ocf_request *req, + struct ocf_pf_range *range, struct ocf_pf_range *sub_range, + uint32_t max_lines) +{ + ocf_cache_t cache = req->cache; + ocf_core_id_t core_id = ocf_core_get_id(req->core); + uint64_t curr, end; + uint64_t first_miss, last_miss; + + curr = sub_range->core_line_first + sub_range->core_line_count; + end = range->core_line_first + range->core_line_count; + + for (; curr < end; curr++) { + if (!ocf_metadata_is_hit_no_lock(cache, core_id, curr)) + break; + } + + if (curr >= end) + return false; + + first_miss = curr; + last_miss = curr; + + curr += 1; + end = OCF_MIN(end, first_miss + max_lines); + + for (; curr < end; curr++) { + if (ocf_metadata_is_hit_no_lock(cache, core_id, curr)) + break; + + last_miss = curr; + } + + sub_range->core_line_first = first_miss; + sub_range->core_line_count = last_miss - first_miss + 1; + + return true; +} + +void ocf_prefetch_range(struct ocf_request *req, ocf_pf_id_t pf_id, + struct ocf_pf_range *range) +{ + struct ocf_request *prefetch_req = NULL; + ocf_cache_t cache = req->cache; + uint64_t volume_length_cl = ocf_bytes_2_lines(cache, + ocf_volume_get_length(&req->core->volume)); + uint32_t max_total_cl = ocf_bytes_2_lines(cache, OCF_PF_MAX_TOTAL); + struct ocf_pf_range sub_range = { + .core_line_first = range->core_line_first, + .core_line_count = 0, + }; + uint32_t total_cl = 0, curmax_cl = 0; + uint64_t addr; + uint32_t bytes; + + if (unlikely(range->core_line_first >= volume_length_cl)) + return; + + range->core_line_count = OCF_MIN(range->core_line_count, + volume_length_cl - range->core_line_first); + + curmax_cl = OCF_MIN(range->core_line_count, max_total_cl); + while (ocf_pf_next_sub_range_miss(req, range, &sub_range, curmax_cl)) { + addr = ocf_lines_2_bytes(cache, sub_range.core_line_first); + bytes = ocf_lines_2_bytes(cache, sub_range.core_line_count); + prefetch_req = 
ocf_req_new_extended(req->io_queue, req->core, + addr, bytes, OCF_READ); + if (unlikely(!prefetch_req)) + break; + + prefetch_req->io.io_class = PARTITION_PREFETCH; + prefetch_req->io.pf_id = pf_id; + + prefetch_req->complete = _ocf_prefetch_complete; + + ocf_prefetch_read(prefetch_req); + + total_cl += sub_range.core_line_count; + if (total_cl >= max_total_cl) + break; + + curmax_cl = OCF_MIN(curmax_cl, max_total_cl - total_cl); + } +} + +void ocf_prefetch(struct ocf_request *req) +{ + ocf_pf_mask_t pf_mask = req->cache->conf_meta->prefetch_mask; + struct ocf_pf_range ranges[ocf_pf_num] = {}; + ocf_pf_id_t pf_id; + + if (req->rw != OCF_READ) + return; + + for_each_pf_mask(pf_id, pf_mask) + ocf_pf_ops[pf_id].get_range(req, &ranges[pf_id]); + + for_each_pf_mask(pf_id, pf_mask) + ocf_prefetch_range(req, pf_id, &ranges[pf_id]); +} diff --git a/src/prefetch/ocf_prefetch_priv.h b/src/prefetch/ocf_prefetch_priv.h new file mode 100644 index 000000000..76a2e3d72 --- /dev/null +++ b/src/prefetch/ocf_prefetch_priv.h @@ -0,0 +1,34 @@ +/* + * Copyright(c) 2022-2024 Huawei Technologies + * Copyright(c) 2026 Unvertical + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef __OCF_PREFETCH_PRIV_H__ +#define __OCF_PREFETCH_PRIV_H__ + +#include "ocf/ocf_prefetch.h" +#include "../ocf_request.h" +#include "ocf/ocf_types.h" +#include "ocf/ocf_def.h" + +#define OCF_PF_MAX_TOTAL (8 * MiB) + +#define OCF_PF_ID_VALID(pf_id) ((pf_id) != ocf_pf_none && (pf_id) < ocf_pf_num) +#define OCF_PF_ID_ENABLED(pf_id, enabled_mask) ((1 << ((pf_id))) & enabled_mask) + +#define for_each_pf(pf_id) \ + for (pf_id = 0; pf_id < ocf_pf_num; pf_id++) + +#define for_each_pf_mask(pf_id, pf_mask) \ + for_each_pf(pf_id) \ + if (OCF_PF_ID_ENABLED(pf_id, pf_mask)) + +struct ocf_pf_range { + uint64_t core_line_first; + uint32_t core_line_count; +}; + +void ocf_prefetch(struct ocf_request *req); + +#endif /* __OCF_PREFETCH_PRIV_H__ */ diff --git a/src/prefetch/ocf_prefetch_readahead.c 
b/src/prefetch/ocf_prefetch_readahead.c new file mode 100644 index 000000000..0076587e2 --- /dev/null +++ b/src/prefetch/ocf_prefetch_readahead.c @@ -0,0 +1,26 @@ +/* + * Copyright(c) 2022-2024 Huawei Technologies + * Copyright(c) 2026 Unvertical + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include "ocf_prefetch_readahead.h" +#include "../utils/utils_cache_line.h" +#include "ocf/ocf_def.h" + +#define OCF_PF_READAHEAD_MIN (64 * KiB) + +/* + * NOTE: This simplistic implementation is meant to serve as an reference + * implementation for other prefetch policies. In the current form + * it's not expected to bring any performance improvements for most + * of the workloads (actually it's expected to cause performance + * degratation in most cases). + */ +void ocf_pf_readahead_get_range(struct ocf_request *req, + struct ocf_pf_range *range) +{ + range->core_line_first = req->core_line_first + req->core_line_count; + range->core_line_count = OCF_MIN(req->core_line_count, + ocf_bytes_2_lines(req->cache, OCF_PF_READAHEAD_MIN)); +} diff --git a/src/prefetch/ocf_prefetch_readahead.h b/src/prefetch/ocf_prefetch_readahead.h new file mode 100644 index 000000000..6abbb6422 --- /dev/null +++ b/src/prefetch/ocf_prefetch_readahead.h @@ -0,0 +1,16 @@ +/* + * Copyright(c) 2022-2024 Huawei Technologies + * Copyright(c) 2026 Unvertical + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef __OCF_PREFETCH_READAHEAD_H__ +#define __OCF_PREFETCH_READAHEAD_H__ + +#include "ocf_prefetch_priv.h" +#include "ocf/ocf_types.h" + +void ocf_pf_readahead_get_range(struct ocf_request *req, + struct ocf_pf_range *range); + +#endif /* __OCF_PREFETCH_READAHEAD_H__ */ diff --git a/src/utils/utils_cleaner.c b/src/utils/utils_cleaner.c index bc25e9472..451ee2a6b 100644 --- a/src/utils/utils_cleaner.c +++ b/src/utils/utils_cleaner.c @@ -437,7 +437,7 @@ static void _ocf_cleaner_core_io_for_dirty_range(struct ocf_request *req, + BLOCKS_TO_BYTES(begin); ocf_core_stats_core_block_update(req->core, 
part_id, OCF_WRITE, - BLOCKS_TO_BYTES(end - begin)); + BLOCKS_TO_BYTES(end - begin), ocf_pf_none); OCF_DEBUG_PARAM(req->cache, "Core write, line = %llu, " "block = %llu, count = %llu", iter->core_line, begin, diff --git a/tests/functional/pyocf/types/cache.py b/tests/functional/pyocf/types/cache.py index db015de37..75375b794 100644 --- a/tests/functional/pyocf/types/cache.py +++ b/tests/functional/pyocf/types/cache.py @@ -224,6 +224,13 @@ def __str__(self): return self.name +class Prefetcher(IntEnum): + READAHEAD = 0 + + def __str__(self): + return self.name + + class MetadataLayout(IntEnum): STRIPING = 0 SEQUENTIAL = 1 @@ -438,6 +445,32 @@ def set_promotion_policy_param(self, promotion_type, param_id, param_value): if status: raise OcfError("Error setting promotion policy parameter", status) + def set_prefetch_policy(self, mask): + self.write_lock() + + status = self.owner.lib.ocf_mngt_cache_prefetch_set_policy( + self.cache_handle, mask + ) + + self.write_unlock() + if status: + raise OcfError("Error setting prefetch policy", status) + + def get_prefetch_policy(self): + self.read_lock() + + mask = c_uint8() + + status = self.owner.lib.ocf_mngt_cache_prefetch_get_policy( + self.cache_handle, byref(mask) + ) + + self.read_unlock() + if status: + raise OcfError("Error getting prefetch policy", status) + + return mask.value + def set_seq_cut_off_policy(self, policy: SeqCutOffPolicy): self.write_lock() @@ -980,6 +1013,11 @@ def get_conf(self): line_size = CacheLineSize(cache_info.cache_line_size) cache_name = self.owner.lib.ocf_cache_get_name(self).decode("ascii") + prefetch = [] + for pf_id in Prefetcher: + if cache_info.prefetch_mask & (1 << pf_id): + prefetch += [pf_id] + return { "attached": cache_info.attached, "volume_type": self.owner.volume_types[cache_info.volume_type], @@ -1001,6 +1039,7 @@ def get_conf(self): "state": cache_info.state, "cleaning_policy": CleaningPolicy(cache_info.cleaning_policy), "promotion_policy": 
PromotionPolicy(cache_info.promotion_policy), + "prefetch": prefetch, "cache_line_size": line_size, "flushed": CacheLines(cache_info.flushed, line_size), "core_count": cache_info.core_count, @@ -1200,6 +1239,10 @@ def get_by_name(cache_name, owner=None): c_uint32, ] lib.ocf_mngt_cache_cleaning_set_param.restype = c_int +lib.ocf_mngt_cache_prefetch_set_policy.argtypes = [c_void_p, c_uint8] +lib.ocf_mngt_cache_prefetch_set_policy.restype = c_int +lib.ocf_mngt_cache_prefetch_get_policy.argtypes = [c_void_p, POINTER(c_uint8)] +lib.ocf_mngt_cache_prefetch_get_policy.restype = c_int lib.ocf_cache_io_class_get_info.restype = c_int lib.ocf_cache_io_class_get_info.argtypes = [c_void_p, c_uint32, c_void_p] lib.ocf_mngt_add_partition_to_cache.restype = c_int diff --git a/tests/functional/pyocf/types/ioclass.py b/tests/functional/pyocf/types/ioclass.py index 94fe16fb1..434d351f4 100644 --- a/tests/functional/pyocf/types/ioclass.py +++ b/tests/functional/pyocf/types/ioclass.py @@ -1,5 +1,6 @@ # # Copyright(c) 2019-2021 Intel Corporation +# Copyright(c) 2026 Unvertical # SPDX-License-Identifier: BSD-3-Clause # @@ -30,5 +31,5 @@ class IoClassConfig(Structure): class IoClassesInfo(Structure): - MAX_IO_CLASSES = 33 + MAX_IO_CLASSES = 34 _fields_ = [("_config", IoClassConfig * MAX_IO_CLASSES)] diff --git a/tests/functional/pyocf/types/shared.py b/tests/functional/pyocf/types/shared.py index 74dcaf069..6203156c4 100644 --- a/tests/functional/pyocf/types/shared.py +++ b/tests/functional/pyocf/types/shared.py @@ -18,6 +18,7 @@ class OcfErrorCode(IntEnum): OCF_ERR_INVAL = 1000000 OCF_ERR_AGAIN = auto() OCF_ERR_INTR = auto() + OCF_ERR_BUSY = auto() OCF_ERR_NOT_SUPP = auto() OCF_ERR_NO_MEM = auto() OCF_ERR_NO_LOCK = auto() diff --git a/tests/functional/pyocf/types/stats/cache.py b/tests/functional/pyocf/types/stats/cache.py index 4566aa6e7..f88091fc7 100644 --- a/tests/functional/pyocf/types/stats/cache.py +++ b/tests/functional/pyocf/types/stats/cache.py @@ -31,6 +31,7 @@ class 
CacheInfo(Structure): ("fallback_pt", _FallbackPt), ("cleaning_policy", c_uint32), ("promotion_policy", c_uint32), + ("prefetch_mask", c_uint8), ("cache_line_size", c_uint64), ("flushed", c_uint32), ("core_count", c_uint32), diff --git a/tests/functional/pyocf/types/stats/shared.py b/tests/functional/pyocf/types/stats/shared.py index 0d0effbc5..c158b615e 100644 --- a/tests/functional/pyocf/types/stats/shared.py +++ b/tests/functional/pyocf/types/stats/shared.py @@ -7,6 +7,7 @@ from ctypes import c_uint64, c_uint32, Structure +PF_ID_NUM = 1 class _Stat(Structure): _fields_ = [("value", c_uint64), ("fraction", c_uint64)] @@ -62,6 +63,8 @@ class RequestsStats(Structure): ("rd_pt", _Stat), ("wr_pt", _Stat), ("serviced", _Stat), + ("prefetch", _Stat * PF_ID_NUM), + ("user", _Stat), ("total", _Stat), ] @@ -80,6 +83,8 @@ class BlocksStats(Structure): ("pt_rd", _Stat), ("pt_wr", _Stat), ("pt_total", _Stat), + ("prefetch_core_rd", _Stat * PF_ID_NUM), + ("prefetch_cache_wr", _Stat * PF_ID_NUM), ]