[PATCH v3 17/46] perf/x86/intel/cmt: add uflag CMT_UF_NOLAZY_RMID
This uflag allows the user to specify that an rmid must be allocated at monr's initialization or fail otherwise. For this to work, we split __pmonr_apply_uflags into reserve and apply modes. The reserve mode will try to reserve a free rmid, and if successful, the apply mode can proceed using the rmid previously reserved. Signed-off-by: David Carrillo-Cisneros --- arch/x86/events/intel/cmt.c | 116 +++- arch/x86/events/intel/cmt.h | 5 +- 2 files changed, 109 insertions(+), 12 deletions(-) diff --git a/arch/x86/events/intel/cmt.c b/arch/x86/events/intel/cmt.c index 9421a3e..3883cb4 100644 --- a/arch/x86/events/intel/cmt.c +++ b/arch/x86/events/intel/cmt.c @@ -33,7 +33,8 @@ static unsigned int __min_max_rmid; /* minimum max_rmid across all pkgs. */ static struct monr *monr_hrchy_root; /* Flags for root monr and all its pmonrs while being monitored. */ -static enum cmt_user_flags root_monr_uflags = CMT_UF_HAS_USER; +static enum cmt_user_flags root_monr_uflags = + CMT_UF_HAS_USER | CMT_UF_NOLAZY_RMID; /* Auxiliar flags */ static enum cmt_user_flags *pkg_uflags_zeroes; @@ -414,6 +415,7 @@ static void monr_dealloc(struct monr *monr) u16 p, nr_pkgs = topology_max_packages(); if (WARN_ON_ONCE(monr->nr_has_user) || + WARN_ON_ONCE(monr->nr_nolazy_rmid) || WARN_ON_ONCE(monr->mon_events)) return; @@ -478,11 +480,28 @@ static enum cmt_user_flags pmonr_uflags(struct pmonr *pmonr) return monr->uflags | monr->pkg_uflags[pmonr->pkgd->pkgid]; } +/* + * Callable in two modes: + * 1) @reserve == true: will check if uflags are applicable and store in + * @res_rmid the "reserved" rmid. + * 2) @reserve == false: will apply pmonr_uflags using the rmid stored in + * @res_rmid rmid (if any). Cannot fail. 
+ */ static int __pmonr_apply_uflags(struct pmonr *pmonr, - enum cmt_user_flags pmonr_uflags) + enum cmt_user_flags pmonr_uflags, bool reserve, u32 *res_rmid) { + struct pkg_data *pkgd = pmonr->pkgd; + u32 free_rmid; + + if (WARN_ON_ONCE(!res_rmid)) + return -EINVAL; + if (WARN_ON_ONCE(reserve && *res_rmid != INVALID_RMID)) + return -EINVAL; + if (!(pmonr_uflags & CMT_UF_HAS_USER)) { if (pmonr->state != PMONR_OFF) { + if (reserve) + return 0; pmonr_to_unused(pmonr); pmonr_unused_to_off(pmonr); } @@ -492,8 +511,40 @@ static int __pmonr_apply_uflags(struct pmonr *pmonr, if (monr_is_root(pmonr->monr) && (~pmonr_uflags & root_monr_uflags)) return -EINVAL; - if (pmonr->state == PMONR_OFF) - pmonr_to_unused(pmonr); + if (pmonr->state == PMONR_OFF) { + if (!reserve) + pmonr_to_unused(pmonr); + } + if (pmonr->state == PMONR_ACTIVE) + return 0; + if (!(pmonr_uflags & CMT_UF_NOLAZY_RMID)) + return 0; + if (pmonr->state == PMONR_DEP_DIRTY) { + if (!reserve) + pmonr_dep_dirty_to_active(pmonr); + return 0; + } + + /* +* At this point pmonr is in either Unused or Dep_Idle state and +* needs a rmid to transition to Active. +*/ + if (reserve) { + free_rmid = find_first_bit(pkgd->free_rmids, CMT_MAX_NR_RMIDS); + if (free_rmid == CMT_MAX_NR_RMIDS) + return -ENOSPC; + *res_rmid = free_rmid; + __clear_bit(*res_rmid, pkgd->free_rmids); + return 0; + } + + /* both cases use the reserved rmid. 
*/ + if (pmonr->state == PMONR_UNUSED) { + pmonr_unused_to_active(pmonr, *res_rmid); + } else { + WARN_ON_ONCE(pmonr->state != PMONR_DEP_IDLE); + pmonr_dep_idle_to_active(pmonr, *res_rmid); + } return 0; } @@ -514,7 +565,10 @@ static bool monr_has_user(struct monr *monr) pkg_uflags_has_user(monr->pkg_uflags); } -static int __monr_apply_uflags(struct monr *monr, enum cmt_user_flags *puflags) +static int __monr_apply_uflags(struct monr *monr, + enum cmt_user_flags *puflags, + bool reserve, + u32 *res_rmids) { enum cmt_user_flags pmonr_uflags; struct pkg_data *pkgd = NULL; @@ -526,7 +580,10 @@ static int __monr_apply_uflags(struct monr *monr, enum cmt_user_flags *puflags) pmonr_uflags = monr->uflags | (puflags ? puflags[p] : monr->pkg_uflags[p]); pmonr = pkgd_pmonr(pkgd, monr); - err = __pmonr_apply_uflags(pmonr, pmonr_uflags); + err = __pmonr_apply_uflags(pmonr, pmonr_uflags, +
[PATCH v3 17/46] perf/x86/intel/cmt: add uflag CMT_UF_NOLAZY_RMID
This uflag allows user to specify that a rmid must be allocated at monr's initialization or fail otherwise. For this to work we split __pmonr_apply_uflags into reserve and apply modes. The reserve mode will try to reserve a free rmid, and if successful, the apply mode can proceed using the rmid previously reserved. Signed-off-by: David Carrillo-Cisneros --- arch/x86/events/intel/cmt.c | 116 +++- arch/x86/events/intel/cmt.h | 5 +- 2 files changed, 109 insertions(+), 12 deletions(-) diff --git a/arch/x86/events/intel/cmt.c b/arch/x86/events/intel/cmt.c index 9421a3e..3883cb4 100644 --- a/arch/x86/events/intel/cmt.c +++ b/arch/x86/events/intel/cmt.c @@ -33,7 +33,8 @@ static unsigned int __min_max_rmid; /* minimum max_rmid across all pkgs. */ static struct monr *monr_hrchy_root; /* Flags for root monr and all its pmonrs while being monitored. */ -static enum cmt_user_flags root_monr_uflags = CMT_UF_HAS_USER; +static enum cmt_user_flags root_monr_uflags = + CMT_UF_HAS_USER | CMT_UF_NOLAZY_RMID; /* Auxiliar flags */ static enum cmt_user_flags *pkg_uflags_zeroes; @@ -414,6 +415,7 @@ static void monr_dealloc(struct monr *monr) u16 p, nr_pkgs = topology_max_packages(); if (WARN_ON_ONCE(monr->nr_has_user) || + WARN_ON_ONCE(monr->nr_nolazy_rmid) || WARN_ON_ONCE(monr->mon_events)) return; @@ -478,11 +480,28 @@ static enum cmt_user_flags pmonr_uflags(struct pmonr *pmonr) return monr->uflags | monr->pkg_uflags[pmonr->pkgd->pkgid]; } +/* + * Callable in two modes: + * 1) @reserve == true: will check if uflags are applicable and store in + * @res_rmid the "reserved" rmid. + * 2) @reserve == false: will apply pmonr_uflags using the rmid stored in + * @res_rmid rmid (if any). Cannot fail. 
+ */ static int __pmonr_apply_uflags(struct pmonr *pmonr, - enum cmt_user_flags pmonr_uflags) + enum cmt_user_flags pmonr_uflags, bool reserve, u32 *res_rmid) { + struct pkg_data *pkgd = pmonr->pkgd; + u32 free_rmid; + + if (WARN_ON_ONCE(!res_rmid)) + return -EINVAL; + if (WARN_ON_ONCE(reserve && *res_rmid != INVALID_RMID)) + return -EINVAL; + if (!(pmonr_uflags & CMT_UF_HAS_USER)) { if (pmonr->state != PMONR_OFF) { + if (reserve) + return 0; pmonr_to_unused(pmonr); pmonr_unused_to_off(pmonr); } @@ -492,8 +511,40 @@ static int __pmonr_apply_uflags(struct pmonr *pmonr, if (monr_is_root(pmonr->monr) && (~pmonr_uflags & root_monr_uflags)) return -EINVAL; - if (pmonr->state == PMONR_OFF) - pmonr_to_unused(pmonr); + if (pmonr->state == PMONR_OFF) { + if (!reserve) + pmonr_to_unused(pmonr); + } + if (pmonr->state == PMONR_ACTIVE) + return 0; + if (!(pmonr_uflags & CMT_UF_NOLAZY_RMID)) + return 0; + if (pmonr->state == PMONR_DEP_DIRTY) { + if (!reserve) + pmonr_dep_dirty_to_active(pmonr); + return 0; + } + + /* +* At this point pmonr is in either Unused or Dep_Idle state and +* needs a rmid to transition to Active. +*/ + if (reserve) { + free_rmid = find_first_bit(pkgd->free_rmids, CMT_MAX_NR_RMIDS); + if (free_rmid == CMT_MAX_NR_RMIDS) + return -ENOSPC; + *res_rmid = free_rmid; + __clear_bit(*res_rmid, pkgd->free_rmids); + return 0; + } + + /* both cases use the reserved rmid. 
*/ + if (pmonr->state == PMONR_UNUSED) { + pmonr_unused_to_active(pmonr, *res_rmid); + } else { + WARN_ON_ONCE(pmonr->state != PMONR_DEP_IDLE); + pmonr_dep_idle_to_active(pmonr, *res_rmid); + } return 0; } @@ -514,7 +565,10 @@ static bool monr_has_user(struct monr *monr) pkg_uflags_has_user(monr->pkg_uflags); } -static int __monr_apply_uflags(struct monr *monr, enum cmt_user_flags *puflags) +static int __monr_apply_uflags(struct monr *monr, + enum cmt_user_flags *puflags, + bool reserve, + u32 *res_rmids) { enum cmt_user_flags pmonr_uflags; struct pkg_data *pkgd = NULL; @@ -526,7 +580,10 @@ static int __monr_apply_uflags(struct monr *monr, enum cmt_user_flags *puflags) pmonr_uflags = monr->uflags | (puflags ? puflags[p] : monr->pkg_uflags[p]); pmonr = pkgd_pmonr(pkgd, monr); - err = __pmonr_apply_uflags(pmonr, pmonr_uflags); + err = __pmonr_apply_uflags(pmonr, pmonr_uflags, +