Generalize the RDT naming so that we can get ready to apply other
resource controls like MBA.

RDT resource cbm values are renamed to ctrl_val, representing generic
control values which will hold both cbm (cache bit mask) and memory b/w
throttle values. max_cbm is renamed to no_ctrl, which represents the
default value that provides no control: all bits set in the case of CAT
and zero throttling in the case of MBA. tmp_cbms is renamed to tmp_ctrl.
Similarly, the domain structure member cbm is renamed to ctrl_val.

APIs are also generalized:
- rdt_get_cache_config is added to separate the cache specific config
from memory specific APIs.
- MSR update API names are changed from having cbm to ctrl.
- info file API names are set to reflect generic no_ctrl or control
values rather than cbm.

Signed-off-by: Vikas Shivappa <vikas.shiva...@linux.intel.com>
---
 arch/x86/include/asm/intel_rdt.h         | 20 ++++++++++----------
 arch/x86/kernel/cpu/intel_rdt.c          | 28 ++++++++++++++--------------
 arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 12 ++++++------
 arch/x86/kernel/cpu/intel_rdt_schemata.c | 24 ++++++++++++------------
 4 files changed, 42 insertions(+), 42 deletions(-)

diff --git a/arch/x86/include/asm/intel_rdt.h b/arch/x86/include/asm/intel_rdt.h
index 95ce5c8..9f66bf5 100644
--- a/arch/x86/include/asm/intel_rdt.h
+++ b/arch/x86/include/asm/intel_rdt.h
@@ -71,14 +71,14 @@ struct rftype {
  * @capable:                   Is this feature available on this machine
  * @name:                      Name to use in "schemata" file
  * @num_closid:                        Number of CLOSIDs available
- * @max_cbm:                   Largest Cache Bit Mask allowed
+ * @no_ctrl:                   Specifies max cache cbm or min mem b/w delay.
  * @min_cbm_bits:              Minimum number of consecutive bits to be set
  *                             in a cache bit mask
  * @domains:                   All domains for this resource
  * @num_domains:               Number of domains active
  * @msr_base:                  Base MSR address for CBMs
- * @tmp_cbms:                  Scratch space when updating schemata
- * @num_tmp_cbms:              Number of CBMs in tmp_cbms
+ * @tmp_ctrl:                  Scratch space when updating schemata
+ * @num_tmp_ctrl:              Number of control values in tmp_ctrl
  * @cache_level:               Which cache level defines scope of this domain
  * @cbm_idx_multi:             Multiplier of CBM index
  * @cbm_idx_offset:            Offset of CBM index. CBM index is computed by:
@@ -91,12 +91,12 @@ struct rdt_resource {
        int                     num_closid;
        int                     cbm_len;
        int                     min_cbm_bits;
-       u32                     max_cbm;
+       u32                     no_ctrl;
        struct list_head        domains;
        int                     num_domains;
        int                     msr_base;
-       u32                     *tmp_cbms;
-       int                     num_tmp_cbms;
+       u32                     *tmp_ctrl;
+       int                     num_tmp_ctrl;
        int                     cache_level;
        int                     cbm_idx_multi;
        int                     cbm_idx_offset;
@@ -107,13 +107,13 @@ struct rdt_resource {
  * @list:      all instances of this resource
  * @id:                unique id for this instance
  * @cpu_mask:  which cpus share this resource
- * @cbm:       array of cache bit masks (indexed by CLOSID)
+ * @ctrl_val:  array of cache or mem ctrl values (indexed by CLOSID)
  */
 struct rdt_domain {
        struct list_head        list;
        int                     id;
        struct cpumask          cpu_mask;
-       u32                     *cbm;
+       u32                     *ctrl_val;
 };
 
 /**
@@ -165,7 +165,7 @@ enum {
 };
 
 /* CPUID.(EAX=10H, ECX=ResID=1).EDX */
-union cpuid_0x10_1_edx {
+union cpuid_0x10_x_edx {
        struct {
                unsigned int cos_max:16;
        } split;
@@ -174,7 +174,7 @@ enum {
 
 DECLARE_PER_CPU_READ_MOSTLY(int, cpu_closid);
 
-void rdt_cbm_update(void *arg);
+void rdt_ctrl_update(void *arg);
 struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn);
 void rdtgroup_kn_unlock(struct kernfs_node *kn);
 ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
index 5a533fe..b2c037a 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -119,7 +119,7 @@ static inline bool cache_alloc_hsw_probe(void)
 
                r->num_closid = 4;
                r->cbm_len = 20;
-               r->max_cbm = max_cbm;
+               r->no_ctrl = max_cbm;
                r->min_cbm_bits = 2;
                r->capable = true;
                r->enabled = true;
@@ -130,16 +130,16 @@ static inline bool cache_alloc_hsw_probe(void)
        return false;
 }
 
-static void rdt_get_config(int idx, struct rdt_resource *r)
+static void rdt_get_cache_config(int idx, struct rdt_resource *r)
 {
        union cpuid_0x10_1_eax eax;
-       union cpuid_0x10_1_edx edx;
+       union cpuid_0x10_x_edx edx;
        u32 ebx, ecx;
 
        cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx, &edx.full);
        r->num_closid = edx.split.cos_max + 1;
        r->cbm_len = eax.split.cbm_len + 1;
-       r->max_cbm = BIT_MASK(eax.split.cbm_len + 1) - 1;
+       r->no_ctrl = BIT_MASK(eax.split.cbm_len + 1) - 1;
        r->capable = true;
        r->enabled = true;
 }
@@ -151,7 +151,7 @@ static void rdt_get_cdp_l3_config(int type)
 
        r->num_closid = r_l3->num_closid / 2;
        r->cbm_len = r_l3->cbm_len;
-       r->max_cbm = r_l3->max_cbm;
+       r->no_ctrl = r_l3->no_ctrl;
        r->capable = true;
        /*
         * By default, CDP is disabled. CDP can be enabled by mount parameter
@@ -171,7 +171,7 @@ static inline bool get_rdt_resources(void)
                return false;
 
        if (boot_cpu_has(X86_FEATURE_CAT_L3)) {
-               rdt_get_config(1, &rdt_resources_all[RDT_RESOURCE_L3]);
+               rdt_get_cache_config(1, &rdt_resources_all[RDT_RESOURCE_L3]);
                if (boot_cpu_has(X86_FEATURE_CDP_L3)) {
                        rdt_get_cdp_l3_config(RDT_RESOURCE_L3DATA);
                        rdt_get_cdp_l3_config(RDT_RESOURCE_L3CODE);
@@ -180,7 +180,7 @@ static inline bool get_rdt_resources(void)
        }
        if (boot_cpu_has(X86_FEATURE_CAT_L2)) {
                /* CPUID 0x10.2 fields are same format at 0x10.1 */
-               rdt_get_config(2, &rdt_resources_all[RDT_RESOURCE_L2]);
+               rdt_get_cache_config(2, &rdt_resources_all[RDT_RESOURCE_L2]);
                ret = true;
        }
 
@@ -200,7 +200,7 @@ static int get_cache_id(int cpu, int level)
        return -1;
 }
 
-void rdt_cbm_update(void *arg)
+void rdt_ctrl_update(void *arg)
 {
        struct msr_param *m = (struct msr_param *)arg;
        struct rdt_resource *r = m->res;
@@ -221,7 +221,7 @@ void rdt_cbm_update(void *arg)
        for (i = m->low; i < m->high; i++) {
                int idx = cbm_idx(r, i);
 
-               wrmsrl(r->msr_base + idx, d->cbm[i]);
+               wrmsrl(r->msr_base + idx, d->ctrl_val[i]);
        }
 }
 
@@ -294,8 +294,8 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
 
        d->id = id;
 
-       d->cbm = kmalloc_array(r->num_closid, sizeof(*d->cbm), GFP_KERNEL);
-       if (!d->cbm) {
+       d->ctrl_val = kmalloc_array(r->num_closid, sizeof(*d->ctrl_val), GFP_KERNEL);
+       if (!d->ctrl_val) {
                kfree(d);
                return;
        }
@@ -303,8 +303,8 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
        for (i = 0; i < r->num_closid; i++) {
                int idx = cbm_idx(r, i);
 
-               d->cbm[i] = r->max_cbm;
-               wrmsrl(r->msr_base + idx, d->cbm[i]);
+               d->ctrl_val[i] = r->no_ctrl;
+               wrmsrl(r->msr_base + idx, d->ctrl_val[i]);
        }
 
        cpumask_set_cpu(cpu, &d->cpu_mask);
@@ -326,7 +326,7 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
        cpumask_clear_cpu(cpu, &d->cpu_mask);
        if (cpumask_empty(&d->cpu_mask)) {
                r->num_domains--;
-               kfree(d->cbm);
+               kfree(d->ctrl_val);
                list_del(&d->list);
                kfree(d);
        }
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index 8af04af..edc6195 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -498,12 +498,12 @@ static int rdt_num_closids_show(struct kernfs_open_file *of,
        return 0;
 }
 
-static int rdt_cbm_mask_show(struct kernfs_open_file *of,
+static int rdt_no_ctrl_show(struct kernfs_open_file *of,
                             struct seq_file *seq, void *v)
 {
        struct rdt_resource *r = of->kn->parent->priv;
 
-       seq_printf(seq, "%x\n", r->max_cbm);
+       seq_printf(seq, "%x\n", r->no_ctrl);
 
        return 0;
 }
@@ -530,7 +530,7 @@ static int rdt_min_cbm_bits_show(struct kernfs_open_file *of,
                .name           = "cbm_mask",
                .mode           = 0444,
                .kf_ops         = &rdtgroup_kf_single_ops,
-               .seq_show       = rdt_cbm_mask_show,
+               .seq_show       = rdt_no_ctrl_show,
        },
        {
                .name           = "min_cbm_bits",
@@ -803,14 +803,14 @@ static int reset_all_cbms(struct rdt_resource *r)
                cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
 
                for (i = 0; i < r->num_closid; i++)
-                       d->cbm[i] = r->max_cbm;
+                       d->ctrl_val[i] = r->no_ctrl;
        }
        cpu = get_cpu();
        /* Update CBM on this cpu if it's in cpu_mask. */
        if (cpumask_test_cpu(cpu, cpu_mask))
-               rdt_cbm_update(&msr_param);
+               rdt_ctrl_update(&msr_param);
        /* Update CBM on all other cpus in cpu_mask. */
-       smp_call_function_many(cpu_mask, rdt_cbm_update, &msr_param, 1);
+       smp_call_function_many(cpu_mask, rdt_ctrl_update, &msr_param, 1);
        put_cpu();
 
        free_cpumask_var(cpu_mask);
diff --git a/arch/x86/kernel/cpu/intel_rdt_schemata.c b/arch/x86/kernel/cpu/intel_rdt_schemata.c
index f369cb8..c50f742 100644
--- a/arch/x86/kernel/cpu/intel_rdt_schemata.c
+++ b/arch/x86/kernel/cpu/intel_rdt_schemata.c
@@ -38,7 +38,7 @@ static bool cbm_validate(unsigned long var, struct rdt_resource *r)
 {
        unsigned long first_bit, zero_bit;
 
-       if (var == 0 || var > r->max_cbm)
+       if (var == 0 || var > r->no_ctrl)
                return false;
 
        first_bit = find_first_bit(&var, r->cbm_len);
@@ -66,7 +66,7 @@ static int parse_cbm(char *buf, struct rdt_resource *r)
                return ret;
        if (!cbm_validate(data, r))
                return -EINVAL;
-       r->tmp_cbms[r->num_tmp_cbms++] = data;
+       r->tmp_ctrl[r->num_tmp_ctrl++] = data;
 
        return 0;
 }
@@ -116,14 +116,14 @@ static int update_domains(struct rdt_resource *r, int closid)
 
        list_for_each_entry(d, &r->domains, list) {
                cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
-               d->cbm[msr_param.low] = r->tmp_cbms[idx++];
+               d->ctrl_val[msr_param.low] = r->tmp_ctrl[idx++];
        }
        cpu = get_cpu();
        /* Update CBM on this cpu if it's in cpu_mask. */
        if (cpumask_test_cpu(cpu, cpu_mask))
-               rdt_cbm_update(&msr_param);
+               rdt_ctrl_update(&msr_param);
        /* Update CBM on other cpus. */
-       smp_call_function_many(cpu_mask, rdt_cbm_update, &msr_param, 1);
+       smp_call_function_many(cpu_mask, rdt_ctrl_update, &msr_param, 1);
        put_cpu();
 
        free_cpumask_var(cpu_mask);
@@ -155,13 +155,13 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
 
        /* get scratch space to save all the masks while we validate input */
        for_each_enabled_rdt_resource(r) {
-               r->tmp_cbms = kcalloc(r->num_domains, sizeof(*l3_cbms),
+               r->tmp_ctrl = kcalloc(r->num_domains, sizeof(*l3_cbms),
                                      GFP_KERNEL);
-               if (!r->tmp_cbms) {
+               if (!r->tmp_ctrl) {
                        ret = -ENOMEM;
                        goto out;
                }
-               r->num_tmp_cbms = 0;
+               r->num_tmp_ctrl = 0;
        }
 
        while ((tok = strsep(&buf, "\n")) != NULL) {
@@ -187,7 +187,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
 
        /* Did the parser find all the masks we need? */
        for_each_enabled_rdt_resource(r) {
-               if (r->num_tmp_cbms != r->num_domains) {
+               if (r->num_tmp_ctrl != r->num_domains) {
                        ret = -EINVAL;
                        goto out;
                }
@@ -202,8 +202,8 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
 out:
        rdtgroup_kn_unlock(of->kn);
        for_each_enabled_rdt_resource(r) {
-               kfree(r->tmp_cbms);
-               r->tmp_cbms = NULL;
+               kfree(r->tmp_ctrl);
+               r->tmp_ctrl = NULL;
        }
        return ret ?: nbytes;
 }
@@ -217,7 +217,7 @@ static void show_doms(struct seq_file *s, struct rdt_resource *r, int closid)
        list_for_each_entry(dom, &r->domains, list) {
                if (sep)
                        seq_puts(s, ";");
-               seq_printf(s, "%d=%x", dom->id, dom->cbm[closid]);
+               seq_printf(s, "%d=%x", dom->id, dom->ctrl_val[closid]);
                sep = true;
        }
        seq_puts(s, "\n");
-- 
1.9.1

Reply via email to