On 11/03/2014 12:36 PM, Radim Krčmář wrote:
> 2014-10-31 12:05-0400, Wei Huang:
>> This patch converts existing pmu.c into Intel specific code and hooks
>> up with the PMU interface using the following steps:
>>
>> - Convert pmu.c to pmu_intel.c; All public PMU functions are renamed
>>   and hooked up with the newly defined intel_pmu_ops.
>> - Create a corresponding pmu_amd.c file with empty functions for AMD
>>   arch.
>> - The PMU function pointer, kvm_pmu_ops, is initialized by calling
>>   kvm_x86_ops->get_pmu_ops().
>> - To reduce the code size, Intel and AMD modules are now generated
>>   from their corresponding arch and PMU files; In the meantime, due
>>   to this arrangement, several functions are exposed as public ones
>>   to allow calling from PMU code.
> 
> (Patch would have been easier to review as two patches, where the first
>  one just renames pmu to pmu_intel and the second one does the split.)

Will do in next spin.

> 
>> Signed-off-by: Wei Huang <w...@redhat.com>
>> ---
> 
> ---
> The rest is an idea for consideration ...
> Please consider everything from now on to be in parentheses
> = ignore if you are not enjoying shuffling code around.
> 
>> +
>> +static unsigned find_arch_event(struct kvm_pmu *pmu, u8 event_select,
>> +            u8 unit_mask)
> 
> The prototype could be extended with
>   struct kvm_event_hw_type_mapping, size_t nr_events
> and reused for AMD as well.
> 
>> +{
>> +    int i;
>> +
>> +    for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
>> +            if (intel_arch_events[i].eventsel == event_select
>> +                            && intel_arch_events[i].unit_mask == unit_mask
>> +                            && (pmu->available_event_types & (1 << i)))
> 
> pmu->available_event_types would be -1 on AMD.
> 
>> +                    break;
>> +
>> +    if (i == ARRAY_SIZE(intel_arch_events))
>> +            return PERF_COUNT_HW_MAX;
>> +
>> +    return intel_arch_events[i].event_type;
>> +}
> 
>> +static void reprogram_counter(struct kvm_pmc *pmc, u32 type,
>> +            unsigned config, bool exclude_user, bool exclude_kernel,
>> +            bool intr, bool in_tx, bool in_tx_cp)
> 
> This function could drop 'bool in_tx' and 'bool in_tx_cp', because it
> already accepts config, so these flags can already be included there.
> It has only one caller that uses them anyway.
> 
>> +{
>> +    struct perf_event *event;
>> +    struct perf_event_attr attr = {
>> +            .type = type,
>> +            .size = sizeof(attr),
>> +            .pinned = true,
>> +            .exclude_idle = true,
>> +            .exclude_host = 1,
>> +            .exclude_user = exclude_user,
>> +            .exclude_kernel = exclude_kernel,
>> +            .config = config,
>> +    };
>> +    if (in_tx)
>> +            attr.config |= HSW_IN_TX;
>> +    if (in_tx_cp)
>> +            attr.config |= HSW_IN_TX_CHECKPOINTED;
> 
> And after dropping this, it is identical to AMD.

Thanks. I will consolidate the ideas above into next spin.

> 
>> +
>> +    attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);
>> +
>> +    event = perf_event_create_kernel_counter(&attr, -1, current,
>> +                                             intr ? kvm_perf_overflow_intr :
>> +                                             kvm_perf_overflow, pmc);
>> +    if (IS_ERR(event)) {
>> +            printk_once("kvm: pmu event creation failed %ld\n",
>> +                            PTR_ERR(event));
>> +            return;
>> +    }
>> +
>> +    pmc->perf_event = event;
>> +    clear_bit(pmc->idx, (unsigned long*)&pmc->vcpu->arch.pmu.reprogram_pmi);
>> +}
>> +
>> +static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
>> +{
> 
> Because the two functions that this one uses have been merged, we could
> propagate the changes and reuse this one with AMD as well.
> 
>> +    unsigned config, type = PERF_TYPE_RAW;
>> +    u8 event_select, unit_mask;
>> +
>> +    if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
>> +            printk_once("kvm pmu: pin control bit is ignored\n");
>> +
>> +    pmc->eventsel = eventsel;
>> +
>> +    stop_counter(pmc);
>> +
>> +    if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_enabled(pmc))
>> +            return;
>> +
>> +    event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
>> +    unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
>> +
>> +    if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
>> +                            ARCH_PERFMON_EVENTSEL_INV |
>> +                            ARCH_PERFMON_EVENTSEL_CMASK |
>> +                            HSW_IN_TX |
>> +                            HSW_IN_TX_CHECKPOINTED))) {
>> +            config = find_arch_event(&pmc->vcpu->arch.pmu, event_select,
>> +                            unit_mask);
>> +            if (config != PERF_COUNT_HW_MAX)
>> +                    type = PERF_TYPE_HARDWARE;
>> +    }
>> +
>> +    if (type == PERF_TYPE_RAW)
>> +            config = eventsel & X86_RAW_EVENT_MASK;
>> +
>> +    reprogram_counter(pmc, type, config,
>> +                    !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
>> +                    !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
>> +                    eventsel & ARCH_PERFMON_EVENTSEL_INT,
>> +                    (eventsel & HSW_IN_TX),
>> +                    (eventsel & HSW_IN_TX_CHECKPOINTED));
>> +}
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to