Re: [Xen-devel] [PATCH] xen/ARM+sched: Don't opencode %pv in printk()'s

2018-09-10 Thread Julien Grall

Hi,

On 30/08/18 13:50, Andrew Cooper wrote:

No functional change.

Signed-off-by: Andrew Cooper 


I have committed the patch.

Cheers,

--
Julien Grall

___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] [PATCH] xen/ARM+sched: Don't opencode %pv in printk()'s

2018-09-06 Thread George Dunlap
On 08/30/2018 01:50 PM, Andrew Cooper wrote:
> No functional change.
> 
> Signed-off-by: Andrew Cooper 

Reviewed-by: George Dunlap 

___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

[Xen-devel] [PATCH] xen/ARM+sched: Don't opencode %pv in printk()'s

2018-08-30 Thread Andrew Cooper
No functional change.

Signed-off-by: Andrew Cooper 
---
CC: Stefano Stabellini 
CC: Julien Grall 
CC: George Dunlap 
CC: Dario Faggioli 
---
 xen/arch/arm/gic-vgic.c | 12 ++--
 xen/common/sched_null.c | 15 ++-
 2 files changed, 12 insertions(+), 15 deletions(-)

diff --git a/xen/arch/arm/gic-vgic.c b/xen/arch/arm/gic-vgic.c
index fd63906..990399c 100644
--- a/xen/arch/arm/gic-vgic.c
+++ b/xen/arch/arm/gic-vgic.c
@@ -94,8 +94,8 @@ void gic_raise_inflight_irq(struct vcpu *v, unsigned int 
virtual_irq)
 }
 #ifdef GIC_DEBUG
 else
-gdprintk(XENLOG_DEBUG, "trying to inject irq=%u into d%dv%d, when it 
is still lr_pending\n",
- virtual_irq, v->domain->domain_id, v->vcpu_id);
+gdprintk(XENLOG_DEBUG, "trying to inject irq=%u into %pv, when it is 
still lr_pending\n",
+ virtual_irq, v);
 #endif
 }
 
@@ -201,8 +201,8 @@ static void gic_update_one_lr(struct vcpu *v, int i)
 gic_hw_ops->write_lr(i, &lr_val);
 }
 else
-gdprintk(XENLOG_WARNING, "unable to inject hw irq=%d into 
d%dv%d: already active in LR%d\n",
- irq, v->domain->domain_id, v->vcpu_id, i);
+gdprintk(XENLOG_WARNING, "unable to inject hw irq=%d into %pv: 
already active in LR%d\n",
+ irq, v, i);
 }
 }
 else if ( lr_val.pending )
@@ -210,8 +210,8 @@ static void gic_update_one_lr(struct vcpu *v, int i)
 int q __attribute__ ((unused)) = 
 test_and_clear_bit(GIC_IRQ_GUEST_QUEUED, &p->status);
 #ifdef GIC_DEBUG
 if ( q )
-gdprintk(XENLOG_DEBUG, "trying to inject irq=%d into d%dv%d, when 
it is already pending in LR%d\n",
-irq, v->domain->domain_id, v->vcpu_id, i);
+gdprintk(XENLOG_DEBUG, "trying to inject irq=%d into %pv, when it 
is already pending in LR%d\n",
+irq, v, i);
 #endif
 }
 else
diff --git a/xen/common/sched_null.c b/xen/common/sched_null.c
index 784db71..7b039b7 100644
--- a/xen/common/sched_null.c
+++ b/xen/common/sched_null.c
@@ -344,7 +344,7 @@ static void vcpu_assign(struct null_private *prv, struct 
vcpu *v,
 v->processor = cpu;
 cpumask_clear_cpu(cpu, &prv->cpus_free);
 
-dprintk(XENLOG_G_INFO, "%d <-- d%dv%d\n", cpu, v->domain->domain_id, 
v->vcpu_id);
+dprintk(XENLOG_G_INFO, "%d <-- %pv\n", cpu, v);
 
 if ( unlikely(tb_init_done) )
 {
@@ -365,7 +365,7 @@ static void vcpu_deassign(struct null_private *prv, struct 
vcpu *v,
 per_cpu(npc, cpu).vcpu = NULL;
 cpumask_set_cpu(cpu, &prv->cpus_free);
 
-dprintk(XENLOG_G_INFO, "%d <-- NULL (d%dv%d)\n", cpu, 
v->domain->domain_id, v->vcpu_id);
+dprintk(XENLOG_G_INFO, "%d <-- NULL (%pv)\n", cpu, v);
 
 if ( unlikely(tb_init_done) )
 {
@@ -460,8 +460,7 @@ static void null_vcpu_insert(const struct scheduler *ops, 
struct vcpu *v)
  */
 spin_lock(&prv->waitq_lock);
 list_add_tail(&nvc->waitq_elem, &prv->waitq);
-dprintk(XENLOG_G_WARNING, "WARNING: d%dv%d not assigned to any CPU!\n",
-v->domain->domain_id, v->vcpu_id);
+dprintk(XENLOG_G_WARNING, "WARNING: %pv not assigned to any CPU!\n", 
v);
 spin_unlock(&prv->waitq_lock);
 }
 spin_unlock_irq(lock);
@@ -649,8 +648,7 @@ static void null_vcpu_migrate(const struct scheduler *ops, 
struct vcpu *v,
 if ( list_empty(&nvc->waitq_elem) )
 {
 list_add_tail(&nvc->waitq_elem, &prv->waitq);
-dprintk(XENLOG_G_WARNING, "WARNING: d%dv%d not assigned to any 
CPU!\n",
-v->domain->domain_id, v->vcpu_id);
+dprintk(XENLOG_G_WARNING, "WARNING: %pv not assigned to any 
CPU!\n", v);
 }
 spin_unlock(&prv->waitq_lock);
 }
@@ -804,8 +802,7 @@ static void null_dump_pcpu(const struct scheduler *ops, int 
cpu)
 cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_core_mask, cpu));
 printk("core=%s", cpustr);
 if ( per_cpu(npc, cpu).vcpu != NULL )
-printk(", vcpu=d%dv%d", per_cpu(npc, cpu).vcpu->domain->domain_id,
-   per_cpu(npc, cpu).vcpu->vcpu_id);
+printk(", vcpu=%pv", per_cpu(npc, cpu).vcpu);
 printk("\n");
 
 /* current VCPU (nothing to say if that's the idle vcpu) */
@@ -870,7 +867,7 @@ static void null_dump(const struct scheduler *ops)
 printk(", ");
 if ( loop % 24 == 0 )
 printk("\n\t");
-printk("d%dv%d", nvc->vcpu->domain->domain_id, nvc->vcpu->vcpu_id);
+printk("%pv", nvc->vcpu);
 }
 printk("\n");
 spin_unlock(&prv->waitq_lock);
-- 
2.1.4


___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel