do_[ld|st]*() and mmu_lookup*() are changed to use CPUState over
CPUArchState, moving the target-dependence to the target-facing
cpu_[ld|st] functions.
Signed-off-by: Anton Johansson <anjo@rev.ng>
---
accel/tcg/cputlb.c | 324 ++---
1 file changed, 161 insertions(+), 163 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 20ea2e2395..ebd174e23d 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1367,11 +1367,10 @@ static void save_iotlb_data(CPUState *cs,
MemoryRegionSection *section,
#endif
}
-static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
+static uint64_t io_readx(CPUState *cpu, CPUTLBEntryFull *full,
int mmu_idx, vaddr addr, uintptr_t retaddr,
MMUAccessType access_type, MemOp op)
{
-CPUState *cpu = env_cpu(env);
hwaddr mr_offset;
MemoryRegionSection *section;
MemoryRegion *mr;
@@ -1408,11 +1407,10 @@ static uint64_t io_readx(CPUArchState *env,
CPUTLBEntryFull *full,
return val;
}
-static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
+static void io_writex(CPUState *cpu, CPUTLBEntryFull *full,
int mmu_idx, uint64_t val, vaddr addr,
uintptr_t retaddr, MemOp op)
{
-CPUState *cpu = env_cpu(env);
hwaddr mr_offset;
MemoryRegionSection *section;
MemoryRegion *mr;
@@ -1776,7 +1774,7 @@ typedef struct MMULookupLocals {
/**
* mmu_lookup1: translate one page
- * @env: cpu context
+ * @cpu: generic cpu state
* @data: lookup parameters
* @mmu_idx: virtual address context
* @access_type: load/store/code
@@ -1787,12 +1785,12 @@ typedef struct MMULookupLocals {
* tlb_fill will longjmp out. Return true if the softmmu tlb for
* @mmu_idx may have resized.
*/
-static bool mmu_lookup1(CPUArchState *env, MMULookupPageData *data,
+static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data,
int mmu_idx, MMUAccessType access_type, uintptr_t ra)
{
vaddr addr = data->addr;
-uintptr_t index = tlb_index(env_cpu(env), mmu_idx, addr);
-CPUTLBEntry *entry = tlb_entry(env_cpu(env), mmu_idx, addr);
+uintptr_t index = tlb_index(cpu, mmu_idx, addr);
+CPUTLBEntry *entry = tlb_entry(cpu, mmu_idx, addr);
uint64_t tlb_addr = tlb_read_idx(entry, access_type);
bool maybe_resized = false;
CPUTLBEntryFull *full;
@@ -1800,17 +1798,17 @@ static bool mmu_lookup1(CPUArchState *env,
MMULookupPageData *data,
/* If the TLB entry is for a different page, reload and try again. */
if (!tlb_hit(tlb_addr, addr)) {
-if (!victim_tlb_hit(env_cpu(env), mmu_idx, index, access_type,
+if (!victim_tlb_hit(cpu, mmu_idx, index, access_type,
addr & TARGET_PAGE_MASK)) {
-tlb_fill(env_cpu(env), addr, data->size, access_type, mmu_idx, ra);
+tlb_fill(cpu, addr, data->size, access_type, mmu_idx, ra);
maybe_resized = true;
-index = tlb_index(env_cpu(env), mmu_idx, addr);
-entry = tlb_entry(env_cpu(env), mmu_idx, addr);
+index = tlb_index(cpu, mmu_idx, addr);
+entry = tlb_entry(cpu, mmu_idx, addr);
}
tlb_addr = tlb_read_idx(entry, access_type) & ~TLB_INVALID_MASK;
}
-full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+full = &cpu_tlb(cpu)->d[mmu_idx].fulltlb[index];
flags = tlb_addr & (TLB_FLAGS_MASK & ~TLB_FORCE_SLOW);
flags |= full->slow_flags[access_type];
@@ -1824,7 +1822,7 @@ static bool mmu_lookup1(CPUArchState *env,
MMULookupPageData *data,
/**
* mmu_watch_or_dirty
- * @env: cpu context
+ * @cpu: generic cpu state
* @data: lookup parameters
* @access_type: load/store/code
* @ra: return address into tcg generated code, or 0
@@ -1832,7 +1830,7 @@ static bool mmu_lookup1(CPUArchState *env,
MMULookupPageData *data,
* Trigger watchpoints for @data.addr:@data.size;
* record writes to protected clean pages.
*/
-static void mmu_watch_or_dirty(CPUArchState *env, MMULookupPageData *data,
+static void mmu_watch_or_dirty(CPUState *cpu, MMULookupPageData *data,
MMUAccessType access_type, uintptr_t ra)
{
CPUTLBEntryFull *full = data->full;
@@ -1843,13 +1841,13 @@ static void mmu_watch_or_dirty(CPUArchState *env,
MMULookupPageData *data,
/* On watchpoint hit, this will longjmp out. */
if (flags & TLB_WATCHPOINT) {
int wp = access_type == MMU_DATA_STORE ? BP_MEM_WRITE : BP_MEM_READ;
-cpu_check_watchpoint(env_cpu(env), addr, size, full->attrs, wp, ra);
+cpu_check_watchpoint(cpu, addr, size, full->attrs, wp, ra);
flags &= ~TLB_WATCHPOINT;
}
/* Note that notdirty is only set for writes. */
if (flags & TLB_NOTDIRTY) {
-notdirty_write(env_cpu(env), addr, size, full, ra);
+notdirty_write(cpu, addr, size, full, ra);
flags &= ~TLB_NOTDIRTY;