Re: [PATCH v4 04/57] accel/tcg: Reorg system mode load helpers


From: Peter Maydell
Subject: Re: [PATCH v4 04/57] accel/tcg: Reorg system mode load helpers
Date: Thu, 4 May 2023 16:39:12 +0100

On Wed, 3 May 2023 at 08:11, Richard Henderson
<richard.henderson@linaro.org> wrote:
>
> Instead of trying to unify all operations on uint64_t, pull out
> mmu_lookup() to perform the basic tlb hit and resolution.
> Create individual functions to handle access by size.
>
> Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

> +/**
> + * mmu_watch_or_dirty
> + * @env: cpu context
> + * @data: lookup parameters
> + * @access_type: load/store/code
> + * @ra: return address into tcg generated code, or 0
> + *
> + * Trigger watchpoints for @data.addr:@data.size;
> + * record writes to protected clean pages.
> + */
> +static void mmu_watch_or_dirty(CPUArchState *env, MMULookupPageData *data,
> +                               MMUAccessType access_type, uintptr_t ra)
> +{
> +    CPUTLBEntryFull *full = data->full;
> +    target_ulong addr = data->addr;
> +    int flags = data->flags;
> +    int size = data->size;
> +
> +    /* On watchpoint hit, this will longjmp out.  */
> +    if (flags & TLB_WATCHPOINT) {
> +        int wp = access_type == MMU_DATA_STORE ? BP_MEM_WRITE : BP_MEM_READ;
> +        cpu_check_watchpoint(env_cpu(env), addr, size, full->attrs, wp, ra);
> +        flags &= ~TLB_WATCHPOINT;
> +    }
> +
> +    if (flags & TLB_NOTDIRTY) {
> +        notdirty_write(env_cpu(env), addr, size, full, ra);
> +        flags &= ~TLB_NOTDIRTY;
> +    }

Maybe worth a comment that the NOTDIRTY flag is only ever
set for the addr_write TLB entry? This confused me for a bit...
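
For context, a sketch of the kind of clarifying comment that could go there,
assuming TLB_NOTDIRTY really is only ever set on the addr_write entry as
described (illustrative only, not part of the patch):

    /*
     * TLB_NOTDIRTY is only ever set in the addr_write TLB entry, so this
     * branch is never taken for loads; it is here only because this helper
     * is shared between the load and store paths.
     */
    if (flags & TLB_NOTDIRTY) {
        notdirty_write(env_cpu(env), addr, size, full, ra);
        flags &= ~TLB_NOTDIRTY;
    }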

> +    data->flags = flags;
> +}
> +
> +/**
> + * mmu_lookup: translate page(s)
> + * @env: cpu context
> + * @addr: virtual address
> + * @oi: combined mmu_idx and MemOp
> + * @ra: return address into tcg generated code, or 0
> + * @access_type: load/store/code
> + * @l: output result
> + *
> + * Resolve the translation for the page(s) beginning at @addr, for MemOp.size
> + * bytes.  Return true if the lookup crosses a page boundary.
> + */
> +static bool mmu_lookup(CPUArchState *env, target_ulong addr, MemOpIdx oi,
> +                       uintptr_t ra, MMUAccessType type, MMULookupLocals *l)
> +{
> +    unsigned a_bits;
> +    bool crosspage;
> +    int flags;
> +
> +    l->memop = get_memop(oi);
> +    l->mmu_idx = get_mmuidx(oi);
> +
> +    tcg_debug_assert(l->mmu_idx < NB_MMU_MODES);
> +
> +    /* Handle CPU specific unaligned behaviour */
> +    a_bits = get_alignment_bits(l->memop);
> +    if (addr & ((1 << a_bits) - 1)) {
> +        cpu_unaligned_access(env_cpu(env), addr, type, l->mmu_idx, ra);
> +    }
> +
> +    l->page[0].addr = addr;
> +    l->page[0].size = memop_size(l->memop);
> +    l->page[1].addr = (addr + l->page[0].size - 1) & TARGET_PAGE_MASK;
> +    l->page[1].size = 0;
> +    crosspage = (addr ^ l->page[1].addr) & TARGET_PAGE_MASK;
> +
> +    if (likely(!crosspage)) {
> +        mmu_lookup1(env, &l->page[0], l->mmu_idx, type, ra);
> +
> +        flags = l->page[0].flags;
> +        if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) {
> +            mmu_watch_or_dirty(env, &l->page[0], type, ra);
> +        }
> +        if (unlikely(flags & TLB_BSWAP)) {
> +            l->memop ^= MO_BSWAP;
> +        }
> +    } else {
> +        /* Finish compute of page crossing. */
> +        int size1 = l->page[1].addr - addr;
> +        l->page[1].size = l->page[0].size - size1;
> +        l->page[0].size = size1;

Should this local variable be named "size0"? It seems to be
the size of the access done to page[0], not the size of the
access to page[1].
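
For what it's worth, a worked example of the split with that rename, assuming
4 KiB target pages (hypothetical values, just to show what the variable holds):

    /*
     * addr = 0x1ffe, 4-byte access, 4 KiB pages:
     *   page[1].addr = (0x1ffe + 4 - 1) & TARGET_PAGE_MASK = 0x2000
     *   size0        = 0x2000 - 0x1ffe = 2   -> becomes page[0].size
     *   page[1].size = 4 - 2 = 2
     * so the local really is the number of bytes accessed in page[0].
     */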

> +
> +        /*
> +         * Lookup both pages, recognizing exceptions from either.  If the
> +         * second lookup potentially resized, refresh first CPUTLBEntryFull.
> +         */
> +        mmu_lookup1(env, &l->page[0], l->mmu_idx, type, ra);
> +        if (mmu_lookup1(env, &l->page[1], l->mmu_idx, type, ra)) {
> +            uintptr_t index = tlb_index(env, l->mmu_idx, addr);
> +            l->page[0].full = &env_tlb(env)->d[l->mmu_idx].fulltlb[index];
> +        }
> +
> +        flags = l->page[0].flags | l->page[1].flags;
> +        if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) {
> +            mmu_watch_or_dirty(env, &l->page[0], type, ra);
> +            mmu_watch_or_dirty(env, &l->page[1], type, ra);
> +        }
> +
> +        /*
> +         * Since target/sparc is the only user of TLB_BSWAP, and all
> +         * Sparc accesses are aligned, any treatment across two pages
> +         * would be arbitrary.  Refuse it until there's a use.
> +         */
> +        tcg_debug_assert((flags & TLB_BSWAP) == 0);
> +    }
> +
> +    return crosspage;
> +}


> +/**
> + * do_ld_mmio_beN:
> + * @env: cpu context
> + * @p: translation parameters
> + * @ret_be: accumulated data
> + * @mmu_idx: virtual address context
> + * @ra: return address into tcg generated code, or 0
> + *
> + * Load @p->size bytes from @p->addr, which is memory-mapped i/o.
> + * The bytes are concatenated with in big-endian order with @ret_be.

"in big-endian order with"

> + */
> +static uint64_t do_ld_mmio_beN(CPUArchState *env, MMULookupPageData *p,
> +                               uint64_t ret_be, int mmu_idx,
> +                               MMUAccessType type, uintptr_t ra)
>  {
> -    validate_memop(oi, MO_UB);
> -    return load_helper(env, addr, oi, retaddr, MO_UB, MMU_DATA_LOAD,
> -                       full_ldub_mmu);
> +    CPUTLBEntryFull *full = p->full;
> +    target_ulong addr = p->addr;
> +    int i, size = p->size;
> +
> +    QEMU_IOTHREAD_LOCK_GUARD();
> +    for (i = 0; i < size; i++) {
> +        uint8_t x = io_readx(env, full, mmu_idx, addr + i, ra, type, MO_UB);
> +        ret_be = (ret_be << 8) | x;
> +    }
> +    return ret_be;
> +}
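
As a side note, the accumulation can be seen in isolation with a tiny
standalone sketch (hypothetical byte values; nothing QEMU-specific beyond
the same shift-and-or step as the loop above):

    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        /* pretend these came back from three single-byte MMIO reads */
        const uint8_t bytes[] = { 0x12, 0x34, 0x56 };
        uint64_t ret_be = 0;

        for (int i = 0; i < 3; i++) {
            /* same step as in do_ld_mmio_beN: earlier bytes shift left */
            ret_be = (ret_be << 8) | bytes[i];
        }
        printf("0x%" PRIx64 "\n", ret_be);  /* prints 0x123456 */
        return 0;
    }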

Otherwise
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>

thanks
-- PMM


