From: Suraj Jitindar Singh
Subject: Re: [Qemu-ppc] [QEMU-ppc for-2.10][PATCH V4 5/6] target/ppc: Implement ISA V3.00 radix page fault handler
Date: Wed, 19 Apr 2017 11:55:23 +1000

On Wed, 2017-04-19 at 11:46 +1000, David Gibson wrote:
> On Tue, Apr 18, 2017 at 04:45:29PM +1000, Suraj Jitindar Singh wrote:
> > 
> > On Tue, 2017-04-18 at 15:51 +1000, David Gibson wrote:
> > > 
> > > On Thu, Apr 13, 2017 at 04:02:39PM +1000, Suraj Jitindar Singh wrote:
> [snip]
> > 
> > > 
> > > > 
> > > > +static bool ppc_radix64_check_prot(PowerPCCPU *cpu, int rwx, uint64_t pte,
> > > > +                                   int *fault_cause, int *prot)
> > > > +{
> > > > +    CPUPPCState *env = &cpu->env;
> > > > +    const int need_prot[] = { PAGE_READ, PAGE_WRITE, PAGE_EXEC };
> > > > +
> > > > +    /* Check Page Attributes (pte58:59) */
> > > > +    if (((pte & R_PTE_ATT) == R_PTE_ATT_G) && (rwx == 2)) {
> > > I think you just want (pte & R_PTE_ATT_G) here.  I don't know what
> > > the other attribute is for, but I'm guessing its presence won't mean
> > > execution of guarded storage becomes allowed.
> > So the four possible combinations of these two bits each map to a
> > distinct hash-like WIMG value in a rather non-obvious way. It happens
> > that the R_PTE_ATT_G value is the only one which leads to the guard
> > bit being set.
> > 
> > I'm not sure which combinations we can even get in tcg, but according
> > to the ISA what I have implemented is the correct behaviour (it states
> > that this value in the PTE_ATT bits results in no-execute storage).
> Ah... right.  So now I partially understand what the "decode" stuff
> was about in the previous version.
> 
> So the logic is correct.  However, it's kind of confusing to anyone
> familiar with previous MMUs, who will see a constant named ..._G and
> assume it's simply a Guard bit, independent of those around it.
> 
> <looks at the V3 architecture docs...>
> 
> I think the way to go here is to make new constants for the various
> attribute values based on the names used in the architecture, rather
> than the older WIMG names.  So, say
> 
>       R_PTE_ATT_NORMAL
>       R_PTE_ATT_SAO
>       R_PTE_ATT_IO
>       R_PTE_ATT_TOLERANT_IO

Sounds good, will do :)
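
For reference, I was thinking of something like the following (naming taken
from your list above; the value ordering is my reading of the ISA V3.00 ATT
encoding and still needs double-checking against the spec, with the current
R_PTE_ATT_G slot becoming the non-idempotent I/O value):

    #define R_PTE_ATT               0x0000000000000030
    #define R_PTE_ATT_NORMAL        0x0000000000000000
    #define R_PTE_ATT_SAO           0x0000000000000010
    #define R_PTE_ATT_IO            0x0000000000000020 /* was R_PTE_ATT_G */
    #define R_PTE_ATT_TOLERANT_IO   0x0000000000000030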

> 
> If we do need a new-attribute-to-wimg conversion function at some
> point we can do that; for now we can just use the logic from this
> patch, translated to the new names.
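
Yep. And just so we're on the same page, with the renamed constants the
guarded-storage check in ppc_radix64_check_prot() would read roughly as
below. This is only a sketch: ppc_radix64_att_is_guarded() is a hypothetical
helper, and it assumes (per the discussion above) that the non-idempotent
I/O encoding is the only one which implies guarded, and therefore
no-execute, storage.

    /* Sketch: only the non-idempotent I/O attribute encoding is guarded */
    static inline bool ppc_radix64_att_is_guarded(uint64_t pte)
    {
        return (pte & R_PTE_ATT) == R_PTE_ATT_IO;
    }

    ...

    /* Check Page Attributes (pte58:59) */
    if (ppc_radix64_att_is_guarded(pte) && (rwx == 2)) {
        /* Guarded Storage is no-execute */
        *fault_cause |= SRR1_NOEXEC_GUARD;
        return true;
    }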
> 
> > 
> > 
> > > 
> > > 
> > > > 
> > > > 
> > > > +        /* Guarded Storage */
> > > > +        *fault_cause |= SRR1_NOEXEC_GUARD;
> > > > +        return true;
> > > > +    }
> > > > +
> > > > +    /* Determine permissions allowed by Encoded Access Authority */
> > > > +    if ((pte & R_PTE_EAA_PRIV) && msr_pr) { /* Insufficient Privilege */
> > > > +        *prot = 0;
> > > > +    } else if (msr_pr || (pte & R_PTE_EAA_PRIV)) {
> > > > +        *prot = ppc_radix64_get_prot_eaa(pte);
> > > > +    } else { /* !msr_pr && !(pte & R_PTE_EAA_PRIV) */
> > > > +        *prot = ppc_radix64_get_prot_eaa(pte);
> > > > +        *prot &= ppc_radix64_get_prot_amr(cpu); /* Least combined permissions */
> > > > +    }
> > > > +
> > > > +    /* Check if requested access type is allowed */
> > > > +    if (need_prot[rwx] & ~(*prot)) { /* Page Protected for that Access */
> > > > +        *fault_cause |= DSISR_PROTFAULT;
> > > > +        return true;
> > > > +    }
> > > > +
> > > > +    return false;
> > > > +}
> > > > +
> > > > +static void ppc_radix64_set_rc(PowerPCCPU *cpu, int rwx, uint64_t pte,
> > > > +                               hwaddr pte_addr, int *prot)
> > > > +{
> > > > +    CPUState *cs = CPU(cpu);
> > > > +    uint64_t npte;
> > > > +
> > > > +    npte = pte | R_PTE_R; /* Always set reference bit */
> > > > +
> > > > +    if (rwx == 1) { /* Store/Write */
> > > > +        npte |= R_PTE_C; /* Set change bit */
> > > > +    } else {
> > > > +        /*
> > > > +         * Treat the page as read-only for now, so that a later write
> > > > +         * will pass through this function again to set the C bit.
> > > > +         */
> > > > +        *prot &= ~PAGE_WRITE;
> > > > +    }
> > > > +
> > > > +    if (pte ^ npte) { /* If pte has changed then write it back */
> > > > +        stq_phys(cs->as, pte_addr, npte);
> > > > +    }
> > > > +}
> > > > +
> > > > +static uint64_t ppc_radix64_walk_tree(PowerPCCPU *cpu, int rwx, vaddr eaddr,
> > > > +                                      uint64_t base_addr, uint64_t nls,
> > > > +                                      hwaddr *raddr, int *psize,
> > > > +                                      int *fault_cause, int *prot,
> > > > +                                      hwaddr *pte_addr)
> > > > +{
> > > > +    CPUState *cs = CPU(cpu);
> > > > +    uint64_t index, pde;
> > > > +
> > > > +    if (nls < 5) { /* Directory maps less than 2**5 entries */
> > > > +        *fault_cause |= DSISR_R_BADCONFIG;
> > > > +        return 0;
> > > > +    }
> > > > +
> > > > +    /* Read page <directory/table> entry from guest address space */
> > > > +    index = eaddr >> (*psize - nls); /* Shift */
> > > > +    index &= ((1UL << nls) - 1); /* Mask */
> > > > +    pde = ldq_phys(cs->as, base_addr + (index * sizeof(pde)));
> > > > +    if (!(pde & R_PTE_VALID)) { /* Invalid Entry */
> > > > +        *fault_cause |= DSISR_NOPTE;
> > > > +        return 0;
> > > > +    }
> > > > +
> > > > +    *psize -= nls;
> > > > +
> > > > +    /* Check if Leaf Entry -> Page Table Entry -> Stop the Search */
> > > > +    if (pde & R_PTE_LEAF) {
> > > > +        uint64_t rpn = pde & R_PTE_RPN;
> > > > +        uint64_t mask = (1UL << *psize) - 1;
> > > > +
> > > > +        if (ppc_radix64_check_prot(cpu, rwx, pde, fault_cause, prot)) {
> > > > +            return 0; /* Protection Denied Access */
> > > > +        }
> > > > +
> > > > +        /* Or high bits of rpn and low bits to ea to form whole real addr */
> > > > +        *raddr = (rpn & ~mask) | (eaddr & mask);
> > > > +        *pte_addr = base_addr + (index * sizeof(pde));
> > > > +        return pde;
> > > > +    }
> > > > +
> > > > +    /* Next Level of Radix Tree */
> > > > +    return ppc_radix64_walk_tree(cpu, rwx, eaddr, pde & R_PDE_NLB,
> > > > +                                 pde & R_PDE_NLS, raddr, psize,
> > > > +                                 fault_cause, prot, pte_addr);
> > > > +}
> > > > +
> > > > +int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
> > > > +                                 int mmu_idx)
> > > > +{
> > > > +    CPUState *cs = CPU(cpu);
> > > > +    CPUPPCState *env = &cpu->env;
> > > > +    PPCVirtualHypervisorClass *vhc =
> > > > +        PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
> > > > +    hwaddr raddr, pte_addr;
> > > > +    uint64_t lpid = 0, pid = 0, offset, size, patbe, prtbe0, pte;
> > > > +    int page_size, prot, fault_cause = 0;
> > > > +
> > > > +    assert((rwx == 0) || (rwx == 1) || (rwx == 2));
> > > > +    assert(!msr_hv); /* For now there is no Radix PowerNV Support */
> > > > +    assert(cpu->vhyp);
> > > > +    assert(ppc64_use_proc_tbl(cpu));
> > > > +
> > > > +    /* Real Mode Access */
> > > > +    if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
> > > > +        /* In real mode top 4 effective addr bits (mostly) ignored */
> > > > +        raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
> > > > +
> > > > +        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
> > > > +                     PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
> > > > +                     TARGET_PAGE_SIZE);
> > > > +        return 0;
> > > > +    }
> > > > +
> > > > +    /* Virtual Mode Access - get the fully qualified address */
> > > > +    if (!ppc_radix64_get_fully_qualified_addr(env, eaddr, &lpid, &pid)) {
> > > > +        ppc_radix64_raise_segi(cpu, rwx, eaddr);
> > > > +        return 1;
> > > > +    }
> > > > +
> > > > +    /* Get Process Table */
> > > > +    patbe = vhc->get_patbe(cpu->vhyp);
> > > > +
> > > > +    /* Index Process Table by PID to Find Corresponding Process Table Entry */
> > > > +    offset = pid * sizeof(struct prtb_entry);
> > > > +    size = 1ULL << ((patbe & PATBE1_R_PRTS) + 12);
> > > > +    if (offset >= size) {
> > > > +        /* offset exceeds size of the process table */
> > > > +        ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE);
> > > > +        return 1;
> > > > +    }
> > > > +    prtbe0 = ldq_phys(cs->as, (patbe & PATBE1_R_PRTB) + offset);
> > > > +
> > > > +    /* Walk Radix Tree from Process Table Entry to Convert EA to RA */
> > > > +    page_size = PRTBE_R_GET_RTS(prtbe0);
> > > > +    pte = ppc_radix64_walk_tree(cpu, rwx, eaddr & R_EADDR_MASK,
> > > > +                                prtbe0 & PRTBE_R_RPDB, prtbe0 & PRTBE_R_RPDS,
> > > > +                                &raddr, &page_size, &fault_cause, &prot,
> > > > +                                &pte_addr);
> > > > +    if (!pte) {
> > > > +        ppc_radix64_raise_si(cpu, rwx, eaddr, fault_cause);
> > > > +        return 1;
> > > > +    }
> > > > +
> > > > +    /* Update Reference and Change Bits */
> > > > +    ppc_radix64_set_rc(cpu, rwx, pte, pte_addr, &prot);
> > > > +
> > > > +    tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
> > > > +                 prot, mmu_idx, 1UL << page_size);
> > > > +    return 1;
> > > > +}
> > > > diff --git a/target/ppc/mmu-radix64.h b/target/ppc/mmu-radix64.h
> > > > new file mode 100644
> > > > index 0000000..adc9e22
> > > > --- /dev/null
> > > > +++ b/target/ppc/mmu-radix64.h
> > > > @@ -0,0 +1,68 @@
> > > > +#ifndef MMU_RADIX64_H
> > > > +#define MMU_RADIX64_H
> > > > +
> > > > +#ifndef CONFIG_USER_ONLY
> > > > +
> > > > +/* Radix Quadrants */
> > > > +#define R_EADDR_MASK            0x3FFFFFFFFFFFFFFF
> > > > +#define R_EADDR_QUADRANT        0xC000000000000000
> > > > +#define R_EADDR_QUADRANT0       0x0000000000000000
> > > > +#define R_EADDR_QUADRANT1       0x4000000000000000
> > > > +#define R_EADDR_QUADRANT2       0x8000000000000000
> > > > +#define R_EADDR_QUADRANT3       0xC000000000000000
> > > > +
> > > > +/* Radix Partition Table Entry Fields */
> > > > +#define PATBE1_R_PRTB           0x0FFFFFFFFFFFF000
> > > > +#define PATBE1_R_PRTS           0x000000000000001F
> > > > +
> > > > +/* Radix Process Table Entry Fields */
> > > > +#define PRTBE_R_GET_RTS(rts)    (((rts >> 58) & 0x18) | ((rts >> 5) & 0x7)) + 31
> > > > +#define PRTBE_R_RPDB            0x0FFFFFFFFFFFFF00
> > > > +#define PRTBE_R_RPDS            0x000000000000001F
> > > > +
> > > > +/* Radix Page Directory/Table Entry Fields */
> > > > +#define R_PTE_VALID             0x8000000000000000
> > > > +#define R_PTE_LEAF              0x4000000000000000
> > > > +#define R_PTE_SW0               0x2000000000000000
> > > > +#define R_PTE_RPN               0x01FFFFFFFFFFF000
> > > > +#define R_PTE_SW1               0x0000000000000E00
> > > > +#define R_GET_SW(sw)            (((sw >> 58) & 0x8) | ((sw >> 9) & 0x7))
> > > > +#define R_PTE_R                 0x0000000000000100
> > > > +#define R_PTE_C                 0x0000000000000080
> > > > +#define R_PTE_ATT               0x0000000000000030
> > > > +#define R_PTE_ATT_G             0x0000000000000020
> > > > +#define R_PTE_EAA_PRIV          0x0000000000000008
> > > > +#define R_PTE_EAA_R             0x0000000000000004
> > > > +#define R_PTE_EAA_RW            0x0000000000000002
> > > > +#define R_PTE_EAA_X             0x0000000000000001
> > > > +#define R_PDE_NLB               PRTBE_R_RPDB
> > > > +#define R_PDE_NLS               PRTBE_R_RPDS
> > > > +
> > > > +#ifdef TARGET_PPC64
> > > > +
> > > > +int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
> > > > +                                 int mmu_idx);
> > > > +
> > > > +static inline int ppc_radix64_get_prot_eaa(uint64_t pte)
> > > > +{
> > > > +    return (pte & R_PTE_EAA_R ? PAGE_READ : 0) |
> > > > +           (pte & R_PTE_EAA_RW ? PAGE_READ | PAGE_WRITE : 0) |
> > > > +           (pte & R_PTE_EAA_X ? PAGE_EXEC : 0);
> > > > +}
> > > > +
> > > > +static inline int ppc_radix64_get_prot_amr(PowerPCCPU *cpu)
> > > > +{
> > > > +    CPUPPCState *env = &cpu->env;
> > > > +    int amr = env->spr[SPR_AMR] >> 62; /* We only care about key0 AMR63:62 */
> > > > +    int iamr = env->spr[SPR_IAMR] >> 62; /* We only care about key0 IAMR63:62 */
> > > > +
> > > > +    return (amr & 0x2 ? 0 : PAGE_WRITE) | /* Access denied if bit is set */
> > > > +           (amr & 0x1 ? 0 : PAGE_READ) |
> > > > +           (iamr & 0x1 ? 0 : PAGE_EXEC);
> > > > +}
> > > > +
> > > > +#endif /* TARGET_PPC64 */
> > > > +
> > > > +#endif /* CONFIG_USER_ONLY */
> > > > +
> > > > +#endif /* MMU_RADIX64_H */


