Most I/O in the kernel is done using the kernel offset mapping.
However, there is one API that uses aliased kernel address ranges:
> The final category of APIs is for I/O to deliberately aliased address
> ranges inside the kernel. Such aliases are set up by use of the
> vmap/vmalloc API. Since kernel I/O goes via physical pages, the I/O
> subsystem assumes that the user mapping and kernel offset mapping are
> the only aliases. This isn't true for vmap aliases, so anything in
> the kernel trying to do I/O to vmap areas must manually manage
> coherency. It must do this by flushing the vmap range before doing
> I/O and invalidating it after the I/O returns.
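As a rough sketch of what that manual coherency management looks like, using
the kernel's flush_kernel_vmap_range()/invalidate_kernel_vmap_range() helpers
(buf, len and the DMA helpers below are illustrative only, not part of this
patch):

    void *buf = vmalloc(len);

    /* device is about to read the buffer: write dirty cache lines back */
    flush_kernel_vmap_range(buf, len);
    dma_to_device(buf, len);                /* hypothetical transfer */

    /* device has written into the buffer: discard stale cache lines */
    dma_from_device(buf, len);
    invalidate_kernel_vmap_range(buf, len);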
For this reason, we should use the hardware lpa instruction to load the
physical address of kernel virtual addresses in the driver code: unlike
virt_to_phys(), which is only valid for the kernel offset mapping, lpa asks
the MMU for the translation that is actually installed, so it also works
for vmap/vmalloc aliases.
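A quick illustration of the difference (not part of the patch; p is just an
example vmalloc'ed buffer):

    void *p = vmalloc(PAGE_SIZE);

    virt_to_phys(p);        /* wrong for vmalloc: only subtracts the offset-map base */
    lpa((unsigned long)p);  /* queries the MMU, so it is correct for vmap aliases too */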
I believe we only use the vmap/vmalloc API with old PA 1.x processors,
which don't have an SBA, so in practice we don't hit this problem.
Tested on c3750, c8000 and rp3440.
Signed-off-by: John David Anglin <dave.anglin@bell.net>
Signed-off-by: Helge Deller <deller@gmx.de>
#ifndef __PARISC_SPECIAL_INSNS_H
#define __PARISC_SPECIAL_INSNS_H
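+/*
+ * lpa(va): use the PA-RISC "Load Physical Address" instruction to
+ * translate a kernel virtual address into its physical address.
+ */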
+#define lpa(va) ({ \
+ unsigned long pa; \
+ __asm__ __volatile__( \
+ "copy %%r0,%0\n\t" \
+ "lpa %%r0(%1),%0" \
+ : "=r" (pa) \
+ : "r" (va) \
+ : "memory" \
+ ); \
+ pa; \
+})
+
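+/*
+ * lpa_user(va): as lpa(), but translate a user-space virtual address
+ * (accessed through space register %sr3).
+ */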
+#define lpa_user(va) ({ \
+ unsigned long pa; \
+ __asm__ __volatile__( \
+ "copy %%r0,%0\n\t" \
+ "lpa %%r0(%%sr3,%1),%0" \
+ : "=r" (pa) \
+ : "r" (va) \
+ : "memory" \
+ ); \
+ pa; \
+})
+
#define mfctl(reg) ({ \
unsigned long cr; \
__asm__ __volatile__( \
** "hints" parm includes the VALID bit!
** "dep" clobbers the physical address offset bits as well.
*/
- pa = virt_to_phys(vba);
+ pa = lpa(vba);
asm volatile("depw %1,31,12,%0" : "+r" (pa) : "r" (hints));
((u32 *)pdir_ptr)[1] = (u32) pa;
u64 pa; /* physical address */
register unsigned ci; /* coherent index */
- pa = virt_to_phys(vba);
+ pa = lpa(vba);
pa &= IOVP_MASK;
asm("lci 0(%1), %0" : "=r" (ci) : "r" (vba));