/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Generic I/O functions.
 *
 * Copyright (c) 2016 Imagination Technologies Ltd.
 */

#ifndef __ASM_GENERIC_IO_H__
#define __ASM_GENERIC_IO_H__

/*
 * This file should be included at the end of each architecture-specific
 * asm/io.h such that we may provide generic implementations without
 * conflicting with architecture-specific code.
 */
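
/*
 * A typical architecture-specific asm/io.h therefore ends with:
 *
 *	#include <asm-generic/io.h>
 */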

#ifndef __ASSEMBLY__

/**
 * phys_to_virt() - Return a virtual address mapped to a given physical address
 * @paddr: the physical address
 *
 * Returns a virtual address through which the CPU can access the memory at
 * physical address @paddr. This should only be used where it is known that no
 * dynamic mapping is required. In general, map_physmem() should be used
 * instead.
 *
 * Returns: a virtual address which maps to @paddr
 */
#ifndef phys_to_virt
static inline void *phys_to_virt(phys_addr_t paddr)
{
	return (void *)(unsigned long)paddr;
}
#endif

/**
 * virt_to_phys() - Return the physical address that a virtual address maps to
 * @vaddr: the virtual address
 *
 * Returns the physical address which the CPU-accessible virtual address @vaddr
 * maps to.
 *
 * Returns: the physical address which @vaddr maps to
 */
#ifndef virt_to_phys
static inline phys_addr_t virt_to_phys(void *vaddr)
{
	return (phys_addr_t)((unsigned long)vaddr);
}
#endif
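
/*
 * Example (a minimal sketch; dma_buf is a hypothetical buffer): with the
 * identity mappings above, the two conversions are simple casts and
 * round-trip cleanly:
 *
 *	static u8 dma_buf[64];
 *	phys_addr_t pa = virt_to_phys(dma_buf);
 *	u8 *va = phys_to_virt(pa);
 *
 * Here pa could be handed to a DMA engine, while va compares equal to
 * dma_buf.
 */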

/*
 * Flags for use with map_physmem() & unmap_physmem(). Architectures need not
 * support all of these, in which case they will be defined as zero here &
 * ignored. Callers that may run on multiple architectures should therefore
 * treat them as hints rather than requirements.
 */
#ifndef MAP_NOCACHE
# define MAP_NOCACHE	0	/* Produce an uncached mapping */
#endif
#ifndef MAP_WRCOMBINE
# define MAP_WRCOMBINE	0	/* Allow write-combining on the mapping */
#endif
#ifndef MAP_WRBACK
# define MAP_WRBACK	0	/* Map using write-back caching */
#endif
#ifndef MAP_WRTHROUGH
# define MAP_WRTHROUGH	0	/* Map using write-through caching */
#endif

/**
 * map_physmem() - Return a virtual address mapped to a given physical address
 * @paddr: the physical address
 * @len: the length of the required mapping
 * @flags: flags affecting the type of mapping
 *
 * Return a virtual address through which the CPU may access the memory at
 * physical address @paddr. The mapping will be valid for at least @len bytes,
 * and may be affected by the flags passed in @flags. This function may create
 * new mappings, so it should generally be paired with a matching call to
 * unmap_physmem() once the caller is finished with the memory in question.
 *
 * Returns: a virtual address suitably mapped to @paddr
 */
#ifndef map_physmem
static inline void *map_physmem(phys_addr_t paddr, unsigned long len,
				unsigned long flags)
{
	return phys_to_virt(paddr);
}
#endif

/**
 * unmap_physmem() - Remove mappings created by a prior call to map_physmem()
 * @vaddr: the virtual address which map_physmem() previously returned
 * @flags: flags matching those originally passed to map_physmem()
 *
 * Unmap memory which was previously mapped by a call to map_physmem(). If
 * map_physmem() dynamically created a mapping for the memory in question then
 * unmap_physmem() will remove that mapping.
 */
#ifndef unmap_physmem
static inline void unmap_physmem(void *vaddr, unsigned long flags)
{
}
#endif
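
/*
 * Example usage (a minimal sketch; the device base address and size are
 * hypothetical):
 *
 *	void *regs = map_physmem(0x10000000, 0x100, MAP_NOCACHE);
 *	...access the device registers through regs...
 *	unmap_physmem(regs, MAP_NOCACHE);
 *
 * Remember that MAP_NOCACHE may be defined as zero and silently ignored on
 * architectures which do not implement it.
 */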

/*
 * __raw_{read,write}{b,w,l,q}() access memory in native endianness.
 *
 * On some architectures memory-mapped I/O needs to be accessed differently.
 * On the simple architectures, we just read/write the memory location
 * directly.
 */

#ifndef __raw_readb
#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
	return *(const volatile u8 __force *)addr;
}
#endif

#ifndef __raw_readw
#define __raw_readw __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
	return *(const volatile u16 __force *)addr;
}
#endif

#ifndef __raw_readl
#define __raw_readl __raw_readl
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
	return *(const volatile u32 __force *)addr;
}
#endif

#ifdef CONFIG_64BIT
#ifndef __raw_readq
#define __raw_readq __raw_readq
static inline u64 __raw_readq(const volatile void __iomem *addr)
{
	return *(const volatile u64 __force *)addr;
}
#endif
#endif /* CONFIG_64BIT */

#ifndef __raw_writeb
#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 value, volatile void __iomem *addr)
{
	*(volatile u8 __force *)addr = value;
}
#endif

#ifndef __raw_writew
#define __raw_writew __raw_writew
static inline void __raw_writew(u16 value, volatile void __iomem *addr)
{
	*(volatile u16 __force *)addr = value;
}
#endif

#ifndef __raw_writel
#define __raw_writel __raw_writel
static inline void __raw_writel(u32 value, volatile void __iomem *addr)
{
	*(volatile u32 __force *)addr = value;
}
#endif

#ifdef CONFIG_64BIT
#ifndef __raw_writeq
#define __raw_writeq __raw_writeq
static inline void __raw_writeq(u64 value, volatile void __iomem *addr)
{
	*(volatile u64 __force *)addr = value;
}
#endif
#endif /* CONFIG_64BIT */
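
/*
 * Example (a minimal sketch; the register offsets are hypothetical): reading
 * a status register and acknowledging it with a write, using the raw
 * native-endian accessors:
 *
 *	void *regs = map_physmem(0x10000000, 0x100, MAP_NOCACHE);
 *	u32 status = __raw_readl(regs + 0x04);
 *	__raw_writel(status, regs + 0x08);
 *
 * Note that these raw accessors imply no barriers and no byte swapping.
 */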

/*
 * {read,write}s{b,w,l,q}() repeatedly access the same memory address in
 * native endianness in 8-, 16-, 32- or 64-bit chunks (@count times).
 */
#ifndef readsb
#define readsb readsb
static inline void readsb(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u8 *buf = buffer;

		do {
			u8 x = __raw_readb(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifndef readsw
#define readsw readsw
static inline void readsw(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u16 *buf = buffer;

		do {
			u16 x = __raw_readw(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifndef readsl
#define readsl readsl
static inline void readsl(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u32 *buf = buffer;

		do {
			u32 x = __raw_readl(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifdef CONFIG_64BIT
#ifndef readsq
#define readsq readsq
static inline void readsq(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u64 *buf = buffer;

		do {
			u64 x = __raw_readq(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif
#endif /* CONFIG_64BIT */

#ifndef writesb
#define writesb writesb
static inline void writesb(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u8 *buf = buffer;

		do {
			__raw_writeb(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifndef writesw
#define writesw writesw
static inline void writesw(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u16 *buf = buffer;

		do {
			__raw_writew(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifndef writesl
#define writesl writesl
static inline void writesl(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u32 *buf = buffer;

		do {
			__raw_writel(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifdef CONFIG_64BIT
#ifndef writesq
#define writesq writesq
static inline void writesq(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u64 *buf = buffer;

		do {
			__raw_writeq(*buf++, addr);
		} while (--count);
	}
}
#endif
#endif /* CONFIG_64BIT */
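
/*
 * Example (a minimal sketch; the FIFO address and transfer length are
 * hypothetical): draining a 32-bit data FIFO into a buffer. The register
 * address stays fixed while the destination pointer advances:
 *
 *	u32 buf[16];
 *	readsl(fifo_reg, buf, 16);
 */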

#ifndef PCI_IOBASE
#define PCI_IOBASE ((void __iomem *)0)
#endif

/*
 * {in,out}s{b,w,l}() are variants of the above that repeatedly access a
 * single I/O port (at PCI_IOBASE + @addr) multiple times.
 */

#ifndef insb
#define insb insb
static inline void insb(unsigned long addr, void *buffer, unsigned int count)
{
	readsb(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insw
#define insw insw
static inline void insw(unsigned long addr, void *buffer, unsigned int count)
{
	readsw(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insl
#define insl insl
static inline void insl(unsigned long addr, void *buffer, unsigned int count)
{
	readsl(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsb
#define outsb outsb
static inline void outsb(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesb(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsw
#define outsw outsw
static inline void outsw(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesw(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsl
#define outsl outsl
static inline void outsl(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesl(PCI_IOBASE + addr, buffer, count);
}
#endif
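
/*
 * Example (a minimal sketch): reading one sector's worth of 16-bit words
 * from the legacy ATA data port at I/O address 0x1f0:
 *
 *	u16 sector[256];
 *	insw(0x1f0, sector, 256);
 */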

#ifndef ioread8_rep
#define ioread8_rep ioread8_rep
static inline void ioread8_rep(const volatile void __iomem *addr, void *buffer,
			       unsigned int count)
{
	readsb(addr, buffer, count);
}
#endif

#ifndef ioread16_rep
#define ioread16_rep ioread16_rep
static inline void ioread16_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsw(addr, buffer, count);
}
#endif

#ifndef ioread32_rep
#define ioread32_rep ioread32_rep
static inline void ioread32_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsl(addr, buffer, count);
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64_rep
#define ioread64_rep ioread64_rep
static inline void ioread64_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsq(addr, buffer, count);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite8_rep
#define iowrite8_rep iowrite8_rep
static inline void iowrite8_rep(volatile void __iomem *addr,
				const void *buffer,
				unsigned int count)
{
	writesb(addr, buffer, count);
}
#endif

#ifndef iowrite16_rep
#define iowrite16_rep iowrite16_rep
static inline void iowrite16_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesw(addr, buffer, count);
}
#endif

#ifndef iowrite32_rep
#define iowrite32_rep iowrite32_rep
static inline void iowrite32_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesl(addr, buffer, count);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64_rep
#define iowrite64_rep iowrite64_rep
static inline void iowrite64_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesq(addr, buffer, count);
}
#endif
#endif /* CONFIG_64BIT */
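
/*
 * Example (a minimal sketch; fifo and buf are hypothetical): the
 * ioread{8,16,32,64}_rep()/iowrite{8,16,32,64}_rep() helpers are simple
 * aliases for the readsN()/writesN() functions above, so drivers ported
 * from Linux can keep the ioread/iowrite naming:
 *
 *	u32 buf[8];
 *	ioread32_rep(fifo, buf, 8);
 */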

#endif /* !__ASSEMBLY__ */
#endif /* __ASM_GENERIC_IO_H__ */