mips: octeon: Add misc remaining header files
Import misc remaining header files from 2013 U-Boot. These will be used
by the drivers added later to support PCIe and networking on the MIPS
Octeon II / III platforms.

Signed-off-by: Aaron Williams <awilliams@marvell.com>
Signed-off-by: Stefan Roese <sr@denx.de>
Cc: Aaron Williams <awilliams@marvell.com>
Cc: Chandrakala Chavva <cchavva@marvell.com>
Cc: Daniel Schwierzeck <daniel.schwierzeck@gmail.com>

@@ -0,0 +1,209 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020 Marvell International Ltd.
 *
 * Typedefs and defines for working with Octeon physical addresses.
 */

#ifndef __CVMX_ADDRESS_H__
#define __CVMX_ADDRESS_H__

typedef enum {
	CVMX_MIPS_SPACE_XKSEG = 3LL,
	CVMX_MIPS_SPACE_XKPHYS = 2LL,
	CVMX_MIPS_SPACE_XSSEG = 1LL,
	CVMX_MIPS_SPACE_XUSEG = 0LL
} cvmx_mips_space_t;

typedef enum {
	CVMX_MIPS_XKSEG_SPACE_KSEG0 = 0LL,
	CVMX_MIPS_XKSEG_SPACE_KSEG1 = 1LL,
	CVMX_MIPS_XKSEG_SPACE_SSEG = 2LL,
	CVMX_MIPS_XKSEG_SPACE_KSEG3 = 3LL
} cvmx_mips_xkseg_space_t;

/* decodes <14:13> of a kseg3 window address */
typedef enum {
	CVMX_ADD_WIN_SCR = 0L,
	CVMX_ADD_WIN_DMA = 1L,
	CVMX_ADD_WIN_UNUSED = 2L,
	CVMX_ADD_WIN_UNUSED2 = 3L
} cvmx_add_win_dec_t;

/* decode within DMA space */
typedef enum {
	CVMX_ADD_WIN_DMA_ADD = 0L,
	CVMX_ADD_WIN_DMA_SENDMEM = 1L,
	/* store data must be a normal DRAM memory space address in this case */
	CVMX_ADD_WIN_DMA_SENDDMA = 2L,
	/* see CVMX_ADD_WIN_DMA_SEND_DEC for data contents */
	CVMX_ADD_WIN_DMA_SENDIO = 3L,
	/* store data must be a normal IO space address in this case */
	CVMX_ADD_WIN_DMA_SENDSINGLE = 4L,
	/* no write buffer data needed/used */
} cvmx_add_win_dma_dec_t;

/**
 *   Physical Address Decode
 *
 * Octeon-I HW never interprets this X (<39:36> reserved
 * for future expansion); software should set it to 0.
 *
 *  - 0x0 XXX0 0000 0000 to      DRAM         Cached
 *  - 0x0 XXX0 0FFF FFFF
 *
 *  - 0x0 XXX0 1000 0000 to      Boot Bus     Uncached  (Converted to 0x1 00X0 1000 0000
 *  - 0x0 XXX0 1FFF FFFF         + EJTAG                           to 0x1 00X0 1FFF FFFF)
 *
 *  - 0x0 XXX0 2000 0000 to      DRAM         Cached
 *  - 0x0 XXXF FFFF FFFF
 *
 *  - 0x1 00X0 0000 0000 to      Boot Bus     Uncached
 *  - 0x1 00XF FFFF FFFF
 *
 *  - 0x1 01X0 0000 0000 to      Other NCB    Uncached
 *  - 0x1 FFXF FFFF FFFF         devices
 *
 * Decode of all Octeon addresses
 */
typedef union {
	u64 u64;
	struct {
		cvmx_mips_space_t R : 2;
		u64 offset : 62;
	} sva;

	struct {
		u64 zeroes : 33;
		u64 offset : 31;
	} suseg;

	struct {
		u64 ones : 33;
		cvmx_mips_xkseg_space_t sp : 2;
		u64 offset : 29;
	} sxkseg;

	struct {
		cvmx_mips_space_t R : 2;
		u64 cca : 3;
		u64 mbz : 10;
		u64 pa : 49;
	} sxkphys;

	struct {
		u64 mbz : 15;
		u64 is_io : 1;
		u64 did : 8;
		u64 unaddr : 4;
		u64 offset : 36;
	} sphys;

	struct {
		u64 zeroes : 24;
		u64 unaddr : 4;
		u64 offset : 36;
	} smem;

	struct {
		u64 mem_region : 2;
		u64 mbz : 13;
		u64 is_io : 1;
		u64 did : 8;
		u64 unaddr : 4;
		u64 offset : 36;
	} sio;

	struct {
		u64 ones : 49;
		cvmx_add_win_dec_t csrdec : 2;
		u64 addr : 13;
	} sscr;

	/* there should only be stores to IOBDMA space, no loads */
	struct {
		u64 ones : 49;
		cvmx_add_win_dec_t csrdec : 2;
		u64 unused2 : 3;
		cvmx_add_win_dma_dec_t type : 3;
		u64 addr : 7;
	} sdma;

	struct {
		u64 didspace : 24;
		u64 unused : 40;
	} sfilldidspace;
} cvmx_addr_t;
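
/*
 * Illustrative sketch (editor's addition, not from the original header):
 * decoding a physical IO address with the union above, relying on Octeon's
 * big-endian bitfield layout. The example value is made up: bit <48> set
 * marks IO space, bits <47:40> carry the device ID.
 *
 *	cvmx_addr_t a;
 *
 *	a.u64 = 0x0001070000000800ULL;
 *	if (a.sphys.is_io)
 *		printf("did %u, offset 0x%llx\n", (unsigned)a.sphys.did,
 *		       (unsigned long long)a.sphys.offset);
 */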

/* These macros are for use by 32-bit applications */

#define CVMX_MIPS32_SPACE_KSEG0	     1l
#define CVMX_ADD_SEG32(segment, add) (((s32)segment << 31) | (s32)(add))

/*
 * Currently all IOs are performed using XKPHYS addressing. Linux uses the
 * CvmMemCtl register to enable XKPHYS addressing to IO space from user mode.
 * Future OSes may need to change the upper bits of IO addresses. The
 * following define controls the upper two bits for all IO addresses generated
 * by the simple executive library.
 */
#define CVMX_IO_SEG CVMX_MIPS_SPACE_XKPHYS

/* These macros simplify the process of creating common IO addresses */
#define CVMX_ADD_SEG(segment, add) ((((u64)segment) << 62) | (add))

#define CVMX_ADD_IO_SEG(add) (add)

#define CVMX_ADDR_DIDSPACE(did)	   (((CVMX_IO_SEG) << 22) | ((1ULL) << 8) | (did))
#define CVMX_ADDR_DID(did)	   (CVMX_ADDR_DIDSPACE(did) << 40)
#define CVMX_FULL_DID(did, subdid) (((did) << 3) | (subdid))

/* from include/ncb_rsl_id.v */
#define CVMX_OCT_DID_MIS  0ULL /* misc stuff */
#define CVMX_OCT_DID_GMX0 1ULL
#define CVMX_OCT_DID_GMX1 2ULL
#define CVMX_OCT_DID_PCI  3ULL
#define CVMX_OCT_DID_KEY  4ULL
#define CVMX_OCT_DID_FPA  5ULL
#define CVMX_OCT_DID_DFA  6ULL
#define CVMX_OCT_DID_ZIP  7ULL
#define CVMX_OCT_DID_RNG  8ULL
#define CVMX_OCT_DID_IPD  9ULL
#define CVMX_OCT_DID_PKT  10ULL
#define CVMX_OCT_DID_TIM  11ULL
#define CVMX_OCT_DID_TAG  12ULL
/* the rest are not on the IO bus */
#define CVMX_OCT_DID_L2C  16ULL
#define CVMX_OCT_DID_LMC  17ULL
#define CVMX_OCT_DID_SPX0 18ULL
#define CVMX_OCT_DID_SPX1 19ULL
#define CVMX_OCT_DID_PIP  20ULL
#define CVMX_OCT_DID_ASX0 22ULL
#define CVMX_OCT_DID_ASX1 23ULL
#define CVMX_OCT_DID_IOB  30ULL

#define CVMX_OCT_DID_PKT_SEND	 CVMX_FULL_DID(CVMX_OCT_DID_PKT, 2ULL)
#define CVMX_OCT_DID_TAG_SWTAG	 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 0ULL)
#define CVMX_OCT_DID_TAG_TAG1	 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 1ULL)
#define CVMX_OCT_DID_TAG_TAG2	 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 2ULL)
#define CVMX_OCT_DID_TAG_TAG3	 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 3ULL)
#define CVMX_OCT_DID_TAG_NULL_RD CVMX_FULL_DID(CVMX_OCT_DID_TAG, 4ULL)
#define CVMX_OCT_DID_TAG_TAG5	 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 5ULL)
#define CVMX_OCT_DID_TAG_CSR	 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 7ULL)
#define CVMX_OCT_DID_FAU_FAI	 CVMX_FULL_DID(CVMX_OCT_DID_IOB, 0ULL)
#define CVMX_OCT_DID_TIM_CSR	 CVMX_FULL_DID(CVMX_OCT_DID_TIM, 0ULL)
#define CVMX_OCT_DID_KEY_RW	 CVMX_FULL_DID(CVMX_OCT_DID_KEY, 0ULL)
#define CVMX_OCT_DID_PCI_6	 CVMX_FULL_DID(CVMX_OCT_DID_PCI, 6ULL)
#define CVMX_OCT_DID_MIS_BOO	 CVMX_FULL_DID(CVMX_OCT_DID_MIS, 0ULL)
#define CVMX_OCT_DID_PCI_RML	 CVMX_FULL_DID(CVMX_OCT_DID_PCI, 0ULL)
#define CVMX_OCT_DID_IPD_CSR	 CVMX_FULL_DID(CVMX_OCT_DID_IPD, 7ULL)
#define CVMX_OCT_DID_DFA_CSR	 CVMX_FULL_DID(CVMX_OCT_DID_DFA, 7ULL)
#define CVMX_OCT_DID_MIS_CSR	 CVMX_FULL_DID(CVMX_OCT_DID_MIS, 7ULL)
#define CVMX_OCT_DID_ZIP_CSR	 CVMX_FULL_DID(CVMX_OCT_DID_ZIP, 0ULL)
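
/*
 * Illustrative sketch (editor's addition): composing the 64-bit store
 * address for a (did, subdid) pair with the macros above.
 *
 *	u64 swtag_addr = CVMX_ADD_IO_SEG(CVMX_ADDR_DID(CVMX_OCT_DID_TAG_SWTAG));
 *
 * CVMX_FULL_DID(CVMX_OCT_DID_TAG, 0) is (12 << 3) | 0 = 96, and
 * CVMX_ADDR_DID() shifts the 24-bit didspace into bits <63:40>, giving the
 * XKPHYS IO address that tag-switch stores are issued to.
 */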

/* Cast to unsigned long long, mainly for use in printfs. */
#define CAST_ULL(v) ((unsigned long long)(v))

#define UNMAPPED_PTR(x) ((1ULL << 63) | (x))

#endif /* __CVMX_ADDRESS_H__ */
|  | @ -0,0 +1,441 @@ | |||
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020 Marvell International Ltd.
 *
 * Support functions for managing command queues used for
 * various hardware blocks.
 *
 * The common command queue infrastructure abstracts out the
 * software necessary for adding to Octeon's chained queue
 * structures. These structures are used for commands to the
 * PKO, ZIP, DFA, RAID, HNA, and DMA engine blocks. Although each
 * hardware unit takes commands and CSRs of different types,
 * they all use basic linked command buffers to store the
 * pending request. In general, users of the CVMX API don't
 * call cvmx-cmd-queue functions directly. Instead the hardware
 * unit specific wrapper should be used. The wrappers perform
 * unit specific validation and CSR writes to submit the
 * commands.
 *
 * Even though most software will never directly interact with
 * cvmx-cmd-queue, knowledge of its internal workings can help
 * in diagnosing performance problems and help with debugging.
 *
 * Command queue pointers are stored in a global named block
 * called "cvmx_cmd_queues". Except for the PKO queues, each
 * hardware queue is stored in its own cache line to reduce SMP
 * contention on spin locks. The PKO queues are stored such that
 * every 16th queue is next to each other in memory. This scheme
 * allows for queues being in separate cache lines when there
 * is a low number of queues per port. With 16 queues per port,
 * the first queue for each port is in the same cache area. The
 * second queues for each port are in another area, etc. This
 * allows software to implement very efficient lockless PKO with
 * 16 queues per port using a minimum of cache lines per core.
 * All queues for a given core will be isolated in the same
 * cache area.
 *
 * In addition to the memory pointer layout, cvmx-cmd-queue
 * provides an optimized fair ll/sc locking mechanism for the
 * queues. The lock uses a "ticket / now serving" model to
 * maintain fair order on contended locks. In addition, it uses
 * predicted locking time to limit cache contention. When a core
 * knows it must wait in line for a lock, it spins on the
 * internal cycle counter to completely eliminate any causes of
 * bus traffic.
 */

#ifndef __CVMX_CMD_QUEUE_H__
#define __CVMX_CMD_QUEUE_H__

/**
 * By default we disable the max depth support. Most programs
 * don't use it and it slows down the command queue processing
 * significantly.
 */
#ifndef CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH
#define CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH 0
#endif

/**
 * Enumeration representing all hardware blocks that use command
 * queues. Each hardware block has up to 65536 sub identifiers for
 * multiple command queues. Not all chips support all hardware
 * units.
 */
typedef enum {
	CVMX_CMD_QUEUE_PKO_BASE = 0x00000,
#define CVMX_CMD_QUEUE_PKO(queue)                                                                  \
	((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_PKO_BASE + (0xffff & (queue))))
	CVMX_CMD_QUEUE_ZIP = 0x10000,
#define CVMX_CMD_QUEUE_ZIP_QUE(queue)                                                              \
	((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_ZIP + (0xffff & (queue))))
	CVMX_CMD_QUEUE_DFA = 0x20000,
	CVMX_CMD_QUEUE_RAID = 0x30000,
	CVMX_CMD_QUEUE_DMA_BASE = 0x40000,
#define CVMX_CMD_QUEUE_DMA(queue)                                                                  \
	((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_DMA_BASE + (0xffff & (queue))))
	CVMX_CMD_QUEUE_BCH = 0x50000,
#define CVMX_CMD_QUEUE_BCH(queue) ((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_BCH + (0xffff & (queue))))
	CVMX_CMD_QUEUE_HNA = 0x60000,
	CVMX_CMD_QUEUE_END = 0x70000,
} cvmx_cmd_queue_id_t;

#define CVMX_CMD_QUEUE_ZIP3_QUE(node, queue)                                                       \
	((cvmx_cmd_queue_id_t)((node) << 24 | CVMX_CMD_QUEUE_ZIP | (0xffff & (queue))))
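
/*
 * Illustrative sketch (editor's addition): a queue ID packs the hardware
 * unit into the upper bits and the queue number into the low 16 bits, so
 *
 *	cvmx_cmd_queue_id_t q0 = CVMX_CMD_QUEUE_PKO(5);	  // 0x00005
 *	cvmx_cmd_queue_id_t q1 = CVMX_CMD_QUEUE_DMA(1);	  // 0x40001
 *
 * while CVMX_CMD_QUEUE_ZIP3_QUE() additionally places the node number in
 * bits <31:24> for multi-node chips.
 */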

/**
 * Command write operations can fail if the command queue needs
 * a new buffer and the associated FPA pool is empty. It can also
 * fail if the number of queued command words reaches the maximum
 * set at initialization.
 */
typedef enum {
	CVMX_CMD_QUEUE_SUCCESS = 0,
	CVMX_CMD_QUEUE_NO_MEMORY = -1,
	CVMX_CMD_QUEUE_FULL = -2,
	CVMX_CMD_QUEUE_INVALID_PARAM = -3,
	CVMX_CMD_QUEUE_ALREADY_SETUP = -4,
} cvmx_cmd_queue_result_t;

typedef struct {
	/* First 64-bit word: */
	u64 fpa_pool : 16;
	u64 base_paddr : 48;
	s32 index;
	u16 max_depth;
	u16 pool_size_m1;
} __cvmx_cmd_queue_state_t;

/**
 * Command-queue locking uses a fair ticket spinlock algorithm,
 * with 64-bit tickets for endianness-neutrality and
 * counter overflow protection.
 * The lock is free when both counters are equal.
 */
typedef struct {
	u64 ticket;
	u64 now_serving;
} __cvmx_cmd_queue_lock_t;
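
/*
 * Illustrative sketch (editor's addition, not the implementation below,
 * which U-Boot stubs out): a classic acquire for the ticket / now-serving
 * scheme described above would look like
 *
 *	u64 t = __atomic_fetch_add(&lock->ticket, 1, __ATOMIC_RELAXED);
 *
 *	while (__atomic_load_n(&lock->now_serving, __ATOMIC_ACQUIRE) != t)
 *		;	// the full SE version spins on the cycle counter
 *
 * and release is a single increment of now_serving.
 */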

/**
 * @INTERNAL
 * This structure contains the global state of all command queues.
 * It is stored in a bootmem named block and shared by all
 * applications running on Octeon. Tickets are stored in a different
 * cache line than the queue information to reduce the contention on the
 * ll/sc used to get a ticket. If this is not the case, the update
 * of queue state causes the ll/sc to fail quite often.
 */
typedef struct {
	__cvmx_cmd_queue_lock_t lock[(CVMX_CMD_QUEUE_END >> 16) * 256];
	__cvmx_cmd_queue_state_t state[(CVMX_CMD_QUEUE_END >> 16) * 256];
} __cvmx_cmd_queue_all_state_t;

extern __cvmx_cmd_queue_all_state_t *__cvmx_cmd_queue_state_ptrs[CVMX_MAX_NODES];

/**
 * @INTERNAL
 * Internal function to handle the corner cases
 * of adding command words to a queue when the current
 * block is getting full.
 */
cvmx_cmd_queue_result_t __cvmx_cmd_queue_write_raw(cvmx_cmd_queue_id_t queue_id,
						   __cvmx_cmd_queue_state_t *qptr, int cmd_count,
						   const u64 *cmds);

/**
 * Initialize a command queue for use. The initial FPA buffer is
 * allocated and the hardware unit is configured to point to the
 * new command queue.
 *
 * @param queue_id  Hardware command queue to initialize.
 * @param max_depth Maximum outstanding commands that can be queued.
 * @param fpa_pool  FPA pool the command queues should come from.
 * @param pool_size Size of each buffer in the FPA pool (bytes)
 *
 * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id, int max_depth,
						  int fpa_pool, int pool_size);

/**
 * Shutdown a queue and free its command buffers to the FPA. The
 * hardware connected to the queue must be stopped before this
 * function is called.
 *
 * @param queue_id Queue to shutdown
 *
 * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
cvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id);

/**
 * Return the number of command words pending in the queue. This
 * function may be relatively slow for some hardware units.
 *
 * @param queue_id Hardware command queue to query
 *
 * @return Number of outstanding commands
 */
int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id);

/**
 * Return the command buffer to be written to. The purpose of this
 * function is to allow CVMX routine access to the low level buffer
 * for initial hardware setup. User applications should not call this
 * function directly.
 *
 * @param queue_id Command queue to query
 *
 * @return Command buffer or NULL on failure
 */
void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id);

/**
 * @INTERNAL
 * Retrieve or allocate the command queue state named block
 */
cvmx_cmd_queue_result_t __cvmx_cmd_queue_init_state_ptr(unsigned int node);

/**
 * @INTERNAL
 * Get the index into the state arrays for the supplied queue id.
 *
 * @param queue_id Queue ID to get an index for
 *
 * @return Index into the state arrays
 */
static inline unsigned int __cvmx_cmd_queue_get_index(cvmx_cmd_queue_id_t queue_id)
{
	/* Warning: This code currently only works with devices that have 256
	 * queues or less. Devices with more than 16 queues are laid out in
	 * memory to allow cores quick access to every 16th queue. This reduces
	 * cache thrashing when you are running 16 queues per port to support
	 * lockless operation.
	 */
	unsigned int unit = (queue_id >> 16) & 0xff;
	unsigned int q = (queue_id >> 4) & 0xf;
	unsigned int core = queue_id & 0xf;

	return (unit << 8) | (core << 4) | q;
}
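
/*
 * Illustrative worked example (editor's addition): PKO queue 0x25
 * (unit 0) gives q = 2 and core = 5, so the index is
 * (0 << 8) | (5 << 4) | 2 = 0x52. Queue 0x26 maps to 0x62 (16 entries
 * away), while queue 0x35 (16 higher) maps to the adjacent 0x53; this is
 * the every-16th-queue interleave described in the file header.
 */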

static inline int __cvmx_cmd_queue_get_node(cvmx_cmd_queue_id_t queue_id)
{
	unsigned int node = queue_id >> 24;
	return node;
}

/**
 * @INTERNAL
 * Lock the supplied queue so nobody else is updating it at the same
 * time as us.
 *
 * @param queue_id Queue ID to lock
 *
 */
static inline void __cvmx_cmd_queue_lock(cvmx_cmd_queue_id_t queue_id)
{
}

/**
 * @INTERNAL
 * Unlock the queue, flushing all writes.
 *
 * @param queue_id Queue ID to unlock
 *
 */
static inline void __cvmx_cmd_queue_unlock(cvmx_cmd_queue_id_t queue_id)
{
	CVMX_SYNCWS; /* nudge out the unlock. */
}

/**
 * @INTERNAL
 * Initialize a command-queue lock to the "unlocked" state.
 */
static inline void __cvmx_cmd_queue_lock_init(cvmx_cmd_queue_id_t queue_id)
{
	unsigned int index = __cvmx_cmd_queue_get_index(queue_id);
	unsigned int node = __cvmx_cmd_queue_get_node(queue_id);

	__cvmx_cmd_queue_state_ptrs[node]->lock[index] = (__cvmx_cmd_queue_lock_t){ 0, 0 };
	CVMX_SYNCWS;
}

/**
 * @INTERNAL
 * Get the queue state structure for the given queue id
 *
 * @param queue_id Queue id to get
 *
 * @return Queue structure or NULL on failure
 */
static inline __cvmx_cmd_queue_state_t *__cvmx_cmd_queue_get_state(cvmx_cmd_queue_id_t queue_id)
{
	unsigned int index;
	unsigned int node;
	__cvmx_cmd_queue_state_t *qptr;

	node = __cvmx_cmd_queue_get_node(queue_id);
	index = __cvmx_cmd_queue_get_index(queue_id);

	if (cvmx_unlikely(!__cvmx_cmd_queue_state_ptrs[node]))
		__cvmx_cmd_queue_init_state_ptr(node);

	qptr = &__cvmx_cmd_queue_state_ptrs[node]->state[index];
	return qptr;
}

/**
 * Write an arbitrary number of command words to a command queue.
 * This is a generic function; the fixed number of command word
 * functions yield higher performance.
 *
 * @param queue_id  Hardware command queue to write to
 * @param use_locking
 *                  Use internal locking to ensure exclusive access for queue
 *                  updates. If you don't use this locking you must ensure
 *                  exclusivity some other way. Locking is strongly recommended.
 * @param cmd_count Number of command words to write
 * @param cmds      Array of commands to write
 *
 * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
static inline cvmx_cmd_queue_result_t
cvmx_cmd_queue_write(cvmx_cmd_queue_id_t queue_id, bool use_locking, int cmd_count, const u64 *cmds)
{
	cvmx_cmd_queue_result_t ret = CVMX_CMD_QUEUE_SUCCESS;
	u64 *cmd_ptr;

	__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);

	/* Make sure nobody else is updating the same queue */
	if (cvmx_likely(use_locking))
		__cvmx_cmd_queue_lock(queue_id);

	/* Most of the time there are plenty of free words in the current block */
	if (cvmx_unlikely((qptr->index + cmd_count) >= qptr->pool_size_m1)) {
		/* The rare case when nearing the end of the block */
		ret = __cvmx_cmd_queue_write_raw(queue_id, qptr, cmd_count, cmds);
	} else {
		cmd_ptr = (u64 *)cvmx_phys_to_ptr((u64)qptr->base_paddr);
		/* Loop easy for the compiler to unroll for the likely case */
		while (cmd_count > 0) {
			cmd_ptr[qptr->index++] = *cmds++;
			cmd_count--;
		}
	}

	/* All updates are complete. Release the lock and return */
	if (cvmx_likely(use_locking))
		__cvmx_cmd_queue_unlock(queue_id);
	else
		CVMX_SYNCWS;

	return ret;
}
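
/*
 * Illustrative usage sketch (editor's addition; the pool macros and
 * command words are made-up placeholders):
 *
 *	u64 cmds[2] = { pko_hdr_word, pko_buf_word };
 *
 *	if (cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_PKO(0), 0,
 *				      MY_CMD_BUF_POOL, MY_CMD_BUF_SIZE) ==
 *	    CVMX_CMD_QUEUE_SUCCESS)
 *		cvmx_cmd_queue_write(CVMX_CMD_QUEUE_PKO(0), true, 2, cmds);
 */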

/**
 * Simple function to write two command words to a command queue.
 *
 * @param queue_id Hardware command queue to write to
 * @param use_locking
 *                 Use internal locking to ensure exclusive access for queue
 *                 updates. If you don't use this locking you must ensure
 *                 exclusivity some other way. Locking is strongly recommended.
 * @param cmd1     Command
 * @param cmd2     Command
 *
 * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write2(cvmx_cmd_queue_id_t queue_id,
							    bool use_locking, u64 cmd1, u64 cmd2)
{
	cvmx_cmd_queue_result_t ret = CVMX_CMD_QUEUE_SUCCESS;
	u64 *cmd_ptr;

	__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);

	/* Make sure nobody else is updating the same queue */
	if (cvmx_likely(use_locking))
		__cvmx_cmd_queue_lock(queue_id);

	if (cvmx_unlikely((qptr->index + 2) >= qptr->pool_size_m1)) {
		/* The rare case when nearing the end of the block */
		u64 cmds[2];

		cmds[0] = cmd1;
		cmds[1] = cmd2;
		ret = __cvmx_cmd_queue_write_raw(queue_id, qptr, 2, cmds);
	} else {
		/* Likely case to work fast */
		cmd_ptr = (u64 *)cvmx_phys_to_ptr((u64)qptr->base_paddr);
		cmd_ptr += qptr->index;
		qptr->index += 2;
		cmd_ptr[0] = cmd1;
		cmd_ptr[1] = cmd2;
	}

	/* All updates are complete. Release the lock and return */
	if (cvmx_likely(use_locking))
		__cvmx_cmd_queue_unlock(queue_id);
	else
		CVMX_SYNCWS;

	return ret;
}

/**
 * Simple function to write three command words to a command queue.
 *
 * @param queue_id Hardware command queue to write to
 * @param use_locking
 *                 Use internal locking to ensure exclusive access for queue
 *                 updates. If you don't use this locking you must ensure
 *                 exclusivity some other way. Locking is strongly recommended.
 * @param cmd1     Command
 * @param cmd2     Command
 * @param cmd3     Command
 *
 * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
static inline cvmx_cmd_queue_result_t
cvmx_cmd_queue_write3(cvmx_cmd_queue_id_t queue_id, bool use_locking, u64 cmd1, u64 cmd2, u64 cmd3)
{
	cvmx_cmd_queue_result_t ret = CVMX_CMD_QUEUE_SUCCESS;
	__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
	u64 *cmd_ptr;

	/* Make sure nobody else is updating the same queue */
	if (cvmx_likely(use_locking))
		__cvmx_cmd_queue_lock(queue_id);

	if (cvmx_unlikely((qptr->index + 3) >= qptr->pool_size_m1)) {
		/* The rare case when nearing the end of the block */
		u64 cmds[3];

		cmds[0] = cmd1;
		cmds[1] = cmd2;
		cmds[2] = cmd3;
		ret = __cvmx_cmd_queue_write_raw(queue_id, qptr, 3, cmds);
	} else {
		cmd_ptr = (u64 *)cvmx_phys_to_ptr((u64)qptr->base_paddr);
		cmd_ptr += qptr->index;
		qptr->index += 3;
		cmd_ptr[0] = cmd1;
		cmd_ptr[1] = cmd2;
		cmd_ptr[2] = cmd3;
	}

	/* All updates are complete. Release the lock and return */
	if (cvmx_likely(use_locking))
		__cvmx_cmd_queue_unlock(queue_id);
	else
		CVMX_SYNCWS;

	return ret;
}

#endif /* __CVMX_CMD_QUEUE_H__ */
|  | @ -0,0 +1,87 @@ | |||
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020 Marvell International Ltd.
 *
 * Definitions for enumerations used with Octeon CSRs.
 */

#ifndef __CVMX_CSR_ENUMS_H__
#define __CVMX_CSR_ENUMS_H__

typedef enum {
	CVMX_IPD_OPC_MODE_STT = 0LL,
	CVMX_IPD_OPC_MODE_STF = 1LL,
	CVMX_IPD_OPC_MODE_STF1_STT = 2LL,
	CVMX_IPD_OPC_MODE_STF2_STT = 3LL
} cvmx_ipd_mode_t;

/**
 * Enumeration representing the amount of packet processing
 * and validation performed by the input hardware.
 */
typedef enum {
	CVMX_PIP_PORT_CFG_MODE_NONE = 0ull,
	CVMX_PIP_PORT_CFG_MODE_SKIPL2 = 1ull,
	CVMX_PIP_PORT_CFG_MODE_SKIPIP = 2ull
} cvmx_pip_port_parse_mode_t;

/**
 * This enumeration controls how a QoS watcher matches a packet.
 *
 * @deprecated  This enumeration was used with cvmx_pip_config_watcher which has
 *              been deprecated.
 */
typedef enum {
	CVMX_PIP_QOS_WATCH_DISABLE = 0ull,
	CVMX_PIP_QOS_WATCH_PROTNH = 1ull,
	CVMX_PIP_QOS_WATCH_TCP = 2ull,
	CVMX_PIP_QOS_WATCH_UDP = 3ull
} cvmx_pip_qos_watch_types;

/**
 * This enumeration is used in PIP tag config to control how
 * POW tags are generated by the hardware.
 */
typedef enum {
	CVMX_PIP_TAG_MODE_TUPLE = 0ull,
	CVMX_PIP_TAG_MODE_MASK = 1ull,
	CVMX_PIP_TAG_MODE_IP_OR_MASK = 2ull,
	CVMX_PIP_TAG_MODE_TUPLE_XOR_MASK = 3ull
} cvmx_pip_tag_mode_t;

/**
 * Tag type definitions
 */
typedef enum {
	CVMX_POW_TAG_TYPE_ORDERED = 0L,
	CVMX_POW_TAG_TYPE_ATOMIC = 1L,
	CVMX_POW_TAG_TYPE_NULL = 2L,
	CVMX_POW_TAG_TYPE_NULL_NULL = 3L
} cvmx_pow_tag_type_t;

/**
 * LCR bits 0 and 1 control the number of bits per character. See the
 * following table for encodings:
 *
 * - 00 = 5 bits (bits 0-4 sent)
 * - 01 = 6 bits (bits 0-5 sent)
 * - 10 = 7 bits (bits 0-6 sent)
 * - 11 = 8 bits (all bits sent)
 */
typedef enum {
	CVMX_UART_BITS5 = 0,
	CVMX_UART_BITS6 = 1,
	CVMX_UART_BITS7 = 2,
	CVMX_UART_BITS8 = 3
} cvmx_uart_bits_t;

typedef enum {
	CVMX_UART_IID_NONE = 1,
	CVMX_UART_IID_RX_ERROR = 6,
	CVMX_UART_IID_RX_DATA = 4,
	CVMX_UART_IID_RX_TIMEOUT = 12,
	CVMX_UART_IID_TX_EMPTY = 2,
	CVMX_UART_IID_MODEM = 0,
	CVMX_UART_IID_BUSY = 7
} cvmx_uart_iid_t;

#endif /* __CVMX_CSR_ENUMS_H__ */
|  | @ -0,0 +1,78 @@ | |||
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020 Marvell International Ltd.
 *
 * Configuration and status register (CSR) address and type definitions for
 * Octeon.
 */

#ifndef __CVMX_CSR_H__
#define __CVMX_CSR_H__

#include "cvmx-csr-enums.h"
#include "cvmx-pip-defs.h"

typedef cvmx_pip_prt_cfgx_t cvmx_pip_port_cfg_t;

/* The CSRs for bootbus region zero used to be independent of the
    other 1-7. As of SDK 1.7.0 these were combined. These macros
    are for backwards compatibility */
#define CVMX_MIO_BOOT_REG_CFG0 CVMX_MIO_BOOT_REG_CFGX(0)
#define CVMX_MIO_BOOT_REG_TIM0 CVMX_MIO_BOOT_REG_TIMX(0)

/* The CN3XXX and CN58XX chips used to not have an LMC number
    passed to the address macros. These are here to supply backwards
    compatibility with old code. Code should really use the new addresses
    with bus arguments for support on other chips */
#define CVMX_LMC_BIST_CTL	  CVMX_LMCX_BIST_CTL(0)
#define CVMX_LMC_BIST_RESULT	  CVMX_LMCX_BIST_RESULT(0)
#define CVMX_LMC_COMP_CTL	  CVMX_LMCX_COMP_CTL(0)
#define CVMX_LMC_CTL		  CVMX_LMCX_CTL(0)
#define CVMX_LMC_CTL1		  CVMX_LMCX_CTL1(0)
#define CVMX_LMC_DCLK_CNT_HI	  CVMX_LMCX_DCLK_CNT_HI(0)
#define CVMX_LMC_DCLK_CNT_LO	  CVMX_LMCX_DCLK_CNT_LO(0)
#define CVMX_LMC_DCLK_CTL	  CVMX_LMCX_DCLK_CTL(0)
#define CVMX_LMC_DDR2_CTL	  CVMX_LMCX_DDR2_CTL(0)
#define CVMX_LMC_DELAY_CFG	  CVMX_LMCX_DELAY_CFG(0)
#define CVMX_LMC_DLL_CTL	  CVMX_LMCX_DLL_CTL(0)
#define CVMX_LMC_DUAL_MEMCFG	  CVMX_LMCX_DUAL_MEMCFG(0)
#define CVMX_LMC_ECC_SYND	  CVMX_LMCX_ECC_SYND(0)
#define CVMX_LMC_FADR		  CVMX_LMCX_FADR(0)
#define CVMX_LMC_IFB_CNT_HI	  CVMX_LMCX_IFB_CNT_HI(0)
#define CVMX_LMC_IFB_CNT_LO	  CVMX_LMCX_IFB_CNT_LO(0)
#define CVMX_LMC_MEM_CFG0	  CVMX_LMCX_MEM_CFG0(0)
#define CVMX_LMC_MEM_CFG1	  CVMX_LMCX_MEM_CFG1(0)
#define CVMX_LMC_OPS_CNT_HI	  CVMX_LMCX_OPS_CNT_HI(0)
#define CVMX_LMC_OPS_CNT_LO	  CVMX_LMCX_OPS_CNT_LO(0)
#define CVMX_LMC_PLL_BWCTL	  CVMX_LMCX_PLL_BWCTL(0)
#define CVMX_LMC_PLL_CTL	  CVMX_LMCX_PLL_CTL(0)
#define CVMX_LMC_PLL_STATUS	  CVMX_LMCX_PLL_STATUS(0)
#define CVMX_LMC_READ_LEVEL_CTL	  CVMX_LMCX_READ_LEVEL_CTL(0)
#define CVMX_LMC_READ_LEVEL_DBG	  CVMX_LMCX_READ_LEVEL_DBG(0)
#define CVMX_LMC_READ_LEVEL_RANKX CVMX_LMCX_READ_LEVEL_RANKX(0)
#define CVMX_LMC_RODT_COMP_CTL	  CVMX_LMCX_RODT_COMP_CTL(0)
#define CVMX_LMC_RODT_CTL	  CVMX_LMCX_RODT_CTL(0)
#define CVMX_LMC_WODT_CTL	  CVMX_LMCX_WODT_CTL0(0)
#define CVMX_LMC_WODT_CTL0	  CVMX_LMCX_WODT_CTL0(0)
#define CVMX_LMC_WODT_CTL1	  CVMX_LMCX_WODT_CTL1(0)

/* The CN3XXX and CN58XX chips used to not have a TWSI bus number
    passed to the address macros. These are here to supply backwards
    compatibility with old code. Code should really use the new addresses
    with bus arguments for support on other chips */
#define CVMX_MIO_TWS_INT	 CVMX_MIO_TWSX_INT(0)
#define CVMX_MIO_TWS_SW_TWSI	 CVMX_MIO_TWSX_SW_TWSI(0)
#define CVMX_MIO_TWS_SW_TWSI_EXT CVMX_MIO_TWSX_SW_TWSI_EXT(0)
#define CVMX_MIO_TWS_TWSI_SW	 CVMX_MIO_TWSX_TWSI_SW(0)

/* The CN3XXX and CN58XX chips used to not have a SMI/MDIO bus number
    passed to the address macros. These are here to supply backwards
    compatibility with old code. Code should really use the new addresses
    with bus arguments for support on other chips */
#define CVMX_SMI_CLK	CVMX_SMIX_CLK(0)
#define CVMX_SMI_CMD	CVMX_SMIX_CMD(0)
#define CVMX_SMI_EN	CVMX_SMIX_EN(0)
#define CVMX_SMI_RD_DAT CVMX_SMIX_RD_DAT(0)
#define CVMX_SMI_WR_DAT CVMX_SMIX_WR_DAT(0)

#endif /* __CVMX_CSR_H__ */
|  | @ -0,0 +1,456 @@ | |||
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020 Marvell International Ltd.
 *
 * Interface to the Octeon extended error status.
 */

#ifndef __CVMX_ERROR_H__
#define __CVMX_ERROR_H__

/**
 * There are generally many error status bits associated with a
 * single logical group. The enumeration below is used to
 * communicate high level groups to the error infrastructure so
 * error status bits can be enabled or disabled in large groups.
 */
typedef enum {
	CVMX_ERROR_GROUP_INTERNAL,
	CVMX_ERROR_GROUP_L2C,
	CVMX_ERROR_GROUP_ETHERNET,
	CVMX_ERROR_GROUP_MGMT_PORT,
	CVMX_ERROR_GROUP_PCI,
	CVMX_ERROR_GROUP_SRIO,
	CVMX_ERROR_GROUP_USB,
	CVMX_ERROR_GROUP_LMC,
	CVMX_ERROR_GROUP_ILK,
	CVMX_ERROR_GROUP_DFM,
	CVMX_ERROR_GROUP_ILA,
} cvmx_error_group_t;

/**
 * Flags representing special handling for some error registers.
 * These flags are passed to cvmx_error_initialize() to control
 * the handling of bits where the same flags were passed to the
 * added cvmx_error_info_t.
 */
typedef enum {
	CVMX_ERROR_TYPE_NONE = 0,
	CVMX_ERROR_TYPE_SBE = 1 << 0,
	CVMX_ERROR_TYPE_DBE = 1 << 1,
} cvmx_error_type_t;

/**
 * When registering for interest in an error status register, the
 * type of the register needs to be known by cvmx-error. Most
 * registers are either IO64 or IO32, but some blocks contain
 * registers that can't be directly accessed. A good example
 * would be PCIe extended error state stored in config space.
 */
typedef enum {
	__CVMX_ERROR_REGISTER_NONE,
	CVMX_ERROR_REGISTER_IO64,
	CVMX_ERROR_REGISTER_IO32,
	CVMX_ERROR_REGISTER_PCICONFIG,
	CVMX_ERROR_REGISTER_SRIOMAINT,
} cvmx_error_register_t;

struct cvmx_error_info;
/**
 * Error handling functions must have the following prototype.
 */
typedef int (*cvmx_error_func_t)(const struct cvmx_error_info *info);

/**
 * This structure is passed to all error handling functions.
 */
typedef struct cvmx_error_info {
	cvmx_error_register_t reg_type;
	u64 status_addr;
	u64 status_mask;
	u64 enable_addr;
	u64 enable_mask;
	cvmx_error_type_t flags;
	cvmx_error_group_t group;
	int group_index;
	cvmx_error_func_t func;
	u64 user_info;
	struct {
		cvmx_error_register_t reg_type;
		u64 status_addr;
		u64 status_mask;
	} parent;
} cvmx_error_info_t;

/**
 * Initialize the error status system. This should be called once
 * before any other functions are called. This function adds default
 * handlers for most all error events but does not enable them. Later
 * calls to cvmx_error_enable() are needed.
 *
 * @return Zero on success, negative on failure.
 */
int cvmx_error_initialize(void);

/**
 * Poll the error status registers and call the appropriate error
 * handlers. This should be called in the RSL interrupt handler
 * for your application or operating system.
 *
 * @return Number of error handlers called. Zero means this call
 *         found no errors and was spurious.
 */
int cvmx_error_poll(void);

/**
 * Register to be called when an error status bit is set. Most users
 * will not need to call this function as cvmx_error_initialize()
 * registers default handlers for most error conditions. This function
 * is normally used to add more handlers without changing the existing
 * handlers.
 *
 * @param new_info Information about the handler for an error register. The
 *                 structure passed is copied and can be destroyed after the
 *                 call. All members of the structure must be populated, even the
 *                 parent information.
 *
 * @return Zero on success, negative on failure.
 */
int cvmx_error_add(const cvmx_error_info_t *new_info);
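
/*
 * Illustrative sketch (editor's addition; the CSR names, mask and handler
 * are made-up placeholders):
 *
 *	static int my_handler(const struct cvmx_error_info *info)
 *	{
 *		printf("error at 0x%llx\n",
 *		       (unsigned long long)info->status_addr);
 *		return 1;
 *	}
 *
 *	cvmx_error_info_t info = {
 *		.reg_type = CVMX_ERROR_REGISTER_IO64,
 *		.status_addr = MY_BLOCK_INT_SUM,	// hypothetical CSR
 *		.status_mask = 1ULL << 3,
 *		.enable_addr = MY_BLOCK_INT_ENB,	// hypothetical CSR
 *		.enable_mask = 1ULL << 3,
 *		.group = CVMX_ERROR_GROUP_INTERNAL,
 *		.func = my_handler,
 *	};
 *
 *	cvmx_error_add(&info);
 */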

/**
 * Remove all handlers for a status register and mask. Normally
 * this function should not be called. Instead a new handler should be
 * installed to replace the existing handler. In the event that all
 * reporting of an error bit should be removed, then use this
 * function.
 *
 * @param reg_type Type of the status register to remove
 * @param status_addr
 *                 Status register to remove.
 * @param status_mask
 *                 All handlers for this status register with this mask will be
 *                 removed.
 * @param old_info If not NULL, this is filled with information about the handler
 *                 that was removed.
 *
 * @return Zero on success, negative on failure (not found).
 */
int cvmx_error_remove(cvmx_error_register_t reg_type, u64 status_addr, u64 status_mask,
		      cvmx_error_info_t *old_info);

/**
 * Change the function and user_info for an existing error status
 * register. This function should be used to replace the default
 * handler with an application specific version as needed.
 *
 * @param reg_type Type of the status register to change
 * @param status_addr
 *                 Status register to change.
 * @param status_mask
 *                 All handlers for this status register with this mask will be
 *                 changed.
 * @param new_func New function to use to handle the error status
 * @param new_user_info
 *                 New user info parameter for the function
 * @param old_func If not NULL, the old function is returned. Useful for restoring
 *                 the old handler.
 * @param old_user_info
 *                 If not NULL, the old user info parameter.
 *
 * @return Zero on success, negative on failure
 */
int cvmx_error_change_handler(cvmx_error_register_t reg_type, u64 status_addr, u64 status_mask,
			      cvmx_error_func_t new_func, u64 new_user_info,
			      cvmx_error_func_t *old_func, u64 *old_user_info);

/**
 * Enable all error registers for a logical group. This should be
 * called whenever a logical group is brought online.
 *
 * @param group  Logical group to enable
 * @param group_index
 *               Index for the group as defined in the cvmx_error_group_t
 *               comments.
 *
 * @return Zero on success, negative on failure.
 */
/*
 * Rather than conditionalize the calls throughout the executive to not enable
 * interrupts in U-Boot, simply make the enable function do nothing.
 */
static inline int cvmx_error_enable_group(cvmx_error_group_t group, int group_index)
{
	return 0;
}

/**
 * Disable all error registers for a logical group. This should be
 * called whenever a logical group is brought offline. Many blocks
 * will report spurious errors when offline unless this function
 * is called.
 *
 * @param group  Logical group to disable
 * @param group_index
 *               Index for the group as defined in the cvmx_error_group_t
 *               comments.
 *
 * @return Zero on success, negative on failure.
 */
/*
 * Rather than conditionalize the calls throughout the executive to not disable
 * interrupts in U-Boot, simply make the disable function do nothing.
 */
static inline int cvmx_error_disable_group(cvmx_error_group_t group, int group_index)
{
	return 0;
}

/**
 * Enable all handlers for a specific status register mask.
 *
 * @param reg_type Type of the status register
 * @param status_addr
 *                 Status register address
 * @param status_mask
 *                 All handlers for this status register with this mask will be
 *                 enabled.
 *
 * @return Zero on success, negative on failure.
 */
int cvmx_error_enable(cvmx_error_register_t reg_type, u64 status_addr, u64 status_mask);

/**
 * Disable all handlers for a specific status register and mask.
 *
 * @param reg_type Type of the status register
 * @param status_addr
 *                 Status register address
 * @param status_mask
 *                 All handlers for this status register with this mask will be
 *                 disabled.
 *
 * @return Zero on success, negative on failure.
 */
int cvmx_error_disable(cvmx_error_register_t reg_type, u64 status_addr, u64 status_mask);

/**
 * @INTERNAL
 * Function for processing non-leaf error status registers. This function
 * calls all handlers for this passed register and all children linked
 * to it.
 *
 * @param info   Error register to check
 *
 * @return Number of error status bits found or zero if no bits were set.
 */
int __cvmx_error_decode(const cvmx_error_info_t *info);

/**
 * @INTERNAL
 * This error bit handler simply prints a message and clears the status bit.
 *
 * @param info   Error register to check
 *
 * @return
 */
int __cvmx_error_display(const cvmx_error_info_t *info);

/**
 * Find the handler for a specific status register and mask
 *
 * @param status_addr
 *                Status register address
 *
 * @return  Return the handler on success or null on failure.
 */
cvmx_error_info_t *cvmx_error_get_index(u64 status_addr);

void __cvmx_install_gmx_error_handler_for_xaui(void);

/**
 * 78xx related
 */
/**
 * Compare two INTSN values.
 *
 * @param key INTSN value to search for
 * @param data current entry from the searched array
 *
 * @return Negative, 0 or positive when respectively key is less than,
 *		equal or greater than data.
 */
int cvmx_error_intsn_cmp(const void *key, const void *data);

/**
 * @INTERNAL
 * Display the error information for a specific interrupt source number.
 *
 * @param node Node number
 * @param intsn Interrupt source number to display
 *
 * @return Zero on success, -1 on error
 */
int cvmx_error_intsn_display_v3(int node, u32 intsn);

/**
 * Initialize the error status system for cn78xx. This should be called once
 * before any other functions are called. This function enables the interrupts
 * described in the array.
 *
 * @param node Node number
 *
 * @return Zero on success, negative on failure.
 */
int cvmx_error_initialize_cn78xx(int node);

/**
 * Enable interrupt for a specific INTSN.
 *
 * @param node Node number
 * @param intsn Interrupt source number
 *
 * @return Zero on success, negative on failure.
 */
int cvmx_error_intsn_enable_v3(int node, u32 intsn);

/**
 * Disable interrupt for a specific INTSN.
 *
 * @param node Node number
 * @param intsn Interrupt source number
 *
 * @return Zero on success, negative on failure.
 */
int cvmx_error_intsn_disable_v3(int node, u32 intsn);

/**
 * Clear interrupt for a specific INTSN.
 *
 * @param node Node number
 * @param intsn Interrupt source number
 *
 * @return Zero on success, negative on failure.
 */
int cvmx_error_intsn_clear_v3(int node, u32 intsn);

/**
 * Enable interrupts for a specific CSR (all the bits/intsn in the csr).
 *
 * @param node Node number
 * @param csr_address CSR address
 *
 * @return Zero on success, negative on failure.
 */
int cvmx_error_csr_enable_v3(int node, u64 csr_address);

/**
 * Disable interrupts for a specific CSR (all the bits/intsn in the csr).
 *
 * @param node Node number
 * @param csr_address CSR address
 *
 * @return Zero
 */
int cvmx_error_csr_disable_v3(int node, u64 csr_address);

/**
 * Enable all error registers for a logical group. This should be
 * called whenever a logical group is brought online.
 *
 * @param group  Logical group to enable
 * @param xipd_port  The IPD port value
 *
 * @return Zero.
 */
int cvmx_error_enable_group_v3(cvmx_error_group_t group, int xipd_port);

/**
 * Disable all error registers for a logical group.
 *
 * @param group  Logical group to disable
 * @param xipd_port  The IPD port value
 *
 * @return Zero.
 */
int cvmx_error_disable_group_v3(cvmx_error_group_t group, int xipd_port);

/**
 * Enable all error registers for a specific category in a logical group.
 * This should be called whenever a logical group is brought online.
 *
 * @param group  Logical group to enable
 * @param type   Category in a logical group to enable
 * @param xipd_port  The IPD port value
 *
 * @return Zero.
 */
int cvmx_error_enable_group_type_v3(cvmx_error_group_t group, cvmx_error_type_t type,
				    int xipd_port);

/**
 * Disable all error registers for a specific category in a logical group.
 * This should be called whenever a logical group is brought offline.
 *
 * @param group  Logical group to disable
 * @param type   Category in a logical group to disable
 * @param xipd_port  The IPD port value
 *
 * @return Zero.
 */
int cvmx_error_disable_group_type_v3(cvmx_error_group_t group, cvmx_error_type_t type,
				     int xipd_port);

/**
 * Clear all error registers for a logical group.
 *
 * @param group  Logical group to clear
 * @param xipd_port  The IPD port value
 *
 * @return Zero.
 */
int cvmx_error_clear_group_v3(cvmx_error_group_t group, int xipd_port);

/**
 * Enable all error registers for a particular category.
 *
 * @param node  CCPI node
 * @param type  category to enable
 *
 * @return Zero.
 */
int cvmx_error_enable_type_v3(int node, cvmx_error_type_t type);

/**
 * Disable all error registers for a particular category.
 *
 * @param node  CCPI node
 * @param type  category to disable
 *
 * @return Zero.
 */
int cvmx_error_disable_type_v3(int node, cvmx_error_type_t type);

void cvmx_octeon_hang(void) __attribute__((__noreturn__));

/**
 * @INTERNAL
 *
 * Process L2C single and multi-bit ECC errors
 *
 */
int __cvmx_cn7xxx_l2c_l2d_ecc_error_display(int node, int intsn);

/**
 * Handle L2 cache TAG ECC errors and noway errors
 *
 * @param	node	CCPI node
 * @param	intsn	intsn from the error array.
 * @param	remote	true for remote node (cn78xx only)
 *
 * @return	1 if handled, 0 if not handled
 */
int __cvmx_cn7xxx_l2c_tag_error_display(int node, int intsn, bool remote);

#endif /* __CVMX_ERROR_H__ */
|  | @ -0,0 +1,217 @@ | |||
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020 Marvell International Ltd.
 *
 * Interface to the hardware Free Pool Allocator.
 */

#ifndef __CVMX_FPA_H__
#define __CVMX_FPA_H__

#include "cvmx-scratch.h"
#include "cvmx-fpa-defs.h"
#include "cvmx-fpa1.h"
#include "cvmx-fpa3.h"

#define CVMX_FPA_MIN_BLOCK_SIZE 128
#define CVMX_FPA_ALIGNMENT	128
#define CVMX_FPA_POOL_NAME_LEN	16

/* On CN78XX in backward-compatible mode, pool is mapped to AURA */
#define CVMX_FPA_NUM_POOLS                                                                         \
	(octeon_has_feature(OCTEON_FEATURE_FPA3) ? cvmx_fpa3_num_auras() : CVMX_FPA1_NUM_POOLS)

/**
 * Structure to store FPA pool configuration parameters.
 */
struct cvmx_fpa_pool_config {
	s64 pool_num;
	u64 buffer_size;
	u64 buffer_count;
};

typedef struct cvmx_fpa_pool_config cvmx_fpa_pool_config_t;

/**
 * Return the name of the pool
 *
 * @param pool_num   Pool to get the name of
 * @return The name
 */
const char *cvmx_fpa_get_name(int pool_num);

/**
 * Initialize the FPA per node
 */
int cvmx_fpa_global_init_node(int node);

/**
 * Enable the FPA
 */
static inline void cvmx_fpa_enable(void)
{
	if (!octeon_has_feature(OCTEON_FEATURE_FPA3))
		cvmx_fpa1_enable();
	else
		cvmx_fpa_global_init_node(cvmx_get_node_num());
}

/**
 * Disable the FPA
 */
static inline void cvmx_fpa_disable(void)
{
	if (!octeon_has_feature(OCTEON_FEATURE_FPA3))
		cvmx_fpa1_disable();
	/* FPA3 does not have a disable function */
}

/**
 * @INTERNAL
 * @deprecated OBSOLETE
 *
 * Kept for transition assistance only
 */
static inline void cvmx_fpa_global_initialize(void)
{
	cvmx_fpa_global_init_node(cvmx_get_node_num());
}

/**
 * @INTERNAL
 *
 * Convert an FPA1 style POOL into an FPA3 AURA in
 * backward compatibility mode.
 */
static inline cvmx_fpa3_gaura_t cvmx_fpa1_pool_to_fpa3_aura(cvmx_fpa1_pool_t pool)
{
	if (octeon_has_feature(OCTEON_FEATURE_FPA3)) {
		unsigned int node = cvmx_get_node_num();
		cvmx_fpa3_gaura_t aura = __cvmx_fpa3_gaura(node, pool);

		return aura;
	}
	return CVMX_FPA3_INVALID_GAURA;
}

/**
 * Get a new block from the FPA
 *
 * @param pool   Pool to get the block from
 * @return Pointer to the block or NULL on failure
 */
static inline void *cvmx_fpa_alloc(u64 pool)
{
	/* FPA3 is handled differently */
	if (octeon_has_feature(OCTEON_FEATURE_FPA3))
		return cvmx_fpa3_alloc(cvmx_fpa1_pool_to_fpa3_aura(pool));
	else
		return cvmx_fpa1_alloc(pool);
}

/**
 * Asynchronously get a new block from the FPA
 *
 * The result of cvmx_fpa_async_alloc() may be retrieved using
 * cvmx_fpa_async_alloc_finish().
 *
 * @param scr_addr Local scratch address to put the response in. This is a byte
 *		   address but must be 8 byte aligned.
 * @param pool      Pool to get the block from
 */
static inline void cvmx_fpa_async_alloc(u64 scr_addr, u64 pool)
{
	if (octeon_has_feature(OCTEON_FEATURE_FPA3))
		cvmx_fpa3_async_alloc(scr_addr, cvmx_fpa1_pool_to_fpa3_aura(pool));
	else
		cvmx_fpa1_async_alloc(scr_addr, pool);
}

/**
 * Retrieve the result of cvmx_fpa_async_alloc()
 *
 * @param scr_addr The local scratch address. Must be the same value
 * passed to cvmx_fpa_async_alloc().
 *
 * @param pool Pool the block came from. Must be the same value
 * passed to cvmx_fpa_async_alloc().
 *
 * @return Pointer to the block or NULL on failure
 */
static inline void *cvmx_fpa_async_alloc_finish(u64 scr_addr, u64 pool)
{
	if (octeon_has_feature(OCTEON_FEATURE_FPA3))
		return cvmx_fpa3_async_alloc_finish(scr_addr, cvmx_fpa1_pool_to_fpa3_aura(pool));
	else
		return cvmx_fpa1_async_alloc_finish(scr_addr, pool);
}
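
/*
 * Illustrative usage sketch (editor's addition; the scratch offset and
 * pool variable are assumptions): the async pair lets the IOBDMA fill
 * overlap with other work.
 *
 *	cvmx_fpa_async_alloc(MY_SCR_OFFSET, packet_pool);
 *	// ... do unrelated work while the allocation completes ...
 *	void *buf = cvmx_fpa_async_alloc_finish(MY_SCR_OFFSET, packet_pool);
 *	if (!buf)
 *		return;		// pool was empty
 */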
| 
 | ||||
| /**
 | ||||
|  * Free a block allocated with a FPA pool. | ||||
|  * Does NOT provide memory ordering in cases where the memory block was | ||||
|  * modified by the core. | ||||
|  * | ||||
|  * @param ptr    Block to free | ||||
|  * @param pool   Pool to put it in | ||||
|  * @param num_cache_lines | ||||
|  *               Cache lines to invalidate | ||||
|  */ | ||||
| static inline void cvmx_fpa_free_nosync(void *ptr, u64 pool, u64 num_cache_lines) | ||||
| { | ||||
| 	/* FPA3 is handled differently */ | ||||
| 	if (octeon_has_feature(OCTEON_FEATURE_FPA3)) | ||||
| 		cvmx_fpa3_free_nosync(ptr, cvmx_fpa1_pool_to_fpa3_aura(pool), num_cache_lines); | ||||
| 	else | ||||
| 		cvmx_fpa1_free_nosync(ptr, pool, num_cache_lines); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Free a block allocated with a FPA pool.  Provides required memory | ||||
|  * ordering in cases where memory block was modified by core. | ||||
|  * | ||||
|  * @param ptr    Block to free | ||||
|  * @param pool   Pool to put it in | ||||
|  * @param num_cache_lines | ||||
|  *               Cache lines to invalidate | ||||
|  */ | ||||
| static inline void cvmx_fpa_free(void *ptr, u64 pool, u64 num_cache_lines) | ||||
| { | ||||
| 	if (octeon_has_feature(OCTEON_FEATURE_FPA3)) | ||||
| 		cvmx_fpa3_free(ptr, cvmx_fpa1_pool_to_fpa3_aura(pool), num_cache_lines); | ||||
| 	else | ||||
| 		cvmx_fpa1_free(ptr, pool, num_cache_lines); | ||||
| } | ||||
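| /* | ||||
|  * Illustrative sketch (an assumption, not from the original source): a | ||||
|  * typical synchronous round trip through the wrapper API; 'pool' is a | ||||
|  * hypothetical pool number configured elsewhere. | ||||
|  * | ||||
|  * @code | ||||
|  * void *buf = cvmx_fpa_alloc(pool); | ||||
|  * | ||||
|  * if (buf) { | ||||
|  *	memset(buf, 0, 64);		// core modifies the buffer... | ||||
|  *	cvmx_fpa_free(buf, pool, 0);	// ...so use the syncing free | ||||
|  * } | ||||
|  * @endcode | ||||
|  */ | ||||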
| 
 | ||||
| /**
 | ||||
|  * Setup a FPA pool to control a new block of memory. | ||||
|  * This can only be called once per pool. Make sure proper | ||||
|  * locking enforces this. | ||||
|  * | ||||
|  * @param pool       Pool to initialize | ||||
|  * @param name       Constant character string to name this pool. | ||||
|  *                   String is not copied. | ||||
|  * @param buffer     Pointer to the block of memory to use. This must be | ||||
|  *                   accessible by all processors and external hardware. | ||||
|  * @param block_size Size for each block controlled by the FPA | ||||
|  * @param num_blocks Number of blocks | ||||
|  * | ||||
|  * @return the pool number on Success, | ||||
|  *         -1 on failure | ||||
|  */ | ||||
| int cvmx_fpa_setup_pool(int pool, const char *name, void *buffer, u64 block_size, u64 num_blocks); | ||||
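| /* | ||||
|  * Illustrative sketch (an assumption, not from the original source): | ||||
|  * carving a small pool out of a caller-provided buffer. Pool number, | ||||
|  * name and geometry are hypothetical; 'buf' must be visible to all | ||||
|  * cores and to hardware. | ||||
|  * | ||||
|  * @code | ||||
|  * int pool = cvmx_fpa_setup_pool(0, "example", buf, 2048, 128); | ||||
|  * | ||||
|  * if (pool < 0) | ||||
|  *	printf("FPA pool setup failed\n"); | ||||
|  * @endcode | ||||
|  */ | ||||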
| 
 | ||||
| int cvmx_fpa_shutdown_pool(int pool); | ||||
| 
 | ||||
| /**
 | ||||
|  * Gets the block size of buffer in specified pool | ||||
|  * @param pool	 Pool to get the block size from | ||||
|  * @return       Size of buffer in specified pool | ||||
|  */ | ||||
| unsigned int cvmx_fpa_get_block_size(int pool); | ||||
| 
 | ||||
| int cvmx_fpa_is_pool_available(int pool_num); | ||||
| u64 cvmx_fpa_get_pool_owner(int pool_num); | ||||
| int cvmx_fpa_get_max_pools(void); | ||||
| int cvmx_fpa_get_current_count(int pool_num); | ||||
| int cvmx_fpa_validate_pool(int pool); | ||||
| 
 | ||||
| #endif /*  __CVM_FPA_H__ */ | ||||
|  | @ -0,0 +1,196 @@ | |||
| /* SPDX-License-Identifier: GPL-2.0 */ | ||||
| /*
 | ||||
|  * Copyright (C) 2020 Marvell International Ltd. | ||||
|  * | ||||
|  * Interface to the hardware Free Pool Allocator on Octeon chips. | ||||
|  * These are the legacy models, i.e. prior to CN78XX/CN76XX. | ||||
|  */ | ||||
| 
 | ||||
| #ifndef __CVMX_FPA1_HW_H__ | ||||
| #define __CVMX_FPA1_HW_H__ | ||||
| 
 | ||||
| #include "cvmx-scratch.h" | ||||
| #include "cvmx-fpa-defs.h" | ||||
| #include "cvmx-fpa3.h" | ||||
| 
 | ||||
| /* Legacy pool range is 0..7, plus pool 8 on CN68XX */ | ||||
| typedef int cvmx_fpa1_pool_t; | ||||
| 
 | ||||
| #define CVMX_FPA1_NUM_POOLS    8 | ||||
| #define CVMX_FPA1_INVALID_POOL ((cvmx_fpa1_pool_t)-1) | ||||
| #define CVMX_FPA1_NAME_SIZE    16 | ||||
| 
 | ||||
| /**
 | ||||
|  * Structure describing the data format used for stores to the FPA. | ||||
|  */ | ||||
| typedef union { | ||||
| 	u64 u64; | ||||
| 	struct { | ||||
| 		u64 scraddr : 8; | ||||
| 		u64 len : 8; | ||||
| 		u64 did : 8; | ||||
| 		u64 addr : 40; | ||||
| 	} s; | ||||
| } cvmx_fpa1_iobdma_data_t; | ||||
| 
 | ||||
| /*
 | ||||
|  * Allocate or reserve the specified fpa pool. | ||||
|  * | ||||
|  * @param pool	  FPA pool to allocate/reserve. If -1 it | ||||
|  *                finds an empty pool to allocate. | ||||
|  * @return        Allocated pool number or CVMX_FPA1_INVALID_POOL | ||||
|  *                if it fails to allocate a pool | ||||
|  */ | ||||
| cvmx_fpa1_pool_t cvmx_fpa1_reserve_pool(cvmx_fpa1_pool_t pool); | ||||
| 
 | ||||
| /**
 | ||||
|  * Free the specified fpa pool. | ||||
|  * @param pool	   Pool to free | ||||
|  * @return         0 on success, -1 on failure | ||||
|  */ | ||||
| int cvmx_fpa1_release_pool(cvmx_fpa1_pool_t pool); | ||||
| 
 | ||||
| static inline void cvmx_fpa1_free(void *ptr, cvmx_fpa1_pool_t pool, u64 num_cache_lines) | ||||
| { | ||||
| 	cvmx_addr_t newptr; | ||||
| 
 | ||||
| 	newptr.u64 = cvmx_ptr_to_phys(ptr); | ||||
| 	newptr.sfilldidspace.didspace = CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool)); | ||||
| 	/* Make sure that any previous writes to memory go out before we free
 | ||||
| 	 * this buffer.  This also serves as a barrier to prevent GCC from | ||||
| 	 * reordering operations to after the free. | ||||
| 	 */ | ||||
| 	CVMX_SYNCWS; | ||||
| 	/* value written is number of cache lines not written back */ | ||||
| 	cvmx_write_io(newptr.u64, num_cache_lines); | ||||
| } | ||||
| 
 | ||||
| static inline void cvmx_fpa1_free_nosync(void *ptr, cvmx_fpa1_pool_t pool, | ||||
| 					 unsigned int num_cache_lines) | ||||
| { | ||||
| 	cvmx_addr_t newptr; | ||||
| 
 | ||||
| 	newptr.u64 = cvmx_ptr_to_phys(ptr); | ||||
| 	newptr.sfilldidspace.didspace = CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool)); | ||||
| 	/* Prevent GCC from reordering around free */ | ||||
| 	asm volatile("" : : : "memory"); | ||||
| 	/* value written is number of cache lines not written back */ | ||||
| 	cvmx_write_io(newptr.u64, num_cache_lines); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Enable the FPA for use. Must be performed after any CSR | ||||
|  * configuration but before any other FPA functions. | ||||
|  */ | ||||
| static inline void cvmx_fpa1_enable(void) | ||||
| { | ||||
| 	cvmx_fpa_ctl_status_t status; | ||||
| 
 | ||||
| 	status.u64 = csr_rd(CVMX_FPA_CTL_STATUS); | ||||
| 	if (status.s.enb) { | ||||
| 		/*
 | ||||
| 		 * CN68XXP1 should not reset the FPA (doing so may break | ||||
| 		 * the SSO), so we may end up enabling it more than once. | ||||
| 		 * Just return and don't spew messages. | ||||
| 		 */ | ||||
| 		return; | ||||
| 	} | ||||
| 
 | ||||
| 	status.u64 = 0; | ||||
| 	status.s.enb = 1; | ||||
| 	csr_wr(CVMX_FPA_CTL_STATUS, status.u64); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Reset FPA to disable. Make sure buffers from all FPA pools are freed | ||||
|  * before disabling FPA. | ||||
|  */ | ||||
| static inline void cvmx_fpa1_disable(void) | ||||
| { | ||||
| 	cvmx_fpa_ctl_status_t status; | ||||
| 
 | ||||
| 	if (OCTEON_IS_MODEL(OCTEON_CN68XX_PASS1)) | ||||
| 		return; | ||||
| 
 | ||||
| 	status.u64 = csr_rd(CVMX_FPA_CTL_STATUS); | ||||
| 	status.s.reset = 1; | ||||
| 	csr_wr(CVMX_FPA_CTL_STATUS, status.u64); | ||||
| } | ||||
| 
 | ||||
| static inline void *cvmx_fpa1_alloc(cvmx_fpa1_pool_t pool) | ||||
| { | ||||
| 	u64 address; | ||||
| 
 | ||||
| 	for (;;) { | ||||
| 		address = csr_rd(CVMX_ADDR_DID(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool))); | ||||
| 		if (cvmx_likely(address)) { | ||||
| 			return cvmx_phys_to_ptr(address); | ||||
| 		} else { | ||||
| 			if (csr_rd(CVMX_FPA_QUEX_AVAILABLE(pool)) > 0) | ||||
| 				udelay(50); | ||||
| 			else | ||||
| 				return NULL; | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Asynchronously get a new block from the FPA | ||||
|  * @INTERNAL | ||||
|  * | ||||
|  * The result of cvmx_fpa_async_alloc() may be retrieved using | ||||
|  * cvmx_fpa_async_alloc_finish(). | ||||
|  * | ||||
|  * @param scr_addr Local scratch address to put response in.  This is a byte | ||||
|  *		   address but must be 8 byte aligned. | ||||
|  * @param pool      Pool to get the block from | ||||
|  */ | ||||
| static inline void cvmx_fpa1_async_alloc(u64 scr_addr, cvmx_fpa1_pool_t pool) | ||||
| { | ||||
| 	cvmx_fpa1_iobdma_data_t data; | ||||
| 
 | ||||
| 	/* Hardware only uses 64 bit aligned locations, so convert from byte
 | ||||
| 	 * address to 64-bit index | ||||
| 	 */ | ||||
| 	data.u64 = 0ull; | ||||
| 	data.s.scraddr = scr_addr >> 3; | ||||
| 	data.s.len = 1; | ||||
| 	data.s.did = CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool); | ||||
| 	data.s.addr = 0; | ||||
| 
 | ||||
| 	cvmx_scratch_write64(scr_addr, 0ull); | ||||
| 	CVMX_SYNCW; | ||||
| 	cvmx_send_single(data.u64); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Retrieve the result of cvmx_fpa_async_alloc | ||||
|  * @INTERNAL | ||||
|  * | ||||
|  * @param scr_addr The Local scratch address.  Must be the same value | ||||
|  * passed to cvmx_fpa_async_alloc(). | ||||
|  * | ||||
|  * @param pool Pool the block came from.  Must be the same value | ||||
|  * passed to cvmx_fpa_async_alloc. | ||||
|  * | ||||
|  * @return Pointer to the block or NULL on failure | ||||
|  */ | ||||
| static inline void *cvmx_fpa1_async_alloc_finish(u64 scr_addr, cvmx_fpa1_pool_t pool) | ||||
| { | ||||
| 	u64 address; | ||||
| 
 | ||||
| 	CVMX_SYNCIOBDMA; | ||||
| 
 | ||||
| 	address = cvmx_scratch_read64(scr_addr); | ||||
| 	if (cvmx_likely(address)) | ||||
| 		return cvmx_phys_to_ptr(address); | ||||
| 	else | ||||
| 		return cvmx_fpa1_alloc(pool); | ||||
| } | ||||
| 
 | ||||
| static inline u64 cvmx_fpa1_get_available(cvmx_fpa1_pool_t pool) | ||||
| { | ||||
| 	return csr_rd(CVMX_FPA_QUEX_AVAILABLE(pool)); | ||||
| } | ||||
| 
 | ||||
| #endif /* __CVMX_FPA1_HW_H__ */ | ||||
|  | @ -0,0 +1,566 @@ | |||
| /* SPDX-License-Identifier: GPL-2.0 */ | ||||
| /*
 | ||||
|  * Copyright (C) 2020 Marvell International Ltd. | ||||
|  * | ||||
|  * Interface to the CN78XX Free Pool Allocator, a.k.a. FPA3 | ||||
|  */ | ||||
| 
 | ||||
| #include "cvmx-address.h" | ||||
| #include "cvmx-fpa-defs.h" | ||||
| #include "cvmx-scratch.h" | ||||
| 
 | ||||
| #ifndef __CVMX_FPA3_H__ | ||||
| #define __CVMX_FPA3_H__ | ||||
| 
 | ||||
| typedef struct { | ||||
| 	unsigned res0 : 6; | ||||
| 	unsigned node : 2; | ||||
| 	unsigned res1 : 2; | ||||
| 	unsigned lpool : 6; | ||||
| 	unsigned valid_magic : 16; | ||||
| } cvmx_fpa3_pool_t; | ||||
| 
 | ||||
| typedef struct { | ||||
| 	unsigned res0 : 6; | ||||
| 	unsigned node : 2; | ||||
| 	unsigned res1 : 6; | ||||
| 	unsigned laura : 10; | ||||
| 	unsigned valid_magic : 16; | ||||
| } cvmx_fpa3_gaura_t; | ||||
| 
 | ||||
| #define CVMX_FPA3_VALID_MAGIC	0xf9a3 | ||||
| #define CVMX_FPA3_INVALID_GAURA ((cvmx_fpa3_gaura_t){ 0, 0, 0, 0, 0 }) | ||||
| #define CVMX_FPA3_INVALID_POOL	((cvmx_fpa3_pool_t){ 0, 0, 0, 0, 0 }) | ||||
| 
 | ||||
| static inline bool __cvmx_fpa3_aura_valid(cvmx_fpa3_gaura_t aura) | ||||
| { | ||||
| 	if (aura.valid_magic != CVMX_FPA3_VALID_MAGIC) | ||||
| 		return false; | ||||
| 	return true; | ||||
| } | ||||
| 
 | ||||
| static inline bool __cvmx_fpa3_pool_valid(cvmx_fpa3_pool_t pool) | ||||
| { | ||||
| 	if (pool.valid_magic != CVMX_FPA3_VALID_MAGIC) | ||||
| 		return false; | ||||
| 	return true; | ||||
| } | ||||
| 
 | ||||
| static inline cvmx_fpa3_gaura_t __cvmx_fpa3_gaura(int node, int laura) | ||||
| { | ||||
| 	cvmx_fpa3_gaura_t aura; | ||||
| 
 | ||||
| 	if (node < 0) | ||||
| 		node = cvmx_get_node_num(); | ||||
| 	if (laura < 0) | ||||
| 		return CVMX_FPA3_INVALID_GAURA; | ||||
| 
 | ||||
| 	aura.node = node; | ||||
| 	aura.laura = laura; | ||||
| 	aura.valid_magic = CVMX_FPA3_VALID_MAGIC; | ||||
| 	return aura; | ||||
| } | ||||
| 
 | ||||
| static inline cvmx_fpa3_pool_t __cvmx_fpa3_pool(int node, int lpool) | ||||
| { | ||||
| 	cvmx_fpa3_pool_t pool; | ||||
| 
 | ||||
| 	if (node < 0) | ||||
| 		node = cvmx_get_node_num(); | ||||
| 	if (lpool < 0) | ||||
| 		return CVMX_FPA3_INVALID_POOL; | ||||
| 
 | ||||
| 	pool.node = node; | ||||
| 	pool.lpool = lpool; | ||||
| 	pool.valid_magic = CVMX_FPA3_VALID_MAGIC; | ||||
| 	return pool; | ||||
| } | ||||
| 
 | ||||
| #undef CVMX_FPA3_VALID_MAGIC | ||||
| 
 | ||||
| /**
 | ||||
|  * Structure describing the data format used for stores to the FPA. | ||||
|  */ | ||||
| typedef union { | ||||
| 	u64 u64; | ||||
| 	struct { | ||||
| 		u64 scraddr : 8; | ||||
| 		u64 len : 8; | ||||
| 		u64 did : 8; | ||||
| 		u64 addr : 40; | ||||
| 	} s; | ||||
| 	struct { | ||||
| 		u64 scraddr : 8; | ||||
| 		u64 len : 8; | ||||
| 		u64 did : 8; | ||||
| 		u64 node : 4; | ||||
| 		u64 red : 1; | ||||
| 		u64 reserved2 : 9; | ||||
| 		u64 aura : 10; | ||||
| 		u64 reserved3 : 16; | ||||
| 	} cn78xx; | ||||
| } cvmx_fpa3_iobdma_data_t; | ||||
| 
 | ||||
| /**
 | ||||
|  * Struct describing load allocate operation addresses for FPA pool. | ||||
|  */ | ||||
| union cvmx_fpa3_load_data { | ||||
| 	u64 u64; | ||||
| 	struct { | ||||
| 		u64 seg : 2; | ||||
| 		u64 reserved1 : 13; | ||||
| 		u64 io : 1; | ||||
| 		u64 did : 8; | ||||
| 		u64 node : 4; | ||||
| 		u64 red : 1; | ||||
| 		u64 reserved2 : 9; | ||||
| 		u64 aura : 10; | ||||
| 		u64 reserved3 : 16; | ||||
| 	}; | ||||
| }; | ||||
| 
 | ||||
| typedef union cvmx_fpa3_load_data cvmx_fpa3_load_data_t; | ||||
| 
 | ||||
| /**
 | ||||
|  * Struct describing store free operation addresses from FPA pool. | ||||
|  */ | ||||
| union cvmx_fpa3_store_addr { | ||||
| 	u64 u64; | ||||
| 	struct { | ||||
| 		u64 seg : 2; | ||||
| 		u64 reserved1 : 13; | ||||
| 		u64 io : 1; | ||||
| 		u64 did : 8; | ||||
| 		u64 node : 4; | ||||
| 		u64 reserved2 : 10; | ||||
| 		u64 aura : 10; | ||||
| 		u64 fabs : 1; | ||||
| 		u64 reserved3 : 3; | ||||
| 		u64 dwb_count : 9; | ||||
| 		u64 reserved4 : 3; | ||||
| 	}; | ||||
| }; | ||||
| 
 | ||||
| typedef union cvmx_fpa3_store_addr cvmx_fpa3_store_addr_t; | ||||
| 
 | ||||
| enum cvmx_fpa3_pool_alignment_e { | ||||
| 	FPA_NATURAL_ALIGNMENT, | ||||
| 	FPA_OFFSET_ALIGNMENT, | ||||
| 	FPA_OPAQUE_ALIGNMENT | ||||
| }; | ||||
| 
 | ||||
| #define CVMX_FPA3_AURAX_LIMIT_MAX ((1ull << 40) - 1) | ||||
| 
 | ||||
| /**
 | ||||
|  * @INTERNAL | ||||
|  * Accessor functions to return number of POOLS in an FPA3 | ||||
|  * depending on SoC model. | ||||
|  * The number is per-node for models supporting multi-node configurations. | ||||
|  */ | ||||
| static inline int cvmx_fpa3_num_pools(void) | ||||
| { | ||||
| 	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) | ||||
| 		return 64; | ||||
| 	if (OCTEON_IS_MODEL(OCTEON_CNF75XX)) | ||||
| 		return 32; | ||||
| 	if (OCTEON_IS_MODEL(OCTEON_CN73XX)) | ||||
| 		return 32; | ||||
| 	printf("ERROR: %s: Unknowm model\n", __func__); | ||||
| 	return -1; | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * @INTERNAL | ||||
|  * Accessor functions to return number of AURAS in an FPA3 | ||||
|  * depending on SoC model. | ||||
|  * The number is per-node for models supporting multi-node configurations. | ||||
|  */ | ||||
| static inline int cvmx_fpa3_num_auras(void) | ||||
| { | ||||
| 	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) | ||||
| 		return 1024; | ||||
| 	if (OCTEON_IS_MODEL(OCTEON_CNF75XX)) | ||||
| 		return 512; | ||||
| 	if (OCTEON_IS_MODEL(OCTEON_CN73XX)) | ||||
| 		return 512; | ||||
| 	printf("ERROR: %s: Unknowm model\n", __func__); | ||||
| 	return -1; | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Get the FPA3 POOL underneath an FPA3 AURA, containing all its buffers | ||||
|  * | ||||
|  */ | ||||
| static inline cvmx_fpa3_pool_t cvmx_fpa3_aura_to_pool(cvmx_fpa3_gaura_t aura) | ||||
| { | ||||
| 	cvmx_fpa3_pool_t pool; | ||||
| 	cvmx_fpa_aurax_pool_t aurax_pool; | ||||
| 
 | ||||
| 	aurax_pool.u64 = cvmx_read_csr_node(aura.node, CVMX_FPA_AURAX_POOL(aura.laura)); | ||||
| 
 | ||||
| 	pool = __cvmx_fpa3_pool(aura.node, aurax_pool.s.pool); | ||||
| 	return pool; | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Get a new block from the FPA pool | ||||
|  * | ||||
|  * @param aura  - aura number | ||||
|  * @return pointer to the block or NULL on failure | ||||
|  */ | ||||
| static inline void *cvmx_fpa3_alloc(cvmx_fpa3_gaura_t aura) | ||||
| { | ||||
| 	u64 address; | ||||
| 	cvmx_fpa3_load_data_t load_addr; | ||||
| 
 | ||||
| 	load_addr.u64 = 0; | ||||
| 	load_addr.seg = CVMX_MIPS_SPACE_XKPHYS; | ||||
| 	load_addr.io = 1; | ||||
| 	load_addr.did = 0x29; /* Device ID. Indicates FPA. */ | ||||
| 	load_addr.node = aura.node; | ||||
| 	load_addr.red = 0;	/* Perform RED on allocation. | ||||
| 				 * FIXME: use config option. | ||||
| 				 */ | ||||
| 	load_addr.aura = aura.laura; | ||||
| 
 | ||||
| 	address = cvmx_read64_uint64(load_addr.u64); | ||||
| 	if (!address) | ||||
| 		return NULL; | ||||
| 	return cvmx_phys_to_ptr(address); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Asynchronously get a new block from the FPA | ||||
|  * | ||||
|  * The result of cvmx_fpa_async_alloc() may be retrieved using | ||||
|  * cvmx_fpa_async_alloc_finish(). | ||||
|  * | ||||
|  * @param scr_addr Local scratch address to put response in.  This is a byte | ||||
|  *		   address but must be 8 byte aligned. | ||||
|  * @param aura     Global aura to get the block from | ||||
|  */ | ||||
| static inline void cvmx_fpa3_async_alloc(u64 scr_addr, cvmx_fpa3_gaura_t aura) | ||||
| { | ||||
| 	cvmx_fpa3_iobdma_data_t data; | ||||
| 
 | ||||
| 	/* Hardware only uses 64 bit aligned locations, so convert from byte
 | ||||
| 	 * address to 64-bit index | ||||
| 	 */ | ||||
| 	data.u64 = 0ull; | ||||
| 	data.cn78xx.scraddr = scr_addr >> 3; | ||||
| 	data.cn78xx.len = 1; | ||||
| 	data.cn78xx.did = 0x29; | ||||
| 	data.cn78xx.node = aura.node; | ||||
| 	data.cn78xx.aura = aura.laura; | ||||
| 	cvmx_scratch_write64(scr_addr, 0ull); | ||||
| 
 | ||||
| 	CVMX_SYNCW; | ||||
| 	cvmx_send_single(data.u64); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Retrieve the result of cvmx_fpa3_async_alloc | ||||
|  * | ||||
|  * @param scr_addr The Local scratch address.  Must be the same value | ||||
|  * passed to cvmx_fpa_async_alloc(). | ||||
|  * | ||||
|  * @param aura Global aura the block came from.  Must be the same value | ||||
|  * passed to cvmx_fpa_async_alloc. | ||||
|  * | ||||
|  * @return Pointer to the block or NULL on failure | ||||
|  */ | ||||
| static inline void *cvmx_fpa3_async_alloc_finish(u64 scr_addr, cvmx_fpa3_gaura_t aura) | ||||
| { | ||||
| 	u64 address; | ||||
| 
 | ||||
| 	CVMX_SYNCIOBDMA; | ||||
| 
 | ||||
| 	address = cvmx_scratch_read64(scr_addr); | ||||
| 	if (cvmx_likely(address)) | ||||
| 		return cvmx_phys_to_ptr(address); | ||||
| 	else | ||||
| 		/* Try regular alloc if async failed */ | ||||
| 		return cvmx_fpa3_alloc(aura); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Free a pointer back to the pool. | ||||
|  * | ||||
|  * @param aura   global aura number | ||||
|  * @param ptr    physical address of block to free. | ||||
|  * @param num_cache_lines Cache lines to invalidate | ||||
|  */ | ||||
| static inline void cvmx_fpa3_free(void *ptr, cvmx_fpa3_gaura_t aura, unsigned int num_cache_lines) | ||||
| { | ||||
| 	cvmx_fpa3_store_addr_t newptr; | ||||
| 	cvmx_addr_t newdata; | ||||
| 
 | ||||
| 	newdata.u64 = cvmx_ptr_to_phys(ptr); | ||||
| 
 | ||||
| 	/* Make sure that any previous writes to memory go out before we free | ||||
| 	 * this buffer.  This also serves as a barrier to prevent GCC from | ||||
| 	 * reordering operations to after the free. | ||||
| 	 */ | ||||
| 	CVMX_SYNCWS; | ||||
| 
 | ||||
| 	newptr.u64 = 0; | ||||
| 	newptr.seg = CVMX_MIPS_SPACE_XKPHYS; | ||||
| 	newptr.io = 1; | ||||
| 	newptr.did = 0x29; /* Device id, indicates FPA */ | ||||
| 	newptr.node = aura.node; | ||||
| 	newptr.aura = aura.laura; | ||||
| 	newptr.fabs = 0; /* Free absolute. FIXME to use config option */ | ||||
| 	newptr.dwb_count = num_cache_lines; | ||||
| 
 | ||||
| 	cvmx_write_io(newptr.u64, newdata.u64); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Free a pointer back to the pool without flushing the write buffer. | ||||
|  * | ||||
|  * @param aura   global aura number | ||||
|  * @param ptr    physical address of block to free. | ||||
|  * @param num_cache_lines Cache lines to invalidate | ||||
|  */ | ||||
| static inline void cvmx_fpa3_free_nosync(void *ptr, cvmx_fpa3_gaura_t aura, | ||||
| 					 unsigned int num_cache_lines) | ||||
| { | ||||
| 	cvmx_fpa3_store_addr_t newptr; | ||||
| 	cvmx_addr_t newdata; | ||||
| 
 | ||||
| 	newdata.u64 = cvmx_ptr_to_phys(ptr); | ||||
| 
 | ||||
| 	/* Prevent GCC from reordering writes to (*ptr) */ | ||||
| 	asm volatile("" : : : "memory"); | ||||
| 
 | ||||
| 	newptr.u64 = 0; | ||||
| 	newptr.seg = CVMX_MIPS_SPACE_XKPHYS; | ||||
| 	newptr.io = 1; | ||||
| 	newptr.did = 0x29; /* Device id, indicates FPA */ | ||||
| 	newptr.node = aura.node; | ||||
| 	newptr.aura = aura.laura; | ||||
| 	newptr.fabs = 0; /* Free absolute. FIXME to use config option */ | ||||
| 	newptr.dwb_count = num_cache_lines; | ||||
| 
 | ||||
| 	cvmx_write_io(newptr.u64, newdata.u64); | ||||
| } | ||||
| 
 | ||||
| static inline int cvmx_fpa3_pool_is_enabled(cvmx_fpa3_pool_t pool) | ||||
| { | ||||
| 	cvmx_fpa_poolx_cfg_t pool_cfg; | ||||
| 
 | ||||
| 	if (!__cvmx_fpa3_pool_valid(pool)) | ||||
| 		return -1; | ||||
| 
 | ||||
| 	pool_cfg.u64 = cvmx_read_csr_node(pool.node, CVMX_FPA_POOLX_CFG(pool.lpool)); | ||||
| 	return pool_cfg.cn78xx.ena; | ||||
| } | ||||
| 
 | ||||
| static inline int cvmx_fpa3_config_red_params(unsigned int node, int qos_avg_en, int red_lvl_dly, | ||||
| 					      int avg_dly) | ||||
| { | ||||
| 	cvmx_fpa_gen_cfg_t fpa_cfg; | ||||
| 	cvmx_fpa_red_delay_t red_delay; | ||||
| 
 | ||||
| 	fpa_cfg.u64 = cvmx_read_csr_node(node, CVMX_FPA_GEN_CFG); | ||||
| 	fpa_cfg.s.avg_en = qos_avg_en; | ||||
| 	fpa_cfg.s.lvl_dly = red_lvl_dly; | ||||
| 	cvmx_write_csr_node(node, CVMX_FPA_GEN_CFG, fpa_cfg.u64); | ||||
| 
 | ||||
| 	red_delay.u64 = cvmx_read_csr_node(node, CVMX_FPA_RED_DELAY); | ||||
| 	red_delay.s.avg_dly = avg_dly; | ||||
| 	cvmx_write_csr_node(node, CVMX_FPA_RED_DELAY, red_delay.u64); | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Gets the buffer size of the pool backing the specified aura. | ||||
|  * | ||||
|  * @param aura Global aura number | ||||
|  * @return Returns size of the buffers in the specified pool. | ||||
|  */ | ||||
| static inline int cvmx_fpa3_get_aura_buf_size(cvmx_fpa3_gaura_t aura) | ||||
| { | ||||
| 	cvmx_fpa3_pool_t pool; | ||||
| 	cvmx_fpa_poolx_cfg_t pool_cfg; | ||||
| 	int block_size; | ||||
| 
 | ||||
| 	pool = cvmx_fpa3_aura_to_pool(aura); | ||||
| 
 | ||||
| 	pool_cfg.u64 = cvmx_read_csr_node(pool.node, CVMX_FPA_POOLX_CFG(pool.lpool)); | ||||
| 	block_size = pool_cfg.cn78xx.buf_size << 7; | ||||
| 	return block_size; | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Return the number of available buffers in an AURA | ||||
|  * | ||||
|  * @param aura   Aura to receive the count for | ||||
|  * @return available buffer count | ||||
|  */ | ||||
| static inline long long cvmx_fpa3_get_available(cvmx_fpa3_gaura_t aura) | ||||
| { | ||||
| 	cvmx_fpa3_pool_t pool; | ||||
| 	cvmx_fpa_poolx_available_t avail_reg; | ||||
| 	cvmx_fpa_aurax_cnt_t cnt_reg; | ||||
| 	cvmx_fpa_aurax_cnt_limit_t limit_reg; | ||||
| 	long long ret; | ||||
| 
 | ||||
| 	pool = cvmx_fpa3_aura_to_pool(aura); | ||||
| 
 | ||||
| 	/* Get POOL available buffer count */ | ||||
| 	avail_reg.u64 = cvmx_read_csr_node(pool.node, CVMX_FPA_POOLX_AVAILABLE(pool.lpool)); | ||||
| 
 | ||||
| 	/* Get AURA current available count */ | ||||
| 	cnt_reg.u64 = cvmx_read_csr_node(aura.node, CVMX_FPA_AURAX_CNT(aura.laura)); | ||||
| 	limit_reg.u64 = cvmx_read_csr_node(aura.node, CVMX_FPA_AURAX_CNT_LIMIT(aura.laura)); | ||||
| 
 | ||||
| 	if (limit_reg.cn78xx.limit < cnt_reg.cn78xx.cnt) | ||||
| 		return 0; | ||||
| 
 | ||||
| 	/* Calculate AURA-based buffer allowance */ | ||||
| 	ret = limit_reg.cn78xx.limit - cnt_reg.cn78xx.cnt; | ||||
| 
 | ||||
| 	/* Use the POOL's real buffer availability when it is less than the allowance */ | ||||
| 	if (ret > (long long)avail_reg.cn78xx.count) | ||||
| 		ret = avail_reg.cn78xx.count; | ||||
| 
 | ||||
| 	return ret; | ||||
| } | ||||
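| /* | ||||
|  * Worked example with illustrative numbers (not from the original | ||||
|  * source): with an AURA limit of 1000 and a current count of 200 the | ||||
|  * AURA allowance is 800; if the backing POOL only reports 500 free | ||||
|  * buffers, the function above returns 500, the smaller of the two. | ||||
|  */ | ||||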
| 
 | ||||
| /**
 | ||||
|  * Configure the QoS parameters of an FPA3 AURA | ||||
|  * | ||||
|  * @param aura is the FPA3 AURA handle | ||||
|  * @param ena_red enables random early discard when outstanding count exceeds 'pass_thresh' | ||||
|  * @param pass_thresh is the maximum count to invoke flow control | ||||
|  * @param drop_thresh is the count threshold to begin dropping packets | ||||
|  * @param ena_bp enables backpressure when outstanding count exceeds 'bp_thresh' | ||||
|  * @param bp_thresh is the back-pressure threshold | ||||
|  * | ||||
|  */ | ||||
| static inline void cvmx_fpa3_setup_aura_qos(cvmx_fpa3_gaura_t aura, bool ena_red, u64 pass_thresh, | ||||
| 					    u64 drop_thresh, bool ena_bp, u64 bp_thresh) | ||||
| { | ||||
| 	unsigned int shift = 0; | ||||
| 	u64 shift_thresh; | ||||
| 	cvmx_fpa_aurax_cnt_limit_t limit_reg; | ||||
| 	cvmx_fpa_aurax_cnt_levels_t aura_level; | ||||
| 
 | ||||
| 	if (!__cvmx_fpa3_aura_valid(aura)) | ||||
| 		return; | ||||
| 
 | ||||
| 	/* Get AURAX count limit for validation */ | ||||
| 	limit_reg.u64 = cvmx_read_csr_node(aura.node, CVMX_FPA_AURAX_CNT_LIMIT(aura.laura)); | ||||
| 
 | ||||
| 	if (pass_thresh < 256) | ||||
| 		pass_thresh = 255; | ||||
| 
 | ||||
| 	if (drop_thresh <= pass_thresh || drop_thresh > limit_reg.cn78xx.limit) | ||||
| 		drop_thresh = limit_reg.cn78xx.limit; | ||||
| 
 | ||||
| 	if (bp_thresh < 256 || bp_thresh > limit_reg.cn78xx.limit) | ||||
| 		bp_thresh = limit_reg.cn78xx.limit >> 1; | ||||
| 
 | ||||
| 	shift_thresh = (bp_thresh > drop_thresh) ? bp_thresh : drop_thresh; | ||||
| 
 | ||||
| 	/* Calculate shift so that the largest threshold fits in 8 bits */ | ||||
| 	for (shift = 0; shift < (1 << 6); shift++) { | ||||
| 		if (((shift_thresh >> shift) & ~0xffull) == 0) | ||||
| 			break; | ||||
| 	} | ||||
| 
 | ||||
| 	aura_level.u64 = cvmx_read_csr_node(aura.node, CVMX_FPA_AURAX_CNT_LEVELS(aura.laura)); | ||||
| 	aura_level.s.pass = pass_thresh >> shift; | ||||
| 	aura_level.s.drop = drop_thresh >> shift; | ||||
| 	aura_level.s.bp = bp_thresh >> shift; | ||||
| 	aura_level.s.shift = shift; | ||||
| 	aura_level.s.red_ena = ena_red; | ||||
| 	aura_level.s.bp_ena = ena_bp; | ||||
| 	cvmx_write_csr_node(aura.node, CVMX_FPA_AURAX_CNT_LEVELS(aura.laura), aura_level.u64); | ||||
| } | ||||
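| /* | ||||
|  * Worked example of the shift calculation above (illustrative numbers, | ||||
|  * not from the original source): for bp_thresh = 0x30000 (196608), | ||||
|  * >> 8 gives 768 and >> 9 gives 384, both still wider than 8 bits; | ||||
|  * >> 10 gives 192, which fits, so shift = 10 and the hardware compares | ||||
|  * counts against the thresholds scaled down by 2^10. | ||||
|  */ | ||||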
| 
 | ||||
| cvmx_fpa3_gaura_t cvmx_fpa3_reserve_aura(int node, int desired_aura_num); | ||||
| int cvmx_fpa3_release_aura(cvmx_fpa3_gaura_t aura); | ||||
| cvmx_fpa3_pool_t cvmx_fpa3_reserve_pool(int node, int desired_pool_num); | ||||
| int cvmx_fpa3_release_pool(cvmx_fpa3_pool_t pool); | ||||
| int cvmx_fpa3_is_aura_available(int node, int aura_num); | ||||
| int cvmx_fpa3_is_pool_available(int node, int pool_num); | ||||
| 
 | ||||
| cvmx_fpa3_pool_t cvmx_fpa3_setup_fill_pool(int node, int desired_pool, const char *name, | ||||
| 					   unsigned int block_size, unsigned int num_blocks, | ||||
| 					   void *buffer); | ||||
| 
 | ||||
| /**
 | ||||
|  * Function to attach an aura to an existing pool | ||||
|  * | ||||
|  * @param pool - configured pool to attach aura to | ||||
|  * @param desired_aura - aura number to use, set to -1 to allocate | ||||
|  * @param name - name to register | ||||
|  * @param block_size - size of buffers to use | ||||
|  * @param num_blocks - number of blocks to allocate | ||||
|  * | ||||
|  * @return configured gaura on success, CVMX_FPA3_INVALID_GAURA on failure | ||||
|  */ | ||||
| cvmx_fpa3_gaura_t cvmx_fpa3_set_aura_for_pool(cvmx_fpa3_pool_t pool, int desired_aura, | ||||
| 					      const char *name, unsigned int block_size, | ||||
| 					      unsigned int num_blocks); | ||||
| 
 | ||||
| /**
 | ||||
|  * Function to setup and initialize a pool. | ||||
|  * | ||||
|  * @param node - configure fpa on this node | ||||
|  * @param desired_aura - aura to use, -1 for dynamic allocation | ||||
|  * @param name - name to register | ||||
|  * @param buffer - pointer to the memory to back the pool with | ||||
|  * @param block_size - size of buffers in pool | ||||
|  * @param num_blocks - max number of buffers allowed | ||||
|  * @return configured gaura on success, CVMX_FPA3_INVALID_GAURA on failure | ||||
|  */ | ||||
| cvmx_fpa3_gaura_t cvmx_fpa3_setup_aura_and_pool(int node, int desired_aura, const char *name, | ||||
| 						void *buffer, unsigned int block_size, | ||||
| 						unsigned int num_blocks); | ||||
| 
 | ||||
| int cvmx_fpa3_shutdown_aura_and_pool(cvmx_fpa3_gaura_t aura); | ||||
| int cvmx_fpa3_shutdown_aura(cvmx_fpa3_gaura_t aura); | ||||
| int cvmx_fpa3_shutdown_pool(cvmx_fpa3_pool_t pool); | ||||
| const char *cvmx_fpa3_get_pool_name(cvmx_fpa3_pool_t pool); | ||||
| int cvmx_fpa3_get_pool_buf_size(cvmx_fpa3_pool_t pool); | ||||
| const char *cvmx_fpa3_get_aura_name(cvmx_fpa3_gaura_t aura); | ||||
| 
 | ||||
| /* FIXME: Need a different macro for stage2 of u-boot */ | ||||
| 
 | ||||
| static inline void cvmx_fpa3_stage2_init(int aura, int pool, u64 stack_paddr, int stacklen, | ||||
| 					 int buffer_sz, int buf_cnt) | ||||
| { | ||||
| 	cvmx_fpa_poolx_cfg_t pool_cfg; | ||||
| 
 | ||||
| 	/* Configure pool stack */ | ||||
| 	cvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_BASE(pool), stack_paddr); | ||||
| 	cvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_ADDR(pool), stack_paddr); | ||||
| 	cvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_END(pool), stack_paddr + stacklen); | ||||
| 
 | ||||
| 	/* Configure pool with buffer size */ | ||||
| 	pool_cfg.u64 = 0; | ||||
| 	pool_cfg.cn78xx.nat_align = 1; | ||||
| 	pool_cfg.cn78xx.buf_size = buffer_sz >> 7; | ||||
| 	pool_cfg.cn78xx.l_type = 0x2; | ||||
| 	pool_cfg.cn78xx.ena = 0; | ||||
| 	cvmx_write_csr_node(0, CVMX_FPA_POOLX_CFG(pool), pool_cfg.u64); | ||||
| 	/* The pool was configured disabled above; now enable it */ | ||||
| 	pool_cfg.cn78xx.ena = 1; | ||||
| 	cvmx_write_csr_node(0, CVMX_FPA_POOLX_CFG(pool), pool_cfg.u64); | ||||
| 
 | ||||
| 	cvmx_write_csr_node(0, CVMX_FPA_AURAX_CFG(aura), 0); | ||||
| 	cvmx_write_csr_node(0, CVMX_FPA_AURAX_CNT_ADD(aura), buf_cnt); | ||||
| 	cvmx_write_csr_node(0, CVMX_FPA_AURAX_POOL(aura), (u64)pool); | ||||
| } | ||||
| 
 | ||||
| static inline void cvmx_fpa3_stage2_disable(int aura, int pool) | ||||
| { | ||||
| 	cvmx_write_csr_node(0, CVMX_FPA_AURAX_POOL(aura), 0); | ||||
| 	cvmx_write_csr_node(0, CVMX_FPA_POOLX_CFG(pool), 0); | ||||
| 	cvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_BASE(pool), 0); | ||||
| 	cvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_ADDR(pool), 0); | ||||
| 	cvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_END(pool), 0); | ||||
| } | ||||
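| /* | ||||
|  * Illustrative sketch (an assumption, not from the original source): | ||||
|  * a stage-2 bring-up of aura/pool 0 with hypothetical physical | ||||
|  * addresses and geometry, torn down again afterwards. | ||||
|  * | ||||
|  * @code | ||||
|  * cvmx_fpa3_stage2_init(0, 0, stack_pa, stack_len, 2048, 128); | ||||
|  * // ... use aura 0 ... | ||||
|  * cvmx_fpa3_stage2_disable(0, 0); | ||||
|  * @endcode | ||||
|  */ | ||||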
| 
 | ||||
| #endif /* __CVMX_FPA3_H__ */ | ||||
|  | @ -0,0 +1,213 @@ | |||
| /* SPDX-License-Identifier: GPL-2.0 */ | ||||
| /*
 | ||||
|  * Copyright (C) 2020 Marvell International Ltd. | ||||
|  */ | ||||
| 
 | ||||
| #ifndef _CVMX_GLOBAL_RESOURCES_T_ | ||||
| #define _CVMX_GLOBAL_RESOURCES_T_ | ||||
| 
 | ||||
| #define CVMX_GLOBAL_RESOURCES_DATA_NAME "cvmx-global-resources" | ||||
| 
 | ||||
| /* In the macros below, the abbreviation GR stands for global resources. */ | ||||
| #define CVMX_GR_TAG_INVALID                                                                        \ | ||||
| 	cvmx_get_gr_tag('i', 'n', 'v', 'a', 'l', 'i', 'd', '.', '.', '.', '.', '.', '.', '.', '.', \ | ||||
| 			'.') | ||||
| /* Tag for PKO queue table range. */ | ||||
| #define CVMX_GR_TAG_PKO_QUEUES                                                                     \ | ||||
| 	cvmx_get_gr_tag('c', 'v', 'm', '_', 'p', 'k', 'o', '_', 'q', 'u', 'e', 'u', 's', '.', '.', \ | ||||
| 			'.') | ||||
| /* Tag for a PKO internal ports range */ | ||||
| #define CVMX_GR_TAG_PKO_IPORTS                                                                     \ | ||||
| 	cvmx_get_gr_tag('c', 'v', 'm', '_', 'p', 'k', 'o', '_', 'i', 'p', 'o', 'r', 't', '.', '.', \ | ||||
| 			'.') | ||||
| #define CVMX_GR_TAG_FPA                                                                            \ | ||||
| 	cvmx_get_gr_tag('c', 'v', 'm', '_', 'f', 'p', 'a', '.', '.', '.', '.', '.', '.', '.', '.', \ | ||||
| 			'.') | ||||
| #define CVMX_GR_TAG_FAU                                                                            \ | ||||
| 	cvmx_get_gr_tag('c', 'v', 'm', '_', 'f', 'a', 'u', '.', '.', '.', '.', '.', '.', '.', '.', \ | ||||
| 			'.') | ||||
| #define CVMX_GR_TAG_SSO_GRP(n)                                                                     \ | ||||
| 	cvmx_get_gr_tag('c', 'v', 'm', '_', 's', 's', 'o', '_', '0', (n) + '0', '.', '.', '.',     \ | ||||
| 			'.', '.', '.') | ||||
| #define CVMX_GR_TAG_TIM(n)                                                                         \ | ||||
| 	cvmx_get_gr_tag('c', 'v', 'm', '_', 't', 'i', 'm', '_', (n) + '0', '.', '.', '.', '.',     \ | ||||
| 			'.', '.', '.') | ||||
| #define CVMX_GR_TAG_CLUSTERS(x)                                                                    \ | ||||
| 	cvmx_get_gr_tag('c', 'v', 'm', '_', 'c', 'l', 'u', 's', 't', 'e', 'r', '_', (x + '0'),     \ | ||||
| 			'.', '.', '.') | ||||
| #define CVMX_GR_TAG_CLUSTER_GRP(x)                                                                 \ | ||||
| 	cvmx_get_gr_tag('c', 'v', 'm', '_', 'c', 'l', 'g', 'r', 'p', '_', (x + '0'), '.', '.',     \ | ||||
| 			'.', '.', '.') | ||||
| #define CVMX_GR_TAG_STYLE(x)                                                                       \ | ||||
| 	cvmx_get_gr_tag('c', 'v', 'm', '_', 's', 't', 'y', 'l', 'e', '_', (x + '0'), '.', '.',     \ | ||||
| 			'.', '.', '.') | ||||
| #define CVMX_GR_TAG_QPG_ENTRY(x)                                                                   \ | ||||
| 	cvmx_get_gr_tag('c', 'v', 'm', '_', 'q', 'p', 'g', 'e', 't', '_', (x + '0'), '.', '.',     \ | ||||
| 			'.', '.', '.') | ||||
| #define CVMX_GR_TAG_BPID(x)                                                                        \ | ||||
| 	cvmx_get_gr_tag('c', 'v', 'm', '_', 'b', 'p', 'i', 'd', 's', '_', (x + '0'), '.', '.',     \ | ||||
| 			'.', '.', '.') | ||||
| #define CVMX_GR_TAG_MTAG_IDX(x)                                                                    \ | ||||
| 	cvmx_get_gr_tag('c', 'v', 'm', '_', 'm', 't', 'a', 'g', 'x', '_', (x + '0'), '.', '.',     \ | ||||
| 			'.', '.', '.') | ||||
| #define CVMX_GR_TAG_PCAM(x, y, z)                                                                  \ | ||||
| 	cvmx_get_gr_tag('c', 'v', 'm', '_', 'p', 'c', 'a', 'm', '_', (x + '0'), (y + '0'),         \ | ||||
| 			(z + '0'), '.', '.', '.', '.') | ||||
| 
 | ||||
| #define CVMX_GR_TAG_CIU3_IDT(_n)                                                                   \ | ||||
| 	cvmx_get_gr_tag('c', 'v', 'm', '_', 'c', 'i', 'u', '3', '_', ((_n) + '0'), '_', 'i', 'd',  \ | ||||
| 			't', '.', '.') | ||||
| 
 | ||||
| /* Allocation of the 512 SW INTSNs (in the 12 bit SW INTSN space) */ | ||||
| #define CVMX_GR_TAG_CIU3_SWINTSN(_n)                                                               \ | ||||
| 	cvmx_get_gr_tag('c', 'v', 'm', '_', 'c', 'i', 'u', '3', '_', ((_n) + '0'), '_', 's', 'w',  \ | ||||
| 			'i', 's', 'n') | ||||
| 
 | ||||
| #define TAG_INIT_PART(A, B, C, D, E, F, G, H)                                                      \ | ||||
| 	((((u64)(A) & 0xff) << 56) | (((u64)(B) & 0xff) << 48) | (((u64)(C) & 0xff) << 40) |             \ | ||||
| 	 (((u64)(D) & 0xff) << 32) | (((u64)(E) & 0xff) << 24) | (((u64)(F) & 0xff) << 16) |             \ | ||||
| 	 (((u64)(G) & 0xff) << 8) | (((u64)(H) & 0xff))) | ||||
| 
 | ||||
| struct global_resource_tag { | ||||
| 	u64 lo; | ||||
| 	u64 hi; | ||||
| }; | ||||
| 
 | ||||
| enum cvmx_resource_err { CVMX_RESOURCE_ALLOC_FAILED = -1, CVMX_RESOURCE_ALREADY_RESERVED = -2 }; | ||||
| 
 | ||||
| /*
 | ||||
|  * @INTERNAL | ||||
|  * Creates a tag from the specified characters. | ||||
|  */ | ||||
| static inline struct global_resource_tag cvmx_get_gr_tag(char a, char b, char c, char d, char e, | ||||
| 							 char f, char g, char h, char i, char j, | ||||
| 							 char k, char l, char m, char n, char o, | ||||
| 							 char p) | ||||
| { | ||||
| 	struct global_resource_tag tag; | ||||
| 
 | ||||
| 	tag.lo = TAG_INIT_PART(a, b, c, d, e, f, g, h); | ||||
| 	tag.hi = TAG_INIT_PART(i, j, k, l, m, n, o, p); | ||||
| 	return tag; | ||||
| } | ||||
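| /* | ||||
|  * Worked example (derived from the macro above): TAG_INIT_PART() packs | ||||
|  * eight characters into one u64, most significant byte first. For | ||||
|  * CVMX_GR_TAG_FPA, i.e. "cvm_fpa........", this yields: | ||||
|  * | ||||
|  *   tag.lo = 0x63766d5f6670612e  ("cvm_fpa.") | ||||
|  *   tag.hi = 0x2e2e2e2e2e2e2e2e  ("........") | ||||
|  */ | ||||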
| 
 | ||||
| static inline int cvmx_gr_same_tag(struct global_resource_tag gr1, struct global_resource_tag gr2) | ||||
| { | ||||
| 	return (gr1.hi == gr2.hi) && (gr1.lo == gr2.lo); | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  * @INTERNAL | ||||
|  * Creates a global resource range that can hold the specified number of | ||||
|  * elements | ||||
|  * @param tag is the tag of the range. The tag is created using | ||||
|  * cvmx_get_gr_tag() | ||||
|  * @param nelements is the number of elements to be held in the resource range. | ||||
|  */ | ||||
| int cvmx_create_global_resource_range(struct global_resource_tag tag, int nelements); | ||||
| 
 | ||||
| /*
 | ||||
|  * @INTERNAL | ||||
|  * Allocate nelements in the global resource range with the specified tag. It | ||||
|  * is assumed that prior | ||||
|  * to calling this the global resource range has already been created using | ||||
|  * cvmx_create_global_resource_range(). | ||||
|  * @param tag is the tag of the global resource range. | ||||
|  * @param nelements is the number of elements to be allocated. | ||||
|  * @param owner is a 64 bit number that identifies the owner of this range. | ||||
|  * @param alignment specifies the required alignment of the returned base number. | ||||
|  * @return returns the base of the allocated range. -1 return value indicates | ||||
|  * failure. | ||||
|  */ | ||||
| int cvmx_allocate_global_resource_range(struct global_resource_tag tag, u64 owner, int nelements, | ||||
| 					int alignment); | ||||
| 
 | ||||
| /*
 | ||||
|  * @INTERNAL | ||||
|  * Allocate nelements in the global resource range with the specified tag. | ||||
|  * The elements allocated need not be contiguous. It is assumed that prior to | ||||
|  * calling this the global resource range has already | ||||
|  * been created using cvmx_create_global_resource_range(). | ||||
|  * @param tag is the tag of the global resource range. | ||||
|  * @param nelements is the number of elements to be allocated. | ||||
|  * @param owner is a 64 bit number that identifies the owner of the allocated | ||||
|  * elements. | ||||
|  * @param allocated_elements returns indices of the allocated entries. | ||||
|  * @return returns 0 on success and -1 on failure. | ||||
|  */ | ||||
| int cvmx_resource_alloc_many(struct global_resource_tag tag, u64 owner, int nelements, | ||||
| 			     int allocated_elements[]); | ||||
| int cvmx_resource_alloc_reverse(struct global_resource_tag, u64 owner); | ||||
| /*
 | ||||
|  * @INTERNAL | ||||
|  * Reserve nelements starting from base in the global resource range with the | ||||
|  * specified tag. | ||||
|  * It is assumed that prior to calling this the global resource range has | ||||
|  * already been created using cvmx_create_global_resource_range(). | ||||
|  * @param tag is the tag of the global resource range. | ||||
|  * @param nelements is the number of elements to be allocated. | ||||
|  * @param owner is a 64 bit number that identifies the owner of this range. | ||||
|  * @param base specifies the base start of nelements. | ||||
|  * @return returns the base of the allocated range. -1 return value indicates | ||||
|  * failure. | ||||
|  */ | ||||
| int cvmx_reserve_global_resource_range(struct global_resource_tag tag, u64 owner, int base, | ||||
| 				       int nelements); | ||||
| /*
 | ||||
|  * @INTERNAL | ||||
|  * Free nelements starting at base in the global resource range with the | ||||
|  * specified tag. | ||||
|  * @param tag is the tag of the global resource range. | ||||
|  * @param base is the base number | ||||
|  * @param nelements is the number of elements that are to be freed. | ||||
|  * @return returns 0 if successful and -1 on failure. | ||||
|  */ | ||||
| int cvmx_free_global_resource_range_with_base(struct global_resource_tag tag, int base, | ||||
| 					      int nelements); | ||||
| 
 | ||||
| /*
 | ||||
|  * @INTERNAL | ||||
|  * Free nelements with the bases specified in bases[] with the | ||||
|  * specified tag. | ||||
|  * @param tag is the tag of the global resource range. | ||||
|  * @param bases is an array containing the bases to be freed. | ||||
|  * @param nelements is the number of elements that are to be freed. | ||||
|  * @return returns 0 if successful and -1 on failure. | ||||
|  */ | ||||
| int cvmx_free_global_resource_range_multiple(struct global_resource_tag tag, int bases[], | ||||
| 					     int nelements); | ||||
| /*
 | ||||
|  * @INTERNAL | ||||
|  * Free elements from the specified owner in the global resource range with the | ||||
|  * specified tag. | ||||
|  * @param tag is the tag of the global resource range. | ||||
|  * @param owner is the owner of resources that are to be freed. | ||||
|  * @return returns 0 if successful and -1 on failure. | ||||
|  */ | ||||
| int cvmx_free_global_resource_range_with_owner(struct global_resource_tag tag, int owner); | ||||
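| /* | ||||
|  * Illustrative usage sketch (an assumption, not from the original | ||||
|  * source): create a named range once, carve elements out of it, and | ||||
|  * free them by base. The owner id and sizes are hypothetical. | ||||
|  * | ||||
|  * @code | ||||
|  * struct global_resource_tag tag = CVMX_GR_TAG_FPA; | ||||
|  * int base; | ||||
|  * | ||||
|  * cvmx_create_global_resource_range(tag, 64); | ||||
|  * base = cvmx_allocate_global_resource_range(tag, 0x1234, 4, 1); | ||||
|  * if (base >= 0) | ||||
|  *	cvmx_free_global_resource_range_with_base(tag, base, 4); | ||||
|  * @endcode | ||||
|  */ | ||||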
| 
 | ||||
| /*
 | ||||
|  * @INTERNAL | ||||
|  * Frees all the global resources that have been created. | ||||
|  * For use only from the bootloader, when it shuts down and boots up the | ||||
|  * application or kernel. | ||||
|  */ | ||||
| int free_global_resources(void); | ||||
| 
 | ||||
| u64 cvmx_get_global_resource_owner(struct global_resource_tag tag, int base); | ||||
| /*
 | ||||
|  * @INTERNAL | ||||
|  * Shows the global resource range with the specified tag. Used mainly for debug. | ||||
|  */ | ||||
| void cvmx_show_global_resource_range(struct global_resource_tag tag); | ||||
| 
 | ||||
| /*
 | ||||
|  * @INTERNAL | ||||
|  * Shows all the global resources. Used mainly for debug. | ||||
|  */ | ||||
| void cvmx_global_resources_show(void); | ||||
| 
 | ||||
| u64 cvmx_allocate_app_id(void); | ||||
| u64 cvmx_get_app_id(void); | ||||
| 
 | ||||
| #endif | ||||
|  | @ -0,0 +1,16 @@ | |||
| /* SPDX-License-Identifier: GPL-2.0 */ | ||||
| /*
 | ||||
|  * Copyright (C) 2020 Marvell International Ltd. | ||||
|  * | ||||
|  * Interface to the GMX hardware. | ||||
|  */ | ||||
| 
 | ||||
| #ifndef __CVMX_GMX_H__ | ||||
| #define __CVMX_GMX_H__ | ||||
| 
 | ||||
| /* CSR typedefs have been moved to cvmx-gmx-defs.h */ | ||||
| 
 | ||||
| int cvmx_gmx_set_backpressure_override(u32 interface, u32 port_mask); | ||||
| int cvmx_agl_set_backpressure_override(u32 interface, u32 port_mask); | ||||
| 
 | ||||
| #endif | ||||
|  | @ -0,0 +1,606 @@ | |||
| /* SPDX-License-Identifier: GPL-2.0 */ | ||||
| /*
 | ||||
|  * Copyright (C) 2020 Marvell International Ltd. | ||||
|  * | ||||
|  * Interface to the hardware Fetch and Add Unit. | ||||
|  */ | ||||
| 
 | ||||
| /**
 | ||||
|  * @file | ||||
|  * | ||||
|  * Interface to the hardware Fetch and Add Unit. | ||||
|  * | ||||
|  */ | ||||
| 
 | ||||
| #ifndef __CVMX_HWFAU_H__ | ||||
| #define __CVMX_HWFAU_H__ | ||||
| 
 | ||||
| typedef int cvmx_fau_reg64_t; | ||||
| typedef int cvmx_fau_reg32_t; | ||||
| typedef int cvmx_fau_reg16_t; | ||||
| typedef int cvmx_fau_reg8_t; | ||||
| 
 | ||||
| #define CVMX_FAU_REG_ANY -1 | ||||
| 
 | ||||
| /*
 | ||||
|  * Octeon Fetch and Add Unit (FAU) | ||||
|  */ | ||||
| 
 | ||||
| #define CVMX_FAU_LOAD_IO_ADDRESS cvmx_build_io_address(0x1e, 0) | ||||
| #define CVMX_FAU_BITS_SCRADDR	 63, 56 | ||||
| #define CVMX_FAU_BITS_LEN	 55, 48 | ||||
| #define CVMX_FAU_BITS_INEVAL	 35, 14 | ||||
| #define CVMX_FAU_BITS_TAGWAIT	 13, 13 | ||||
| #define CVMX_FAU_BITS_NOADD	 13, 13 | ||||
| #define CVMX_FAU_BITS_SIZE	 12, 11 | ||||
| #define CVMX_FAU_BITS_REGISTER	 10, 0 | ||||
| 
 | ||||
| #define CVMX_FAU_MAX_REGISTERS_8 (2048) | ||||
| 
 | ||||
| typedef enum { | ||||
| 	CVMX_FAU_OP_SIZE_8 = 0, | ||||
| 	CVMX_FAU_OP_SIZE_16 = 1, | ||||
| 	CVMX_FAU_OP_SIZE_32 = 2, | ||||
| 	CVMX_FAU_OP_SIZE_64 = 3 | ||||
| } cvmx_fau_op_size_t; | ||||
| 
 | ||||
| /**
 | ||||
|  * Tagwait return definition. If a timeout occurs, the error | ||||
|  * bit will be set. Otherwise the value of the register before | ||||
|  * the update will be returned. | ||||
|  */ | ||||
| typedef struct { | ||||
| 	u64 error : 1; | ||||
| 	s64 value : 63; | ||||
| } cvmx_fau_tagwait64_t; | ||||
| 
 | ||||
| /**
 | ||||
|  * Tagwait return definition. If a timeout occurs, the error | ||||
|  * bit will be set. Otherwise the value of the register before | ||||
|  * the update will be returned. | ||||
|  */ | ||||
| typedef struct { | ||||
| 	u64 error : 1; | ||||
| 	s32 value : 31; | ||||
| } cvmx_fau_tagwait32_t; | ||||
| 
 | ||||
| /**
 | ||||
|  * Tagwait return definition. If a timeout occurs, the error | ||||
|  * bit will be set. Otherwise the value of the register before | ||||
|  * the update will be returned. | ||||
|  */ | ||||
| typedef struct { | ||||
| 	u64 error : 1; | ||||
| 	s16 value : 15; | ||||
| } cvmx_fau_tagwait16_t; | ||||
| 
 | ||||
| /**
 | ||||
|  * Tagwait return definition. If a timeout occurs, the error | ||||
|  * bit will be set. Otherwise the value of the register before | ||||
|  * the update will be returned. | ||||
|  */ | ||||
| typedef struct { | ||||
| 	u64 error : 1; | ||||
| 	int8_t value : 7; | ||||
| } cvmx_fau_tagwait8_t; | ||||
| 
 | ||||
| /**
 | ||||
|  * Asynchronous tagwait return definition. If a timeout occurs, | ||||
|  * the error bit will be set. Otherwise the value of the | ||||
|  * register before the update will be returned. | ||||
|  */ | ||||
| typedef union { | ||||
| 	u64 u64; | ||||
| 	struct { | ||||
| 		u64 invalid : 1; | ||||
| 		u64 data : 63; /* unpredictable if invalid is set */ | ||||
| 	} s; | ||||
| } cvmx_fau_async_tagwait_result_t; | ||||
| 
 | ||||
| #define SWIZZLE_8  0 | ||||
| #define SWIZZLE_16 0 | ||||
| #define SWIZZLE_32 0 | ||||
| 
 | ||||
| /**
 | ||||
|  * @INTERNAL | ||||
|  * Builds a store I/O address for writing to the FAU | ||||
|  * | ||||
|  * @param noadd  0 = Store value is atomically added to the current value | ||||
|  *               1 = Store value is atomically written over the current value | ||||
|  * @param reg    FAU atomic register to access. 0 <= reg < 2048. | ||||
|  *               - Step by 2 for 16 bit access. | ||||
|  *               - Step by 4 for 32 bit access. | ||||
|  *               - Step by 8 for 64 bit access. | ||||
|  * @return Address to store for atomic update | ||||
|  */ | ||||
| static inline u64 __cvmx_hwfau_store_address(u64 noadd, u64 reg) | ||||
| { | ||||
| 	return (CVMX_ADD_IO_SEG(CVMX_FAU_LOAD_IO_ADDRESS) | | ||||
| 		cvmx_build_bits(CVMX_FAU_BITS_NOADD, noadd) | | ||||
| 		cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg)); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * @INTERNAL | ||||
|  * Builds an I/O address for accessing the FAU | ||||
|  * | ||||
|  * @param tagwait Should the atomic add wait for the current tag switch | ||||
|  *                operation to complete. | ||||
|  *                - 0 = Don't wait | ||||
|  *                - 1 = Wait for tag switch to complete | ||||
|  * @param reg     FAU atomic register to access. 0 <= reg < 2048. | ||||
|  *                - Step by 2 for 16 bit access. | ||||
|  *                - Step by 4 for 32 bit access. | ||||
|  *                - Step by 8 for 64 bit access. | ||||
|  * @param value   Signed value to add. | ||||
|  *                Note: When performing 32 and 64 bit access, only the low | ||||
|  *                22 bits are available. | ||||
|  * @return Address to read from for atomic update | ||||
|  */ | ||||
| static inline u64 __cvmx_hwfau_atomic_address(u64 tagwait, u64 reg, s64 value) | ||||
| { | ||||
| 	return (CVMX_ADD_IO_SEG(CVMX_FAU_LOAD_IO_ADDRESS) | | ||||
| 		cvmx_build_bits(CVMX_FAU_BITS_INEVAL, value) | | ||||
| 		cvmx_build_bits(CVMX_FAU_BITS_TAGWAIT, tagwait) | | ||||
| 		cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg)); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Perform an atomic 64 bit add | ||||
|  * | ||||
|  * @param reg     FAU atomic register to access. 0 <= reg < 2048. | ||||
|  *                - Step by 8 for 64 bit access. | ||||
|  * @param value   Signed value to add. | ||||
|  *                Note: Only the low 22 bits are available. | ||||
|  * @return Value of the register before the update | ||||
|  */ | ||||
| static inline s64 cvmx_hwfau_fetch_and_add64(cvmx_fau_reg64_t reg, s64 value) | ||||
| { | ||||
| 	return cvmx_read64_int64(__cvmx_hwfau_atomic_address(0, reg, value)); | ||||
| } | ||||
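| /* | ||||
|  * Illustrative sketch (an assumption, not from the original source): | ||||
|  * using one 64-bit FAU register as a shared counter. Register 0 is a | ||||
|  * hypothetical choice; 64-bit registers step by 8. | ||||
|  * | ||||
|  * @code | ||||
|  * #define PKT_COUNT_REG ((cvmx_fau_reg64_t)0) | ||||
|  * | ||||
|  * s64 old = cvmx_hwfau_fetch_and_add64(PKT_COUNT_REG, 1); | ||||
|  * @endcode | ||||
|  */ | ||||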
| 
 | ||||
| /**
 | ||||
|  * Perform an atomic 32 bit add | ||||
|  * | ||||
|  * @param reg     FAU atomic register to access. 0 <= reg < 2048. | ||||
|  *                - Step by 4 for 32 bit access. | ||||
|  * @param value   Signed value to add. | ||||
|  *                Note: Only the low 22 bits are available. | ||||
|  * @return Value of the register before the update | ||||
|  */ | ||||
| static inline s32 cvmx_hwfau_fetch_and_add32(cvmx_fau_reg32_t reg, s32 value) | ||||
| { | ||||
| 	reg ^= SWIZZLE_32; | ||||
| 	return cvmx_read64_int32(__cvmx_hwfau_atomic_address(0, reg, value)); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Perform an atomic 16 bit add | ||||
|  * | ||||
|  * @param reg     FAU atomic register to access. 0 <= reg < 2048. | ||||
|  *                - Step by 2 for 16 bit access. | ||||
|  * @param value   Signed value to add. | ||||
|  * @return Value of the register before the update | ||||
|  */ | ||||
| static inline s16 cvmx_hwfau_fetch_and_add16(cvmx_fau_reg16_t reg, s16 value) | ||||
| { | ||||
| 	reg ^= SWIZZLE_16; | ||||
| 	return cvmx_read64_int16(__cvmx_hwfau_atomic_address(0, reg, value)); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Perform an atomic 8 bit add | ||||
|  * | ||||
|  * @param reg     FAU atomic register to access. 0 <= reg < 2048. | ||||
|  * @param value   Signed value to add. | ||||
|  * @return Value of the register before the update | ||||
|  */ | ||||
| static inline int8_t cvmx_hwfau_fetch_and_add8(cvmx_fau_reg8_t reg, int8_t value) | ||||
| { | ||||
| 	reg ^= SWIZZLE_8; | ||||
| 	return cvmx_read64_int8(__cvmx_hwfau_atomic_address(0, reg, value)); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Perform an atomic 64 bit add after the current tag switch | ||||
|  * completes | ||||
|  * | ||||
|  * @param reg    FAU atomic register to access. 0 <= reg < 2048. | ||||
|  *               - Step by 8 for 64 bit access. | ||||
|  * @param value  Signed value to add. | ||||
|  *               Note: Only the low 22 bits are available. | ||||
|  * @return If a timeout occurs, the error bit will be set. Otherwise | ||||
|  *         the value of the register before the update will be | ||||
|  *         returned | ||||
|  */ | ||||
| static inline cvmx_fau_tagwait64_t cvmx_hwfau_tagwait_fetch_and_add64(cvmx_fau_reg64_t reg, | ||||
| 								      s64 value) | ||||
| { | ||||
| 	union { | ||||
| 		u64 i64; | ||||
| 		cvmx_fau_tagwait64_t t; | ||||
| 	} result; | ||||
| 	result.i64 = cvmx_read64_int64(__cvmx_hwfau_atomic_address(1, reg, value)); | ||||
| 	return result.t; | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Perform an atomic 32 bit add after the current tag switch | ||||
|  * completes | ||||
|  * | ||||
|  * @param reg    FAU atomic register to access. 0 <= reg < 2048. | ||||
|  *               - Step by 4 for 32 bit access. | ||||
|  * @param value  Signed value to add. | ||||
|  *               Note: Only the low 22 bits are available. | ||||
|  * @return If a timeout occurs, the error bit will be set. Otherwise | ||||
|  *         the value of the register before the update will be | ||||
|  *         returned | ||||
|  */ | ||||
| static inline cvmx_fau_tagwait32_t cvmx_hwfau_tagwait_fetch_and_add32(cvmx_fau_reg32_t reg, | ||||
| 								      s32 value) | ||||
| { | ||||
| 	union { | ||||
| 		u64 i32; | ||||
| 		cvmx_fau_tagwait32_t t; | ||||
| 	} result; | ||||
| 	reg ^= SWIZZLE_32; | ||||
| 	result.i32 = cvmx_read64_int32(__cvmx_hwfau_atomic_address(1, reg, value)); | ||||
| 	return result.t; | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Perform an atomic 16 bit add after the current tag switch | ||||
|  * completes | ||||
|  * | ||||
|  * @param reg    FAU atomic register to access. 0 <= reg < 2048. | ||||
|  *               - Step by 2 for 16 bit access. | ||||
|  * @param value  Signed value to add. | ||||
|  * @return If a timeout occurs, the error bit will be set. Otherwise | ||||
|  *         the value of the register before the update will be | ||||
|  *         returned | ||||
|  */ | ||||
| static inline cvmx_fau_tagwait16_t cvmx_hwfau_tagwait_fetch_and_add16(cvmx_fau_reg16_t reg, | ||||
| 								      s16 value) | ||||
| { | ||||
| 	union { | ||||
| 		u64 i16; | ||||
| 		cvmx_fau_tagwait16_t t; | ||||
| 	} result; | ||||
| 	reg ^= SWIZZLE_16; | ||||
| 	result.i16 = cvmx_read64_int16(__cvmx_hwfau_atomic_address(1, reg, value)); | ||||
| 	return result.t; | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Perform an atomic 8 bit add after the current tag switch | ||||
|  * completes | ||||
|  * | ||||
|  * @param reg    FAU atomic register to access. 0 <= reg < 2048. | ||||
|  * @param value  Signed value to add. | ||||
|  * @return If a timeout occurs, the error bit will be set. Otherwise | ||||
|  *         the value of the register before the update will be | ||||
|  *         returned | ||||
|  */ | ||||
| static inline cvmx_fau_tagwait8_t cvmx_hwfau_tagwait_fetch_and_add8(cvmx_fau_reg8_t reg, | ||||
| 								    int8_t value) | ||||
| { | ||||
| 	union { | ||||
| 		u64 i8; | ||||
| 		cvmx_fau_tagwait8_t t; | ||||
| 	} result; | ||||
| 	reg ^= SWIZZLE_8; | ||||
| 	result.i8 = cvmx_read64_int8(__cvmx_hwfau_atomic_address(1, reg, value)); | ||||
| 	return result.t; | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * @INTERNAL | ||||
|  * Builds I/O data for async operations | ||||
|  * | ||||
|  * @param scraddr Scratch pad byte address to write to.  Must be 8 byte aligned | ||||
|  * @param value   Signed value to add. | ||||
|  *                Note: When performing 32 and 64 bit access, only the low | ||||
|  *                22 bits are available. | ||||
|  * @param tagwait Should the atomic add wait for the current tag switch | ||||
|  *                operation to complete. | ||||
|  *                - 0 = Don't wait | ||||
|  *                - 1 = Wait for tag switch to complete | ||||
|  * @param size    The size of the operation: | ||||
|  *                - CVMX_FAU_OP_SIZE_8  (0) = 8 bits | ||||
|  *                - CVMX_FAU_OP_SIZE_16 (1) = 16 bits | ||||
|  *                - CVMX_FAU_OP_SIZE_32 (2) = 32 bits | ||||
|  *                - CVMX_FAU_OP_SIZE_64 (3) = 64 bits | ||||
|  * @param reg     FAU atomic register to access. 0 <= reg < 2048. | ||||
|  *                - Step by 2 for 16 bit access. | ||||
|  *                - Step by 4 for 32 bit access. | ||||
|  *                - Step by 8 for 64 bit access. | ||||
|  * @return Data to write using cvmx_send_single | ||||
|  */ | ||||
| static inline u64 __cvmx_fau_iobdma_data(u64 scraddr, s64 value, u64 tagwait, | ||||
| 					 cvmx_fau_op_size_t size, u64 reg) | ||||
| { | ||||
| 	return (CVMX_FAU_LOAD_IO_ADDRESS | cvmx_build_bits(CVMX_FAU_BITS_SCRADDR, scraddr >> 3) | | ||||
| 		cvmx_build_bits(CVMX_FAU_BITS_LEN, 1) | | ||||
| 		cvmx_build_bits(CVMX_FAU_BITS_INEVAL, value) | | ||||
| 		cvmx_build_bits(CVMX_FAU_BITS_TAGWAIT, tagwait) | | ||||
| 		cvmx_build_bits(CVMX_FAU_BITS_SIZE, size) | | ||||
| 		cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg)); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Perform an async atomic 64 bit add. The old value is | ||||
|  * placed in the scratch memory at byte address scraddr. | ||||
|  * | ||||
|  * @param scraddr Scratch memory byte address to put response in. | ||||
|  *                Must be 8 byte aligned. | ||||
|  * @param reg     FAU atomic register to access. 0 <= reg < 2048. | ||||
|  *                - Step by 8 for 64 bit access. | ||||
|  * @param value   Signed value to add. | ||||
|  *                Note: Only the low 22 bits are available. | ||||
|  * @return Placed in the scratch pad register | ||||
|  */ | ||||
| static inline void cvmx_hwfau_async_fetch_and_add64(u64 scraddr, cvmx_fau_reg64_t reg, s64 value) | ||||
| { | ||||
| 	cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 0, CVMX_FAU_OP_SIZE_64, reg)); | ||||
| } | ||||
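| /* | ||||
|  * Illustrative sketch (an assumption, not from the original source): | ||||
|  * retrieving the result of the async add. The scratch offset (8), | ||||
|  * 'reg' and do_other_work() are hypothetical; CVMX_SYNCIOBDMA must | ||||
|  * complete before the scratch line is read, mirroring the async FPA | ||||
|  * allocation pattern. | ||||
|  * | ||||
|  * @code | ||||
|  * cvmx_hwfau_async_fetch_and_add64(8, reg, 1); | ||||
|  * do_other_work();			// overlap useful work | ||||
|  * CVMX_SYNCIOBDMA; | ||||
|  * s64 old = (s64)cvmx_scratch_read64(8); | ||||
|  * @endcode | ||||
|  */ | ||||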
| 
 | ||||
| /**
 | ||||
|  * Perform an async atomic 32 bit add. The old value is | ||||
|  * placed in the scratch memory at byte address scraddr. | ||||
|  * | ||||
|  * @param scraddr Scratch memory byte address to put response in. | ||||
|  *                Must be 8 byte aligned. | ||||
|  * @param reg     FAU atomic register to access. 0 <= reg < 2048. | ||||
|  *                - Step by 4 for 32 bit access. | ||||
|  * @param value   Signed value to add. | ||||
|  *                Note: Only the low 22 bits are available. | ||||
|  * @return Placed in the scratch pad register | ||||
|  */ | ||||
| static inline void cvmx_hwfau_async_fetch_and_add32(u64 scraddr, cvmx_fau_reg32_t reg, s32 value) | ||||
| { | ||||
| 	cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 0, CVMX_FAU_OP_SIZE_32, reg)); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Perform an async atomic 16 bit add. The old value is | ||||
|  * placed in the scratch memory at byte address scraddr. | ||||
|  * | ||||
|  * @param scraddr Scratch memory byte address to put response in. | ||||
|  *                Must be 8 byte aligned. | ||||
|  * @param reg     FAU atomic register to access. 0 <= reg < 2048. | ||||
|  *                - Step by 2 for 16 bit access. | ||||
|  * @param value   Signed value to add. | ||||
|  * @return Placed in the scratch pad register | ||||
|  */ | ||||
| static inline void cvmx_hwfau_async_fetch_and_add16(u64 scraddr, cvmx_fau_reg16_t reg, s16 value) | ||||
| { | ||||
| 	cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 0, CVMX_FAU_OP_SIZE_16, reg)); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Perform an async atomic 8 bit add. The old value is | ||||
|  * placed in the scratch memory at byte address scraddr. | ||||
|  * | ||||
|  * @param scraddr Scratch memory byte address to put response in. | ||||
|  *                Must be 8 byte aligned. | ||||
|  * @param reg     FAU atomic register to access. 0 <= reg < 2048. | ||||
|  * @param value   Signed value to add. | ||||
|  * @return Placed in the scratch pad register | ||||
|  */ | ||||
| static inline void cvmx_hwfau_async_fetch_and_add8(u64 scraddr, cvmx_fau_reg8_t reg, int8_t value) | ||||
| { | ||||
| 	cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 0, CVMX_FAU_OP_SIZE_8, reg)); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Perform an async atomic 64 bit add after the current tag | ||||
|  * switch completes. | ||||
|  * | ||||
|  * @param scraddr Scratch memory byte address to put response in. | ||||
|  *                Must be 8 byte aligned. | ||||
|  *                If a timeout occurs, the error bit (63) will be set. Otherwise | ||||
|  *                the value of the register before the update will be | ||||
|  *                returned. | ||||
|  * @param reg     FAU atomic register to access. 0 <= reg < 2048. | ||||
|  *                - Step by 8 for 64 bit access. | ||||
|  * @param value   Signed value to add. | ||||
|  *                Note: Only the low 22 bits are available. | ||||
|  * @return Placed in the scratch pad register | ||||
|  */ | ||||
| static inline void cvmx_hwfau_async_tagwait_fetch_and_add64(u64 scraddr, cvmx_fau_reg64_t reg, | ||||
| 							    s64 value) | ||||
| { | ||||
| 	cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 1, CVMX_FAU_OP_SIZE_64, reg)); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Perform an async atomic 32 bit add after the current tag | ||||
|  * switch completes. | ||||
|  * | ||||
|  * @param scraddr Scratch memory byte address to put response in. | ||||
|  *                Must be 8 byte aligned. | ||||
|  *                If a timeout occurs, the error bit (63) will be set. Otherwise | ||||
|  *                the value of the register before the update will be | ||||
|  *                returned. | ||||
|  * @param reg     FAU atomic register to access. 0 <= reg < 2048. | ||||
|  *                - Step by 4 for 32 bit access. | ||||
|  * @param value   Signed value to add. | ||||
|  *                Note: Only the low 22 bits are available. | ||||
|  * @return Placed in the scratch pad register | ||||
|  */ | ||||
| static inline void cvmx_hwfau_async_tagwait_fetch_and_add32(u64 scraddr, cvmx_fau_reg32_t reg, | ||||
| 							    s32 value) | ||||
| { | ||||
| 	cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 1, CVMX_FAU_OP_SIZE_32, reg)); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Perform an async atomic 16 bit add after the current tag | ||||
|  * switch completes. | ||||
|  * | ||||
|  * @param scraddr Scratch memory byte address to put response in. | ||||
|  *                Must be 8 byte aligned. | ||||
|  *                If a timeout occurs, the error bit (63) will be set. Otherwise | ||||
|  *                the value of the register before the update will be | ||||
|  *                returned. | ||||
|  * @param reg     FAU atomic register to access. 0 <= reg < 2048. | ||||
|  *                - Step by 2 for 16 bit access. | ||||
|  * @param value   Signed value to add. | ||||
|  * @return Placed in the scratch pad register | ||||
|  */ | ||||
| static inline void cvmx_hwfau_async_tagwait_fetch_and_add16(u64 scraddr, cvmx_fau_reg16_t reg, | ||||
| 							    s16 value) | ||||
| { | ||||
| 	cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 1, CVMX_FAU_OP_SIZE_16, reg)); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Perform an async atomic 8 bit add after the current tag | ||||
|  * switch completes. | ||||
|  * | ||||
|  * @param scraddr Scratch memory byte address to put response in. | ||||
|  *                Must be 8 byte aligned. | ||||
|  *                If a timeout occurs, the error bit (63) will be set. Otherwise | ||||
|  *                the value of the register before the update will be | ||||
|  *                returned. | ||||
|  * @param reg     FAU atomic register to access. 0 <= reg < 2048. | ||||
|  * @param value   Signed value to add. | ||||
|  * @return Placed in the scratch pad register | ||||
|  */ | ||||
| static inline void cvmx_hwfau_async_tagwait_fetch_and_add8(u64 scraddr, cvmx_fau_reg8_t reg, | ||||
| 							   int8_t value) | ||||
| { | ||||
| 	cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 1, CVMX_FAU_OP_SIZE_8, reg)); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Perform an atomic 64 bit add | ||||
|  * | ||||
|  * @param reg     FAU atomic register to access. 0 <= reg < 2048. | ||||
|  *                - Step by 8 for 64 bit access. | ||||
|  * @param value   Signed value to add. | ||||
|  */ | ||||
| static inline void cvmx_hwfau_atomic_add64(cvmx_fau_reg64_t reg, s64 value) | ||||
| { | ||||
| 	cvmx_write64_int64(__cvmx_hwfau_store_address(0, reg), value); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Perform an atomic 32 bit add | ||||
|  * | ||||
|  * @param reg     FAU atomic register to access. 0 <= reg < 2048. | ||||
|  *                - Step by 4 for 32 bit access. | ||||
|  * @param value   Signed value to add. | ||||
|  */ | ||||
| static inline void cvmx_hwfau_atomic_add32(cvmx_fau_reg32_t reg, s32 value) | ||||
| { | ||||
| 	reg ^= SWIZZLE_32; | ||||
| 	cvmx_write64_int32(__cvmx_hwfau_store_address(0, reg), value); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Perform an atomic 16 bit add | ||||
|  * | ||||
|  * @param reg     FAU atomic register to access. 0 <= reg < 2048. | ||||
|  *                - Step by 2 for 16 bit access. | ||||
|  * @param value   Signed value to add. | ||||
|  */ | ||||
| static inline void cvmx_hwfau_atomic_add16(cvmx_fau_reg16_t reg, s16 value) | ||||
| { | ||||
| 	reg ^= SWIZZLE_16; | ||||
| 	cvmx_write64_int16(__cvmx_hwfau_store_address(0, reg), value); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Perform an atomic 8 bit add | ||||
|  * | ||||
|  * @param reg     FAU atomic register to access. 0 <= reg < 2048. | ||||
|  * @param value   Signed value to add. | ||||
|  */ | ||||
| static inline void cvmx_hwfau_atomic_add8(cvmx_fau_reg8_t reg, int8_t value) | ||||
| { | ||||
| 	reg ^= SWIZZLE_8; | ||||
| 	cvmx_write64_int8(__cvmx_hwfau_store_address(0, reg), value); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Perform an atomic 64 bit write | ||||
|  * | ||||
|  * @param reg     FAU atomic register to access. 0 <= reg < 2048. | ||||
|  *                - Step by 8 for 64 bit access. | ||||
|  * @param value   Signed value to write. | ||||
|  */ | ||||
| static inline void cvmx_hwfau_atomic_write64(cvmx_fau_reg64_t reg, s64 value) | ||||
| { | ||||
| 	cvmx_write64_int64(__cvmx_hwfau_store_address(1, reg), value); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Perform an atomic 32 bit write | ||||
|  * | ||||
|  * @param reg     FAU atomic register to access. 0 <= reg < 2048. | ||||
|  *                - Step by 4 for 32 bit access. | ||||
|  * @param value   Signed value to write. | ||||
|  */ | ||||
| static inline void cvmx_hwfau_atomic_write32(cvmx_fau_reg32_t reg, s32 value) | ||||
| { | ||||
| 	reg ^= SWIZZLE_32; | ||||
| 	cvmx_write64_int32(__cvmx_hwfau_store_address(1, reg), value); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Perform an atomic 16 bit write | ||||
|  * | ||||
|  * @param reg     FAU atomic register to access. 0 <= reg < 2048. | ||||
|  *                - Step by 2 for 16 bit access. | ||||
|  * @param value   Signed value to write. | ||||
|  */ | ||||
| static inline void cvmx_hwfau_atomic_write16(cvmx_fau_reg16_t reg, s16 value) | ||||
| { | ||||
| 	reg ^= SWIZZLE_16; | ||||
| 	cvmx_write64_int16(__cvmx_hwfau_store_address(1, reg), value); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Perform an atomic 8 bit write | ||||
|  * | ||||
|  * @param reg     FAU atomic register to access. 0 <= reg < 2048. | ||||
|  * @param value   Signed value to write. | ||||
|  */ | ||||
| static inline void cvmx_hwfau_atomic_write8(cvmx_fau_reg8_t reg, int8_t value) | ||||
| { | ||||
| 	reg ^= SWIZZLE_8; | ||||
| 	cvmx_write64_int8(__cvmx_hwfau_store_address(1, reg), value); | ||||
| } | ||||
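| 
 | ||||
| /* Usage sketch (illustrative): a counter shared across cores, kept in an
 | ||||
|  * FAU register. The register number is hypothetical; 64 bit registers | ||||
|  * step by 8: | ||||
|  * | ||||
|  *	cvmx_fau_reg64_t cnt = 0;		// hypothetical register | ||||
|  * | ||||
|  *	cvmx_hwfau_atomic_write64(cnt, 0);	// reset once at init | ||||
|  *	cvmx_hwfau_atomic_add64(cnt, 1);	// safe from any core | ||||
|  */ | ||||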
| 
 | ||||
| /** Allocates a 64-bit FAU register.
 | ||||
|  *  @return the base address of the allocated FAU register | ||||
|  */ | ||||
| int cvmx_fau64_alloc(int reserve); | ||||
| 
 | ||||
| /** Allocates a 32-bit FAU register.
 | ||||
|  *  @return the base address of the allocated FAU register | ||||
|  */ | ||||
| int cvmx_fau32_alloc(int reserve); | ||||
| 
 | ||||
| /** Allocates a 16-bit FAU register.
 | ||||
|  *  @return the base address of the allocated FAU register | ||||
|  */ | ||||
| int cvmx_fau16_alloc(int reserve); | ||||
| 
 | ||||
| /** Allocates an 8-bit FAU register.
 | ||||
|  *  @return the base address of the allocated FAU register | ||||
|  */ | ||||
| int cvmx_fau8_alloc(int reserve); | ||||
| 
 | ||||
| /** Frees the specified FAU register.
 | ||||
|  *  @param address Base address of register to release. | ||||
|  *  @return 0 on success; -1 on failure | ||||
|  */ | ||||
| int cvmx_fau_free(int address); | ||||
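| 
 | ||||
| /* Usage sketch (illustrative): pair each allocation with a free. The exact
 | ||||
|  * meaning of the reserve flag is an assumption here; a negative return | ||||
|  * indicates allocation failure: | ||||
|  * | ||||
|  *	int reg = cvmx_fau64_alloc(0);	// 0: no reservation (assumed) | ||||
|  * | ||||
|  *	if (reg >= 0) { | ||||
|  *		cvmx_hwfau_atomic_write64(reg, 0); | ||||
|  *		...			// use the register | ||||
|  *		cvmx_fau_free(reg); | ||||
|  *	} | ||||
|  */ | ||||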
| 
 | ||||
| /** Display the FAU registers array.
 | ||||
|  */ | ||||
| void cvmx_fau_show(void); | ||||
| 
 | ||||
| #endif /* __CVMX_HWFAU_H__ */ | ||||
|  | @ -0,0 +1,570 @@ | |||
| /* SPDX-License-Identifier: GPL-2.0 */ | ||||
| /*
 | ||||
|  * Copyright (C) 2020 Marvell International Ltd. | ||||
|  * | ||||
|  * Interface to the hardware Packet Output unit. | ||||
|  * | ||||
|  * Starting with SDK 1.7.0, the PKO output functions now support | ||||
|  * two types of locking. CVMX_PKO_LOCK_ATOMIC_TAG continues to | ||||
|  * function similarly to previous SDKs by using POW atomic tags | ||||
|  * to preserve ordering and exclusivity. As a new option, you | ||||
|  * can now pass CVMX_PKO_LOCK_CMD_QUEUE, which uses ll/sc | ||||
|  * memory based locking instead. This locking has the advantage | ||||
|  * of not affecting the tag state but doesn't preserve packet | ||||
|  * ordering. CVMX_PKO_LOCK_CMD_QUEUE is appropriate in most | ||||
|  * generic code, while CVMX_PKO_LOCK_NONE should be used | ||||
|  * with hand tuned fast path code. | ||||
|  * | ||||
|  * Some other SDK differences are visible with the command | ||||
|  * queuing: | ||||
|  * - PKO indexes are no longer stored in the FAU. A large | ||||
|  *   percentage of the FAU register block used to be tied up | ||||
|  *   maintaining PKO queue pointers. These are now stored in a | ||||
|  *   global named block. | ||||
|  * - The PKO <b>use_locking</b> parameter can now have a global | ||||
|  *   effect. Since all applications use the same named block, | ||||
|  *   queue locking correctly applies across all operating | ||||
|  *   systems when using CVMX_PKO_LOCK_CMD_QUEUE. | ||||
|  * - PKO 3 word commands are now supported. Use | ||||
|  *   cvmx_pko_send_packet_finish3(). | ||||
|  */ | ||||
| 
 | ||||
| #ifndef __CVMX_HWPKO_H__ | ||||
| #define __CVMX_HWPKO_H__ | ||||
| 
 | ||||
| #include "cvmx-hwfau.h" | ||||
| #include "cvmx-fpa.h" | ||||
| #include "cvmx-pow.h" | ||||
| #include "cvmx-cmd-queue.h" | ||||
| #include "cvmx-helper.h" | ||||
| #include "cvmx-helper-util.h" | ||||
| #include "cvmx-helper-cfg.h" | ||||
| 
 | ||||
| /* Adjust the command buffer size by 1 word so that in the case of using only
 | ||||
| ** two word PKO commands no command words straddle buffers.  The useful values | ||||
| ** for this are 0 and 1. */ | ||||
| #define CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST (1) | ||||
| 
 | ||||
| #define CVMX_PKO_MAX_OUTPUT_QUEUES_STATIC 256 | ||||
| #define CVMX_PKO_MAX_OUTPUT_QUEUES                                                                 \ | ||||
| 	((OCTEON_IS_OCTEON2() || OCTEON_IS_MODEL(OCTEON_CN70XX)) ? 256 : 128) | ||||
| #define CVMX_PKO_NUM_OUTPUT_PORTS                                                                  \ | ||||
| 	((OCTEON_IS_MODEL(OCTEON_CN63XX)) ? 44 : (OCTEON_IS_MODEL(OCTEON_CN66XX) ? 48 : 40)) | ||||
| #define CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID 63 | ||||
| #define CVMX_PKO_QUEUE_STATIC_PRIORITY	    9 | ||||
| #define CVMX_PKO_ILLEGAL_QUEUE		    0xFFFF | ||||
| #define CVMX_PKO_MAX_QUEUE_DEPTH	    0 | ||||
| 
 | ||||
| typedef enum { | ||||
| 	CVMX_PKO_SUCCESS, | ||||
| 	CVMX_PKO_INVALID_PORT, | ||||
| 	CVMX_PKO_INVALID_QUEUE, | ||||
| 	CVMX_PKO_INVALID_PRIORITY, | ||||
| 	CVMX_PKO_NO_MEMORY, | ||||
| 	CVMX_PKO_PORT_ALREADY_SETUP, | ||||
| 	CVMX_PKO_CMD_QUEUE_INIT_ERROR | ||||
| } cvmx_pko_return_value_t; | ||||
| 
 | ||||
| /**
 | ||||
|  * This enumeration represents the different locking modes supported by PKO. | ||||
|  */ | ||||
| typedef enum { | ||||
| 	CVMX_PKO_LOCK_NONE = 0, | ||||
| 	CVMX_PKO_LOCK_ATOMIC_TAG = 1, | ||||
| 	CVMX_PKO_LOCK_CMD_QUEUE = 2, | ||||
| } cvmx_pko_lock_t; | ||||
| 
 | ||||
| typedef struct cvmx_pko_port_status { | ||||
| 	u32 packets; | ||||
| 	u64 octets; | ||||
| 	u64 doorbell; | ||||
| } cvmx_pko_port_status_t; | ||||
| 
 | ||||
| /**
 | ||||
|  * This structure defines the address to use on a packet enqueue | ||||
|  */ | ||||
| typedef union { | ||||
| 	u64 u64; | ||||
| 	struct { | ||||
| 		cvmx_mips_space_t mem_space : 2; | ||||
| 		u64 reserved : 13; | ||||
| 		u64 is_io : 1; | ||||
| 		u64 did : 8; | ||||
| 		u64 reserved2 : 4; | ||||
| 		u64 reserved3 : 15; | ||||
| 		u64 port : 9; | ||||
| 		u64 queue : 9; | ||||
| 		u64 reserved4 : 3; | ||||
| 	} s; | ||||
| } cvmx_pko_doorbell_address_t; | ||||
| 
 | ||||
| /**
 | ||||
|  * Structure of the first packet output command word. | ||||
|  */ | ||||
| typedef union { | ||||
| 	u64 u64; | ||||
| 	struct { | ||||
| 		cvmx_fau_op_size_t size1 : 2; | ||||
| 		cvmx_fau_op_size_t size0 : 2; | ||||
| 		u64 subone1 : 1; | ||||
| 		u64 reg1 : 11; | ||||
| 		u64 subone0 : 1; | ||||
| 		u64 reg0 : 11; | ||||
| 		u64 le : 1; | ||||
| 		u64 n2 : 1; | ||||
| 		u64 wqp : 1; | ||||
| 		u64 rsp : 1; | ||||
| 		u64 gather : 1; | ||||
| 		u64 ipoffp1 : 7; | ||||
| 		u64 ignore_i : 1; | ||||
| 		u64 dontfree : 1; | ||||
| 		u64 segs : 6; | ||||
| 		u64 total_bytes : 16; | ||||
| 	} s; | ||||
| } cvmx_pko_command_word0_t; | ||||
| 
 | ||||
| /**
 | ||||
|  * Call before any other calls to initialize the packet | ||||
|  * output system. | ||||
|  */ | ||||
| 
 | ||||
| void cvmx_pko_hw_init(u8 pool, unsigned int bufsize); | ||||
| 
 | ||||
| /**
 | ||||
|  * Enables the packet output hardware. It must already be | ||||
|  * configured. | ||||
|  */ | ||||
| void cvmx_pko_enable(void); | ||||
| 
 | ||||
| /**
 | ||||
|  * Disables the packet output. Does not affect any configuration. | ||||
|  */ | ||||
| void cvmx_pko_disable(void); | ||||
| 
 | ||||
| /**
 | ||||
|  * Shutdown and free resources required by packet output. | ||||
|  */ | ||||
| 
 | ||||
| void cvmx_pko_shutdown(void); | ||||
| 
 | ||||
| /**
 | ||||
|  * Configure an output port and the associated queues for use. | ||||
|  * | ||||
|  * @param port       Port to configure. | ||||
|  * @param base_queue First queue number to associate with this port. | ||||
|  * @param num_queues Number of queues to associate with this port. | ||||
|  * @param priority   Array of priority levels for each queue. Values are | ||||
|  *                   allowed to be 1-8. A value of 8 gets 8 times the traffic | ||||
|  *                   of a value of 1. There must be num_queues elements in the | ||||
|  *                   array. | ||||
|  */ | ||||
| cvmx_pko_return_value_t cvmx_pko_config_port(int port, int base_queue, int num_queues, | ||||
| 					     const u8 priority[]); | ||||
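| 
 | ||||
| /* Configuration sketch (hypothetical port and queue numbers): give one port
 | ||||
|  * two queues, the first carrying eight times the traffic of the second: | ||||
|  * | ||||
|  *	static const u8 prio[2] = { 8, 1 }; | ||||
|  * | ||||
|  *	if (cvmx_pko_config_port(2, 0, 2, prio) != CVMX_PKO_SUCCESS) | ||||
|  *		debug("PKO port configuration failed\n"); | ||||
|  */ | ||||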
| 
 | ||||
| /**
 | ||||
|  * Ring the packet output doorbell. This tells the packet | ||||
|  * output hardware that "len" command words have been added | ||||
|  * to its pending list.  This command includes the required | ||||
|  * CVMX_SYNCWS before the doorbell ring. | ||||
|  * | ||||
|  * WARNING: This function may have to look up the proper PKO port in | ||||
|  * the IPD port to PKO port map, and is thus slower than calling | ||||
|  * cvmx_pko_doorbell_pkoid() directly if the PKO port identifier is | ||||
|  * known. | ||||
|  * | ||||
|  * @param ipd_port   The IPD port corresponding to the PKO port the packet is for | ||||
|  * @param queue  Queue the packet is for | ||||
|  * @param len    Length of the command in 64 bit words | ||||
|  */ | ||||
| static inline void cvmx_pko_doorbell(u64 ipd_port, u64 queue, u64 len) | ||||
| { | ||||
| 	cvmx_pko_doorbell_address_t ptr; | ||||
| 	u64 pko_port; | ||||
| 
 | ||||
| 	pko_port = ipd_port; | ||||
| 	if (octeon_has_feature(OCTEON_FEATURE_PKND)) | ||||
| 		pko_port = cvmx_helper_cfg_ipd2pko_port_base(ipd_port); | ||||
| 
 | ||||
| 	ptr.u64 = 0; | ||||
| 	ptr.s.mem_space = CVMX_IO_SEG; | ||||
| 	ptr.s.did = CVMX_OCT_DID_PKT_SEND; | ||||
| 	ptr.s.is_io = 1; | ||||
| 	ptr.s.port = pko_port; | ||||
| 	ptr.s.queue = queue; | ||||
| 	/* Need to make sure output queue data is in DRAM before doorbell write */ | ||||
| 	CVMX_SYNCWS; | ||||
| 	cvmx_write_io(ptr.u64, len); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Prepare to send a packet.  This may initiate a tag switch to | ||||
|  * get exclusive access to the output queue structure, and | ||||
|  * performs other prep work for the packet send operation. | ||||
|  * | ||||
|  * cvmx_pko_send_packet_finish() MUST be called after this function is called, | ||||
|  * and must be called with the same port/queue/use_locking arguments. | ||||
|  * | ||||
|  * The use_locking parameter allows the caller to use three | ||||
|  * possible locking modes. | ||||
|  * - CVMX_PKO_LOCK_NONE | ||||
|  *      - PKO doesn't do any locking. It is the responsibility | ||||
|  *          of the application to make sure that no other core | ||||
|  *          is accessing the same queue at the same time. | ||||
|  * - CVMX_PKO_LOCK_ATOMIC_TAG | ||||
|  *      - PKO performs an atomic tag switch to ensure exclusive | ||||
|  *          access to the output queue. This will maintain | ||||
|  *          packet ordering on output. | ||||
|  * - CVMX_PKO_LOCK_CMD_QUEUE | ||||
|  *      - PKO uses the common command queue locks to ensure | ||||
|  *          exclusive access to the output queue. This is a | ||||
|  *          memory based ll/sc. This is the most portable | ||||
|  *          locking mechanism. | ||||
|  * | ||||
|  * NOTE: If atomic locking is used, the POW entry CANNOT be | ||||
|  * descheduled, as it does not contain a valid WQE pointer. | ||||
|  * | ||||
|  * @param port   Port to send it on, this can be either IPD port or PKO | ||||
|  *		 port. | ||||
|  * @param queue  Queue to use | ||||
|  * @param use_locking | ||||
|  *               CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or CVMX_PKO_LOCK_CMD_QUEUE | ||||
|  */ | ||||
| static inline void cvmx_pko_send_packet_prepare(u64 port __attribute__((unused)), u64 queue, | ||||
| 						cvmx_pko_lock_t use_locking) | ||||
| { | ||||
| 	if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG) { | ||||
| 		/*
 | ||||
| 		 * Must do a full switch here to handle all cases.  We use a | ||||
| 		 * fake WQE pointer, as the POW does not access this memory. | ||||
| 		 * The WQE pointer and group are only used if this work is | ||||
| 		 * descheduled, which is not supported by the | ||||
| 		 * cvmx_pko_send_packet_prepare/cvmx_pko_send_packet_finish | ||||
| 		 * combination. Note that this is a special case in which these | ||||
| 		 * fake values can be used - this is not a general technique. | ||||
| 		 */ | ||||
| 		u32 tag = CVMX_TAG_SW_BITS_INTERNAL << CVMX_TAG_SW_SHIFT | | ||||
| 			  CVMX_TAG_SUBGROUP_PKO << CVMX_TAG_SUBGROUP_SHIFT | | ||||
| 			  (CVMX_TAG_SUBGROUP_MASK & queue); | ||||
| 		cvmx_pow_tag_sw_full((cvmx_wqe_t *)cvmx_phys_to_ptr(0x80), tag, | ||||
| 				     CVMX_POW_TAG_TYPE_ATOMIC, 0); | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| #define cvmx_pko_send_packet_prepare_pkoid cvmx_pko_send_packet_prepare | ||||
| 
 | ||||
| /**
 | ||||
|  * Complete packet output. cvmx_pko_send_packet_prepare() must be called exactly once before this, | ||||
|  * and the same parameters must be passed to both cvmx_pko_send_packet_prepare() and | ||||
|  * cvmx_pko_send_packet_finish(). | ||||
|  * | ||||
|  * WARNING: This function may have to look up the proper PKO port in | ||||
|  * the IPD port to PKO port map, and is thus slower than calling | ||||
|  * cvmx_pko_send_packet_finish_pkoid() directly if the PKO port | ||||
|  * identifier is known. | ||||
|  * | ||||
|  * @param ipd_port   The IPD port corresponding to the PKO port the packet is for | ||||
|  * @param queue  Queue to use | ||||
|  * @param pko_command | ||||
|  *               PKO HW command word | ||||
|  * @param packet Packet to send | ||||
|  * @param use_locking | ||||
|  *               CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or CVMX_PKO_LOCK_CMD_QUEUE | ||||
|  * | ||||
|  * @return returns CVMX_PKO_SUCCESS on success, or error code on failure of output | ||||
|  */ | ||||
| static inline cvmx_pko_return_value_t | ||||
| cvmx_hwpko_send_packet_finish(u64 ipd_port, u64 queue, cvmx_pko_command_word0_t pko_command, | ||||
| 			      cvmx_buf_ptr_t packet, cvmx_pko_lock_t use_locking) | ||||
| { | ||||
| 	cvmx_cmd_queue_result_t result; | ||||
| 
 | ||||
| 	if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG) | ||||
| 		cvmx_pow_tag_sw_wait(); | ||||
| 
 | ||||
| 	result = cvmx_cmd_queue_write2(CVMX_CMD_QUEUE_PKO(queue), | ||||
| 				       (use_locking == CVMX_PKO_LOCK_CMD_QUEUE), pko_command.u64, | ||||
| 				       packet.u64); | ||||
| 	if (cvmx_likely(result == CVMX_CMD_QUEUE_SUCCESS)) { | ||||
| 		cvmx_pko_doorbell(ipd_port, queue, 2); | ||||
| 		return CVMX_PKO_SUCCESS; | ||||
| 	} else if ((result == CVMX_CMD_QUEUE_NO_MEMORY) || (result == CVMX_CMD_QUEUE_FULL)) { | ||||
| 		return CVMX_PKO_NO_MEMORY; | ||||
| 	} else { | ||||
| 		return CVMX_PKO_INVALID_QUEUE; | ||||
| 	} | ||||
| } | ||||
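| 
 | ||||
| /* Send sketch (illustrative): prepare and finish must see the same
 | ||||
|  * port/queue/locking arguments. Building the command word and buffer | ||||
|  * pointer is abbreviated; port, queue and len come from the caller: | ||||
|  * | ||||
|  *	cvmx_pko_command_word0_t cmd; | ||||
|  *	cvmx_buf_ptr_t pkt;		// filled in by the caller | ||||
|  * | ||||
|  *	cvmx_pko_send_packet_prepare(port, queue, CVMX_PKO_LOCK_CMD_QUEUE); | ||||
|  *	cmd.u64 = 0; | ||||
|  *	cmd.s.segs = 1; | ||||
|  *	cmd.s.total_bytes = len; | ||||
|  *	cvmx_hwpko_send_packet_finish(port, queue, cmd, pkt, | ||||
|  *				      CVMX_PKO_LOCK_CMD_QUEUE); | ||||
|  */ | ||||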
| 
 | ||||
| /**
 | ||||
|  * Complete packet output. cvmx_pko_send_packet_prepare() must be called exactly once before this, | ||||
|  * and the same parameters must be passed to both cvmx_pko_send_packet_prepare() and | ||||
|  * cvmx_pko_send_packet_finish(). | ||||
|  * | ||||
|  * WARNING: This function may have to look up the proper PKO port in | ||||
|  * the IPD port to PKO port map, and is thus slower than calling | ||||
|  * cvmx_pko_send_packet_finish3_pkoid() directly if the PKO port | ||||
|  * identifier is known. | ||||
|  * | ||||
|  * @param ipd_port   The IPD port corresponding to the PKO port the packet is for | ||||
|  * @param queue  Queue to use | ||||
|  * @param pko_command | ||||
|  *               PKO HW command word | ||||
|  * @param packet Packet to send | ||||
|  * @param addr   Physical address of a work queue entry or physical address to zero on completion. | ||||
|  * @param use_locking | ||||
|  *               CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or CVMX_PKO_LOCK_CMD_QUEUE | ||||
|  * | ||||
|  * @return returns CVMX_PKO_SUCCESS on success, or error code on failure of output | ||||
|  */ | ||||
| static inline cvmx_pko_return_value_t | ||||
| cvmx_hwpko_send_packet_finish3(u64 ipd_port, u64 queue, cvmx_pko_command_word0_t pko_command, | ||||
| 			       cvmx_buf_ptr_t packet, u64 addr, cvmx_pko_lock_t use_locking) | ||||
| { | ||||
| 	cvmx_cmd_queue_result_t result; | ||||
| 
 | ||||
| 	if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG) | ||||
| 		cvmx_pow_tag_sw_wait(); | ||||
| 
 | ||||
| 	result = cvmx_cmd_queue_write3(CVMX_CMD_QUEUE_PKO(queue), | ||||
| 				       (use_locking == CVMX_PKO_LOCK_CMD_QUEUE), pko_command.u64, | ||||
| 				       packet.u64, addr); | ||||
| 	if (cvmx_likely(result == CVMX_CMD_QUEUE_SUCCESS)) { | ||||
| 		cvmx_pko_doorbell(ipd_port, queue, 3); | ||||
| 		return CVMX_PKO_SUCCESS; | ||||
| 	} else if ((result == CVMX_CMD_QUEUE_NO_MEMORY) || (result == CVMX_CMD_QUEUE_FULL)) { | ||||
| 		return CVMX_PKO_NO_MEMORY; | ||||
| 	} else { | ||||
| 		return CVMX_PKO_INVALID_QUEUE; | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Get the first pko_port for the (interface, index) | ||||
|  * | ||||
|  * @param interface | ||||
|  * @param index | ||||
|  */ | ||||
| int cvmx_pko_get_base_pko_port(int interface, int index); | ||||
| 
 | ||||
| /**
 | ||||
|  * Get the number of pko_ports for the (interface, index) | ||||
|  * | ||||
|  * @param interface | ||||
|  * @param index | ||||
|  */ | ||||
| int cvmx_pko_get_num_pko_ports(int interface, int index); | ||||
| 
 | ||||
| /**
 | ||||
|  * For a given port number, return the base pko output queue | ||||
|  * for the port. | ||||
|  * | ||||
|  * @param port   IPD port number | ||||
|  * @return Base output queue | ||||
|  */ | ||||
| int cvmx_pko_get_base_queue(int port); | ||||
| 
 | ||||
| /**
 | ||||
|  * For a given port number, return the number of pko output queues. | ||||
|  * | ||||
|  * @param port   IPD port number | ||||
|  * @return Number of output queues | ||||
|  */ | ||||
| int cvmx_pko_get_num_queues(int port); | ||||
| 
 | ||||
| /**
 | ||||
|  * Sets the internal FPA pool data structure for the PKO command queue. | ||||
|  * @param pool	FPA pool number to use | ||||
|  * @param buffer_size	buffer size of the pool | ||||
|  * @param buffer_count	number of buffers to allocate to the pool | ||||
|  * | ||||
|  * @note the caller is responsible for setting up the pool with | ||||
|  * an appropriate buffer size and sufficient buffer count. | ||||
|  */ | ||||
| void cvmx_pko_set_cmd_que_pool_config(s64 pool, u64 buffer_size, u64 buffer_count); | ||||
| 
 | ||||
| /**
 | ||||
|  * Get the status counters for a port. | ||||
|  * | ||||
|  * @param ipd_port Port number (ipd_port) to get statistics for. | ||||
|  * @param clear    Set to 1 to clear the counters after they are read | ||||
|  * @param status   Where to put the results. | ||||
|  * | ||||
|  * Note: | ||||
|  *     - Only the doorbell for the base queue of the ipd_port is | ||||
|  *       collected. | ||||
|  *     - Retrieving the stats involves writing the index through | ||||
|  *       CVMX_PKO_REG_READ_IDX and reading the stat CSRs, in that | ||||
|  *       order. It is not MP-safe and caller should guarantee | ||||
|  *       atomicity. | ||||
|  */ | ||||
| void cvmx_pko_get_port_status(u64 ipd_port, u64 clear, cvmx_pko_port_status_t *status); | ||||
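| 
 | ||||
| /* Example (illustrative): read and clear the counters for one IPD port:
 | ||||
|  * | ||||
|  *	cvmx_pko_port_status_t st; | ||||
|  * | ||||
|  *	cvmx_pko_get_port_status(ipd_port, 1, &st); | ||||
|  *	debug("packets %u, octets %llu\n", st.packets, | ||||
|  *	      (unsigned long long)st.octets); | ||||
|  */ | ||||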
| 
 | ||||
| /**
 | ||||
|  * Rate limit a PKO port to a max packets/sec. This function is only | ||||
|  * supported on CN57XX, CN56XX, CN55XX, and CN54XX. | ||||
|  * | ||||
|  * @param port      Port to rate limit | ||||
|  * @param packets_s Maximum packet/sec | ||||
|  * @param burst     Maximum number of packets to burst in a row before rate | ||||
|  *                  limiting cuts in. | ||||
|  * | ||||
|  * @return Zero on success, negative on failure | ||||
|  */ | ||||
| int cvmx_pko_rate_limit_packets(int port, int packets_s, int burst); | ||||
| 
 | ||||
| /**
 | ||||
|  * Rate limit a PKO port to a max bits/sec. This function is only | ||||
|  * supported on CN57XX, CN56XX, CN55XX, and CN54XX. | ||||
|  * | ||||
|  * @param port   Port to rate limit | ||||
|  * @param bits_s PKO rate limit in bits/sec | ||||
|  * @param burst  Maximum number of bits to burst before rate | ||||
|  *               limiting cuts in. | ||||
|  * | ||||
|  * @return Zero on success, negative on failure | ||||
|  */ | ||||
| int cvmx_pko_rate_limit_bits(int port, u64 bits_s, int burst); | ||||
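| 
 | ||||
| /* Example (hypothetical values): cap port 2 at 100 Mbit/s with an 8 kbit
 | ||||
|  * burst allowance, on the models that support it: | ||||
|  * | ||||
|  *	if (cvmx_pko_rate_limit_bits(2, 100000000ull, 8192) < 0) | ||||
|  *		debug("PKO rate limiting not applied\n"); | ||||
|  */ | ||||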
| 
 | ||||
| /**
 | ||||
|  * @INTERNAL | ||||
|  * | ||||
|  * Retrieve the PKO pipe number for a port | ||||
|  * | ||||
|  * @param interface | ||||
|  * @param index | ||||
|  * | ||||
|  * @return negative on error. | ||||
|  * | ||||
|  * This applies only to the non-loopback interfaces. | ||||
|  * | ||||
|  */ | ||||
| int __cvmx_pko_get_pipe(int interface, int index); | ||||
| 
 | ||||
| /**
 | ||||
|  * For a given PKO port number, return the base output queue | ||||
|  * for the port. | ||||
|  * | ||||
|  * @param pko_port   PKO port number | ||||
|  * @return           Base output queue | ||||
|  */ | ||||
| int cvmx_pko_get_base_queue_pkoid(int pko_port); | ||||
| 
 | ||||
| /**
 | ||||
|  * For a given PKO port number, return the number of output queues | ||||
|  * for the port. | ||||
|  * | ||||
|  * @param pko_port	PKO port number | ||||
|  * @return		the number of output queues | ||||
|  */ | ||||
| int cvmx_pko_get_num_queues_pkoid(int pko_port); | ||||
| 
 | ||||
| /**
 | ||||
|  * Ring the packet output doorbell. This tells the packet | ||||
|  * output hardware that "len" command words have been added | ||||
|  * to its pending list.  This command includes the required | ||||
|  * CVMX_SYNCWS before the doorbell ring. | ||||
|  * | ||||
|  * @param pko_port   Port the packet is for | ||||
|  * @param queue  Queue the packet is for | ||||
|  * @param len    Length of the command in 64 bit words | ||||
|  */ | ||||
| static inline void cvmx_pko_doorbell_pkoid(u64 pko_port, u64 queue, u64 len) | ||||
| { | ||||
| 	cvmx_pko_doorbell_address_t ptr; | ||||
| 
 | ||||
| 	ptr.u64 = 0; | ||||
| 	ptr.s.mem_space = CVMX_IO_SEG; | ||||
| 	ptr.s.did = CVMX_OCT_DID_PKT_SEND; | ||||
| 	ptr.s.is_io = 1; | ||||
| 	ptr.s.port = pko_port; | ||||
| 	ptr.s.queue = queue; | ||||
| 	/* Need to make sure output queue data is in DRAM before doorbell write */ | ||||
| 	CVMX_SYNCWS; | ||||
| 	cvmx_write_io(ptr.u64, len); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Complete packet output. cvmx_pko_send_packet_prepare() must be called exactly once before this, | ||||
|  * and the same parameters must be passed to both cvmx_pko_send_packet_prepare() and | ||||
|  * cvmx_pko_send_packet_finish_pkoid(). | ||||
|  * | ||||
|  * @param pko_port   Port to send it on | ||||
|  * @param queue  Queue to use | ||||
|  * @param pko_command | ||||
|  *               PKO HW command word | ||||
|  * @param packet Packet to send | ||||
|  * @param use_locking | ||||
|  *               CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or CVMX_PKO_LOCK_CMD_QUEUE | ||||
|  * | ||||
|  * @return returns CVMX_PKO_SUCCESS on success, or error code on failure of output | ||||
|  */ | ||||
| static inline cvmx_pko_return_value_t | ||||
| cvmx_hwpko_send_packet_finish_pkoid(int pko_port, u64 queue, cvmx_pko_command_word0_t pko_command, | ||||
| 				    cvmx_buf_ptr_t packet, cvmx_pko_lock_t use_locking) | ||||
| { | ||||
| 	cvmx_cmd_queue_result_t result; | ||||
| 
 | ||||
| 	if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG) | ||||
| 		cvmx_pow_tag_sw_wait(); | ||||
| 
 | ||||
| 	result = cvmx_cmd_queue_write2(CVMX_CMD_QUEUE_PKO(queue), | ||||
| 				       (use_locking == CVMX_PKO_LOCK_CMD_QUEUE), pko_command.u64, | ||||
| 				       packet.u64); | ||||
| 	if (cvmx_likely(result == CVMX_CMD_QUEUE_SUCCESS)) { | ||||
| 		cvmx_pko_doorbell_pkoid(pko_port, queue, 2); | ||||
| 		return CVMX_PKO_SUCCESS; | ||||
| 	} else if ((result == CVMX_CMD_QUEUE_NO_MEMORY) || (result == CVMX_CMD_QUEUE_FULL)) { | ||||
| 		return CVMX_PKO_NO_MEMORY; | ||||
| 	} else { | ||||
| 		return CVMX_PKO_INVALID_QUEUE; | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Complete packet output. cvmx_pko_send_packet_prepare() must be called exactly once before this, | ||||
|  * and the same parameters must be passed to both cvmx_pko_send_packet_prepare() and | ||||
|  * cvmx_pko_send_packet_finish_pkoid(). | ||||
|  * | ||||
|  * @param pko_port   The PKO port the packet is for | ||||
|  * @param queue  Queue to use | ||||
|  * @param pko_command | ||||
|  *               PKO HW command word | ||||
|  * @param packet Packet to send | ||||
|  * @param addr   Physical address of a work queue entry or physical address to zero on completion. | ||||
|  * @param use_locking | ||||
|  *               CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or CVMX_PKO_LOCK_CMD_QUEUE | ||||
|  * | ||||
|  * @return returns CVMX_PKO_SUCCESS on success, or error code on failure of output | ||||
|  */ | ||||
| static inline cvmx_pko_return_value_t | ||||
| cvmx_hwpko_send_packet_finish3_pkoid(u64 pko_port, u64 queue, cvmx_pko_command_word0_t pko_command, | ||||
| 				     cvmx_buf_ptr_t packet, u64 addr, cvmx_pko_lock_t use_locking) | ||||
| { | ||||
| 	cvmx_cmd_queue_result_t result; | ||||
| 
 | ||||
| 	if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG) | ||||
| 		cvmx_pow_tag_sw_wait(); | ||||
| 
 | ||||
| 	result = cvmx_cmd_queue_write3(CVMX_CMD_QUEUE_PKO(queue), | ||||
| 				       (use_locking == CVMX_PKO_LOCK_CMD_QUEUE), pko_command.u64, | ||||
| 				       packet.u64, addr); | ||||
| 	if (cvmx_likely(result == CVMX_CMD_QUEUE_SUCCESS)) { | ||||
| 		cvmx_pko_doorbell_pkoid(pko_port, queue, 3); | ||||
| 		return CVMX_PKO_SUCCESS; | ||||
| 	} else if ((result == CVMX_CMD_QUEUE_NO_MEMORY) || (result == CVMX_CMD_QUEUE_FULL)) { | ||||
| 		return CVMX_PKO_NO_MEMORY; | ||||
| 	} else { | ||||
| 		return CVMX_PKO_INVALID_QUEUE; | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  * Obtain the number of PKO commands pending in a queue | ||||
|  * | ||||
|  * @param queue is the queue identifier to be queried | ||||
|  * @return the number of commands pending transmission or -1 on error | ||||
|  */ | ||||
| int cvmx_pko_queue_pend_count(cvmx_cmd_queue_id_t queue); | ||||
| 
 | ||||
| void cvmx_pko_set_cmd_queue_pool_buffer_count(u64 buffer_count); | ||||
| 
 | ||||
| #endif /* __CVMX_HWPKO_H__ */ | ||||
|  | @ -0,0 +1,154 @@ | |||
| /* SPDX-License-Identifier: GPL-2.0 */ | ||||
| /*
 | ||||
|  * Copyright (C) 2020 Marvell International Ltd. | ||||
|  * | ||||
|  * This file contains defines for the ILK interface | ||||
|  */ | ||||
| 
 | ||||
| #ifndef __CVMX_ILK_H__ | ||||
| #define __CVMX_ILK_H__ | ||||
| 
 | ||||
| /* CSR typedefs have been moved to cvmx-ilk-defs.h */ | ||||
| 
 | ||||
| /*
 | ||||
|  * Note: this function must match the first ILK port in the ipd_port_map_68xx[] | ||||
|  * and ipd_port_map_78xx[] arrays. | ||||
|  */ | ||||
| static inline int CVMX_ILK_GBL_BASE(void) | ||||
| { | ||||
| 	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) | ||||
| 		return 5; | ||||
| 	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) | ||||
| 		return 6; | ||||
| 	return -1; | ||||
| } | ||||
| 
 | ||||
| static inline int CVMX_ILK_QLM_BASE(void) | ||||
| { | ||||
| 	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) | ||||
| 		return 1; | ||||
| 	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) | ||||
| 		return 4; | ||||
| 	return -1; | ||||
| } | ||||
| 
 | ||||
| typedef struct { | ||||
| 	int intf_en : 1; | ||||
| 	int la_mode : 1; | ||||
| 	int reserved : 14; /* unused */ | ||||
| 	int lane_speed : 16; | ||||
| 	/* add more here */ | ||||
| } cvmx_ilk_intf_t; | ||||
| 
 | ||||
| #define CVMX_NUM_ILK_INTF 2 | ||||
| static inline int CVMX_ILK_MAX_LANES(void) | ||||
| { | ||||
| 	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) | ||||
| 		return 8; | ||||
| 	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) | ||||
| 		return 16; | ||||
| 	return -1; | ||||
| } | ||||
| 
 | ||||
| extern unsigned short cvmx_ilk_lane_mask[CVMX_MAX_NODES][CVMX_NUM_ILK_INTF]; | ||||
| 
 | ||||
| typedef struct { | ||||
| 	unsigned int pipe; | ||||
| 	unsigned int chan; | ||||
| } cvmx_ilk_pipe_chan_t; | ||||
| 
 | ||||
| #define CVMX_ILK_MAX_PIPES 45 | ||||
| /* Max number of channels allowed */ | ||||
| #define CVMX_ILK_MAX_CHANS 256 | ||||
| 
 | ||||
| extern int cvmx_ilk_chans[CVMX_MAX_NODES][CVMX_NUM_ILK_INTF]; | ||||
| 
 | ||||
| typedef struct { | ||||
| 	unsigned int chan; | ||||
| 	unsigned int pknd; | ||||
| } cvmx_ilk_chan_pknd_t; | ||||
| 
 | ||||
| #define CVMX_ILK_MAX_PKNDS 16 /* must be <45 */ | ||||
| 
 | ||||
| typedef struct { | ||||
| 	int *chan_list; /* for discrete channels; otherwise must be NULL */ | ||||
| 	unsigned int num_chans; | ||||
| 
 | ||||
| 	unsigned int chan_start; /* for continuous channels */ | ||||
| 	unsigned int chan_end; | ||||
| 	unsigned int chan_step; | ||||
| 
 | ||||
| 	unsigned int clr_on_rd; | ||||
| } cvmx_ilk_stats_ctrl_t; | ||||
| 
 | ||||
| #define CVMX_ILK_MAX_CAL      288 | ||||
| #define CVMX_ILK_MAX_CAL_IDX  (CVMX_ILK_MAX_CAL / 8) | ||||
| #define CVMX_ILK_TX_MIN_CAL   1 | ||||
| #define CVMX_ILK_RX_MIN_CAL   1 | ||||
| #define CVMX_ILK_CAL_GRP_SZ   8 | ||||
| #define CVMX_ILK_PIPE_BPID_SZ 7 | ||||
| #define CVMX_ILK_ENT_CTRL_SZ  2 | ||||
| #define CVMX_ILK_RX_FIFO_WM   0x200 | ||||
| 
 | ||||
| typedef enum { PIPE_BPID = 0, LINK, XOFF, XON } cvmx_ilk_cal_ent_ctrl_t; | ||||
| 
 | ||||
| typedef struct { | ||||
| 	unsigned char pipe_bpid; | ||||
| 	cvmx_ilk_cal_ent_ctrl_t ent_ctrl; | ||||
| } cvmx_ilk_cal_entry_t; | ||||
| 
 | ||||
| typedef enum { CVMX_ILK_LPBK_DISA = 0, CVMX_ILK_LPBK_ENA } cvmx_ilk_lpbk_ena_t; | ||||
| 
 | ||||
| typedef enum { CVMX_ILK_LPBK_INT = 0, CVMX_ILK_LPBK_EXT } cvmx_ilk_lpbk_mode_t; | ||||
| 
 | ||||
| /**
 | ||||
|  * This header is placed in front of all received ILK look-aside mode packets | ||||
|  */ | ||||
| typedef union { | ||||
| 	u64 u64; | ||||
| 
 | ||||
| 	struct { | ||||
| 		u32 reserved_63_57 : 7;	  /* bits 63...57 */ | ||||
| 		u32 nsp_cmd : 5;	  /* bits 56...52 */ | ||||
| 		u32 nsp_flags : 4;	  /* bits 51...48 */ | ||||
| 		u32 nsp_grp_id_upper : 6; /* bits 47...42 */ | ||||
| 		u32 reserved_41_40 : 2;	  /* bits 41...40 */ | ||||
| 		/* Protocol type, 1 for LA mode packet */ | ||||
| 		u32 la_mode : 1;	  /* bit  39      */ | ||||
| 		u32 nsp_grp_id_lower : 2; /* bits 38...37 */ | ||||
| 		u32 nsp_xid_upper : 4;	  /* bits 36...33 */ | ||||
| 		/* ILK channel number, 0 or 1 */ | ||||
| 		u32 ilk_channel : 1;   /* bit  32      */ | ||||
| 		u32 nsp_xid_lower : 8; /* bits 31...24 */ | ||||
| 		/* Unpredictable, may be any value */ | ||||
| 		u32 reserved_23_0 : 24; /* bits 23...0  */ | ||||
| 	} s; | ||||
| } cvmx_ilk_la_nsp_compact_hdr_t; | ||||
| 
 | ||||
| typedef struct cvmx_ilk_LA_mode_struct { | ||||
| 	int ilk_LA_mode; | ||||
| 	int ilk_LA_mode_cal_ena; | ||||
| } cvmx_ilk_LA_mode_t; | ||||
| 
 | ||||
| extern cvmx_ilk_LA_mode_t cvmx_ilk_LA_mode[CVMX_NUM_ILK_INTF]; | ||||
| 
 | ||||
| int cvmx_ilk_use_la_mode(int interface, int channel); | ||||
| int cvmx_ilk_start_interface(int interface, unsigned short num_lanes); | ||||
| int cvmx_ilk_start_interface_la(int interface, unsigned char num_lanes); | ||||
| int cvmx_ilk_set_pipe(int interface, int pipe_base, unsigned int pipe_len); | ||||
| int cvmx_ilk_tx_set_channel(int interface, cvmx_ilk_pipe_chan_t *pch, unsigned int num_chs); | ||||
| int cvmx_ilk_rx_set_pknd(int interface, cvmx_ilk_chan_pknd_t *chpknd, unsigned int num_pknd); | ||||
| int cvmx_ilk_enable(int interface); | ||||
| int cvmx_ilk_disable(int interface); | ||||
| int cvmx_ilk_get_intf_ena(int interface); | ||||
| int cvmx_ilk_get_chan_info(int interface, unsigned char **chans, unsigned char *num_chan); | ||||
| cvmx_ilk_la_nsp_compact_hdr_t cvmx_ilk_enable_la_header(int ipd_port, int mode); | ||||
| void cvmx_ilk_show_stats(int interface, cvmx_ilk_stats_ctrl_t *pstats); | ||||
| int cvmx_ilk_cal_setup_rx(int interface, int cal_depth, cvmx_ilk_cal_entry_t *pent, int hi_wm, | ||||
| 			  unsigned char cal_ena); | ||||
| int cvmx_ilk_cal_setup_tx(int interface, int cal_depth, cvmx_ilk_cal_entry_t *pent, | ||||
| 			  unsigned char cal_ena); | ||||
| int cvmx_ilk_lpbk(int interface, cvmx_ilk_lpbk_ena_t enable, cvmx_ilk_lpbk_mode_t mode); | ||||
| int cvmx_ilk_la_mode_enable_rx_calendar(int interface); | ||||
| 
 | ||||
| #endif /* __CVMX_ILK_H__ */ | ||||
|  | @ -0,0 +1,233 @@ | |||
| /* SPDX-License-Identifier: GPL-2.0 */ | ||||
| /*
 | ||||
|  * Copyright (C) 2020 Marvell International Ltd. | ||||
|  * | ||||
|  * Interface to the hardware Input Packet Data unit. | ||||
|  */ | ||||
| 
 | ||||
| #ifndef __CVMX_IPD_H__ | ||||
| #define __CVMX_IPD_H__ | ||||
| 
 | ||||
| #include "cvmx-pki.h" | ||||
| 
 | ||||
| /* CSR typedefs have been moved to cvmx-ipd-defs.h */ | ||||
| 
 | ||||
| typedef cvmx_ipd_1st_mbuff_skip_t cvmx_ipd_mbuff_not_first_skip_t; | ||||
| typedef cvmx_ipd_1st_next_ptr_back_t cvmx_ipd_second_next_ptr_back_t; | ||||
| 
 | ||||
| typedef struct cvmx_ipd_tag_fields { | ||||
| 	u64 ipv6_src_ip : 1; | ||||
| 	u64 ipv6_dst_ip : 1; | ||||
| 	u64 ipv6_src_port : 1; | ||||
| 	u64 ipv6_dst_port : 1; | ||||
| 	u64 ipv6_next_header : 1; | ||||
| 	u64 ipv4_src_ip : 1; | ||||
| 	u64 ipv4_dst_ip : 1; | ||||
| 	u64 ipv4_src_port : 1; | ||||
| 	u64 ipv4_dst_port : 1; | ||||
| 	u64 ipv4_protocol : 1; | ||||
| 	u64 input_port : 1; | ||||
| } cvmx_ipd_tag_fields_t; | ||||
| 
 | ||||
| typedef struct cvmx_pip_port_config { | ||||
| 	u64 parse_mode; | ||||
| 	u64 tag_type; | ||||
| 	u64 tag_mode; | ||||
| 	cvmx_ipd_tag_fields_t tag_fields; | ||||
| } cvmx_pip_port_config_t; | ||||
| 
 | ||||
| typedef struct cvmx_ipd_config_struct { | ||||
| 	u64 first_mbuf_skip; | ||||
| 	u64 not_first_mbuf_skip; | ||||
| 	u64 ipd_enable; | ||||
| 	u64 enable_len_M8_fix; | ||||
| 	u64 cache_mode; | ||||
| 	cvmx_fpa_pool_config_t packet_pool; | ||||
| 	cvmx_fpa_pool_config_t wqe_pool; | ||||
| 	cvmx_pip_port_config_t port_config; | ||||
| } cvmx_ipd_config_t; | ||||
| 
 | ||||
| extern cvmx_ipd_config_t cvmx_ipd_cfg; | ||||
| 
 | ||||
| /**
 | ||||
|  * Gets the FPA pool number of the packet pool | ||||
|  */ | ||||
| static inline s64 cvmx_fpa_get_packet_pool(void) | ||||
| { | ||||
| 	return (cvmx_ipd_cfg.packet_pool.pool_num); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Gets the buffer size of packet pool buffer | ||||
|  */ | ||||
| static inline u64 cvmx_fpa_get_packet_pool_block_size(void) | ||||
| { | ||||
| 	return (cvmx_ipd_cfg.packet_pool.buffer_size); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Gets the buffer count of packet pool | ||||
|  */ | ||||
| static inline u64 cvmx_fpa_get_packet_pool_buffer_count(void) | ||||
| { | ||||
| 	return (cvmx_ipd_cfg.packet_pool.buffer_count); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Gets the FPA pool number of the WQE pool | ||||
|  */ | ||||
| static inline s64 cvmx_fpa_get_wqe_pool(void) | ||||
| { | ||||
| 	return (cvmx_ipd_cfg.wqe_pool.pool_num); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Gets the buffer size of wqe pool buffer | ||||
|  */ | ||||
| static inline u64 cvmx_fpa_get_wqe_pool_block_size(void) | ||||
| { | ||||
| 	return (cvmx_ipd_cfg.wqe_pool.buffer_size); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Gets the buffer count of wqe pool | ||||
|  */ | ||||
| static inline u64 cvmx_fpa_get_wqe_pool_buffer_count(void) | ||||
| { | ||||
| 	return (cvmx_ipd_cfg.wqe_pool.buffer_count); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Sets the IPD related configuration in the internal structure, which is then | ||||
|  * used for setting up the IPD hardware block. | ||||
|  */ | ||||
| int cvmx_ipd_set_config(cvmx_ipd_config_t ipd_config); | ||||
| 
 | ||||
| /**
 | ||||
|  * Gets the IPD related configuration from the internal structure. | ||||
|  */ | ||||
| void cvmx_ipd_get_config(cvmx_ipd_config_t *ipd_config); | ||||
| 
 | ||||
| /**
 | ||||
|  * Sets the internal FPA pool data structure for packet buffer pool. | ||||
|  * @param pool	FPA pool number to use | ||||
|  * @param buffer_size	buffer size of the pool | ||||
|  * @param buffer_count	number of buffers to allocate to the pool | ||||
|  */ | ||||
| void cvmx_ipd_set_packet_pool_config(s64 pool, u64 buffer_size, u64 buffer_count); | ||||
| 
 | ||||
| /**
 | ||||
|  * Sets the internal FPA pool data structure for wqe pool. | ||||
|  * @param pool	FPA pool number to use | ||||
|  * @param buffer_size	buffer size of the pool | ||||
|  * @param buffer_count	number of buffers to allocate to the pool | ||||
|  */ | ||||
| void cvmx_ipd_set_wqe_pool_config(s64 pool, u64 buffer_size, u64 buffer_count); | ||||
| 
 | ||||
| /**
 | ||||
|  * Gets the FPA packet buffer pool parameters. | ||||
|  */ | ||||
| static inline void cvmx_fpa_get_packet_pool_config(s64 *pool, u64 *buffer_size, u64 *buffer_count) | ||||
| { | ||||
| 	if (pool) | ||||
| 		*pool = cvmx_ipd_cfg.packet_pool.pool_num; | ||||
| 	if (buffer_size) | ||||
| 		*buffer_size = cvmx_ipd_cfg.packet_pool.buffer_size; | ||||
| 	if (buffer_count) | ||||
| 		*buffer_count = cvmx_ipd_cfg.packet_pool.buffer_count; | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Sets the FPA packet buffer pool parameters. | ||||
|  */ | ||||
| static inline void cvmx_fpa_set_packet_pool_config(s64 pool, u64 buffer_size, u64 buffer_count) | ||||
| { | ||||
| 	cvmx_ipd_set_packet_pool_config(pool, buffer_size, buffer_count); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Gets the FPA WQE pool parameters. | ||||
|  */ | ||||
| static inline void cvmx_fpa_get_wqe_pool_config(s64 *pool, u64 *buffer_size, u64 *buffer_count) | ||||
| { | ||||
| 	if (pool) | ||||
| 		*pool = cvmx_ipd_cfg.wqe_pool.pool_num; | ||||
| 	if (buffer_size) | ||||
| 		*buffer_size = cvmx_ipd_cfg.wqe_pool.buffer_size; | ||||
| 	if (buffer_count) | ||||
| 		*buffer_count = cvmx_ipd_cfg.wqe_pool.buffer_count; | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Sets the FPA WQE pool parameters. | ||||
|  */ | ||||
| static inline void cvmx_fpa_set_wqe_pool_config(s64 pool, u64 buffer_size, u64 buffer_count) | ||||
| { | ||||
| 	cvmx_ipd_set_wqe_pool_config(pool, buffer_size, buffer_count); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Configure IPD | ||||
|  * | ||||
|  * @param mbuff_size Packet buffer size in 8 byte words | ||||
|  * @param first_mbuff_skip | ||||
|  *                   Number of 8 byte words to skip in the first buffer | ||||
|  * @param not_first_mbuff_skip | ||||
|  *                   Number of 8 byte words to skip in each following buffer | ||||
|  * @param first_back Must be same as first_mbuff_skip / 128 | ||||
|  * @param second_back | ||||
|  *                   Must be same as not_first_mbuff_skip / 128 | ||||
|  * @param wqe_fpa_pool | ||||
|  *                   FPA pool to get work entries from | ||||
|  * @param cache_mode | ||||
|  * @param back_pres_enable_flag | ||||
|  *                   Enable or disable port back pressure at a global level. | ||||
|  *                   This should always be 1 as more accurate control can be | ||||
|  *                   found in IPD_PORTX_BP_PAGE_CNT[BP_ENB]. | ||||
|  */ | ||||
| void cvmx_ipd_config(u64 mbuff_size, u64 first_mbuff_skip, u64 not_first_mbuff_skip, u64 first_back, | ||||
| 		     u64 second_back, u64 wqe_fpa_pool, cvmx_ipd_mode_t cache_mode, | ||||
| 		     u64 back_pres_enable_flag); | ||||
| /**
 | ||||
|  * Enable IPD | ||||
|  */ | ||||
| void cvmx_ipd_enable(void); | ||||
| 
 | ||||
| /**
 | ||||
|  * Disable IPD | ||||
|  */ | ||||
| void cvmx_ipd_disable(void); | ||||
| 
 | ||||
| void __cvmx_ipd_free_ptr(void); | ||||
| 
 | ||||
| void cvmx_ipd_set_packet_pool_buffer_count(u64 buffer_count); | ||||
| void cvmx_ipd_set_wqe_pool_buffer_count(u64 buffer_count); | ||||
| 
 | ||||
| /**
 | ||||
|  * Setup Random Early Drop on a specific input queue | ||||
|  * | ||||
|  * @param queue  Input queue to setup RED on (0-7) | ||||
|  * @param pass_thresh | ||||
|  *               Packets will begin slowly dropping when there are less than | ||||
|  *               this many packet buffers free in FPA 0. | ||||
|  * @param drop_thresh | ||||
|  *               All incoming packets will be dropped when there are less | ||||
|  *               than this many free packet buffers in FPA 0. | ||||
|  * @return Zero on success. Negative on failure | ||||
|  */ | ||||
| int cvmx_ipd_setup_red_queue(int queue, int pass_thresh, int drop_thresh); | ||||
| 
 | ||||
| /**
 | ||||
|  * Setup Random Early Drop to automatically begin dropping packets. | ||||
|  * | ||||
|  * @param pass_thresh | ||||
|  *               Packets will begin slowly dropping when there are less than | ||||
|  *               this many packet buffers free in FPA 0. | ||||
|  * @param drop_thresh | ||||
|  *               All incoming packets will be dropped when there are less | ||||
|  *               than this many free packet buffers in FPA 0. | ||||
|  * @return Zero on success. Negative on failure | ||||
|  */ | ||||
| int cvmx_ipd_setup_red(int pass_thresh, int drop_thresh); | ||||
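| 
 | ||||
| /* Example (hypothetical thresholds): drop gently when fewer than 1000
 | ||||
|  * packet buffers remain free in FPA pool 0, and drop everything below 500: | ||||
|  * | ||||
|  *	if (cvmx_ipd_setup_red(1000, 500) < 0) | ||||
|  *		debug("RED setup failed\n"); | ||||
|  */ | ||||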
| 
 | ||||
| #endif /*  __CVMX_IPD_H__ */ | ||||
|  | @ -0,0 +1,40 @@ | |||
| /* SPDX-License-Identifier: GPL-2.0 */ | ||||
| /*
 | ||||
|  * Copyright (C) 2020 Marvell International Ltd. | ||||
|  * | ||||
|  * Packet buffer defines. | ||||
|  */ | ||||
| 
 | ||||
| #ifndef __CVMX_PACKET_H__ | ||||
| #define __CVMX_PACKET_H__ | ||||
| 
 | ||||
| union cvmx_buf_ptr_pki { | ||||
| 	u64 u64; | ||||
| 	struct { | ||||
| 		u64 size : 16; | ||||
| 		u64 packet_outside_wqe : 1; | ||||
| 		u64 rsvd0 : 5; | ||||
| 		u64 addr : 42; | ||||
| 	}; | ||||
| }; | ||||
| 
 | ||||
| typedef union cvmx_buf_ptr_pki cvmx_buf_ptr_pki_t; | ||||
| 
 | ||||
| /**
 | ||||
|  * This structure defines a buffer pointer on Octeon | ||||
|  */ | ||||
| union cvmx_buf_ptr { | ||||
| 	void *ptr; | ||||
| 	u64 u64; | ||||
| 	struct { | ||||
| 		u64 i : 1; | ||||
| 		u64 back : 4; | ||||
| 		u64 pool : 3; | ||||
| 		u64 size : 16; | ||||
| 		u64 addr : 40; | ||||
| 	} s; | ||||
| }; | ||||
| 
 | ||||
| typedef union cvmx_buf_ptr cvmx_buf_ptr_t; | ||||
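| 
 | ||||
| /* Construction sketch (assumptions noted inline): "addr" holds a physical
 | ||||
|  * address and "back" counts 128-byte cache lines from the packet data back | ||||
|  * to the start of the underlying buffer. cvmx_ptr_to_phys() is assumed to | ||||
|  * come from the usual cvmx headers; data, buf_start and len are the | ||||
|  * caller's: | ||||
|  * | ||||
|  *	cvmx_buf_ptr_t p; | ||||
|  * | ||||
|  *	p.u64 = 0; | ||||
|  *	p.s.addr = cvmx_ptr_to_phys(data); | ||||
|  *	p.s.size = len; | ||||
|  *	p.s.pool = 0;				// hypothetical packet pool | ||||
|  *	p.s.back = (data - buf_start) / 128;	// both are char pointers | ||||
|  */ | ||||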
| 
 | ||||
| #endif /*  __CVMX_PACKET_H__ */ | ||||
|  | @ -0,0 +1,279 @@ | |||
| /* SPDX-License-Identifier: GPL-2.0 */ | ||||
| /*
 | ||||
|  * Copyright (C) 2020 Marvell International Ltd. | ||||
|  */ | ||||
| 
 | ||||
| #ifndef __CVMX_PCIE_H__ | ||||
| #define __CVMX_PCIE_H__ | ||||
| 
 | ||||
| #define CVMX_PCIE_MAX_PORTS 4 | ||||
| #define CVMX_PCIE_PORTS                                                                            \ | ||||
| 	((OCTEON_IS_MODEL(OCTEON_CN78XX) || OCTEON_IS_MODEL(OCTEON_CN73XX)) ?                      \ | ||||
| 		       CVMX_PCIE_MAX_PORTS :                                                             \ | ||||
| 		       (OCTEON_IS_MODEL(OCTEON_CN70XX) ? 3 : 2)) | ||||
| 
 | ||||
| /*
 | ||||
|  * The physical memory base mapped by BAR1.  256MB at the end of the | ||||
|  * first 4GB. | ||||
|  */ | ||||
| #define CVMX_PCIE_BAR1_PHYS_BASE ((1ull << 32) - (1ull << 28)) | ||||
| #define CVMX_PCIE_BAR1_PHYS_SIZE BIT_ULL(28) | ||||
| 
 | ||||
| /*
 | ||||
|  * The RC base of BAR1.  gen1 has a 39-bit BAR2, gen2 has a 41-bit BAR2; | ||||
|  * place BAR1 so it is the same for both. | ||||
|  */ | ||||
| #define CVMX_PCIE_BAR1_RC_BASE BIT_ULL(41) | ||||
| 
 | ||||
| typedef union { | ||||
| 	u64 u64; | ||||
| 	struct { | ||||
| 		u64 upper : 2;		 /* Normally 2 for XKPHYS */ | ||||
| 		u64 reserved_49_61 : 13; /* Must be zero */ | ||||
| 		u64 io : 1;		 /* 1 for IO space access */ | ||||
| 		u64 did : 5;		 /* PCIe DID = 3 */ | ||||
| 		u64 subdid : 3;		 /* PCIe SubDID = 1 */ | ||||
| 		u64 reserved_38_39 : 2;	 /* Must be zero */ | ||||
| 		u64 node : 2;		 /* Numa node number */ | ||||
| 		u64 es : 2;		 /* Endian swap = 1 */ | ||||
| 		u64 port : 2;		 /* PCIe port 0,1 */ | ||||
| 		u64 reserved_29_31 : 3;	 /* Must be zero */ | ||||
| 		u64 ty : 1; | ||||
| 		u64 bus : 8; | ||||
| 		u64 dev : 5; | ||||
| 		u64 func : 3; | ||||
| 		u64 reg : 12; | ||||
| 	} config; | ||||
| 	struct { | ||||
| 		u64 upper : 2;		 /* Normally 2 for XKPHYS */ | ||||
| 		u64 reserved_49_61 : 13; /* Must be zero */ | ||||
| 		u64 io : 1;		 /* 1 for IO space access */ | ||||
| 		u64 did : 5;		 /* PCIe DID = 3 */ | ||||
| 		u64 subdid : 3;		 /* PCIe SubDID = 2 */ | ||||
| 		u64 reserved_38_39 : 2;	 /* Must be zero */ | ||||
| 		u64 node : 2;		 /* Numa node number */ | ||||
| 		u64 es : 2;		 /* Endian swap = 1 */ | ||||
| 		u64 port : 2;		 /* PCIe port 0,1 */ | ||||
| 		u64 address : 32;	 /* PCIe IO address */ | ||||
| 	} io; | ||||
| 	struct { | ||||
| 		u64 upper : 2;		 /* Normally 2 for XKPHYS */ | ||||
| 		u64 reserved_49_61 : 13; /* Must be zero */ | ||||
| 		u64 io : 1;		 /* 1 for IO space access */ | ||||
| 		u64 did : 5;		 /* PCIe DID = 3 */ | ||||
| 		u64 subdid : 3;		 /* PCIe SubDID = 3-6 */ | ||||
| 		u64 reserved_38_39 : 2;	 /* Must be zero */ | ||||
| 		u64 node : 2;		 /* Numa node number */ | ||||
| 		u64 address : 36;	 /* PCIe Mem address */ | ||||
| 	} mem; | ||||
| } cvmx_pcie_address_t; | ||||
| 
 | ||||
| /**
 | ||||
|  * Return the Core virtual base address for PCIe IO access. IOs are | ||||
|  * read/written as an offset from this address. | ||||
|  * | ||||
|  * @param pcie_port PCIe port the IO is for | ||||
|  * | ||||
|  * @return 64bit Octeon IO base address for read/write | ||||
|  */ | ||||
| u64 cvmx_pcie_get_io_base_address(int pcie_port); | ||||
| 
 | ||||
| /**
 | ||||
|  * Size of the IO address region returned at address | ||||
|  * cvmx_pcie_get_io_base_address() | ||||
|  * | ||||
|  * @param pcie_port PCIe port the IO is for | ||||
|  * | ||||
|  * @return Size of the IO window | ||||
|  */ | ||||
| u64 cvmx_pcie_get_io_size(int pcie_port); | ||||
| 
 | ||||
| /**
 | ||||
|  * Return the Core virtual base address for PCIe MEM access. Memory is | ||||
|  * read/written as an offset from this address. | ||||
|  * | ||||
|  * @param pcie_port PCIe port the IO is for | ||||
|  * | ||||
|  * @return 64bit Octeon IO base address for read/write | ||||
|  */ | ||||
| u64 cvmx_pcie_get_mem_base_address(int pcie_port); | ||||
| 
 | ||||
| /**
 | ||||
|  * Size of the Mem address region returned at address | ||||
|  * cvmx_pcie_get_mem_base_address() | ||||
|  * | ||||
|  * @param pcie_port PCIe port the IO is for | ||||
|  * | ||||
|  * @return Size of the Mem window | ||||
|  */ | ||||
| u64 cvmx_pcie_get_mem_size(int pcie_port); | ||||
| 
 | ||||
| /**
 | ||||
|  * Initialize a PCIe port for use in host (RC) mode. It does not enumerate the bus. | ||||
|  * | ||||
|  * @param pcie_port PCIe port to initialize | ||||
|  * | ||||
|  * @return Zero on success | ||||
|  */ | ||||
| int cvmx_pcie_rc_initialize(int pcie_port); | ||||
| 
 | ||||
| /**
 | ||||
|  * Shutdown a PCIe port and put it in reset | ||||
|  * | ||||
|  * @param pcie_port PCIe port to shutdown | ||||
|  * | ||||
|  * @return Zero on success | ||||
|  */ | ||||
| int cvmx_pcie_rc_shutdown(int pcie_port); | ||||
| 
 | ||||
| /**
 | ||||
|  * Read 8bits from a Device's config space | ||||
|  * | ||||
|  * @param pcie_port PCIe port the device is on | ||||
|  * @param bus       Sub bus | ||||
|  * @param dev       Device ID | ||||
|  * @param fn        Device sub function | ||||
|  * @param reg       Register to access | ||||
|  * | ||||
|  * @return Result of the read | ||||
|  */ | ||||
| u8 cvmx_pcie_config_read8(int pcie_port, int bus, int dev, int fn, int reg); | ||||
| 
 | ||||
| /**
 | ||||
|  * Read 16bits from a Device's config space | ||||
|  * | ||||
|  * @param pcie_port PCIe port the device is on | ||||
|  * @param bus       Sub bus | ||||
|  * @param dev       Device ID | ||||
|  * @param fn        Device sub function | ||||
|  * @param reg       Register to access | ||||
|  * | ||||
|  * @return Result of the read | ||||
|  */ | ||||
| u16 cvmx_pcie_config_read16(int pcie_port, int bus, int dev, int fn, int reg); | ||||
| 
 | ||||
| /**
 | ||||
|  * Read 32bits from a Device's config space | ||||
|  * | ||||
|  * @param pcie_port PCIe port the device is on | ||||
|  * @param bus       Sub bus | ||||
|  * @param dev       Device ID | ||||
|  * @param fn        Device sub function | ||||
|  * @param reg       Register to access | ||||
|  * | ||||
|  * @return Result of the read | ||||
|  */ | ||||
| u32 cvmx_pcie_config_read32(int pcie_port, int bus, int dev, int fn, int reg); | ||||
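| 
 | ||||
| /* Probe sketch (illustrative; the bus/dev/fn values are assumptions): after
 | ||||
|  * bringing up a root port, an all-ones read usually means no device is | ||||
|  * present: | ||||
|  * | ||||
|  *	if (cvmx_pcie_rc_initialize(0) == 0) { | ||||
|  *		u32 id = cvmx_pcie_config_read32(0, 0, 0, 0, 0); | ||||
|  * | ||||
|  *		if (id != 0xffffffff) | ||||
|  *			debug("found device, id 0x%08x\n", id); | ||||
|  *	} | ||||
|  */ | ||||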
| 
 | ||||
| /**
 | ||||
|  * Write 8bits to a Device's config space | ||||
|  * | ||||
|  * @param pcie_port PCIe port the device is on | ||||
|  * @param bus       Sub bus | ||||
|  * @param dev       Device ID | ||||
|  * @param fn        Device sub function | ||||
|  * @param reg       Register to access | ||||
|  * @param val       Value to write | ||||
|  */ | ||||
| void cvmx_pcie_config_write8(int pcie_port, int bus, int dev, int fn, int reg, u8 val); | ||||
| 
 | ||||
| /**
 | ||||
|  * Write 16bits to a Device's config space | ||||
|  * | ||||
|  * @param pcie_port PCIe port the device is on | ||||
|  * @param bus       Sub bus | ||||
|  * @param dev       Device ID | ||||
|  * @param fn        Device sub function | ||||
|  * @param reg       Register to access | ||||
|  * @param val       Value to write | ||||
|  */ | ||||
| void cvmx_pcie_config_write16(int pcie_port, int bus, int dev, int fn, int reg, u16 val); | ||||
| 
 | ||||
| /**
 | ||||
|  * Write 32bits to a Device's config space | ||||
|  * | ||||
|  * @param pcie_port PCIe port the device is on | ||||
|  * @param bus       Sub bus | ||||
|  * @param dev       Device ID | ||||
|  * @param fn        Device sub function | ||||
|  * @param reg       Register to access | ||||
|  * @param val       Value to write | ||||
|  */ | ||||
| void cvmx_pcie_config_write32(int pcie_port, int bus, int dev, int fn, int reg, u32 val); | ||||
| 
 | ||||
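| /* Illustrative sketch (not from the original sources): read the 32-bit | ||||
|  * vendor/device ID word of the device at bus 0, dev 0, func 0 behind a | ||||
|  * root port; config space offset 0 holds the vendor ID (15:0) and the | ||||
|  * device ID (31:16). | ||||
|  */ | ||||
| static inline u32 example_pcie_read_ids(int pcie_port) | ||||
| { | ||||
| 	return cvmx_pcie_config_read32(pcie_port, 0, 0, 0, 0); | ||||
| } | ||||
| 
 | ||||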
| /**
 | ||||
|  * Read a PCIe config space register indirectly. This is used for | ||||
|  * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???. | ||||
|  * | ||||
|  * @param pcie_port  PCIe port to read from | ||||
|  * @param cfg_offset Address to read | ||||
|  * | ||||
|  * @return Value read | ||||
|  */ | ||||
| u32 cvmx_pcie_cfgx_read(int pcie_port, u32 cfg_offset); | ||||
| u32 cvmx_pcie_cfgx_read_node(int node, int pcie_port, u32 cfg_offset); | ||||
| 
 | ||||
| /**
 | ||||
|  * Write a PCIe config space register indirectly. This is used for | ||||
|  * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???. | ||||
|  * | ||||
|  * @param pcie_port  PCIe port to write to | ||||
|  * @param cfg_offset Address to write | ||||
|  * @param val        Value to write | ||||
|  */ | ||||
| void cvmx_pcie_cfgx_write(int pcie_port, u32 cfg_offset, u32 val); | ||||
| void cvmx_pcie_cfgx_write_node(int node, int pcie_port, u32 cfg_offset, u32 val); | ||||
| 
 | ||||
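| /* Illustrative sketch (not from the original sources): read-modify-write | ||||
|  * of an indirect config register; the 0x78 offset and bit 0 are | ||||
|  * placeholders, not a documented register layout. | ||||
|  */ | ||||
| static inline void example_pcie_cfgx_rmw(int pcie_port) | ||||
| { | ||||
| 	u32 val = cvmx_pcie_cfgx_read(pcie_port, 0x78); | ||||
| 
 | ||||
| 	cvmx_pcie_cfgx_write(pcie_port, 0x78, val | 1); | ||||
| } | ||||
| 
 | ||||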
| /**
 | ||||
|  * Write a 32bit value to the Octeon NPEI register space | ||||
|  * | ||||
|  * @param address Address to write to | ||||
|  * @param val     Value to write | ||||
|  */ | ||||
| static inline void cvmx_pcie_npei_write32(u64 address, u32 val) | ||||
| { | ||||
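| 	/* XOR with 4 selects the proper 32-bit half of the 64-bit CSR | ||||
| 	 * window (assumed to compensate for big-endian word ordering). | ||||
| 	 */ | ||||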
| 	cvmx_write64_uint32(address ^ 4, val); | ||||
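| 	/* Read back so the posted write is flushed before returning */ | ||||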
| 	cvmx_read64_uint32(address ^ 4); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Read a 32bit value from the Octeon NPEI register space | ||||
|  * | ||||
|  * @param address Address to read | ||||
|  * @return The result | ||||
|  */ | ||||
| static inline u32 cvmx_pcie_npei_read32(u64 address) | ||||
| { | ||||
| 	return cvmx_read64_uint32(address ^ 4); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Initialize a PCIe port for use in target(EP) mode. | ||||
|  * | ||||
|  * @param pcie_port PCIe port to initialize | ||||
|  * | ||||
|  * @return Zero on success | ||||
|  */ | ||||
| int cvmx_pcie_ep_initialize(int pcie_port); | ||||
| 
 | ||||
| /**
 | ||||
|  * Wait for posted PCIe read/writes to reach the other side of | ||||
|  * the internal PCIe switch. This will ensure that core | ||||
|  * read/writes are posted before anything after this function | ||||
|  * is called. This may be necessary when writing to memory that | ||||
|  * will later be read using the DMA/PKT engines. | ||||
|  * | ||||
|  * @param pcie_port PCIe port to wait for | ||||
|  */ | ||||
| void cvmx_pcie_wait_for_pending(int pcie_port); | ||||
| 
 | ||||
| /**
 | ||||
|  * Returns whether a PCIe port is in host or target mode. | ||||
|  * | ||||
|  * @param pcie_port PCIe port number (PEM number) | ||||
|  * | ||||
|  * @return 0 if PCIe port is in target mode, !0 if in host mode. | ||||
|  */ | ||||
| int cvmx_pcie_is_host_mode(int pcie_port); | ||||
| 
 | ||||
| #endif | ||||
										
											
File diff suppressed because it is too large
							|  | @ -0,0 +1,157 @@ | |||
| /* SPDX-License-Identifier: GPL-2.0 */ | ||||
| /*
 | ||||
|  * Copyright (C) 2020 Marvell International Ltd. | ||||
|  * | ||||
|  * Resource management for PKI resources. | ||||
|  */ | ||||
| 
 | ||||
| #ifndef __CVMX_PKI_RESOURCES_H__ | ||||
| #define __CVMX_PKI_RESOURCES_H__ | ||||
| 
 | ||||
| /**
 | ||||
|  * This function allocates/reserves a style from pool of global styles per node. | ||||
|  * @param node	 node to allocate style from. | ||||
|  * @param style	 style to allocate; if -1, the first available style | ||||
|  *		 from the style resource will be allocated. If style is a positive | ||||
|  *		 number and in range, it will try to allocate the specified style. | ||||
|  * @return	 style number on success, -1 on failure. | ||||
|  */ | ||||
| int cvmx_pki_style_alloc(int node, int style); | ||||
| 
 | ||||
| /**
 | ||||
|  * This function allocates/reserves a cluster group from the per-node | ||||
|  * cluster group resources. | ||||
|  * @param node		node to allocate cluster group from. | ||||
|  * @param cl_grp	cluster group to allocate/reserve; if -1, | ||||
|  *			allocate any available cluster group. | ||||
|  * @return		cluster group number or -1 on failure | ||||
|  */ | ||||
| int cvmx_pki_cluster_grp_alloc(int node, int cl_grp); | ||||
| 
 | ||||
| /**
 | ||||
|  * This function allocates/reserves clusters from the per-node | ||||
|  * cluster resources. | ||||
|  * @param node		node to allocate clusters from. | ||||
|  * @param cluster_mask	mask of clusters to allocate/reserve; if -1, | ||||
|  *			allocate any available clusters. | ||||
|  * @param num_clusters	number of clusters that will be allocated | ||||
|  */ | ||||
| int cvmx_pki_cluster_alloc(int node, int num_clusters, u64 *cluster_mask); | ||||
| 
 | ||||
| /**
 | ||||
|  * This function allocates/reserves a pcam entry from a node. | ||||
|  * @param node		node to allocate pcam entry from. | ||||
|  * @param index	index of pcam entry (0-191); if -1, | ||||
|  *			allocate any available pcam entry. | ||||
|  * @param bank		pcam bank to allocate/reserve the pcam entry from | ||||
|  * @param cluster_mask  mask of clusters from which pcam entry is needed. | ||||
|  * @return		pcam entry or -1 on failure | ||||
|  */ | ||||
| int cvmx_pki_pcam_entry_alloc(int node, int index, int bank, u64 cluster_mask); | ||||
| 
 | ||||
| /**
 | ||||
|  * This function allocates/reserves QPG table entries per node. | ||||
|  * @param node		node number. | ||||
|  * @param base_offset	base_offset in qpg table. If -1, the first available | ||||
|  *			qpg base_offset will be allocated. If base_offset is a positive | ||||
|  *			number and in range, it will try to allocate the specified base_offset. | ||||
|  * @param count		number of consecutive qpg entries to allocate, starting | ||||
|  *			from base_offset. | ||||
|  * @return		qpg table base offset number on success, -1 on failure. | ||||
|  */ | ||||
| int cvmx_pki_qpg_entry_alloc(int node, int base_offset, int count); | ||||
| 
 | ||||
| /**
 | ||||
|  * This function frees a style from pool of global styles per node. | ||||
|  * @param node	 node to free style from. | ||||
|  * @param style	 style to free | ||||
|  * @return	 0 on success, -1 on failure. | ||||
|  */ | ||||
| int cvmx_pki_style_free(int node, int style); | ||||
| 
 | ||||
| /**
 | ||||
|  * This function frees a cluster group from the per-node | ||||
|  * cluster group resources. | ||||
|  * @param node		node to free cluster group from. | ||||
|  * @param cl_grp	cluster group to free | ||||
|  * @return		0 on success or -1 on failure | ||||
|  */ | ||||
| int cvmx_pki_cluster_grp_free(int node, int cl_grp); | ||||
| 
 | ||||
| /**
 | ||||
|  * This function frees QPG table entries per node. | ||||
|  * @param node		node number. | ||||
|  * @param base_offset	base_offset in qpg table from which entries are freed. | ||||
|  * @param count		number of consecutive qpg entries to free, starting | ||||
|  *			from base_offset. | ||||
|  * @return		0 on success, -1 on failure. | ||||
|  */ | ||||
| int cvmx_pki_qpg_entry_free(int node, int base_offset, int count); | ||||
| 
 | ||||
| /**
 | ||||
|  * This function frees clusters from the per-node | ||||
|  * cluster resources. | ||||
|  * @param node		node to free clusters from. | ||||
|  * @param cluster_mask  mask of clusters that need freeing | ||||
|  * @return		0 on success or -1 on failure | ||||
|  */ | ||||
| int cvmx_pki_cluster_free(int node, u64 cluster_mask); | ||||
| 
 | ||||
| /**
 | ||||
|  * This function frees a pcam entry from a node. | ||||
|  * @param node		node to free pcam entry from. | ||||
|  * @param index	index of pcam entry (0-191) to be freed. | ||||
|  * @param bank		pcam bank to free the pcam entry from | ||||
|  * @param cluster_mask  mask of clusters from which pcam entry is freed. | ||||
|  * @return		0 on success or -1 on failure | ||||
|  */ | ||||
| int cvmx_pki_pcam_entry_free(int node, int index, int bank, u64 cluster_mask); | ||||
| 
 | ||||
| /**
 | ||||
|  * This function allocates/reserves a bpid from pool of global bpid per node. | ||||
|  * @param node	node to allocate bpid from. | ||||
|  * @param bpid	bpid to allocate; if -1, the first available | ||||
|  *		bpid from the bpid resource will be allocated. If bpid is a positive | ||||
|  *		number and in range, it will try to allocate the specified bpid. | ||||
|  * @return	bpid number on success, | ||||
|  *		-1 on alloc failure. | ||||
|  *		-2 on resource already reserved. | ||||
|  */ | ||||
| int cvmx_pki_bpid_alloc(int node, int bpid); | ||||
| 
 | ||||
| /**
 | ||||
|  * This function frees a bpid from pool of global bpid per node. | ||||
|  * @param node	 node to free bpid from. | ||||
|  * @param bpid	 bpid to free | ||||
|  * @return	 0 on success, -1 on failure. | ||||
|  */ | ||||
| int cvmx_pki_bpid_free(int node, int bpid); | ||||
| 
 | ||||
| /**
 | ||||
|  * This function allocates/reserves an index from pool of global MTAG-IDX per node. | ||||
|  * @param node	node to allocate index from. | ||||
|  * @param idx	index to allocate; if -1, the first | ||||
|  *		available index will be allocated. | ||||
|  * @return	MTAG index number on success, | ||||
|  *		-1 on alloc failure. | ||||
|  *		-2 on resource already reserved. | ||||
|  */ | ||||
| int cvmx_pki_mtag_idx_alloc(int node, int idx); | ||||
| 
 | ||||
| /**
 | ||||
|  * This function frees an index from pool of global MTAG-IDX per node. | ||||
|  * @param node	 node to free the index from. | ||||
|  * @param idx	 index to free | ||||
|  * @return	 0 on success, -1 on failure. | ||||
|  */ | ||||
| int cvmx_pki_mtag_idx_free(int node, int idx); | ||||
| 
 | ||||
| /**
 | ||||
|  * This function frees all the PKI software resources | ||||
|  * (clusters, styles, qpg_entry, pcam_entry etc) for the specified node. | ||||
|  * | ||||
|  * @param node  Node number. | ||||
|  */ | ||||
| void __cvmx_pki_global_rsrc_free(int node); | ||||
| 
 | ||||
| #endif /* __CVMX_PKI_RESOURCES_H__ */ | ||||
|  | @ -0,0 +1,970 @@ | |||
| /* SPDX-License-Identifier: GPL-2.0 */ | ||||
| /*
 | ||||
|  * Copyright (C) 2020 Marvell International Ltd. | ||||
|  * | ||||
|  * Interface to the hardware Packet Input Data unit. | ||||
|  */ | ||||
| 
 | ||||
| #ifndef __CVMX_PKI_H__ | ||||
| #define __CVMX_PKI_H__ | ||||
| 
 | ||||
| #include "cvmx-fpa3.h" | ||||
| #include "cvmx-helper-util.h" | ||||
| #include "cvmx-helper-cfg.h" | ||||
| #include "cvmx-error.h" | ||||
| 
 | ||||
| /* PKI AURA and BPID count are equal to FPA AURA count */ | ||||
| #define CVMX_PKI_NUM_AURA	       (cvmx_fpa3_num_auras()) | ||||
| #define CVMX_PKI_NUM_BPID	       (cvmx_fpa3_num_auras()) | ||||
| #define CVMX_PKI_NUM_SSO_GROUP	       (cvmx_sso_num_xgrp()) | ||||
| #define CVMX_PKI_NUM_CLUSTER_GROUP_MAX 1 | ||||
| #define CVMX_PKI_NUM_CLUSTER_GROUP     (cvmx_pki_num_cl_grp()) | ||||
| #define CVMX_PKI_NUM_CLUSTER	       (cvmx_pki_num_clusters()) | ||||
| 
 | ||||
| /* FIXME: Reduce some of these values, convert to routines XXX */ | ||||
| #define CVMX_PKI_NUM_CHANNEL	    4096 | ||||
| #define CVMX_PKI_NUM_PKIND	    64 | ||||
| #define CVMX_PKI_NUM_INTERNAL_STYLE 256 | ||||
| #define CVMX_PKI_NUM_FINAL_STYLE    64 | ||||
| #define CVMX_PKI_NUM_QPG_ENTRY	    2048 | ||||
| #define CVMX_PKI_NUM_MTAG_IDX	    (32 / 4) /* 32 registers grouped by 4*/ | ||||
| #define CVMX_PKI_NUM_LTYPE	    32 | ||||
| #define CVMX_PKI_NUM_PCAM_BANK	    2 | ||||
| #define CVMX_PKI_NUM_PCAM_ENTRY	    192 | ||||
| #define CVMX_PKI_NUM_FRAME_CHECK    2 | ||||
| #define CVMX_PKI_NUM_BELTYPE	    32 | ||||
| #define CVMX_PKI_MAX_FRAME_SIZE	    65535 | ||||
| #define CVMX_PKI_FIND_AVAL_ENTRY    (-1) | ||||
| #define CVMX_PKI_CLUSTER_ALL	    0xf | ||||
| 
 | ||||
| #ifdef CVMX_SUPPORT_SEPARATE_CLUSTER_CONFIG | ||||
| #define CVMX_PKI_TOTAL_PCAM_ENTRY                                                                  \ | ||||
| 	((CVMX_PKI_NUM_CLUSTER) * (CVMX_PKI_NUM_PCAM_BANK) * (CVMX_PKI_NUM_PCAM_ENTRY)) | ||||
| #else | ||||
| #define CVMX_PKI_TOTAL_PCAM_ENTRY (CVMX_PKI_NUM_PCAM_BANK * CVMX_PKI_NUM_PCAM_ENTRY) | ||||
| #endif | ||||
| 
 | ||||
| static inline unsigned int cvmx_pki_num_clusters(void) | ||||
| { | ||||
| 	if (OCTEON_IS_MODEL(OCTEON_CN73XX) || OCTEON_IS_MODEL(OCTEON_CNF75XX)) | ||||
| 		return 2; | ||||
| 	return 4; | ||||
| } | ||||
| 
 | ||||
| static inline unsigned int cvmx_pki_num_cl_grp(void) | ||||
| { | ||||
| 	if (OCTEON_IS_MODEL(OCTEON_CN73XX) || OCTEON_IS_MODEL(OCTEON_CNF75XX) || | ||||
| 	    OCTEON_IS_MODEL(OCTEON_CN78XX)) | ||||
| 		return 1; | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| enum cvmx_pki_pkind_parse_mode { | ||||
| 	CVMX_PKI_PARSE_LA_TO_LG = 0,  /* Parse LA(L2) to LG */ | ||||
| 	CVMX_PKI_PARSE_LB_TO_LG = 1,  /* Parse LB(custom) to LG */ | ||||
| 	CVMX_PKI_PARSE_LC_TO_LG = 3,  /* Parse LC(L3) to LG */ | ||||
| 	CVMX_PKI_PARSE_LG = 0x3f,     /* Parse LG */ | ||||
| 	CVMX_PKI_PARSE_NOTHING = 0x7f /* Parse nothing */ | ||||
| }; | ||||
| 
 | ||||
| enum cvmx_pki_parse_mode_chg { | ||||
| 	CVMX_PKI_PARSE_NO_CHG = 0x0, | ||||
| 	CVMX_PKI_PARSE_SKIP_TO_LB = 0x1, | ||||
| 	CVMX_PKI_PARSE_SKIP_TO_LC = 0x3, | ||||
| 	CVMX_PKI_PARSE_SKIP_TO_LD = 0x7, | ||||
| 	CVMX_PKI_PARSE_SKIP_TO_LG = 0x3f, | ||||
| 	CVMX_PKI_PARSE_SKIP_ALL = 0x7f, | ||||
| }; | ||||
| 
 | ||||
| enum cvmx_pki_l2_len_mode { PKI_L2_LENCHK_EQUAL_GREATER = 0, PKI_L2_LENCHK_EQUAL_ONLY }; | ||||
| 
 | ||||
| enum cvmx_pki_cache_mode { | ||||
| 	CVMX_PKI_OPC_MODE_STT = 0LL,	  /* All blocks write through to DRAM */ | ||||
| 	CVMX_PKI_OPC_MODE_STF = 1LL,	  /* All blocks into L2 */ | ||||
| 	CVMX_PKI_OPC_MODE_STF1_STT = 2LL, /* 1st block L2, rest DRAM */ | ||||
| 	CVMX_PKI_OPC_MODE_STF2_STT = 3LL  /* 1st, 2nd blocks L2, rest DRAM */ | ||||
| }; | ||||
| 
 | ||||
| /**
 | ||||
|  * Tag type definitions | ||||
|  */ | ||||
| enum cvmx_sso_tag_type { | ||||
| 	CVMX_SSO_TAG_TYPE_ORDERED = 0L, | ||||
| 	CVMX_SSO_TAG_TYPE_ATOMIC = 1L, | ||||
| 	CVMX_SSO_TAG_TYPE_UNTAGGED = 2L, | ||||
| 	CVMX_SSO_TAG_TYPE_EMPTY = 3L | ||||
| }; | ||||
| 
 | ||||
| enum cvmx_pki_qpg_qos { | ||||
| 	CVMX_PKI_QPG_QOS_NONE = 0, | ||||
| 	CVMX_PKI_QPG_QOS_VLAN, | ||||
| 	CVMX_PKI_QPG_QOS_MPLS, | ||||
| 	CVMX_PKI_QPG_QOS_DSA_SRC, | ||||
| 	CVMX_PKI_QPG_QOS_DIFFSERV, | ||||
| 	CVMX_PKI_QPG_QOS_HIGIG, | ||||
| }; | ||||
| 
 | ||||
| enum cvmx_pki_wqe_vlan { CVMX_PKI_USE_FIRST_VLAN = 0, CVMX_PKI_USE_SECOND_VLAN }; | ||||
| 
 | ||||
| /**
 | ||||
|  * Controls how the PKI statistics counters are handled | ||||
|  * The PKI_STAT*_X registers can be indexed either by port kind (pkind), or | ||||
|  * final style. (Does not apply to the PKI_STAT_INB* registers.) | ||||
|  *    0 = X represents the packet’s pkind | ||||
|  *    1 = X represents the low 6-bits of packet’s final style | ||||
|  */ | ||||
| enum cvmx_pki_stats_mode { CVMX_PKI_STAT_MODE_PKIND, CVMX_PKI_STAT_MODE_STYLE }; | ||||
| 
 | ||||
| enum cvmx_pki_fpa_wait { CVMX_PKI_DROP_PKT, CVMX_PKI_WAIT_PKT }; | ||||
| 
 | ||||
| #define PKI_BELTYPE_E__NONE_M 0x0 | ||||
| #define PKI_BELTYPE_E__MISC_M 0x1 | ||||
| #define PKI_BELTYPE_E__IP4_M  0x2 | ||||
| #define PKI_BELTYPE_E__IP6_M  0x3 | ||||
| #define PKI_BELTYPE_E__TCP_M  0x4 | ||||
| #define PKI_BELTYPE_E__UDP_M  0x5 | ||||
| #define PKI_BELTYPE_E__SCTP_M 0x6 | ||||
| #define PKI_BELTYPE_E__SNAP_M 0x7 | ||||
| 
 | ||||
| /* PKI_BELTYPE_E_t */ | ||||
| enum cvmx_pki_beltype { | ||||
| 	CVMX_PKI_BELTYPE_NONE = PKI_BELTYPE_E__NONE_M, | ||||
| 	CVMX_PKI_BELTYPE_MISC = PKI_BELTYPE_E__MISC_M, | ||||
| 	CVMX_PKI_BELTYPE_IP4 = PKI_BELTYPE_E__IP4_M, | ||||
| 	CVMX_PKI_BELTYPE_IP6 = PKI_BELTYPE_E__IP6_M, | ||||
| 	CVMX_PKI_BELTYPE_TCP = PKI_BELTYPE_E__TCP_M, | ||||
| 	CVMX_PKI_BELTYPE_UDP = PKI_BELTYPE_E__UDP_M, | ||||
| 	CVMX_PKI_BELTYPE_SCTP = PKI_BELTYPE_E__SCTP_M, | ||||
| 	CVMX_PKI_BELTYPE_SNAP = PKI_BELTYPE_E__SNAP_M, | ||||
| 	CVMX_PKI_BELTYPE_MAX = CVMX_PKI_BELTYPE_SNAP | ||||
| }; | ||||
| 
 | ||||
| struct cvmx_pki_frame_len { | ||||
| 	u16 maxlen; | ||||
| 	u16 minlen; | ||||
| }; | ||||
| 
 | ||||
| struct cvmx_pki_tag_fields { | ||||
| 	u64 layer_g_src : 1; | ||||
| 	u64 layer_f_src : 1; | ||||
| 	u64 layer_e_src : 1; | ||||
| 	u64 layer_d_src : 1; | ||||
| 	u64 layer_c_src : 1; | ||||
| 	u64 layer_b_src : 1; | ||||
| 	u64 layer_g_dst : 1; | ||||
| 	u64 layer_f_dst : 1; | ||||
| 	u64 layer_e_dst : 1; | ||||
| 	u64 layer_d_dst : 1; | ||||
| 	u64 layer_c_dst : 1; | ||||
| 	u64 layer_b_dst : 1; | ||||
| 	u64 input_port : 1; | ||||
| 	u64 mpls_label : 1; | ||||
| 	u64 first_vlan : 1; | ||||
| 	u64 second_vlan : 1; | ||||
| 	u64 ip_prot_nexthdr : 1; | ||||
| 	u64 tag_sync : 1; | ||||
| 	u64 tag_spi : 1; | ||||
| 	u64 tag_gtp : 1; | ||||
| 	u64 tag_vni : 1; | ||||
| }; | ||||
| 
 | ||||
| struct cvmx_pki_pkind_parse { | ||||
| 	u64 mpls_en : 1; | ||||
| 	u64 inst_hdr : 1; | ||||
| 	u64 lg_custom : 1; | ||||
| 	u64 fulc_en : 1; | ||||
| 	u64 dsa_en : 1; | ||||
| 	u64 hg2_en : 1; | ||||
| 	u64 hg_en : 1; | ||||
| }; | ||||
| 
 | ||||
| struct cvmx_pki_pool_config { | ||||
| 	int pool_num; | ||||
| 	cvmx_fpa3_pool_t pool; | ||||
| 	u64 buffer_size; | ||||
| 	u64 buffer_count; | ||||
| }; | ||||
| 
 | ||||
| struct cvmx_pki_qpg_config { | ||||
| 	int qpg_base; | ||||
| 	int port_add; | ||||
| 	int aura_num; | ||||
| 	int grp_ok; | ||||
| 	int grp_bad; | ||||
| 	int grptag_ok; | ||||
| 	int grptag_bad; | ||||
| }; | ||||
| 
 | ||||
| struct cvmx_pki_aura_config { | ||||
| 	int aura_num; | ||||
| 	int pool_num; | ||||
| 	cvmx_fpa3_pool_t pool; | ||||
| 	cvmx_fpa3_gaura_t aura; | ||||
| 	int buffer_count; | ||||
| }; | ||||
| 
 | ||||
| struct cvmx_pki_cluster_grp_config { | ||||
| 	int grp_num; | ||||
| 	u64 cluster_mask; /* Bit mask of cluster assigned to this cluster group */ | ||||
| }; | ||||
| 
 | ||||
| struct cvmx_pki_sso_grp_config { | ||||
| 	int group; | ||||
| 	int priority; | ||||
| 	int weight; | ||||
| 	int affinity; | ||||
| 	u64 core_mask; | ||||
| 	u8 core_mask_set; | ||||
| }; | ||||
| 
 | ||||
| /* This is per style structure for configuring port parameters,
 | ||||
|  * it is a kind of profile which can be assigned to any port. | ||||
|  * If multiple ports are assigned the same style, be aware that modifying | ||||
|  * that style will modify the respective parameters for all the ports | ||||
|  * which are using this style. | ||||
|  */ | ||||
| struct cvmx_pki_style_parm { | ||||
| 	bool ip6_udp_opt; | ||||
| 	bool lenerr_en; | ||||
| 	bool maxerr_en; | ||||
| 	bool minerr_en; | ||||
| 	u8 lenerr_eqpad; | ||||
| 	u8 minmax_sel; | ||||
| 	bool qpg_dis_grptag; | ||||
| 	bool fcs_strip; | ||||
| 	bool fcs_chk; | ||||
| 	bool rawdrp; | ||||
| 	bool force_drop; | ||||
| 	bool nodrop; | ||||
| 	bool qpg_dis_padd; | ||||
| 	bool qpg_dis_grp; | ||||
| 	bool qpg_dis_aura; | ||||
| 	u16 qpg_base; | ||||
| 	enum cvmx_pki_qpg_qos qpg_qos; | ||||
| 	u8 qpg_port_sh; | ||||
| 	u8 qpg_port_msb; | ||||
| 	u8 apad_nip; | ||||
| 	u8 wqe_vs; | ||||
| 	enum cvmx_sso_tag_type tag_type; | ||||
| 	bool pkt_lend; | ||||
| 	u8 wqe_hsz; | ||||
| 	u16 wqe_skip; | ||||
| 	u16 first_skip; | ||||
| 	u16 later_skip; | ||||
| 	enum cvmx_pki_cache_mode cache_mode; | ||||
| 	u8 dis_wq_dat; | ||||
| 	u64 mbuff_size; | ||||
| 	bool len_lg; | ||||
| 	bool len_lf; | ||||
| 	bool len_le; | ||||
| 	bool len_ld; | ||||
| 	bool len_lc; | ||||
| 	bool len_lb; | ||||
| 	bool csum_lg; | ||||
| 	bool csum_lf; | ||||
| 	bool csum_le; | ||||
| 	bool csum_ld; | ||||
| 	bool csum_lc; | ||||
| 	bool csum_lb; | ||||
| }; | ||||
| 
 | ||||
| /* This is per style structure for configuring port's tag configuration,
 | ||||
|  * it is a kind of profile which can be assigned to any port. | ||||
|  * If multiple ports are assigned the same style, be aware that modifying that style | ||||
|  * will modify the respective parameters for all the ports which are | ||||
|  * using this style. */ | ||||
| enum cvmx_pki_mtag_ptrsel { | ||||
| 	CVMX_PKI_MTAG_PTRSEL_SOP = 0, | ||||
| 	CVMX_PKI_MTAG_PTRSEL_LA = 8, | ||||
| 	CVMX_PKI_MTAG_PTRSEL_LB = 9, | ||||
| 	CVMX_PKI_MTAG_PTRSEL_LC = 10, | ||||
| 	CVMX_PKI_MTAG_PTRSEL_LD = 11, | ||||
| 	CVMX_PKI_MTAG_PTRSEL_LE = 12, | ||||
| 	CVMX_PKI_MTAG_PTRSEL_LF = 13, | ||||
| 	CVMX_PKI_MTAG_PTRSEL_LG = 14, | ||||
| 	CVMX_PKI_MTAG_PTRSEL_VL = 15, | ||||
| }; | ||||
| 
 | ||||
| struct cvmx_pki_mask_tag { | ||||
| 	bool enable; | ||||
| 	int base;   /* CVMX_PKI_MTAG_PTRSEL_XXX */ | ||||
| 	int offset; /* Offset from base. */ | ||||
| 	u64 val;    /* Bitmask:
 | ||||
| 		1 = enabled, 0 = disabled, for each byte in the 64-byte array. */ | ||||
| }; | ||||
| 
 | ||||
| struct cvmx_pki_style_tag_cfg { | ||||
| 	struct cvmx_pki_tag_fields tag_fields; | ||||
| 	struct cvmx_pki_mask_tag mask_tag[4]; | ||||
| }; | ||||
| 
 | ||||
| struct cvmx_pki_style_config { | ||||
| 	struct cvmx_pki_style_parm parm_cfg; | ||||
| 	struct cvmx_pki_style_tag_cfg tag_cfg; | ||||
| }; | ||||
| 
 | ||||
| struct cvmx_pki_pkind_config { | ||||
| 	u8 cluster_grp; | ||||
| 	bool fcs_pres; | ||||
| 	struct cvmx_pki_pkind_parse parse_en; | ||||
| 	enum cvmx_pki_pkind_parse_mode initial_parse_mode; | ||||
| 	u8 fcs_skip; | ||||
| 	u8 inst_skip; | ||||
| 	int initial_style; | ||||
| 	bool custom_l2_hdr; | ||||
| 	u8 l2_scan_offset; | ||||
| 	u64 lg_scan_offset; | ||||
| }; | ||||
| 
 | ||||
| struct cvmx_pki_port_config { | ||||
| 	struct cvmx_pki_pkind_config pkind_cfg; | ||||
| 	struct cvmx_pki_style_config style_cfg; | ||||
| }; | ||||
| 
 | ||||
| struct cvmx_pki_global_parse { | ||||
| 	u64 virt_pen : 1; | ||||
| 	u64 clg_pen : 1; | ||||
| 	u64 cl2_pen : 1; | ||||
| 	u64 l4_pen : 1; | ||||
| 	u64 il3_pen : 1; | ||||
| 	u64 l3_pen : 1; | ||||
| 	u64 mpls_pen : 1; | ||||
| 	u64 fulc_pen : 1; | ||||
| 	u64 dsa_pen : 1; | ||||
| 	u64 hg_pen : 1; | ||||
| }; | ||||
| 
 | ||||
| struct cvmx_pki_tag_sec { | ||||
| 	u16 dst6; | ||||
| 	u16 src6; | ||||
| 	u16 dst; | ||||
| 	u16 src; | ||||
| }; | ||||
| 
 | ||||
| struct cvmx_pki_global_config { | ||||
| 	u64 cluster_mask[CVMX_PKI_NUM_CLUSTER_GROUP_MAX]; | ||||
| 	enum cvmx_pki_stats_mode stat_mode; | ||||
| 	enum cvmx_pki_fpa_wait fpa_wait; | ||||
| 	struct cvmx_pki_global_parse gbl_pen; | ||||
| 	struct cvmx_pki_tag_sec tag_secret; | ||||
| 	struct cvmx_pki_frame_len frm_len[CVMX_PKI_NUM_FRAME_CHECK]; | ||||
| 	enum cvmx_pki_beltype ltype_map[CVMX_PKI_NUM_BELTYPE]; | ||||
| 	int pki_enable; | ||||
| }; | ||||
| 
 | ||||
| #define CVMX_PKI_PCAM_TERM_E_NONE_M	 0x0 | ||||
| #define CVMX_PKI_PCAM_TERM_E_L2_CUSTOM_M 0x2 | ||||
| #define CVMX_PKI_PCAM_TERM_E_HIGIGD_M	 0x4 | ||||
| #define CVMX_PKI_PCAM_TERM_E_HIGIG_M	 0x5 | ||||
| #define CVMX_PKI_PCAM_TERM_E_SMACH_M	 0x8 | ||||
| #define CVMX_PKI_PCAM_TERM_E_SMACL_M	 0x9 | ||||
| #define CVMX_PKI_PCAM_TERM_E_DMACH_M	 0xA | ||||
| #define CVMX_PKI_PCAM_TERM_E_DMACL_M	 0xB | ||||
| #define CVMX_PKI_PCAM_TERM_E_GLORT_M	 0x12 | ||||
| #define CVMX_PKI_PCAM_TERM_E_DSA_M	 0x13 | ||||
| #define CVMX_PKI_PCAM_TERM_E_ETHTYPE0_M	 0x18 | ||||
| #define CVMX_PKI_PCAM_TERM_E_ETHTYPE1_M	 0x19 | ||||
| #define CVMX_PKI_PCAM_TERM_E_ETHTYPE2_M	 0x1A | ||||
| #define CVMX_PKI_PCAM_TERM_E_ETHTYPE3_M	 0x1B | ||||
| #define CVMX_PKI_PCAM_TERM_E_MPLS0_M	 0x1E | ||||
| #define CVMX_PKI_PCAM_TERM_E_L3_SIPHH_M	 0x1F | ||||
| #define CVMX_PKI_PCAM_TERM_E_L3_SIPMH_M	 0x20 | ||||
| #define CVMX_PKI_PCAM_TERM_E_L3_SIPML_M	 0x21 | ||||
| #define CVMX_PKI_PCAM_TERM_E_L3_SIPLL_M	 0x22 | ||||
| #define CVMX_PKI_PCAM_TERM_E_L3_FLAGS_M	 0x23 | ||||
| #define CVMX_PKI_PCAM_TERM_E_L3_DIPHH_M	 0x24 | ||||
| #define CVMX_PKI_PCAM_TERM_E_L3_DIPMH_M	 0x25 | ||||
| #define CVMX_PKI_PCAM_TERM_E_L3_DIPML_M	 0x26 | ||||
| #define CVMX_PKI_PCAM_TERM_E_L3_DIPLL_M	 0x27 | ||||
| #define CVMX_PKI_PCAM_TERM_E_LD_VNI_M	 0x28 | ||||
| #define CVMX_PKI_PCAM_TERM_E_IL3_FLAGS_M 0x2B | ||||
| #define CVMX_PKI_PCAM_TERM_E_LF_SPI_M	 0x2E | ||||
| #define CVMX_PKI_PCAM_TERM_E_L4_SPORT_M	 0x2f | ||||
| #define CVMX_PKI_PCAM_TERM_E_L4_PORT_M	 0x30 | ||||
| #define CVMX_PKI_PCAM_TERM_E_LG_CUSTOM_M 0x39 | ||||
| 
 | ||||
| enum cvmx_pki_term { | ||||
| 	CVMX_PKI_PCAM_TERM_NONE = CVMX_PKI_PCAM_TERM_E_NONE_M, | ||||
| 	CVMX_PKI_PCAM_TERM_L2_CUSTOM = CVMX_PKI_PCAM_TERM_E_L2_CUSTOM_M, | ||||
| 	CVMX_PKI_PCAM_TERM_HIGIGD = CVMX_PKI_PCAM_TERM_E_HIGIGD_M, | ||||
| 	CVMX_PKI_PCAM_TERM_HIGIG = CVMX_PKI_PCAM_TERM_E_HIGIG_M, | ||||
| 	CVMX_PKI_PCAM_TERM_SMACH = CVMX_PKI_PCAM_TERM_E_SMACH_M, | ||||
| 	CVMX_PKI_PCAM_TERM_SMACL = CVMX_PKI_PCAM_TERM_E_SMACL_M, | ||||
| 	CVMX_PKI_PCAM_TERM_DMACH = CVMX_PKI_PCAM_TERM_E_DMACH_M, | ||||
| 	CVMX_PKI_PCAM_TERM_DMACL = CVMX_PKI_PCAM_TERM_E_DMACL_M, | ||||
| 	CVMX_PKI_PCAM_TERM_GLORT = CVMX_PKI_PCAM_TERM_E_GLORT_M, | ||||
| 	CVMX_PKI_PCAM_TERM_DSA = CVMX_PKI_PCAM_TERM_E_DSA_M, | ||||
| 	CVMX_PKI_PCAM_TERM_ETHTYPE0 = CVMX_PKI_PCAM_TERM_E_ETHTYPE0_M, | ||||
| 	CVMX_PKI_PCAM_TERM_ETHTYPE1 = CVMX_PKI_PCAM_TERM_E_ETHTYPE1_M, | ||||
| 	CVMX_PKI_PCAM_TERM_ETHTYPE2 = CVMX_PKI_PCAM_TERM_E_ETHTYPE2_M, | ||||
| 	CVMX_PKI_PCAM_TERM_ETHTYPE3 = CVMX_PKI_PCAM_TERM_E_ETHTYPE3_M, | ||||
| 	CVMX_PKI_PCAM_TERM_MPLS0 = CVMX_PKI_PCAM_TERM_E_MPLS0_M, | ||||
| 	CVMX_PKI_PCAM_TERM_L3_SIPHH = CVMX_PKI_PCAM_TERM_E_L3_SIPHH_M, | ||||
| 	CVMX_PKI_PCAM_TERM_L3_SIPMH = CVMX_PKI_PCAM_TERM_E_L3_SIPMH_M, | ||||
| 	CVMX_PKI_PCAM_TERM_L3_SIPML = CVMX_PKI_PCAM_TERM_E_L3_SIPML_M, | ||||
| 	CVMX_PKI_PCAM_TERM_L3_SIPLL = CVMX_PKI_PCAM_TERM_E_L3_SIPLL_M, | ||||
| 	CVMX_PKI_PCAM_TERM_L3_FLAGS = CVMX_PKI_PCAM_TERM_E_L3_FLAGS_M, | ||||
| 	CVMX_PKI_PCAM_TERM_L3_DIPHH = CVMX_PKI_PCAM_TERM_E_L3_DIPHH_M, | ||||
| 	CVMX_PKI_PCAM_TERM_L3_DIPMH = CVMX_PKI_PCAM_TERM_E_L3_DIPMH_M, | ||||
| 	CVMX_PKI_PCAM_TERM_L3_DIPML = CVMX_PKI_PCAM_TERM_E_L3_DIPML_M, | ||||
| 	CVMX_PKI_PCAM_TERM_L3_DIPLL = CVMX_PKI_PCAM_TERM_E_L3_DIPLL_M, | ||||
| 	CVMX_PKI_PCAM_TERM_LD_VNI = CVMX_PKI_PCAM_TERM_E_LD_VNI_M, | ||||
| 	CVMX_PKI_PCAM_TERM_IL3_FLAGS = CVMX_PKI_PCAM_TERM_E_IL3_FLAGS_M, | ||||
| 	CVMX_PKI_PCAM_TERM_LF_SPI = CVMX_PKI_PCAM_TERM_E_LF_SPI_M, | ||||
| 	CVMX_PKI_PCAM_TERM_L4_PORT = CVMX_PKI_PCAM_TERM_E_L4_PORT_M, | ||||
| 	CVMX_PKI_PCAM_TERM_L4_SPORT = CVMX_PKI_PCAM_TERM_E_L4_SPORT_M, | ||||
| 	CVMX_PKI_PCAM_TERM_LG_CUSTOM = CVMX_PKI_PCAM_TERM_E_LG_CUSTOM_M | ||||
| }; | ||||
| 
 | ||||
| #define CVMX_PKI_DMACH_SHIFT	  32 | ||||
| #define CVMX_PKI_DMACH_MASK	  cvmx_build_mask(16) | ||||
| #define CVMX_PKI_DMACL_MASK	  CVMX_PKI_DATA_MASK_32 | ||||
| #define CVMX_PKI_DATA_MASK_32	  cvmx_build_mask(32) | ||||
| #define CVMX_PKI_DATA_MASK_16	  cvmx_build_mask(16) | ||||
| #define CVMX_PKI_DMAC_MATCH_EXACT cvmx_build_mask(48) | ||||
| 
 | ||||
| struct cvmx_pki_pcam_input { | ||||
| 	u64 style; | ||||
| 	u64 style_mask; /* bits: 1-match, 0-dont care */ | ||||
| 	enum cvmx_pki_term field; | ||||
| 	u32 field_mask; /* bits: 1-match, 0-dont care */ | ||||
| 	u64 data; | ||||
| 	u64 data_mask; /* bits: 1-match, 0-dont care */ | ||||
| }; | ||||
| 
 | ||||
| struct cvmx_pki_pcam_action { | ||||
| 	enum cvmx_pki_parse_mode_chg parse_mode_chg; | ||||
| 	enum cvmx_pki_layer_type layer_type_set; | ||||
| 	int style_add; | ||||
| 	int parse_flag_set; | ||||
| 	int pointer_advance; | ||||
| }; | ||||
| 
 | ||||
| struct cvmx_pki_pcam_config { | ||||
| 	int in_use; | ||||
| 	int entry_num; | ||||
| 	u64 cluster_mask; | ||||
| 	struct cvmx_pki_pcam_input pcam_input; | ||||
| 	struct cvmx_pki_pcam_action pcam_action; | ||||
| }; | ||||
| 
 | ||||
| /**
 | ||||
|  * Status statistics for a port | ||||
|  */ | ||||
| struct cvmx_pki_port_stats { | ||||
| 	u64 dropped_octets; | ||||
| 	u64 dropped_packets; | ||||
| 	u64 pci_raw_packets; | ||||
| 	u64 octets; | ||||
| 	u64 packets; | ||||
| 	u64 multicast_packets; | ||||
| 	u64 broadcast_packets; | ||||
| 	u64 len_64_packets; | ||||
| 	u64 len_65_127_packets; | ||||
| 	u64 len_128_255_packets; | ||||
| 	u64 len_256_511_packets; | ||||
| 	u64 len_512_1023_packets; | ||||
| 	u64 len_1024_1518_packets; | ||||
| 	u64 len_1519_max_packets; | ||||
| 	u64 fcs_align_err_packets; | ||||
| 	u64 runt_packets; | ||||
| 	u64 runt_crc_packets; | ||||
| 	u64 oversize_packets; | ||||
| 	u64 oversize_crc_packets; | ||||
| 	u64 inb_packets; | ||||
| 	u64 inb_octets; | ||||
| 	u64 inb_errors; | ||||
| 	u64 mcast_l2_red_packets; | ||||
| 	u64 bcast_l2_red_packets; | ||||
| 	u64 mcast_l3_red_packets; | ||||
| 	u64 bcast_l3_red_packets; | ||||
| }; | ||||
| 
 | ||||
| /**
 | ||||
|  * PKI Packet Instruction Header Structure (PKI_INST_HDR_S) | ||||
|  */ | ||||
| typedef union { | ||||
| 	u64 u64; | ||||
| 	struct { | ||||
| 		u64 w : 1;    /* INST_HDR size: 0 = 2 bytes, 1 = 4 or 8 bytes */ | ||||
| 		u64 raw : 1;  /* RAW packet indicator in WQE[RAW]: 1 = enable */ | ||||
| 		u64 utag : 1; /* Use INST_HDR[TAG] to compute WQE[TAG]: 1 = enable */ | ||||
| 		u64 uqpg : 1; /* Use INST_HDR[QPG] to compute QPG: 1 = enable */ | ||||
| 		u64 rsvd1 : 1; | ||||
| 		u64 pm : 3; /* Packet parsing mode. Legal values = 0x0..0x7 */ | ||||
| 		u64 sl : 8; /* Number of bytes in INST_HDR. */ | ||||
| 		/* The following fields are not present, if INST_HDR[W] = 0: */ | ||||
| 		u64 utt : 1; /* Use INST_HDR[TT] to compute WQE[TT]: 1 = enable */ | ||||
| 		u64 tt : 2;  /* INST_HDR[TT] => WQE[TT], if INST_HDR[UTT] = 1 */ | ||||
| 		u64 rsvd2 : 2; | ||||
| 		u64 qpg : 11; /* INST_HDR[QPG] => QPG, if INST_HDR[UQPG] = 1 */ | ||||
| 		u64 tag : 32; /* INST_HDR[TAG] => WQE[TAG], if INST_HDR[UTAG] = 1 */ | ||||
| 	} s; | ||||
| } cvmx_pki_inst_hdr_t; | ||||
| 
 | ||||
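| /* Illustrative sketch (not from the original sources): build a 4-byte | ||||
|  * PKI instruction header that supplies the WQE tag directly; the field | ||||
|  * choices are placeholders. | ||||
|  */ | ||||
| static inline u64 example_pki_build_inst_hdr(u32 tag) | ||||
| { | ||||
| 	cvmx_pki_inst_hdr_t hdr; | ||||
| 
 | ||||
| 	hdr.u64 = 0; | ||||
| 	hdr.s.w = 1;	/* 4/8-byte INST_HDR, so the TT/TAG fields exist */ | ||||
| 	hdr.s.utag = 1; /* use INST_HDR[TAG] to compute WQE[TAG] */ | ||||
| 	hdr.s.tag = tag; | ||||
| 	return hdr.u64; | ||||
| } | ||||
| 
 | ||||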
| /**
 | ||||
|  * This function assigns the clusters to a group; later a pkind can be | ||||
|  * configured to use that group depending on the number of clusters the pkind | ||||
|  * would use. A given cluster can only be enabled in a single cluster group. | ||||
|  * The number of clusters assigned to that group determines how many engines | ||||
|  * can work in parallel to process the packet. Each cluster can process x MPPS. | ||||
|  * | ||||
|  * @param node	Node | ||||
|  * @param cluster_group Group to attach clusters to. | ||||
|  * @param cluster_mask The mask of clusters which needs to be assigned to the group. | ||||
|  */ | ||||
| static inline int cvmx_pki_attach_cluster_to_group(int node, u64 cluster_group, u64 cluster_mask) | ||||
| { | ||||
| 	cvmx_pki_icgx_cfg_t pki_cl_grp; | ||||
| 
 | ||||
| 	if (cluster_group >= CVMX_PKI_NUM_CLUSTER_GROUP) { | ||||
| 		debug("ERROR: config cluster group %d", (int)cluster_group); | ||||
| 		return -1; | ||||
| 	} | ||||
| 	pki_cl_grp.u64 = cvmx_read_csr_node(node, CVMX_PKI_ICGX_CFG(cluster_group)); | ||||
| 	pki_cl_grp.s.clusters = cluster_mask; | ||||
| 	cvmx_write_csr_node(node, CVMX_PKI_ICGX_CFG(cluster_group), pki_cl_grp.u64); | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| static inline void cvmx_pki_write_global_parse(int node, struct cvmx_pki_global_parse gbl_pen) | ||||
| { | ||||
| 	cvmx_pki_gbl_pen_t gbl_pen_reg; | ||||
| 
 | ||||
| 	gbl_pen_reg.u64 = cvmx_read_csr_node(node, CVMX_PKI_GBL_PEN); | ||||
| 	gbl_pen_reg.s.virt_pen = gbl_pen.virt_pen; | ||||
| 	gbl_pen_reg.s.clg_pen = gbl_pen.clg_pen; | ||||
| 	gbl_pen_reg.s.cl2_pen = gbl_pen.cl2_pen; | ||||
| 	gbl_pen_reg.s.l4_pen = gbl_pen.l4_pen; | ||||
| 	gbl_pen_reg.s.il3_pen = gbl_pen.il3_pen; | ||||
| 	gbl_pen_reg.s.l3_pen = gbl_pen.l3_pen; | ||||
| 	gbl_pen_reg.s.mpls_pen = gbl_pen.mpls_pen; | ||||
| 	gbl_pen_reg.s.fulc_pen = gbl_pen.fulc_pen; | ||||
| 	gbl_pen_reg.s.dsa_pen = gbl_pen.dsa_pen; | ||||
| 	gbl_pen_reg.s.hg_pen = gbl_pen.hg_pen; | ||||
| 	cvmx_write_csr_node(node, CVMX_PKI_GBL_PEN, gbl_pen_reg.u64); | ||||
| } | ||||
| 
 | ||||
| static inline void cvmx_pki_write_tag_secret(int node, struct cvmx_pki_tag_sec tag_secret) | ||||
| { | ||||
| 	cvmx_pki_tag_secret_t tag_secret_reg; | ||||
| 
 | ||||
| 	tag_secret_reg.u64 = cvmx_read_csr_node(node, CVMX_PKI_TAG_SECRET); | ||||
| 	tag_secret_reg.s.dst6 = tag_secret.dst6; | ||||
| 	tag_secret_reg.s.src6 = tag_secret.src6; | ||||
| 	tag_secret_reg.s.dst = tag_secret.dst; | ||||
| 	tag_secret_reg.s.src = tag_secret.src; | ||||
| 	cvmx_write_csr_node(node, CVMX_PKI_TAG_SECRET, tag_secret_reg.u64); | ||||
| } | ||||
| 
 | ||||
| static inline void cvmx_pki_write_ltype_map(int node, enum cvmx_pki_layer_type layer, | ||||
| 					    enum cvmx_pki_beltype backend) | ||||
| { | ||||
| 	cvmx_pki_ltypex_map_t ltype_map; | ||||
| 
 | ||||
| 	if (layer > CVMX_PKI_LTYPE_E_MAX || backend > CVMX_PKI_BELTYPE_MAX) { | ||||
| 		debug("ERROR: invalid ltype beltype mapping\n"); | ||||
| 		return; | ||||
| 	} | ||||
| 	ltype_map.u64 = cvmx_read_csr_node(node, CVMX_PKI_LTYPEX_MAP(layer)); | ||||
| 	ltype_map.s.beltype = backend; | ||||
| 	cvmx_write_csr_node(node, CVMX_PKI_LTYPEX_MAP(layer), ltype_map.u64); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * This function enables the cluster group to start parsing. | ||||
|  * | ||||
|  * @param node    Node number. | ||||
|  * @param cl_grp  Cluster group to enable parsing. | ||||
|  */ | ||||
| static inline int cvmx_pki_parse_enable(int node, unsigned int cl_grp) | ||||
| { | ||||
| 	cvmx_pki_icgx_cfg_t pki_cl_grp; | ||||
| 
 | ||||
| 	if (cl_grp >= CVMX_PKI_NUM_CLUSTER_GROUP) { | ||||
| 		debug("ERROR: pki parse en group %d", (int)cl_grp); | ||||
| 		return -1; | ||||
| 	} | ||||
| 	pki_cl_grp.u64 = cvmx_read_csr_node(node, CVMX_PKI_ICGX_CFG(cl_grp)); | ||||
| 	pki_cl_grp.s.pena = 1; | ||||
| 	cvmx_write_csr_node(node, CVMX_PKI_ICGX_CFG(cl_grp), pki_cl_grp.u64); | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
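| /* Illustrative sketch (not from the original sources): attach all | ||||
|  * clusters to cluster group 0 and enable parsing on it; the group | ||||
|  * number is a placeholder. | ||||
|  */ | ||||
| static inline int example_pki_cluster_bringup(int node) | ||||
| { | ||||
| 	if (cvmx_pki_attach_cluster_to_group(node, 0, CVMX_PKI_CLUSTER_ALL)) | ||||
| 		return -1; | ||||
| 	return cvmx_pki_parse_enable(node, 0); | ||||
| } | ||||
| 
 | ||||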
| /**
 | ||||
|  * This function enables the PKI to send bpid level backpressure to CN78XX inputs. | ||||
|  * | ||||
|  * @param node Node number. | ||||
|  */ | ||||
| static inline void cvmx_pki_enable_backpressure(int node) | ||||
| { | ||||
| 	cvmx_pki_buf_ctl_t pki_buf_ctl; | ||||
| 
 | ||||
| 	pki_buf_ctl.u64 = cvmx_read_csr_node(node, CVMX_PKI_BUF_CTL); | ||||
| 	pki_buf_ctl.s.pbp_en = 1; | ||||
| 	cvmx_write_csr_node(node, CVMX_PKI_BUF_CTL, pki_buf_ctl.u64); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Clear the statistics counters for a port. | ||||
|  * | ||||
|  * @param node Node number. | ||||
|  * @param port Port number (ipd_port) to get statistics for. | ||||
|  *    Make sure PKI_STATS_CTL:mode is set to 0 for collecting per port/pkind stats. | ||||
|  */ | ||||
| void cvmx_pki_clear_port_stats(int node, u64 port); | ||||
| 
 | ||||
| /**
 | ||||
|  * Get the status counters for index from PKI. | ||||
|  * | ||||
|  * @param node	  Node number. | ||||
|  * @param index   PKIND number, if PKI_STATS_CTL:mode = 0 or | ||||
|  *     style(flow) number, if PKI_STATS_CTL:mode = 1 | ||||
|  * @param status  Where to put the results. | ||||
|  */ | ||||
| void cvmx_pki_get_stats(int node, int index, struct cvmx_pki_port_stats *status); | ||||
| 
 | ||||
| /**
 | ||||
|  * Get the statistics counters for a port. | ||||
|  * | ||||
|  * @param node	 Node number | ||||
|  * @param port   Port number (ipd_port) to get statistics for. | ||||
|  *    Make sure PKI_STATS_CTL:mode is set to 0 for collecting per port/pkind stats. | ||||
|  * @param status Where to put the results. | ||||
|  */ | ||||
| static inline void cvmx_pki_get_port_stats(int node, u64 port, struct cvmx_pki_port_stats *status) | ||||
| { | ||||
| 	int xipd = cvmx_helper_node_to_ipd_port(node, port); | ||||
| 	int xiface = cvmx_helper_get_interface_num(xipd); | ||||
| 	int index = cvmx_helper_get_interface_index_num(port); | ||||
| 	int pknd = cvmx_helper_get_pknd(xiface, index); | ||||
| 
 | ||||
| 	cvmx_pki_get_stats(node, pknd, status); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Get the statistics counters for a flow represented by style in PKI. | ||||
|  * | ||||
|  * @param node Node number. | ||||
|  * @param style_num Style number to get statistics for. | ||||
|  *    Make sure PKI_STATS_CTL:mode is set to 1 for collecting per style/flow stats. | ||||
|  * @param status Where to put the results. | ||||
|  */ | ||||
| static inline void cvmx_pki_get_flow_stats(int node, u64 style_num, | ||||
| 					   struct cvmx_pki_port_stats *status) | ||||
| { | ||||
| 	cvmx_pki_get_stats(node, style_num, status); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Show integrated PKI configuration. | ||||
|  * | ||||
|  * @param node	   node number | ||||
|  */ | ||||
| int cvmx_pki_config_dump(unsigned int node); | ||||
| 
 | ||||
| /**
 | ||||
|  * Show integrated PKI statistics. | ||||
|  * | ||||
|  * @param node	   node number | ||||
|  */ | ||||
| int cvmx_pki_stats_dump(unsigned int node); | ||||
| 
 | ||||
| /**
 | ||||
|  * Clear PKI statistics. | ||||
|  * | ||||
|  * @param node	   node number | ||||
|  */ | ||||
| void cvmx_pki_stats_clear(unsigned int node); | ||||
| 
 | ||||
| /**
 | ||||
|  * This function enables PKI. | ||||
|  * | ||||
|  * @param node	 node to enable pki in. | ||||
|  */ | ||||
| void cvmx_pki_enable(int node); | ||||
| 
 | ||||
| /**
 | ||||
|  * This function disables PKI. | ||||
|  * | ||||
|  * @param node	node to disable pki in. | ||||
|  */ | ||||
| void cvmx_pki_disable(int node); | ||||
| 
 | ||||
| /**
 | ||||
|  * This function soft resets PKI. | ||||
|  * | ||||
|  * @param node	node to enable pki in. | ||||
|  */ | ||||
| void cvmx_pki_reset(int node); | ||||
| 
 | ||||
| /**
 | ||||
|  * This function sets the clusters in PKI. | ||||
|  * | ||||
|  * @param node	node to set clusters in. | ||||
|  */ | ||||
| int cvmx_pki_setup_clusters(int node); | ||||
| 
 | ||||
| /**
 | ||||
|  * This function reads global configuration of PKI block. | ||||
|  * | ||||
|  * @param node    Node number. | ||||
|  * @param gbl_cfg Pointer to struct into which the global configuration is read | ||||
|  */ | ||||
| void cvmx_pki_read_global_config(int node, struct cvmx_pki_global_config *gbl_cfg); | ||||
| 
 | ||||
| /**
 | ||||
|  * This function writes global configuration of PKI into hw. | ||||
|  * | ||||
|  * @param node    Node number. | ||||
|  * @param gbl_cfg Pointer to struct containing the global configuration | ||||
|  */ | ||||
| void cvmx_pki_write_global_config(int node, struct cvmx_pki_global_config *gbl_cfg); | ||||
| 
 | ||||
| /**
 | ||||
|  * This function reads per pkind parameters in hardware which defines how | ||||
|  * the incoming packet is processed. | ||||
|  * | ||||
|  * @param node   Node number. | ||||
|  * @param pkind  PKI supports a large number of incoming interfaces and packets | ||||
|  *     arriving on different interfaces or channels may need to be processed | ||||
|  *     differently. PKI uses the pkind to determine how the incoming packet | ||||
|  *     is processed. | ||||
|  * @param pkind_cfg	Pointer to struct containing pkind configuration read | ||||
|  *     from hardware. | ||||
|  */ | ||||
| int cvmx_pki_read_pkind_config(int node, int pkind, struct cvmx_pki_pkind_config *pkind_cfg); | ||||
| 
 | ||||
| /**
 | ||||
|  * This function writes per pkind parameters in hardware which defines how | ||||
|  * the incoming packet is processed. | ||||
|  * | ||||
|  * @param node   Node number. | ||||
|  * @param pkind  PKI supports a large number of incoming interfaces and packets | ||||
|  *     arriving on different interfaces or channels may need to be processed | ||||
|  *     differently. PKI uses the pkind to determine how the incoming packet | ||||
|  *     is processed. | ||||
|  * @param pkind_cfg	Pointer to struct containing the pkind configuration that | ||||
|  *     needs to be written to hardware. | ||||
|  */ | ||||
| int cvmx_pki_write_pkind_config(int node, int pkind, struct cvmx_pki_pkind_config *pkind_cfg); | ||||
| 
 | ||||
| /**
 | ||||
|  * This function reads parameters associated with tag configuration in hardware. | ||||
|  * | ||||
|  * @param node	 Node number. | ||||
|  * @param style  Style to configure tag for. | ||||
|  * @param cluster_mask  Mask of clusters to configure the style for. | ||||
|  * @param tag_cfg  Pointer to tag configuration struct. | ||||
|  */ | ||||
| void cvmx_pki_read_tag_config(int node, int style, u64 cluster_mask, | ||||
| 			      struct cvmx_pki_style_tag_cfg *tag_cfg); | ||||
| 
 | ||||
| /**
 | ||||
|  * This function writes/configures parameters associated with tag | ||||
|  * configuration in hardware. | ||||
|  * | ||||
|  * @param node  Node number. | ||||
|  * @param style  Style to configure tag for. | ||||
|  * @param cluster_mask  Mask of clusters to configure the style for. | ||||
|  * @param tag_cfg  Pointer to tag configuration struct. | ||||
|  */ | ||||
| void cvmx_pki_write_tag_config(int node, int style, u64 cluster_mask, | ||||
| 			       struct cvmx_pki_style_tag_cfg *tag_cfg); | ||||
| 
 | ||||
| /**
 | ||||
|  * This function reads parameters associated with style in hardware. | ||||
|  * | ||||
|  * @param node	Node number. | ||||
|  * @param style  Style to read from. | ||||
|  * @param cluster_mask  Mask of clusters style belongs to. | ||||
|  * @param style_cfg  Pointer to style config struct. | ||||
|  */ | ||||
| void cvmx_pki_read_style_config(int node, int style, u64 cluster_mask, | ||||
| 				struct cvmx_pki_style_config *style_cfg); | ||||
| 
 | ||||
| /**
 | ||||
|  * This function writes/configures parameters associated with style in hardware. | ||||
|  * | ||||
|  * @param node  Node number. | ||||
|  * @param style  Style to configure. | ||||
|  * @param cluster_mask  Mask of clusters to configure the style for. | ||||
|  * @param style_cfg  Pointer to style config struct. | ||||
|  */ | ||||
| void cvmx_pki_write_style_config(int node, u64 style, u64 cluster_mask, | ||||
| 				 struct cvmx_pki_style_config *style_cfg); | ||||
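| /* Illustrative sketch (not from the original sources): read-modify-write | ||||
|  * of a style to enable FCS checking and stripping; style 0 is a | ||||
|  * placeholder. | ||||
|  */ | ||||
| static inline void example_pki_style_fcs(int node) | ||||
| { | ||||
| 	struct cvmx_pki_style_config cfg; | ||||
| 
 | ||||
| 	cvmx_pki_read_style_config(node, 0, CVMX_PKI_CLUSTER_ALL, &cfg); | ||||
| 	cfg.parm_cfg.fcs_chk = true; | ||||
| 	cfg.parm_cfg.fcs_strip = true; | ||||
| 	cvmx_pki_write_style_config(node, 0, CVMX_PKI_CLUSTER_ALL, &cfg); | ||||
| } | ||||
| 
 | ||||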
| /**
 | ||||
|  * This function reads qpg entry at specified offset from qpg table | ||||
|  * | ||||
|  * @param node  Node number. | ||||
|  * @param offset  Offset in qpg table to read from. | ||||
|  * @param qpg_cfg  Pointer to structure containing qpg values | ||||
|  */ | ||||
| int cvmx_pki_read_qpg_entry(int node, int offset, struct cvmx_pki_qpg_config *qpg_cfg); | ||||
| 
 | ||||
| /**
 | ||||
|  * This function writes qpg entry at specified offset in qpg table | ||||
|  * | ||||
|  * @param node  Node number. | ||||
|  * @param offset  Offset in qpg table to write to. | ||||
|  * @param qpg_cfg  Pointer to structure containing qpg values. | ||||
|  */ | ||||
| void cvmx_pki_write_qpg_entry(int node, int offset, struct cvmx_pki_qpg_config *qpg_cfg); | ||||
| 
 | ||||
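| /* Illustrative sketch (not from the original sources): reserve one QPG | ||||
|  * entry and program it; the aura and SSO group numbers are placeholders, | ||||
|  * and cvmx_pki_qpg_entry_alloc() comes from cvmx-pki-resources.h. | ||||
|  */ | ||||
| static inline int example_pki_setup_qpg(int node) | ||||
| { | ||||
| 	struct cvmx_pki_qpg_config qpg_cfg; | ||||
| 	int base = cvmx_pki_qpg_entry_alloc(node, -1, 1); | ||||
| 
 | ||||
| 	if (base < 0) | ||||
| 		return -1; | ||||
| 	qpg_cfg.qpg_base = base; | ||||
| 	qpg_cfg.port_add = 0; | ||||
| 	qpg_cfg.aura_num = 0;	/* placeholder aura */ | ||||
| 	qpg_cfg.grp_ok = 0;	/* SSO group for good packets */ | ||||
| 	qpg_cfg.grp_bad = 0;	/* SSO group for errored packets */ | ||||
| 	qpg_cfg.grptag_ok = 0; | ||||
| 	qpg_cfg.grptag_bad = 0; | ||||
| 	cvmx_pki_write_qpg_entry(node, base, &qpg_cfg); | ||||
| 	return base; | ||||
| } | ||||
| 
 | ||||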
| /**
 | ||||
|  * This function writes pcam entry at given offset in pcam table in hardware | ||||
|  * | ||||
|  * @param node  Node number. | ||||
|  * @param index	 Offset in pcam table. | ||||
|  * @param cluster_mask  Mask of clusters in which to write pcam entry. | ||||
|  * @param input  Input keys to pcam match passed as struct. | ||||
|  * @param action  PCAM match action passed as struct | ||||
|  */ | ||||
| int cvmx_pki_pcam_write_entry(int node, int index, u64 cluster_mask, | ||||
| 			      struct cvmx_pki_pcam_input input, struct cvmx_pki_pcam_action action); | ||||
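| /* Illustrative sketch (not from the original sources): a PCAM rule that | ||||
|  * matches ETHTYPE0 == 0x8100 (VLAN) on one style and bumps the final | ||||
|  * style by 1; the entry/style numbers and the data lane holding the | ||||
|  * ethtype are assumptions. | ||||
|  */ | ||||
| static inline int example_pki_pcam_vlan_rule(int node, int entry, int style) | ||||
| { | ||||
| 	struct cvmx_pki_pcam_input in = { 0 }; | ||||
| 	struct cvmx_pki_pcam_action act = { 0 }; | ||||
| 
 | ||||
| 	in.style = style; | ||||
| 	in.style_mask = cvmx_build_mask(8);	/* match the full style */ | ||||
| 	in.field = CVMX_PKI_PCAM_TERM_ETHTYPE0; | ||||
| 	in.field_mask = 0xff; | ||||
| 	in.data = 0x8100ull << 48;		/* assumed lane for the ethtype */ | ||||
| 	in.data_mask = 0xffffull << 48; | ||||
| 	act.style_add = 1;			/* next style = style + 1 */ | ||||
| 	return cvmx_pki_pcam_write_entry(node, entry, CVMX_PKI_CLUSTER_ALL, | ||||
| 					 in, act); | ||||
| } | ||||
| 
 | ||||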
| /**
 | ||||
|  * Configures the channel which will receive backpressure from the specified bpid. | ||||
|  * Each channel listens for backpressure on a specific bpid. | ||||
|  * Each bpid can backpressure multiple channels. | ||||
|  * @param node  Node number. | ||||
|  * @param bpid  BPID from which channel will receive backpressure. | ||||
|  * @param channel  Channel number to receive backpressure. | ||||
|  */ | ||||
| int cvmx_pki_write_channel_bpid(int node, int channel, int bpid); | ||||
| 
 | ||||
| /**
 | ||||
|  * Configures the bpid on which the specified aura will | ||||
|  * assert backpressure. | ||||
|  * Each bpid receives backpressure from auras. | ||||
|  * Multiple auras can backpressure a single bpid. | ||||
|  * @param node  Node number. | ||||
|  * @param aura  Aura which will assert backpressure on that bpid. | ||||
|  * @param bpid  bpid to assert backpressure on. | ||||
|  */ | ||||
| int cvmx_pki_write_aura_bpid(int node, int aura, int bpid); | ||||
| 
 | ||||
| /**
 | ||||
|  * Enables/disables QoS (RED drop, tail drop & backpressure) for the PKI aura. | ||||
|  * | ||||
|  * @param node  Node number | ||||
|  * @param aura  To enable/disable QoS on. | ||||
|  * @param ena_red  Enable/Disable RED drop between pass and drop level | ||||
|  *    1-enable 0-disable | ||||
|  * @param ena_drop  Enable/disable tail drop when max drop level is exceeded | ||||
|  *    1-enable 0-disable | ||||
|  * @param ena_bp  Enable/Disable asserting backpressure on bpid when | ||||
|  *    max DROP level is exceeded. | ||||
|  *    1-enable 0-disable | ||||
|  */ | ||||
| int cvmx_pki_enable_aura_qos(int node, int aura, bool ena_red, bool ena_drop, bool ena_bp); | ||||
| 
 | ||||
| /**
 | ||||
|  * This function gives the initial style used by that pkind. | ||||
|  * | ||||
|  * @param node  Node number. | ||||
|  * @param pkind  PKIND number. | ||||
|  */ | ||||
| int cvmx_pki_get_pkind_style(int node, int pkind); | ||||
| 
 | ||||
| /**
 | ||||
|  * This function sets the wqe buffer mode. The first packet data buffer can reside | ||||
|  * either in the same buffer as the wqe OR it can go in a separate buffer. If the | ||||
|  * latter mode is used, make sure software allocates enough buffers to have the | ||||
|  * wqe separate from the packet data. | ||||
|  * | ||||
|  * @param node  Node number. | ||||
|  * @param style  Style to configure. | ||||
|  * @param pkt_outside_wqe | ||||
|  *    0 = The packet link pointer will be at word [FIRST_SKIP] immediately | ||||
|  *    followed by packet data, in the same buffer as the work queue entry. | ||||
|  *    1 = The packet link pointer will be at word [FIRST_SKIP] in a new | ||||
|  *    buffer separate from the work queue entry. Words following the | ||||
|  *    WQE in the same cache line will be zeroed, other lines in the | ||||
|  *    buffer will not be modified and will retain stale data (from the | ||||
|  *    buffer’s previous use). This setting may decrease the peak PKI | ||||
|  *    performance by up to half on small packets. | ||||
|  */ | ||||
| void cvmx_pki_set_wqe_mode(int node, u64 style, bool pkt_outside_wqe); | ||||
| 
 | ||||
| /**
 | ||||
|  * This function sets the Packet mode of all ports and styles to little-endian. | ||||
|  * It changes write operations of packet data to L2C to | ||||
|  * be in little-endian. Does not change the WQE header format, which is | ||||
|  * properly endian neutral. | ||||
|  * | ||||
|  * @param node  Node number. | ||||
|  * @param style  Style to configure. | ||||
|  */ | ||||
| void cvmx_pki_set_little_endian(int node, u64 style); | ||||
| 
 | ||||
| /**
 | ||||
|  * Enables/Disables L2 length error check and max & min frame length checks. | ||||
|  * | ||||
|  * @param node  Node number. | ||||
|  * @param pknd  PKIND to enable/disable the error checks for. | ||||
|  * @param l2len_err	 L2 length error check enable. | ||||
|  * @param maxframe_err	Max frame error check enable. | ||||
|  * @param minframe_err	Min frame error check enable. | ||||
|  *    1 -- Enable error checks | ||||
|  *    0 -- Disable error checks | ||||
|  */ | ||||
| void cvmx_pki_endis_l2_errs(int node, int pknd, bool l2len_err, bool maxframe_err, | ||||
| 			    bool minframe_err); | ||||
| 
 | ||||
| /**
 | ||||
|  * Enables/Disables fcs check and fcs stripping on the pkind. | ||||
|  * | ||||
|  * @param node  Node number. | ||||
|  * @param pknd  PKIND to apply settings on. | ||||
|  * @param fcs_chk  Enable/disable fcs check. | ||||
|  *    1 -- enable fcs error check. | ||||
|  *    0 -- disable fcs error check. | ||||
|  * @param fcs_strip	 Strip L2 FCS bytes from packet, decrease WQE[LEN] by 4 bytes | ||||
|  *    1 -- strip L2 FCS. | ||||
|  *    0 -- Do not strip L2 FCS. | ||||
|  */ | ||||
| void cvmx_pki_endis_fcs_check(int node, int pknd, bool fcs_chk, bool fcs_strip); | ||||
| 
 | ||||
| /**
 | ||||
|  * This function shows the qpg table entries, read directly from hardware. | ||||
|  * | ||||
|  * @param node  Node number. | ||||
|  * @param num_entry  Number of entries to print. | ||||
|  */ | ||||
| void cvmx_pki_show_qpg_entries(int node, u16 num_entry); | ||||
| 
 | ||||
| /**
 | ||||
|  * This function shows the pcam table in raw format read directly from hardware. | ||||
|  * | ||||
|  * @param node  Node number. | ||||
|  */ | ||||
| void cvmx_pki_show_pcam_entries(int node); | ||||
| 
 | ||||
| /**
 | ||||
|  * This function shows the valid entries in readable format, | ||||
|  * read directly from hardware. | ||||
|  * | ||||
|  * @param node  Node number. | ||||
|  */ | ||||
| void cvmx_pki_show_valid_pcam_entries(int node); | ||||
| 
 | ||||
| /**
 | ||||
|  * This function shows the pkind attributes in readable format, | ||||
|  * read directly from hardware. | ||||
|  * @param node  Node number. | ||||
|  * @param pkind  PKIND number to print. | ||||
|  */ | ||||
| void cvmx_pki_show_pkind_attributes(int node, int pkind); | ||||
| 
 | ||||
| /**
 | ||||
|  * @INTERNAL | ||||
|  * This function is called by cvmx_helper_shutdown() to extract all FPA buffers | ||||
|  * out of the PKI. After this function completes, all FPA buffers that were | ||||
|  * prefetched by PKI will be in the appropriate FPA pool. | ||||
|  * This function does not reset the PKI. | ||||
|  * WARNING: It is very important that PKI be reset soon after a call to this function. | ||||
|  * | ||||
|  * @param node  Node number. | ||||
|  */ | ||||
| void __cvmx_pki_free_ptr(int node); | ||||
| 
 | ||||
| #endif | ||||
|  | @ -0,0 +1,43 @@ | |||
| /* SPDX-License-Identifier: GPL-2.0 */ | ||||
| /*
 | ||||
|  * Copyright (C) 2020 Marvell International Ltd. | ||||
|  */ | ||||
| 
 | ||||
| #ifndef __CVMX_INTERNAL_PORTS_RANGE__ | ||||
| #define __CVMX_INTERNAL_PORTS_RANGE__ | ||||
| 
 | ||||
| /*
 | ||||
|  * Allocates a block of internal ports for the specified interface/port | ||||
|  * | ||||
|  * @param  interface  the interface for which the internal ports are requested | ||||
|  * @param  port       the index of the port within the interface for which the internal ports | ||||
|  *                    are requested. | ||||
|  * @param  count      the number of internal ports requested | ||||
|  * | ||||
|  * @return  0 on success | ||||
|  *         -1 on failure | ||||
|  */ | ||||
| int cvmx_pko_internal_ports_alloc(int interface, int port, u64 count); | ||||
| 
 | ||||
| /*
 | ||||
|  * Free the internal ports associated with the specified interface/port | ||||
|  * | ||||
|  * @param  interface  the interface for which the internal ports are requested | ||||
|  * @param  port       the index of the port within the interface for which the internal ports | ||||
|  *                    are requested. | ||||
|  * | ||||
|  * @return  0 on success | ||||
|  *         -1 on failure | ||||
|  */ | ||||
| int cvmx_pko_internal_ports_free(int interface, int port); | ||||
| 
 | ||||
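| /* Illustrative sketch (not from the original sources): reserve four | ||||
|  * internal ports for interface 0, port 0, then release them; the | ||||
|  * numbers are placeholders. | ||||
|  */ | ||||
| static inline int example_internal_ports_usage(void) | ||||
| { | ||||
| 	if (cvmx_pko_internal_ports_alloc(0, 0, 4) != 0) | ||||
| 		return -1; | ||||
| 	return cvmx_pko_internal_ports_free(0, 0); | ||||
| } | ||||
| 
 | ||||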
| /*
 | ||||
|  * Frees up all the allocated internal ports. | ||||
|  */ | ||||
| void cvmx_pko_internal_ports_range_free_all(void); | ||||
| 
 | ||||
| void cvmx_pko_internal_ports_range_show(void); | ||||
| 
 | ||||
| int __cvmx_pko_internal_ports_range_init(void); | ||||
| 
 | ||||
| #endif | ||||
|  | @ -0,0 +1,175 @@ | |||
| /* SPDX-License-Identifier: GPL-2.0 */ | ||||
| /*
 | ||||
|  * Copyright (C) 2020 Marvell International Ltd. | ||||
|  */ | ||||
| 
 | ||||
| #ifndef __CVMX_PKO3_QUEUE_H__ | ||||
| #define __CVMX_PKO3_QUEUE_H__ | ||||
| 
 | ||||
| /**
 | ||||
|  * @INTERNAL | ||||
|  * | ||||
|  * Find or allocate global port/dq map table | ||||
|  * which is a named table that contains entries for | ||||
|  * all possible OCI nodes. | ||||
|  * | ||||
|  * The table global pointer is stored in a core-local variable | ||||
|  * so that every core will call this function once, on first use. | ||||
|  */ | ||||
| int __cvmx_pko3_dq_table_setup(void); | ||||
| 
 | ||||
| /*
 | ||||
|  * Get the base Descriptor Queue number for an IPD port on the local node | ||||
|  */ | ||||
| int cvmx_pko3_get_queue_base(int ipd_port); | ||||
| 
 | ||||
| /*
 | ||||
|  * Get the number of Descriptor Queues assigned for an IPD port | ||||
|  */ | ||||
| int cvmx_pko3_get_queue_num(int ipd_port); | ||||
| 
 | ||||
| /**
 | ||||
|  * Get L1/Port Queue number assigned to interface port. | ||||
|  * | ||||
|  * @param xiface is interface number. | ||||
|  * @param index is port index. | ||||
|  */ | ||||
| int cvmx_pko3_get_port_queue(int xiface, int index); | ||||
| 
 | ||||
| /*
 | ||||
|  * Configure L3 through L5 Scheduler Queues and Descriptor Queues | ||||
|  * | ||||
|  * The Scheduler Queues in Levels 3 to 5 and Descriptor Queues are | ||||
|  * configured one-to-one or many-to-one to a single parent Scheduler | ||||
|  * Queue. The level of the parent SQ is specified in an argument, | ||||
|  * as well as the number of children to attach to the specific parent. | ||||
|  * The children can have fair round-robin or priority-based scheduling | ||||
|  * when multiple children are assigned a single parent. | ||||
|  * | ||||
|  * @param node is the OCI node location for the queues to be configured | ||||
|  * @param parent_level is the level of the parent queue, 2 to 5. | ||||
|  * @param parent_queue is the number of the parent Scheduler Queue | ||||
|  * @param child_base is the number of the first child SQ or DQ to assign | ||||
|  *	to the parent | ||||
|  * @param child_count is the number of consecutive children to assign | ||||
|  * @param stat_prio_count is the priority setting for the child SQs | ||||
|  * | ||||
|  * If <stat_prio_count> is -1, the Ln children will have equal Round-Robin | ||||
|  * relationship with each other. If <stat_prio_count> is 0, all Ln children | ||||
|  * will be arranged in Weighted-Round-Robin, with the first having the most | ||||
|  * precedence. If <stat_prio_count> is between 1 and 8, it indicates how | ||||
|  * many children will have static priority settings (with the first having | ||||
|  * the most precedence), with the remaining Ln children having WRR scheduling. | ||||
|  * | ||||
|  * @returns 0 on success, -1 on failure. | ||||
|  * | ||||
|  * Note: this function supports the configuration of the node-local unit. | ||||
|  */ | ||||
| int cvmx_pko3_sq_config_children(unsigned int node, unsigned int parent_level, | ||||
| 				 unsigned int parent_queue, unsigned int child_base, | ||||
| 				 unsigned int child_count, int stat_prio_count); | ||||
| 
 | ||||
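| /* Illustrative sketch (not from the original sources): attach eight L3 | ||||
|  * scheduler queues, starting at SQ 0, to L2 parent queue 0 on node 0, | ||||
|  * all in plain round-robin; every number is a placeholder. | ||||
|  */ | ||||
| static inline int example_pko3_cfg_l3_children(void) | ||||
| { | ||||
| 	return cvmx_pko3_sq_config_children(0, 2, 0, 0, 8, -1); | ||||
| } | ||||
| 
 | ||||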
| /*
 | ||||
|  * @INTERNAL | ||||
|  * Register a range of Descriptor Queues with an interface port | ||||
|  * | ||||
|  * This function populates the DQ-to-IPD translation table | ||||
|  * used by the application to retrieve the DQ range (typically ordered | ||||
|  * by priority) for a given IPD-port, which is either a physical port, | ||||
|  * or a channel on a channelized interface (i.e. ILK). | ||||
|  * | ||||
|  * @param xiface is the physical interface number | ||||
|  * @param index is either a physical port on an interface | ||||
|  *	or a channel of an ILK interface | ||||
|  * @param dq_base is the first Descriptor Queue number in a consecutive range | ||||
|  * @param dq_count is the number of consecutive Descriptor Queues leading | ||||
|  *	to the same channel or port. | ||||
|  * | ||||
|  * Only a consecutive range of Descriptor Queues can be associated with any | ||||
|  * given channel/port, and usually they are ordered from most to least | ||||
|  * in terms of scheduling priority. | ||||
|  * | ||||
|  * Note: this function only populates the node-local translation table. | ||||
|  * | ||||
|  * @returns 0 on success, -1 on failure. | ||||
|  */ | ||||
| int __cvmx_pko3_ipd_dq_register(int xiface, int index, unsigned int dq_base, unsigned int dq_count); | ||||
| 
 | ||||
| /**
 | ||||
|  * @INTERNAL | ||||
|  * | ||||
|  * Unregister DQs associated with CHAN_E (IPD port) | ||||
|  */ | ||||
| int __cvmx_pko3_ipd_dq_unregister(int xiface, int index); | ||||
| 
 | ||||
| /*
 | ||||
|  * Map channel number in PKO | ||||
|  * | ||||
|  * @param node is to specify the node to which this configuration is applied. | ||||
|  * @param pq_num specifies the Port Queue (i.e. L1) queue number. | ||||
|  * @param l2_l3_q_num  specifies L2/L3 queue number. | ||||
|  * @param channel specifies the channel number to map to the queue. | ||||
|  * | ||||
|  * The channel assignment applies to L2 or L3 Shaper Queues depending | ||||
|  * on the setting of channel credit level. | ||||
|  * | ||||
|  * @return none. | ||||
|  */ | ||||
| void cvmx_pko3_map_channel(unsigned int node, unsigned int pq_num, unsigned int l2_l3_q_num, | ||||
| 			   u16 channel); | ||||
| 
 | ||||
| int cvmx_pko3_pq_config(unsigned int node, unsigned int mac_num, unsigned int pq_num); | ||||
| 
 | ||||
| int cvmx_pko3_port_cir_set(unsigned int node, unsigned int pq_num, unsigned long rate_kbips, | ||||
| 			   unsigned int burst_bytes, int adj_bytes); | ||||
| int cvmx_pko3_dq_cir_set(unsigned int node, unsigned int pq_num, unsigned long rate_kbips, | ||||
| 			 unsigned int burst_bytes); | ||||
| int cvmx_pko3_dq_pir_set(unsigned int node, unsigned int pq_num, unsigned long rate_kbips, | ||||
| 			 unsigned int burst_bytes); | ||||
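| 
| /* | ||||
|  * Shaping sketch with assumed units (rate_kbips taken as kilobits per | ||||
|  * second): commit DQ 0 on node 0 to 1 Gbps with a 64 KiB burst allowance. | ||||
|  * The queue number and rate are illustrative only. | ||||
|  */ | ||||
| static inline int example_pko3_shape_dq(void) | ||||
| { | ||||
| 	return cvmx_pko3_dq_cir_set(0 /* node */, 0 /* dq */, | ||||
| 				    1000000 /* 1 Gbps in kbps */, | ||||
| 				    65536 /* burst_bytes */); | ||||
| } | ||||
| 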
| typedef enum { | ||||
| 	CVMX_PKO3_SHAPE_RED_STALL, | ||||
| 	CVMX_PKO3_SHAPE_RED_DISCARD, | ||||
| 	CVMX_PKO3_SHAPE_RED_PASS | ||||
| } red_action_t; | ||||
| 
 | ||||
| void cvmx_pko3_dq_red(unsigned int node, unsigned int dq_num, red_action_t red_act, | ||||
| 		      int8_t len_adjust); | ||||
| 
 | ||||
| /**
 | ||||
|  * Macros to deal with short floating-point numbers, where an unsigned | ||||
|  * exponent and an unsigned normalized mantissa are each represented | ||||
|  * with a defined field width. | ||||
|  * | ||||
|  */ | ||||
| #define CVMX_SHOFT_MANT_BITS 8 | ||||
| #define CVMX_SHOFT_EXP_BITS  4 | ||||
| 
 | ||||
| /**
 | ||||
|  * Convert short-float to an unsigned integer | ||||
|  * Note that it will lose precision. | ||||
|  */ | ||||
| #define CVMX_SHOFT_TO_U64(m, e)                                                                    \ | ||||
| 	((((1ull << CVMX_SHOFT_MANT_BITS) | (m)) << (e)) >> CVMX_SHOFT_MANT_BITS) | ||||
| 
 | ||||
| /**
 | ||||
|  * Convert to short-float from an unsigned integer | ||||
|  */ | ||||
| #define CVMX_SHOFT_FROM_U64(ui, m, e)                                                              \ | ||||
| 	do {                                                                                       \ | ||||
| 		unsigned long long u;                                                              \ | ||||
| 		unsigned int k;                                                                    \ | ||||
| 		k = (1ull << (CVMX_SHOFT_MANT_BITS + 1)) - 1;                                      \ | ||||
| 		(e) = 0;                                                                           \ | ||||
| 		u = (ui) << CVMX_SHOFT_MANT_BITS;                                                  \ | ||||
| 		while ((u) > k) {                                                                  \ | ||||
| 			u >>= 1;                                                                   \ | ||||
| 			(e)++;                                                                     \ | ||||
| 		}                                                                                  \ | ||||
| 		(m) = u & (k >> 1);                                                                \ | ||||
| 	} while (0) | ||||
| 
 | ||||
| #define CVMX_SHOFT_MAX()                                                                           \ | ||||
| 	CVMX_SHOFT_TO_U64((1 << CVMX_SHOFT_MANT_BITS) - 1, (1 << CVMX_SHOFT_EXP_BITS) - 1) | ||||
| #define CVMX_SHOFT_MIN() CVMX_SHOFT_TO_U64(0, 0) | ||||
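| 
| /* | ||||
|  * Worked example of the macros above: encoding 1,000,000 produces | ||||
|  * m = 232, e = 19, and decoding that pair returns 999424, showing the | ||||
|  * precision loss noted for CVMX_SHOFT_TO_U64. | ||||
|  */ | ||||
| static inline u64 example_shoft_round_trip(void) | ||||
| { | ||||
| 	unsigned int m, e; | ||||
| 
| 	CVMX_SHOFT_FROM_U64(1000000ull, m, e);	/* m = 232, e = 19 */ | ||||
| 	return CVMX_SHOFT_TO_U64(m, e);		/* 999424 */ | ||||
| } | ||||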
| 
 | ||||
| #endif /* __CVMX_PKO3_QUEUE_H__ */ | ||||
										
											
[File diff suppressed because it is too large]
							|  | @ -0,0 +1,304 @@ | |||
| /* SPDX-License-Identifier: GPL-2.0 */ | ||||
| /*
 | ||||
|  * Copyright (C) 2020 Marvell International Ltd. | ||||
|  */ | ||||
| 
 | ||||
| #ifndef __CVMX_QLM_H__ | ||||
| #define __CVMX_QLM_H__ | ||||
| 
 | ||||
| /*
 | ||||
|  * Interface 0 on the 78xx can be connected to qlm 0 or qlm 2. When interface | ||||
|  * 0 is connected to qlm 0, this macro must be set to 0. When interface 0 is | ||||
|  * connected to qlm 2, this macro must be set to 1. | ||||
|  */ | ||||
| #define MUX_78XX_IFACE0 0 | ||||
| 
 | ||||
| /*
 | ||||
|  * Interface 1 on the 78xx can be connected to qlm 1 or qlm 3. When interface | ||||
|  * 1 is connected to qlm 1, this macro must be set to 0. When interface 1 is | ||||
|  * connected to qlm 3, this macro must be set to 1. | ||||
|  */ | ||||
| #define MUX_78XX_IFACE1 0 | ||||
| 
 | ||||
| /* Uncomment this line to print QLM JTAG state */ | ||||
| /* #define CVMX_QLM_DUMP_STATE 1 */ | ||||
| 
 | ||||
| typedef struct { | ||||
| 	const char *name; | ||||
| 	int stop_bit; | ||||
| 	int start_bit; | ||||
| } __cvmx_qlm_jtag_field_t; | ||||
| 
 | ||||
| /**
 | ||||
|  * Return the number of QLMs supported by the chip | ||||
|  * | ||||
|  * @return  Number of QLMs | ||||
|  */ | ||||
| int cvmx_qlm_get_num(void); | ||||
| 
 | ||||
| /**
 | ||||
|  * Return the qlm number based on the interface | ||||
|  * | ||||
|  * @param xiface  Interface to look up | ||||
|  */ | ||||
| int cvmx_qlm_interface(int xiface); | ||||
| 
 | ||||
| /**
 | ||||
|  * Return the qlm number for a port in the interface | ||||
|  * | ||||
|  * @param xiface  interface to look up | ||||
|  * @param index  index in an interface | ||||
|  * | ||||
|  * @return the qlm number based on the xiface | ||||
|  */ | ||||
| int cvmx_qlm_lmac(int xiface, int index); | ||||
| 
 | ||||
| /**
 | ||||
|  * Return whether DLM5, DLM6 or DLM5+DLM6 is used by a BGX | ||||
|  * | ||||
|  * @param bgx  BGX to search for. | ||||
|  * | ||||
|  * @return muxes used: 0 = DLM5+DLM6, 1 = DLM5, 2 = DLM6. | ||||
|  */ | ||||
| int cvmx_qlm_mux_interface(int bgx); | ||||
| 
 | ||||
| /**
 | ||||
|  * Return number of lanes for a given qlm | ||||
|  * | ||||
|  * @param qlm QLM block to query | ||||
|  * | ||||
|  * @return  Number of lanes | ||||
|  */ | ||||
| int cvmx_qlm_get_lanes(int qlm); | ||||
| 
 | ||||
| /**
 | ||||
|  * Get the QLM JTAG fields for the Octeon model, on the supported chips. | ||||
|  * | ||||
|  * @return  qlm_jtag_field_t structure | ||||
|  */ | ||||
| const __cvmx_qlm_jtag_field_t *cvmx_qlm_jtag_get_field(void); | ||||
| 
 | ||||
| /**
 | ||||
|  * Get the QLM JTAG length by going through qlm_jtag_field for each | ||||
|  * Octeon model that is supported | ||||
|  * | ||||
|  * @return the length. | ||||
|  */ | ||||
| int cvmx_qlm_jtag_get_length(void); | ||||
| 
 | ||||
| /**
 | ||||
|  * Initialize the QLM layer | ||||
|  */ | ||||
| void cvmx_qlm_init(void); | ||||
| 
 | ||||
| /**
 | ||||
|  * Get a field in a QLM JTAG chain | ||||
|  * | ||||
|  * @param qlm    QLM to get | ||||
|  * @param lane   Lane in QLM to get | ||||
|  * @param name   String name of field | ||||
|  * | ||||
|  * @return JTAG field value | ||||
|  */ | ||||
| u64 cvmx_qlm_jtag_get(int qlm, int lane, const char *name); | ||||
| 
 | ||||
| /**
 | ||||
|  * Set a field in a QLM JTAG chain | ||||
|  * | ||||
|  * @param qlm    QLM to set | ||||
|  * @param lane   Lane in QLM to set, or -1 for all lanes | ||||
|  * @param name   String name of field | ||||
|  * @param value  Value of the field | ||||
|  */ | ||||
| void cvmx_qlm_jtag_set(int qlm, int lane, const char *name, u64 value); | ||||
| 
 | ||||
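| /* | ||||
|  * Usage sketch: field names come from the chip-specific JTAG chain | ||||
|  * description; "tx_swing" below is a placeholder, not necessarily a real | ||||
|  * chain field. Lane -1 applies the write to all lanes of the QLM. | ||||
|  */ | ||||
| static inline void example_qlm_jtag_bump(void) | ||||
| { | ||||
| 	u64 val = cvmx_qlm_jtag_get(0 /* qlm */, 0 /* lane */, "tx_swing"); | ||||
| 
| 	cvmx_qlm_jtag_set(0 /* qlm */, -1 /* all lanes */, "tx_swing", val); | ||||
| } | ||||
| 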
| /**
 | ||||
|  * Errata G-16094: QLM Gen2 Equalizer Default Setting Change. | ||||
|  * CN68XX pass 1.x and CN66XX pass 1.x QLM tweak. This function tweaks the | ||||
|  * JTAG settings for the QLMs to run better at 5 and 6.25 GHz. | ||||
|  */ | ||||
| void __cvmx_qlm_speed_tweak(void); | ||||
| 
 | ||||
| /**
 | ||||
|  * Errata G-16174: QLM Gen2 PCIe IDLE DAC change. | ||||
|  * CN68XX pass 1.x, CN66XX pass 1.x and CN63XX pass 1.0-2.2 QLM tweak. | ||||
|  * This function tweaks the JTAG settings for the QLMs so that PCIe runs better. | ||||
|  */ | ||||
| void __cvmx_qlm_pcie_idle_dac_tweak(void); | ||||
| 
 | ||||
| void __cvmx_qlm_pcie_cfg_rxd_set_tweak(int qlm, int lane); | ||||
| 
 | ||||
| /**
 | ||||
|  * Get the speed (Gbaud) of the QLM in MHz. | ||||
|  * | ||||
|  * @param qlm    QLM to examine | ||||
|  * | ||||
|  * @return Speed in MHz | ||||
|  */ | ||||
| int cvmx_qlm_get_gbaud_mhz(int qlm); | ||||
| /**
 | ||||
|  * Get the speed (Gbaud) of the QLM in MHz on a specific node. | ||||
|  * | ||||
|  * @param node   Target QLM node | ||||
|  * @param qlm    QLM to examine | ||||
|  * | ||||
|  * @return Speed in MHz | ||||
|  */ | ||||
| int cvmx_qlm_get_gbaud_mhz_node(int node, int qlm); | ||||
| 
 | ||||
| enum cvmx_qlm_mode { | ||||
| 	CVMX_QLM_MODE_DISABLED = -1, | ||||
| 	CVMX_QLM_MODE_SGMII = 1, | ||||
| 	CVMX_QLM_MODE_XAUI, | ||||
| 	CVMX_QLM_MODE_RXAUI, | ||||
| 	CVMX_QLM_MODE_PCIE,	/* gen3 / gen2 / gen1 */ | ||||
| 	CVMX_QLM_MODE_PCIE_1X2, /* 1x2 gen2 / gen1 */ | ||||
| 	CVMX_QLM_MODE_PCIE_2X1, /* 2x1 gen2 / gen1 */ | ||||
| 	CVMX_QLM_MODE_PCIE_1X1, /* 1x1 gen2 / gen1 */ | ||||
| 	CVMX_QLM_MODE_SRIO_1X4, /* 1x4 short / long */ | ||||
| 	CVMX_QLM_MODE_SRIO_2X2, /* 2x2 short / long */ | ||||
| 	CVMX_QLM_MODE_SRIO_4X1, /* 4x1 short / long */ | ||||
| 	CVMX_QLM_MODE_ILK, | ||||
| 	CVMX_QLM_MODE_QSGMII, | ||||
| 	CVMX_QLM_MODE_SGMII_SGMII, | ||||
| 	CVMX_QLM_MODE_SGMII_DISABLED, | ||||
| 	CVMX_QLM_MODE_DISABLED_SGMII, | ||||
| 	CVMX_QLM_MODE_SGMII_QSGMII, | ||||
| 	CVMX_QLM_MODE_QSGMII_QSGMII, | ||||
| 	CVMX_QLM_MODE_QSGMII_DISABLED, | ||||
| 	CVMX_QLM_MODE_DISABLED_QSGMII, | ||||
| 	CVMX_QLM_MODE_QSGMII_SGMII, | ||||
| 	CVMX_QLM_MODE_RXAUI_1X2, | ||||
| 	CVMX_QLM_MODE_SATA_2X1, | ||||
| 	CVMX_QLM_MODE_XLAUI, | ||||
| 	CVMX_QLM_MODE_XFI, | ||||
| 	CVMX_QLM_MODE_10G_KR, | ||||
| 	CVMX_QLM_MODE_40G_KR4, | ||||
| 	CVMX_QLM_MODE_PCIE_1X8, /* 1x8 gen3 / gen2 / gen1 */ | ||||
| 	CVMX_QLM_MODE_RGMII_SGMII, | ||||
| 	CVMX_QLM_MODE_RGMII_XFI, | ||||
| 	CVMX_QLM_MODE_RGMII_10G_KR, | ||||
| 	CVMX_QLM_MODE_RGMII_RXAUI, | ||||
| 	CVMX_QLM_MODE_RGMII_XAUI, | ||||
| 	CVMX_QLM_MODE_RGMII_XLAUI, | ||||
| 	CVMX_QLM_MODE_RGMII_40G_KR4, | ||||
| 	CVMX_QLM_MODE_MIXED,		/* BGX2 is mixed mode, DLM5(SGMII) & DLM6(XFI) */ | ||||
| 	CVMX_QLM_MODE_SGMII_2X1,	/* Configure BGX2 separate for DLM5 & DLM6 */ | ||||
| 	CVMX_QLM_MODE_10G_KR_1X2,	/* Configure BGX2 separate for DLM5 & DLM6 */ | ||||
| 	CVMX_QLM_MODE_XFI_1X2,		/* Configure BGX2 separate for DLM5 & DLM6 */ | ||||
| 	CVMX_QLM_MODE_RGMII_SGMII_1X1,	/* Configure BGX2, applies to DLM5 */ | ||||
| 	CVMX_QLM_MODE_RGMII_SGMII_2X1,	/* Configure BGX2, applies to DLM6 */ | ||||
| 	CVMX_QLM_MODE_RGMII_10G_KR_1X1, /* Configure BGX2, applies to DLM6 */ | ||||
| 	CVMX_QLM_MODE_RGMII_XFI_1X1,	/* Configure BGX2, applies to DLM6 */ | ||||
| 	CVMX_QLM_MODE_SDL,		/* RMAC Pipe */ | ||||
| 	CVMX_QLM_MODE_CPRI,		/* RMAC */ | ||||
| 	CVMX_QLM_MODE_OCI | ||||
| }; | ||||
| 
 | ||||
| enum cvmx_gmx_inf_mode { | ||||
| 	CVMX_GMX_INF_MODE_DISABLED = 0, | ||||
| 	CVMX_GMX_INF_MODE_SGMII = 1,  /* Other interface can be SGMII or QSGMII */ | ||||
| 	CVMX_GMX_INF_MODE_QSGMII = 2, /* Other interface can be SGMII or QSGMII */ | ||||
| 	CVMX_GMX_INF_MODE_RXAUI = 3,  /* Only interface 0, interface 1 must be DISABLED */ | ||||
| }; | ||||
| 
 | ||||
| /**
 | ||||
|  * Eye diagram captures are stored in the following structure | ||||
|  */ | ||||
| typedef struct { | ||||
| 	int width;	   /* Width in the x direction (time) */ | ||||
| 	int height;	   /* Height in the y direction (voltage) */ | ||||
| 	u32 data[64][128]; /* Error count at location, saturates at max */ | ||||
| } cvmx_qlm_eye_t; | ||||
| 
 | ||||
| /**
 | ||||
|  * These apply to DLM1 and DLM2 if they are not in SATA mode. | ||||
|  * The manual refers to lanes as follows: | ||||
|  *  DLM 0 lane 0 == GSER0 lane 0 | ||||
|  *  DLM 0 lane 1 == GSER0 lane 1 | ||||
|  *  DLM 1 lane 2 == GSER1 lane 0 | ||||
|  *  DLM 1 lane 3 == GSER1 lane 1 | ||||
|  *  DLM 2 lane 4 == GSER2 lane 0 | ||||
|  *  DLM 2 lane 5 == GSER2 lane 1 | ||||
|  */ | ||||
| enum cvmx_pemx_cfg_mode { | ||||
| 	CVMX_PEM_MD_GEN2_2LANE = 0, /* Valid for PEM0(DLM1), PEM1(DLM2) */ | ||||
| 	CVMX_PEM_MD_GEN2_1LANE = 1, /* Valid for PEM0(DLM1.0), PEM1(DLM1.1,DLM2.0), PEM2(DLM2.1) */ | ||||
| 	CVMX_PEM_MD_GEN2_4LANE = 2, /* Valid for PEM0(DLM1-2) */ | ||||
| 	/* Reserved */ | ||||
| 	CVMX_PEM_MD_GEN1_2LANE = 4, /* Valid for PEM0(DLM1), PEM1(DLM2) */ | ||||
| 	CVMX_PEM_MD_GEN1_1LANE = 5, /* Valid for PEM0(DLM1.0), PEM1(DLM1.1,DLM2.0), PEM2(DLM2.1) */ | ||||
| 	CVMX_PEM_MD_GEN1_4LANE = 6, /* Valid for PEM0(DLM1-2) */ | ||||
| 	/* Reserved */ | ||||
| }; | ||||
| 
 | ||||
| /*
 | ||||
|  * Read QLM and return mode. | ||||
|  */ | ||||
| enum cvmx_qlm_mode cvmx_qlm_get_mode(int qlm); | ||||
| enum cvmx_qlm_mode cvmx_qlm_get_mode_cn78xx(int node, int qlm); | ||||
| enum cvmx_qlm_mode cvmx_qlm_get_dlm_mode(int dlm_mode, int interface); | ||||
| void __cvmx_qlm_set_mult(int qlm, int baud_mhz, int old_multiplier); | ||||
| 
 | ||||
| void cvmx_qlm_display_registers(int qlm); | ||||
| 
 | ||||
| int cvmx_qlm_measure_clock(int qlm); | ||||
| 
 | ||||
| /**
 | ||||
|  * Measure the reference clock of a QLM on a multi-node setup | ||||
|  * | ||||
|  * @param node   node to measure | ||||
|  * @param qlm    QLM to measure | ||||
|  * | ||||
|  * @return Clock rate in Hz | ||||
|  */ | ||||
| int cvmx_qlm_measure_clock_node(int node, int qlm); | ||||
| 
 | ||||
| /**
 | ||||
|  * Perform RX equalization on a QLM | ||||
|  * | ||||
|  * @param node	Node the QLM is on | ||||
|  * @param qlm	QLM to perform RX equalization on | ||||
|  * @param lane	Lane to use, or -1 for all lanes | ||||
|  * | ||||
|  * @return Zero on success, negative if any lane failed RX equalization | ||||
|  */ | ||||
| int __cvmx_qlm_rx_equalization(int node, int qlm, int lane); | ||||
| 
 | ||||
| /**
 | ||||
|  * Errata GSER-27882: GSER 10GBASE-KR Transmit Equalizer | ||||
|  * Training may not update PHY Tx Taps. This function is not static | ||||
|  * so that it can be shared with the BGX KR code | ||||
|  * | ||||
|  * @param node	Node to apply errata workaround | ||||
|  * @param qlm	QLM to apply errata workaround | ||||
|  * @param lane	Lane to apply the errata | ||||
|  */ | ||||
| int cvmx_qlm_gser_errata_27882(int node, int qlm, int lane); | ||||
| 
 | ||||
| void cvmx_qlm_gser_errata_25992(int node, int qlm); | ||||
| 
 | ||||
| #ifdef CVMX_DUMP_GSER | ||||
| /**
 | ||||
|  * Dump GSER configuration for node 0 | ||||
|  */ | ||||
| int cvmx_dump_gser_config(unsigned int gser); | ||||
| /**
 | ||||
|  * Dump GSER status for node 0 | ||||
|  */ | ||||
| int cvmx_dump_gser_status(unsigned int gser); | ||||
| /**
 | ||||
|  * Dump GSER configuration | ||||
|  */ | ||||
| int cvmx_dump_gser_config_node(unsigned int node, unsigned int gser); | ||||
| /**
 | ||||
|  * Dump GSER status | ||||
|  */ | ||||
| int cvmx_dump_gser_status_node(unsigned int node, unsigned int gser); | ||||
| #endif | ||||
| 
 | ||||
| int cvmx_qlm_eye_display(int node, int qlm, int qlm_lane, int format, const cvmx_qlm_eye_t *eye); | ||||
| 
 | ||||
| void cvmx_prbs_process_cmd(int node, int qlm, int mode); | ||||
| 
 | ||||
| #endif /* __CVMX_QLM_H__ */ | ||||
|  | @ -0,0 +1,113 @@ | |||
| /* SPDX-License-Identifier: GPL-2.0 */ | ||||
| /*
 | ||||
|  * Copyright (C) 2020 Marvell International Ltd. | ||||
|  * | ||||
|  * This file provides support for the processor local scratch memory. | ||||
|  * Scratch memory is byte addressable - all addresses are byte addresses. | ||||
|  */ | ||||
| 
 | ||||
| #ifndef __CVMX_SCRATCH_H__ | ||||
| #define __CVMX_SCRATCH_H__ | ||||
| 
 | ||||
| /* Note: This define must be a long, not a long long in order to compile
 | ||||
| 	without warnings for both 32-bit and 64-bit builds. */ | ||||
| #define CVMX_SCRATCH_BASE (-32768l) /* 0xffffffffffff8000 */ | ||||
| 
 | ||||
| /* Scratch line for LMTST/LMTDMA on Octeon3 models */ | ||||
| #ifdef CVMX_CAVIUM_OCTEON3 | ||||
| #define CVMX_PKO_LMTLINE 2ull | ||||
| #endif | ||||
| 
 | ||||
| /**
 | ||||
|  * Reads an 8 bit value from the processor local scratchpad memory. | ||||
|  * | ||||
|  * @param address byte address to read from | ||||
|  * | ||||
|  * @return value read | ||||
|  */ | ||||
| static inline u8 cvmx_scratch_read8(u64 address) | ||||
| { | ||||
| 	return *CASTPTR(volatile u8, CVMX_SCRATCH_BASE + address); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Reads a 16 bit value from the processor local scratchpad memory. | ||||
|  * | ||||
|  * @param address byte address to read from | ||||
|  * | ||||
|  * @return value read | ||||
|  */ | ||||
| static inline u16 cvmx_scratch_read16(u64 address) | ||||
| { | ||||
| 	return *CASTPTR(volatile u16, CVMX_SCRATCH_BASE + address); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Reads a 32 bit value from the processor local scratchpad memory. | ||||
|  * | ||||
|  * @param address byte address to read from | ||||
|  * | ||||
|  * @return value read | ||||
|  */ | ||||
| static inline u32 cvmx_scratch_read32(u64 address) | ||||
| { | ||||
| 	return *CASTPTR(volatile u32, CVMX_SCRATCH_BASE + address); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Reads a 64 bit value from the processor local scratchpad memory. | ||||
|  * | ||||
|  * @param address byte address to read from | ||||
|  * | ||||
|  * @return value read | ||||
|  */ | ||||
| static inline u64 cvmx_scratch_read64(u64 address) | ||||
| { | ||||
| 	return *CASTPTR(volatile u64, CVMX_SCRATCH_BASE + address); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Writes an 8 bit value to the processor local scratchpad memory. | ||||
|  * | ||||
|  * @param address byte address to write to | ||||
|  * @param value   value to write | ||||
|  */ | ||||
| static inline void cvmx_scratch_write8(u64 address, u64 value) | ||||
| { | ||||
| 	*CASTPTR(volatile u8, CVMX_SCRATCH_BASE + address) = (u8)value; | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Writes a 16 bit value to the processor local scratchpad memory. | ||||
|  * | ||||
|  * @param address byte address to write to | ||||
|  * @param value   value to write | ||||
|  */ | ||||
| static inline void cvmx_scratch_write16(u64 address, u64 value) | ||||
| { | ||||
| 	*CASTPTR(volatile u16, CVMX_SCRATCH_BASE + address) = (u16)value; | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Writes a 32 bit value to the processor local scratchpad memory. | ||||
|  * | ||||
|  * @param address byte address to write to | ||||
|  * @param value   value to write | ||||
|  */ | ||||
| static inline void cvmx_scratch_write32(u64 address, u64 value) | ||||
| { | ||||
| 	*CASTPTR(volatile u32, CVMX_SCRATCH_BASE + address) = (u32)value; | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Writes a 64 bit value to the processor local scratchpad memory. | ||||
|  * | ||||
|  * @param address byte address to write to | ||||
|  * @param value   value to write | ||||
|  */ | ||||
| static inline void cvmx_scratch_write64(u64 address, u64 value) | ||||
| { | ||||
| 	*CASTPTR(volatile u64, CVMX_SCRATCH_BASE + address) = value; | ||||
| } | ||||
| 
 | ||||
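| /* | ||||
|  * Round-trip sketch: scratch is byte addressable, so the same byte offset | ||||
|  * can be accessed at any width. Offset 0x20 is arbitrary here. | ||||
|  */ | ||||
| static inline int example_scratch_round_trip(void) | ||||
| { | ||||
| 	cvmx_scratch_write64(0x20, 0x1122334455667788ull); | ||||
| 	return cvmx_scratch_read64(0x20) == 0x1122334455667788ull; | ||||
| } | ||||
| 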
| #endif /* __CVMX_SCRATCH_H__ */ | ||||
										
											
[File diff suppressed because it is too large]
							|  | @ -0,0 +1,141 @@ | |||
| /* SPDX-License-Identifier: GPL-2.0 */ | ||||
| /*
 | ||||
|  * Copyright (C) 2020 Marvell International Ltd. | ||||
|  */ | ||||
| 
 | ||||
| #ifndef __OCTEON_ETH_H__ | ||||
| #define __OCTEON_ETH_H__ | ||||
| 
 | ||||
| #include <phy.h> | ||||
| #include <miiphy.h> | ||||
| 
 | ||||
| #include <mach/cvmx-helper.h> | ||||
| #include <mach/cvmx-helper-board.h> | ||||
| #include <mach/octeon_fdt.h> | ||||
| 
 | ||||
| struct eth_device; | ||||
| 
 | ||||
| /** Ethernet device private data structure for octeon ethernet */ | ||||
| struct octeon_eth_info { | ||||
| 	u64 link_state; | ||||
| 	u32 port;		   /** ipd port */ | ||||
| 	u32 interface;		   /** Port interface */ | ||||
| 	u32 index;		   /** port index on interface */ | ||||
| 	int node;		   /** OCX node number */ | ||||
| 	u32 initted_flag;	   /** 0 if port not initialized */ | ||||
| 	struct mii_dev *mii_bus;   /** MII bus for PHY */ | ||||
| 	struct phy_device *phydev; /** PHY device */ | ||||
| 	struct eth_device *ethdev; /** Eth device this priv is part of */ | ||||
| 	int mii_addr; | ||||
| 	int phy_fdt_offset;		    /** Offset of PHY info in device tree */ | ||||
| 	int fdt_offset;			    /** Offset of Eth interface in DT */ | ||||
| 	int phy_offset;			    /** Offset of PHY dev in device tree */ | ||||
| 	enum cvmx_phy_type phy_device_type; /** Type of PHY */ | ||||
| 	/* Current link status, used to reconfigure on status changes */ | ||||
| 	u64 packets_sent; | ||||
| 	u64 packets_received; | ||||
| 	u32 link_speed : 2; | ||||
| 	u32 link_duplex : 1; | ||||
| 	u32 link_status : 1; | ||||
| 	u32 loopback : 1; | ||||
| 	u32 enabled : 1; | ||||
| 	u32 is_c45 : 1;		    /** Set if we need to use clause 45 */ | ||||
| 	u32 vitesse_sfp_config : 1; /** Need Vitesse SFP config */ | ||||
| 	u32 ti_gpio_config : 1;	    /** Need TI GPIO config */ | ||||
| 	u32 bgx_mac_set : 1;	    /** Has the BGX MAC been set already */ | ||||
| 	u64 last_bgx_mac;	    /** Last BGX MAC address set */ | ||||
| 	u64 gmx_base;		    /** Base address to access GMX CSRs */ | ||||
| 	bool mod_abs;		    /** True if module is absent */ | ||||
| 
 | ||||
| 	/**
 | ||||
| 	 * User defined function to check if a SFP+ module is absent or not. | ||||
| 	 * | ||||
| 	 * @param	dev	Ethernet device | ||||
| 	 * @param	data	User supplied data | ||||
| 	 */ | ||||
| 	int (*check_mod_abs)(struct eth_device *dev, void *data); | ||||
| 
 | ||||
| 	/** User supplied data for check_mod_abs */ | ||||
| 	void *mod_abs_data; | ||||
| 	/**
 | ||||
| 	 * Called to check the status of a port.  This is used for some | ||||
| 	 * Vitesse and Inphi PHYs to probe the SFP adapter. | ||||
| 	 */ | ||||
| 	int (*phy_port_check)(struct phy_device *dev); | ||||
| 	/**
 | ||||
| 	 * Called whenever mod_abs changes state | ||||
| 	 * | ||||
| 	 * @param	dev	Ethernet device | ||||
| 	 * @param	mod_abs	True if module is absent | ||||
| 	 * | ||||
| 	 * @return	0 for success, otherwise error | ||||
| 	 */ | ||||
| 	int (*mod_abs_changed)(struct eth_device *dev, bool mod_abs); | ||||
| 	/** SDK phy information data structure */ | ||||
| 	cvmx_phy_info_t phy_info; | ||||
| #ifdef CONFIG_OCTEON_SFP | ||||
| 	/** Information about connected SFP/SFP+/SFP28/QSFP+/QSFP28 module */ | ||||
| 	struct octeon_sfp_info sfp; | ||||
| #endif | ||||
| }; | ||||
| 
 | ||||
| /**
 | ||||
|  * Searches for an ethernet device based on interface and index. | ||||
|  * | ||||
|  * @param interface - interface number to search for | ||||
|  * @param index - index to search for | ||||
|  * | ||||
|  * @returns pointer to ethernet device or NULL if not found. | ||||
|  */ | ||||
| struct eth_device *octeon_find_eth_by_interface_index(int interface, int index); | ||||
| 
 | ||||
| /**
 | ||||
|  * User-defined function called when the link state changes | ||||
|  * | ||||
|  * @param[in]	dev		Ethernet device | ||||
|  * @param	link_state	new link state | ||||
|  * | ||||
|  * NOTE: This is defined as a weak function. | ||||
|  */ | ||||
| void board_net_set_link(struct eth_device *dev, cvmx_helper_link_info_t link_state); | ||||
| 
 | ||||
| /**
 | ||||
|  * Registers a function to be called when the link goes down.  The function is | ||||
|  * often used for things like reading the SFP+ EEPROM. | ||||
|  * | ||||
|  * @param	dev		Ethernet device | ||||
|  * @param	phy_port_check	Function to call | ||||
|  */ | ||||
| void octeon_eth_register_phy_port_check(struct eth_device *dev, | ||||
| 					int (*phy_port_check)(struct phy_device *dev)); | ||||
| 
 | ||||
| /**
 | ||||
|  * This weak function is called after the phy driver is connected but before | ||||
|  * it is initialized. | ||||
|  * | ||||
|  * @param	dev	Ethernet device for phy | ||||
|  * | ||||
|  * @return	0 to continue, or -1 for error to stop setting up the phy | ||||
|  */ | ||||
| int octeon_eth_board_post_setup_phy(struct eth_device *dev); | ||||
| 
 | ||||
| /**
 | ||||
|  * Registers a function to be called whenever a mod_abs change is detected. | ||||
|  * | ||||
|  * @param	dev		Ethernet device | ||||
|  * @param	mod_abs_changed	Function to be called | ||||
|  */ | ||||
| void octeon_eth_register_mod_abs_changed(struct eth_device *dev, | ||||
| 					 int (*mod_abs_changed)(struct eth_device *dev, | ||||
| 								bool mod_abs)); | ||||
| 
 | ||||
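| /* | ||||
|  * Registration sketch: a hypothetical board callback that only reports | ||||
|  * module presence changes (printf is assumed to be available). "dev" is | ||||
|  * a previously located device, e.g. from | ||||
|  * octeon_find_eth_by_interface_index(). | ||||
|  */ | ||||
| static inline int example_log_mod_abs(struct eth_device *dev, bool mod_abs) | ||||
| { | ||||
| 	printf("SFP module %s\n", mod_abs ? "absent" : "present"); | ||||
| 	return 0; | ||||
| } | ||||
| 
| static inline void example_register_mod_abs(struct eth_device *dev) | ||||
| { | ||||
| 	octeon_eth_register_mod_abs_changed(dev, example_log_mod_abs); | ||||
| } | ||||
| 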
| /**
 | ||||
|  * Checks for state changes with the link state or module state | ||||
|  * | ||||
|  * @param	dev	Ethernet device to check | ||||
|  * | ||||
|  * NOTE: If the module state is changed then the module callback is called. | ||||
|  */ | ||||
| void octeon_phy_port_check(struct eth_device *dev); | ||||
| 
 | ||||
| #endif /* __OCTEON_ETH_H__ */ | ||||
|  | @ -0,0 +1,268 @@ | |||
| /* SPDX-License-Identifier: GPL-2.0 */ | ||||
| /*
 | ||||
|  * Copyright (C) 2020 Marvell International Ltd. | ||||
|  */ | ||||
| 
 | ||||
| #ifndef __OCTEON_FDT_H__ | ||||
| #define __OCTEON_FDT_H__ | ||||
| 
 | ||||
| struct phy_device; | ||||
| 
 | ||||
| /** Type of GPIO pin */ | ||||
| enum octeon_gpio_type { | ||||
| 	GPIO_TYPE_OCTEON,  /** Native Octeon */ | ||||
| 	GPIO_TYPE_PCA953X, /** PCA953X i2c GPIO expander */ | ||||
| 	GPIO_TYPE_PCA9554, /** PCA9554 i2c GPIO expander */ | ||||
| 	GPIO_TYPE_PCA9555, /** PCA9555 i2c GPIO expander */ | ||||
| 	GPIO_TYPE_PCA9698, /** PCA9698 i2c GPIO expander */ | ||||
| #ifdef CONFIG_PHY_VITESSE | ||||
| 	GPIO_TYPE_VSC8488, /** Vitesse VSC8488 or related PHY GPIO */ | ||||
| #endif | ||||
| 	GPIO_TYPE_UNKNOWN /** Unknown GPIO type */ | ||||
| }; | ||||
| 
 | ||||
| /**
 | ||||
|  * Trims nodes from the flat device tree. | ||||
|  * | ||||
|  * @param fdt - pointer to working FDT, usually in gd->fdt_blob | ||||
|  * @param fdt_key - key to preserve.  All non-matching keys are removed | ||||
|  * @param trim_name - name of property to look for.  If NULL use | ||||
|  *		      'cavium,qlm-trim' | ||||
|  * @param rename - set to TRUE to rename interfaces. | ||||
|  * @param callback - function to call on matched nodes. | ||||
|  * @param cbarg - passed to callback. | ||||
|  * | ||||
|  * The key should look something like device #, type where device # is a | ||||
|  * number from 0-9 and type is a string describing the type.  For QLM | ||||
|  * operations this would typically contain the QLM number followed by | ||||
|  * the type in the device tree, like "0,xaui", "0,sgmii", etc.  This function | ||||
|  * will trim all items in the device tree which match the device number but | ||||
|  * have a type which does not match.  For example, if a QLM has a xaui module | ||||
|  * installed on QLM 0 and "0,xaui" is passed as a key, then all FDT nodes that | ||||
|  * have "0,xaui" will be preserved but all others, i.e. "0,sgmii" will be | ||||
|  * removed. | ||||
|  * | ||||
|  * Note that the trim_name must also match.  If trim_name is NULL then it | ||||
|  * looks for the property "cavium,qlm-trim". | ||||
|  * | ||||
|  * Also, when the trim_name is "cavium,qlm-trim" or NULL, the interfaces | ||||
|  * will also be renamed based on their register values. | ||||
|  * | ||||
|  * For example, if a PIP interface is named "interface@W" and has the property | ||||
|  * reg = <0> then the interface will be renamed after this function to | ||||
|  * interface@0. | ||||
|  * | ||||
|  * @return 0 for success. | ||||
|  */ | ||||
| int octeon_fdt_patch_rename(void *fdt, const char *fdt_key, const char *trim_name, bool rename, | ||||
| 			    void (*callback)(void *fdt, int offset, void *arg), void *cbarg); | ||||
| 
 | ||||
| /**
 | ||||
|  * Trims nodes from the flat device tree. | ||||
|  * | ||||
|  * @param fdt - pointer to working FDT, usually in gd->fdt_blob | ||||
|  * @param fdt_key - key to preserve.  All non-matching keys are removed | ||||
|  * @param trim_name - name of property to look for.  If NULL use | ||||
|  *		      'cavium,qlm-trim' | ||||
|  * | ||||
|  * The key should look something like device #, type where device # is a | ||||
|  * number from 0-9 and type is a string describing the type.  For QLM | ||||
|  * operations this would typically contain the QLM number followed by | ||||
|  * the type in the device tree, like "0,xaui", "0,sgmii", etc.  This function | ||||
|  * will trim all items in the device tree which match the device number but | ||||
|  * have a type which does not match.  For example, if a QLM has a xaui module | ||||
|  * installed on QLM 0 and "0,xaui" is passed as a key, then all FDT nodes that | ||||
|  * have "0,xaui" will be preserved but all others, i.e. "0,sgmii" will be | ||||
|  * removed. | ||||
|  * | ||||
|  * Note that the trim_name must also match.  If trim_name is NULL then it | ||||
|  * looks for the property "cavium,qlm-trim". | ||||
|  * | ||||
|  * Also, when the trim_name is "cavium,qlm-trim" or NULL, the interfaces | ||||
|  * will also be renamed based on their register values. | ||||
|  * | ||||
|  * For example, if a PIP interface is named "interface@W" and has the property | ||||
|  * reg = <0> then the interface will be renamed after this function to | ||||
|  * interface@0. | ||||
|  * | ||||
|  * @return 0 for success. | ||||
|  */ | ||||
| int octeon_fdt_patch(void *fdt, const char *fdt_key, const char *trim_name); | ||||
| 
 | ||||
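| /* | ||||
|  * Usage sketch: trim the working FDT so only nodes tagged for QLM 0 in | ||||
|  * XAUI mode survive. The key follows the "device,type" format described | ||||
|  * above; "fdt" is assumed to point at a writable copy of the device tree. | ||||
|  */ | ||||
| static inline int example_trim_fdt(void *fdt) | ||||
| { | ||||
| 	return octeon_fdt_patch(fdt, "0,xaui", NULL /* use cavium,qlm-trim */); | ||||
| } | ||||
| 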
| /**
 | ||||
|  * Fix up the MAC address in the flat device tree based on the MAC address | ||||
|  * stored in ethaddr or in the board descriptor. | ||||
|  * | ||||
|  * NOTE: This function is weak and an alias for __octeon_fixup_fdt_mac_addr. | ||||
|  */ | ||||
| void octeon_fixup_fdt_mac_addr(void); | ||||
| 
 | ||||
| /**
 | ||||
|  * This function fixes the clock-frequency in the flat device tree for the UART. | ||||
|  * | ||||
|  * NOTE: This function is weak and an alias for __octeon_fixup_fdt_uart. | ||||
|  */ | ||||
| void octeon_fixup_fdt_uart(void); | ||||
| 
 | ||||
| /**
 | ||||
|  * This function fills in the /memory portion of the flat device tree. | ||||
|  * | ||||
|  * NOTE: This function is weak and aliased to __octeon_fixup_fdt_memory. | ||||
|  */ | ||||
| void octeon_fixup_fdt_memory(void); | ||||
| 
 | ||||
| int board_fixup_fdt(void); | ||||
| 
 | ||||
| void octeon_fixup_fdt(void); | ||||
| 
 | ||||
| /**
 | ||||
|  * This is a helper function to find the offset of a PHY device given | ||||
|  * an Ethernet device. | ||||
|  * | ||||
|  * @param[in] eth - Ethernet device to search for PHY offset | ||||
|  * | ||||
|  * @returns offset of phy info in device tree or -1 if not found | ||||
|  */ | ||||
| int octeon_fdt_find_phy(const struct udevice *eth); | ||||
| 
 | ||||
| /**
 | ||||
|  * This helper function returns if a node contains the specified vendor name. | ||||
|  * | ||||
|  * @param[in]	fdt		pointer to device tree blob | ||||
|  * @param	nodeoffset	offset of the tree node | ||||
|  * @param[in]	vendor		name of vendor to check | ||||
|  * | ||||
|  * returns: | ||||
|  *	0, if the node has a compatible vendor string property | ||||
|  *	1, if the node does not contain the vendor string property | ||||
|  *	-FDT_ERR_NOTFOUND, if the given node has no 'compatible' property | ||||
|  *	-FDT_ERR_BADOFFSET, if nodeoffset does not refer to a BEGIN_NODE tag | ||||
|  *	-FDT_ERR_BADMAGIC, | ||||
|  *	-FDT_ERR_BADVERSION, | ||||
|  *	-FDT_ERR_BADSTATE, | ||||
|  *	-FDT_ERR_BADSTRUCTURE, standard meanings | ||||
|  */ | ||||
| int octeon_fdt_compat_vendor(const void *fdt, int nodeoffset, const char *vendor); | ||||
| 
 | ||||
| /**
 | ||||
|  * Given a node in the device tree get the OCTEON OCX node number | ||||
|  * | ||||
|  * @param fdt		pointer to flat device tree | ||||
|  * @param nodeoffset	node offset to get OCX node for | ||||
|  * | ||||
|  * @return the Octeon OCX node number | ||||
|  */ | ||||
| int octeon_fdt_get_soc_node(const void *fdt, int nodeoffset); | ||||
| 
 | ||||
| /**
 | ||||
|  * Given a FDT node, check if it is compatible with a list of devices | ||||
|  * | ||||
|  * @param[in]	fdt		Flat device tree pointer | ||||
|  * @param	node_offset	Node offset in device tree | ||||
|  * @param[in]	strlist		Array of FDT devices to check, end must be NULL | ||||
|  * | ||||
|  * @return	0 if at least one device is compatible, 1 if not compatible. | ||||
|  */ | ||||
| int octeon_fdt_node_check_compatible(const void *fdt, int node_offset, const char *const *strlist); | ||||
| /**
 | ||||
|  * Given a node offset, find the i2c bus number for that node | ||||
|  * | ||||
|  * @param[in]	fdt	Pointer to flat device tree | ||||
|  * @param	node_offset	Node offset in device tree | ||||
|  * | ||||
|  * @return	i2c bus number or -1 if error | ||||
|  */ | ||||
| int octeon_fdt_i2c_get_bus(const void *fdt, int node_offset); | ||||
| 
 | ||||
| /**
 | ||||
|  * Given an offset into the fdt, output the i2c bus and address of the device | ||||
|  * | ||||
|  * @param[in]	fdt	fdt blob pointer | ||||
|  * @param	node	offset in FDT of device | ||||
|  * @param[out]	bus	i2c bus number of device | ||||
|  * @param[out]	addr	address of device on i2c bus | ||||
|  * | ||||
|  * @return	0 for success, -1 on error | ||||
|  */ | ||||
| int octeon_fdt_get_i2c_bus_addr(const void *fdt, int node, int *bus, int *addr); | ||||
| 
 | ||||
| /**
 | ||||
|  * Reads a GPIO pin given the node of the GPIO device in the device tree and | ||||
|  * the pin number. | ||||
|  * | ||||
|  * @param[in]	fdt	fdt blob pointer | ||||
|  * @param	phandle	phandle of GPIO node | ||||
|  * @param	pin	pin number to read | ||||
|  * | ||||
|  * @return	0 = pin is low, 1 = pin is high, -1 = error | ||||
|  */ | ||||
| int octeon_fdt_read_gpio(const void *fdt, int phandle, int pin); | ||||
| 
 | ||||
| /**
 | ||||
|  * Sets a GPIO pin given the node of the GPIO device in the device tree and | ||||
|  * the pin number. | ||||
|  * | ||||
|  * @param[in]	fdt	fdt blob pointer | ||||
|  * @param	phandle	phandle of GPIO node | ||||
|  * @param	pin	pin number to read | ||||
|  * @param	val	value to write (1 = high, 0 = low) | ||||
|  * | ||||
|  * @return	0 = success, -1 = error | ||||
|  */ | ||||
| int octeon_fdt_set_gpio(const void *fdt, int phandle, int pin, int val); | ||||
| 
 | ||||
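| /* | ||||
|  * Usage sketch: toggle a pin behind a GPIO expander referenced by a | ||||
|  * phandle. The phandle and pin number would normally come from a device | ||||
|  * tree property; they are passed in here as placeholders. | ||||
|  */ | ||||
| static inline int example_toggle_gpio(const void *fdt, int phandle, int pin) | ||||
| { | ||||
| 	int val = octeon_fdt_read_gpio(fdt, phandle, pin); | ||||
| 
| 	if (val < 0) | ||||
| 		return -1; | ||||
| 	return octeon_fdt_set_gpio(fdt, phandle, pin, !val); | ||||
| } | ||||
| 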
| /**
 | ||||
|  * Given the node to a MAC entry in the device tree, output the i2c bus, address | ||||
|  * and whether the module is absent. | ||||
|  * | ||||
|  * @param[in]	fdt		flat device tree pointer | ||||
|  * @param	mac_node	node of Ethernet port in the FDT | ||||
|  * @param[out]	bus		i2c bus address of SFP EEPROM | ||||
|  * @param[out]	addr		i2c address of SFP EEPROM | ||||
|  * @param[out]	mod_abs		Set true if module is absent, false if present | ||||
|  * | ||||
|  * @return	0 for success, -1 if there are problems with the device tree | ||||
|  */ | ||||
| int octeon_fdt_get_sfp_eeprom(const void *fdt, int mac_node, int *bus, int *addr, bool *mod_abs); | ||||
| 
 | ||||
| /**
 | ||||
|  * Given a node to a MAC entry in the device tree, output the i2c bus, address | ||||
|  * and whether the module is absent | ||||
|  * | ||||
|  * @param[in]	fdt		flat device tree pointer | ||||
|  * @param	mac_node	node of QSFP Ethernet port in FDT | ||||
|  * @param[out]	bus		i2c bus address of SFP EEPROM | ||||
|  * @param[out]	addr		i2c address of SFP eeprom | ||||
|  * @param[out]	mod_abs		Set true if module is absent, false if present | ||||
|  * | ||||
|  * @return	0 for success, -1 if there are problems with the device tree | ||||
|  */ | ||||
| int octeon_fdt_get_qsfp_eeprom(const void *fdt, int mac_node, int *bus, int *addr, bool *mod_abs); | ||||
| 
 | ||||
| /**
 | ||||
|  * Given the node of a GPIO entry output the GPIO type, i2c bus and i2c | ||||
|  * address. | ||||
|  * | ||||
|  * @param	fdt_node	node of GPIO in device tree, generally | ||||
|  *				derived from a phandle. | ||||
|  * @param[out]	type		Type of GPIO detected | ||||
|  * @param[out]	i2c_bus		For i2c GPIO expanders, the i2c bus number | ||||
|  * @param[out]	i2c_addr	For i2c GPIO expanders, the i2c address | ||||
|  * | ||||
|  * @return	0 for success, -1 for errors | ||||
|  * | ||||
|  * NOTE: It is up to the caller to determine the pin number. | ||||
|  */ | ||||
| int octeon_fdt_get_gpio_info(int fdt_node, enum octeon_gpio_type *type, int *i2c_bus, | ||||
| 			     int *i2c_addr); | ||||
| 
 | ||||
| /**
 | ||||
|  * Get the PHY data structure for the specified FDT node and output the type | ||||
|  * | ||||
|  * @param	fdt_node	FDT node of phy | ||||
|  * @param[out]	type		Type of GPIO | ||||
|  * | ||||
|  * @return	pointer to phy device or NULL if no match found. | ||||
|  */ | ||||
| struct phy_device *octeon_fdt_get_phy_gpio_info(int fdt_node, enum octeon_gpio_type *type); | ||||
| #endif /* __OCTEON_FDT_H__ */ | ||||
|  | @ -0,0 +1,68 @@ | |||
| /* SPDX-License-Identifier: GPL-2.0 */ | ||||
| /*
 | ||||
|  * Copyright (C) 2020 Marvell International Ltd. | ||||
|  */ | ||||
| 
 | ||||
| #ifndef __OCTEON_PCI_H__ | ||||
| #define __OCTEON_PCI_H__ | ||||
| 
 | ||||
| /**
 | ||||
|  * EEPROM entry struct | ||||
|  */ | ||||
| union octeon_pcie_eeprom { | ||||
| 	u64 u64; | ||||
| 	struct octeon_data_s { | ||||
| 		/**
 | ||||
| 		 * 0x9DA1 valid entry, 0x6A5D end of table, 0xffff invalid | ||||
| 		 * access | ||||
| 		 */ | ||||
| 		u64 preamble : 16; | ||||
| 		u64 : 1;	   /** Reserved */ | ||||
| 		/** Physical function number accessed by the write operation. */ | ||||
| 		u64 pf : 2; | ||||
| 		/**
 | ||||
| 		 * Specifies bit<31> of the address written by hardware. | ||||
| 		 * 1 = configuration mask register, 0 = configuration register | ||||
| 		 */ | ||||
| 		u64 cs2 : 1; | ||||
| 		/**
 | ||||
| 		 * Specifies bits<11:0> of the address written by hardware. | ||||
| 		 * Bits<30:12> of this address are all 0s. | ||||
| 		 */ | ||||
| 		u64 address : 12; | ||||
| 		u64 data : 32; | ||||
| 	} s; | ||||
| }; | ||||
| 
 | ||||
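| /* | ||||
|  * Decoding sketch: interpret one raw EEPROM word with the layout above. | ||||
|  * Returns 1 for a valid entry (preamble 0x9DA1); how the raw word is | ||||
|  * read from the EEPROM is not shown here. | ||||
|  */ | ||||
| static inline int example_pcie_eeprom_valid(u64 raw, unsigned int *address) | ||||
| { | ||||
| 	union octeon_pcie_eeprom e; | ||||
| 
| 	e.u64 = raw; | ||||
| 	if (e.s.preamble != 0x9DA1) | ||||
| 		return 0; | ||||
| 	*address = e.s.address; | ||||
| 	return 1; | ||||
| } | ||||
| 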
| void pci_dev_post_init(void); | ||||
| 
 | ||||
| int octeon_pci_io_readb(unsigned int reg); | ||||
| void octeon_pci_io_writeb(int value, unsigned int reg); | ||||
| int octeon_pci_io_readw(unsigned int reg); | ||||
| void octeon_pci_io_writew(int value, unsigned int reg); | ||||
| int octeon_pci_io_readl(unsigned int reg); | ||||
| void octeon_pci_io_writel(int value, unsigned int reg); | ||||
| int octeon_pci_mem1_readb(unsigned int reg); | ||||
| void octeon_pci_mem1_writeb(int value, unsigned int reg); | ||||
| int octeon_pci_mem1_readw(unsigned int reg); | ||||
| void octeon_pci_mem1_writew(int value, unsigned int reg); | ||||
| int octeon_pci_mem1_readl(unsigned int reg); | ||||
| void octeon_pci_mem1_writel(int value, unsigned int reg); | ||||
| 
 | ||||
| /* In the TLB mapped case, these also work with virtual addresses,
 | ||||
|  * and do the required virt<->phys translations as well. */ | ||||
| u32 octeon_pci_phys_to_bus(u32 phys); | ||||
| u32 octeon_pci_bus_to_phys(u32 bus); | ||||
| 
 | ||||
| /**
 | ||||
|  * Searches PCIe EEPROM for override data specified by address and pf. | ||||
|  * | ||||
|  * @param	address - PCIe config space address | ||||
|  * @param	pf	- PCIe config space pf num | ||||
|  * @param[out]	id	- override device and vendor ID | ||||
|  * | ||||
|  * @return	0 if override found, 1 if not found. | ||||
|  */ | ||||
| int octeon_find_pcie_id_override(unsigned int address, unsigned int pf, u32 *id); | ||||
| 
 | ||||
| #endif /* __OCTEON_PCI_H__ */ | ||||
|  | @ -0,0 +1,109 @@ | |||
| /* SPDX-License-Identifier: GPL-2.0 */ | ||||
| /*
 | ||||
|  * Copyright (C) 2020 Marvell International Ltd. | ||||
|  */ | ||||
| 
 | ||||
| #ifndef __OCTEON_QLM_H__ | ||||
| #define __OCTEON_QLM_H__ | ||||
| 
 | ||||
| /* Reference clock selector values for ref_clk_sel */ | ||||
| #define OCTEON_QLM_REF_CLK_100MHZ 0 /** 100 MHz */ | ||||
| #define OCTEON_QLM_REF_CLK_125MHZ 1 /** 125 MHz */ | ||||
| #define OCTEON_QLM_REF_CLK_156MHZ 2 /** 156.25 MHz */ | ||||
| #define OCTEON_QLM_REF_CLK_161MHZ 3 /** 161.1328125 MHz */ | ||||
| 
 | ||||
| /**
 | ||||
|  * Configure qlm/dlm speed and mode. | ||||
|  * @param qlm     The QLM or DLM to configure | ||||
|  * @param speed   The speed the QLM needs to be configured at, in MHz. | ||||
|  * @param mode    The mode the QLM is to be configured as: SGMII/XAUI/PCIe. | ||||
|  * @param rc      Only used for PCIe, rc = 1 for root complex mode, 0 for EP | ||||
|  *		  mode. | ||||
|  * @param pcie_mode Only used when qlm/dlm are in pcie mode. | ||||
|  * @param ref_clk_sel Reference clock to use for 70XX where: | ||||
|  *			0: 100MHz | ||||
|  *			1: 125MHz | ||||
|  *			2: 156.25MHz | ||||
|  *			3: 161.1328125MHz (CN73XX and CN78XX only) | ||||
|  * @param ref_clk_input	This selects which reference clock input to use.  For | ||||
|  *			cn70xx: | ||||
|  *				0: DLMC_REF_CLK0 | ||||
|  *				1: DLMC_REF_CLK1 | ||||
|  *				2: DLM0_REF_CLK | ||||
|  *			cn61xx: (not used) | ||||
|  *			cn78xx/cn76xx/cn73xx: | ||||
|  *				0: Internal clock (QLM[0-7]_REF_CLK) | ||||
|  *				1: QLMC_REF_CLK0 | ||||
|  *				2: QLMC_REF_CLK1 | ||||
|  * | ||||
|  * @return	Return 0 on success or -1. | ||||
|  * | ||||
|  * @note	When the 161MHz clock is used it can only be used for | ||||
|  *		XLAUI mode with a 6316 speed or XFI mode with a 103125 speed. | ||||
|  *		This rate is also only supported for CN73XX and CN78XX. | ||||
|  */ | ||||
| int octeon_configure_qlm(int qlm, int speed, int mode, int rc, int pcie_mode, int ref_clk_sel, | ||||
| 			 int ref_clk_input); | ||||
| 
 | ||||
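| /* | ||||
|  * Configuration sketch: bring up QLM 0 as SGMII from the 156.25 MHz | ||||
|  * reference. The 1250 MHz speed assumes SGMII's 1.25 Gbaud line rate, | ||||
|  * CVMX_QLM_MODE_SGMII comes from cvmx-qlm.h, and rc/pcie_mode are only | ||||
|  * meaningful for PCIe, so they are passed as 0. | ||||
|  */ | ||||
| static inline int example_configure_qlm_sgmii(void) | ||||
| { | ||||
| 	return octeon_configure_qlm(0 /* qlm */, 1250 /* MHz */, | ||||
| 				    CVMX_QLM_MODE_SGMII, 0 /* rc */, | ||||
| 				    0 /* pcie_mode */, | ||||
| 				    OCTEON_QLM_REF_CLK_156MHZ, | ||||
| 				    0 /* ref_clk_input */); | ||||
| } | ||||
| 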
| int octeon_configure_qlm_cn78xx(int node, int qlm, int speed, int mode, int rc, int pcie_mode, | ||||
| 				int ref_clk_sel, int ref_clk_input); | ||||
| 
 | ||||
| /**
 | ||||
|  * Some QLM speeds need to override the default tuning parameters | ||||
|  * | ||||
|  * @param node     Node to configure | ||||
|  * @param qlm      QLM to configure | ||||
|  * @param baud_mhz Desired speed in MHz | ||||
|  * @param lane     Lane to apply the tuning parameters to | ||||
|  * @param tx_swing Voltage swing.  The higher the value the lower the voltage, | ||||
|  *		   the default value is 7. | ||||
|  * @param tx_pre   pre-cursor pre-emphasis | ||||
|  * @param tx_post  post-cursor pre-emphasis. | ||||
|  * @param tx_gain   Transmit gain. Range 0-7 | ||||
|  * @param tx_vboost Transmit voltage boost. Range 0-1 | ||||
|  */ | ||||
| void octeon_qlm_tune_per_lane_v3(int node, int qlm, int baud_mhz, int lane, int tx_swing, | ||||
| 				 int tx_pre, int tx_post, int tx_gain, int tx_vboost); | ||||
| 
 | ||||
| /**
 | ||||
|  * Some QLM speeds need to override the default tuning parameters | ||||
|  * | ||||
|  * @param node     Node to configure | ||||
|  * @param qlm      QLM to configure | ||||
|  * @param baud_mhz Desired speed in MHz | ||||
|  * @param tx_swing Voltage swing.  The higher the value the lower the voltage, | ||||
|  *		   the default value is 7. | ||||
|  * @param tx_premptap bits [0:3] pre-cursor pre-emphasis, bits[4:8] post-cursor | ||||
|  *		      pre-emphasis. | ||||
|  * @param tx_gain   Transmit gain. Range 0-7 | ||||
|  * @param tx_vboost Transmit voltage boost. Range 0-1 | ||||
|  */ | ||||
| void octeon_qlm_tune_v3(int node, int qlm, int baud_mhz, int tx_swing, int tx_premptap, int tx_gain, | ||||
| 			int tx_vboost); | ||||
| 
 | ||||
| /**
 | ||||
|  * Disables DFE for the specified QLM lane(s). | ||||
|  * This function should only be called for low-loss channels. | ||||
|  * | ||||
|  * @param node     Node to configure | ||||
|  * @param qlm      QLM to configure | ||||
|  * @param lane     Lane to configure, or -1 all lanes | ||||
|  * @param baud_mhz The speed the QLM needs to be configured at, in MHz. | ||||
|  * @param mode     The mode the QLM is to be configured as: SGMII/XAUI/PCIe. | ||||
|  */ | ||||
| void octeon_qlm_dfe_disable(int node, int qlm, int lane, int baud_mhz, int mode); | ||||
| 
 | ||||
| /**
 | ||||
|  * Some QLMs need to override the default pre-ctle for low loss channels. | ||||
|  * | ||||
|  * @param node     Node to configure | ||||
|  * @param qlm      QLM to configure | ||||
|  * @param pre_ctle pre-ctle settings for low loss channels | ||||
|  */ | ||||
| void octeon_qlm_set_channel_v3(int node, int qlm, int pre_ctle); | ||||
| 
 | ||||
| void octeon_init_qlm(int node); | ||||
| 
 | ||||
| int octeon_mcu_probe(int node); | ||||
| 
 | ||||
| #endif /* __OCTEON_QLM_H__ */ | ||||