dma: ti: k3-udma: Switch to k3_ringacc_request_rings_pair
We only request ring pairs via the K3 DMA driver, so switch to the new k3_ringacc_request_rings_pair() to simplify the code. As a good side effect, all boot stages now use exposed RING mode, which avoids having to maintain proxy mode for the 32-bit R5 core.

Signed-off-by: Vignesh Raghavendra <vigneshr@ti.com>
parent 7be5121719
commit ddcf5318af
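In short, each pair of k3_nav_ringacc_request_ring() calls (one for the forward ring, one for its completion ring) collapses into a single k3_nav_ringacc_request_rings_pair() call, and the per-core RINGACC_RING_USE_PROXY flag goes away. A condensed before/after sketch of the TX path, using the identifiers from the diff below (not standalone code; it assumes the U-Boot K3 NAVSS ringacc API and the driver's udma_chan/udma_dev structures):

	/* Before: two separate ring requests, proxy mode selected per core */
	uc->tchan->t_ring = k3_nav_ringacc_request_ring(ud->ringacc, uc->tchan->id,
							RINGACC_RING_USE_PROXY);
	uc->tchan->tc_ring = k3_nav_ringacc_request_ring(ud->ringacc, -1,
							 RINGACC_RING_USE_PROXY);
	if (!uc->tchan->t_ring || !uc->tchan->tc_ring)
		return -EBUSY;

	/* After: one call hands back both rings; a non-zero return means the
	 * pair could not be allocated, so fewer error labels are needed.
	 */
	ret = k3_nav_ringacc_request_rings_pair(ud->ringacc, uc->tchan->id, -1,
						&uc->tchan->t_ring,
						&uc->tchan->tc_ring);
	if (ret)
		return -EBUSY;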
@@ -32,12 +32,6 @@
 #include "k3-udma-hwdef.h"
 #include "k3-psil-priv.h"
 
-#if BITS_PER_LONG == 64
-#define RINGACC_RING_USE_PROXY	(0)
-#else
-#define RINGACC_RING_USE_PROXY	(1)
-#endif
-
 #define K3_UDMA_MAX_RFLOWS 1024
 
 struct udma_chan;
@@ -796,21 +790,14 @@ static int udma_alloc_tx_resources(struct udma_chan *uc)
 	if (ret)
 		return ret;
 
-	uc->tchan->t_ring = k3_nav_ringacc_request_ring(
-				ud->ringacc, uc->tchan->id,
-				RINGACC_RING_USE_PROXY);
-	if (!uc->tchan->t_ring) {
+	ret = k3_nav_ringacc_request_rings_pair(ud->ringacc, uc->tchan->id, -1,
+						&uc->tchan->t_ring,
+						&uc->tchan->tc_ring);
+	if (ret) {
 		ret = -EBUSY;
 		goto err_tx_ring;
 	}
 
-	uc->tchan->tc_ring = k3_nav_ringacc_request_ring(
-				ud->ringacc, -1, RINGACC_RING_USE_PROXY);
-	if (!uc->tchan->tc_ring) {
-		ret = -EBUSY;
-		goto err_txc_ring;
-	}
-
 	memset(&ring_cfg, 0, sizeof(ring_cfg));
 	ring_cfg.size = 16;
 	ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
@@ -827,7 +814,6 @@ static int udma_alloc_tx_resources(struct udma_chan *uc)
 err_ringcfg:
 	k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
 	uc->tchan->tc_ring = NULL;
-err_txc_ring:
 	k3_nav_ringacc_ring_free(uc->tchan->t_ring);
 	uc->tchan->t_ring = NULL;
 err_tx_ring:
@@ -857,6 +843,7 @@ static int udma_alloc_rx_resources(struct udma_chan *uc)
 {
 	struct k3_nav_ring_cfg ring_cfg;
 	struct udma_dev *ud = uc->ud;
+	struct udma_rflow *rflow;
 	int fd_ring_id;
 	int ret;
 
@@ -876,40 +863,31 @@ static int udma_alloc_rx_resources(struct udma_chan *uc)
 
 	fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id;
 
-	uc->rflow->fd_ring = k3_nav_ringacc_request_ring(
-				ud->ringacc, fd_ring_id,
-				RINGACC_RING_USE_PROXY);
-	if (!uc->rflow->fd_ring) {
+	rflow = uc->rflow;
+	ret = k3_nav_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1,
+						&rflow->fd_ring, &rflow->r_ring);
+	if (ret) {
 		ret = -EBUSY;
 		goto err_rx_ring;
 	}
 
-	uc->rflow->r_ring = k3_nav_ringacc_request_ring(
-				ud->ringacc, -1, RINGACC_RING_USE_PROXY);
-	if (!uc->rflow->r_ring) {
-		ret = -EBUSY;
-		goto err_rxc_ring;
-	}
-
 	memset(&ring_cfg, 0, sizeof(ring_cfg));
 	ring_cfg.size = 16;
 	ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
 	ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;
 
-	ret = k3_nav_ringacc_ring_cfg(uc->rflow->fd_ring, &ring_cfg);
-	ret |= k3_nav_ringacc_ring_cfg(uc->rflow->r_ring, &ring_cfg);
-
+	ret = k3_nav_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
+	ret |= k3_nav_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
 	if (ret)
 		goto err_ringcfg;
 
 	return 0;
 
 err_ringcfg:
-	k3_nav_ringacc_ring_free(uc->rflow->r_ring);
-	uc->rflow->r_ring = NULL;
-err_rxc_ring:
-	k3_nav_ringacc_ring_free(uc->rflow->fd_ring);
-	uc->rflow->fd_ring = NULL;
+	k3_nav_ringacc_ring_free(rflow->r_ring);
+	rflow->r_ring = NULL;
+	k3_nav_ringacc_ring_free(rflow->fd_ring);
+	rflow->fd_ring = NULL;
 err_rx_ring:
 	udma_put_rflow(uc);
 err_rflow: