/*******************************************************************************
  Header File to describe Normal/enhanced descriptor functions used for RING
  and CHAINED modes.

  Copyright(C) 2011  STMicroelectronics Ltd

  It defines all the functions used to handle the normal/enhanced
  descriptors when the DMA is configured to work in chained or
  in ring mode.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#ifndef __DESC_COM_H__
#define __DESC_COM_H__

/* Specific functions used for Ring mode */
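/*
 * In ring mode each descriptor can own two data buffers; the descriptor
 * carrying the END_RING bit is the last one of the list and makes the
 * DMA wrap back to the descriptor base address.
 */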

/* Enhanced descriptors */
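/* Give buffer 2 an 8 KiB size and flag the last RX descriptor of the ring. */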
static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end)
{
	p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
			<< ERDES1_BUFFER2_SIZE_SHIFT)
		   & ERDES1_BUFFER2_SIZE_MASK);

	if (end)
		p->des1 |= cpu_to_le32(ERDES1_END_RING);
}

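/* Mark (or unmark) a TX descriptor as the last one of the ring. */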
static inline void enh_desc_end_tx_desc_on_ring(struct dma_desc *p, int end)
{
	if (end)
		p->des0 |= cpu_to_le32(ETDES0_END_RING);
	else
		p->des0 &= cpu_to_le32(~ETDES0_END_RING);
}

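/*
 * Program the TX buffer sizes; frames larger than 4 KiB are split
 * between buffer 1 (4 KiB) and buffer 2 (the remainder).
 */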
static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
{
	if (unlikely(len > BUF_SIZE_4KiB)) {
		p->des1 |= cpu_to_le32((((len - BUF_SIZE_4KiB)
					<< ETDES1_BUFFER2_SIZE_SHIFT)
			    & ETDES1_BUFFER2_SIZE_MASK) | (BUF_SIZE_4KiB
			    & ETDES1_BUFFER1_SIZE_MASK));
	} else
		p->des1 |= cpu_to_le32((len & ETDES1_BUFFER1_SIZE_MASK));
}

/* Normal descriptors */
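/* Give buffer 2 a (2 KiB - 1) size and flag the last RX descriptor. */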
static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end)
{
	p->des1 |= cpu_to_le32(((BUF_SIZE_2KiB - 1)
				<< RDES1_BUFFER2_SIZE_SHIFT)
		    & RDES1_BUFFER2_SIZE_MASK);

	if (end)
		p->des1 |= cpu_to_le32(RDES1_END_RING);
}

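/* Mark (or unmark) a normal TX descriptor as the last one of the ring. */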
static inline void ndesc_end_tx_desc_on_ring(struct dma_desc *p, int end)
{
	if (end)
		p->des1 |= cpu_to_le32(TDES1_END_RING);
	else
		p->des1 &= cpu_to_le32(~TDES1_END_RING);
}

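/*
 * Program the TX buffer sizes; frames larger than 2 KiB are split
 * between buffer 1 (2 KiB - 1 bytes) and buffer 2 (the remainder).
 */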
static inline void norm_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
{
	if (unlikely(len > BUF_SIZE_2KiB)) {
		unsigned int buffer1 = (BUF_SIZE_2KiB - 1)
					& TDES1_BUFFER1_SIZE_MASK;
		p->des1 |= cpu_to_le32((((len - buffer1)
					<< TDES1_BUFFER2_SIZE_SHIFT)
				& TDES1_BUFFER2_SIZE_MASK) | buffer1);
	} else
		p->des1 |= cpu_to_le32((len & TDES1_BUFFER1_SIZE_MASK));
}

/* Specific functions used for Chain mode */
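/*
 * In chain mode the "second address chained" bit tells the DMA that the
 * second pointer of each descriptor holds the address of the next
 * descriptor rather than a second data buffer, so only buffer 1 carries
 * data.
 */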

/* Enhanced descriptors */
static inline void ehn_desc_rx_set_on_chain(struct dma_desc *p)
{
	p->des1 |= cpu_to_le32(ERDES1_SECOND_ADDRESS_CHAINED);
}

static inline void enh_desc_end_tx_desc_on_chain(struct dma_desc *p)
{
	p->des0 |= cpu_to_le32(ETDES0_SECOND_ADDRESS_CHAINED);
}

static inline void enh_set_tx_desc_len_on_chain(struct dma_desc *p, int len)
{
	p->des1 |= cpu_to_le32(len & ETDES1_BUFFER1_SIZE_MASK);
}

/* Normal descriptors */
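/* The 'end' argument is ignored: chain mode does not use the end-of-ring bit. */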
static inline void ndesc_rx_set_on_chain(struct dma_desc *p, int end)
{
	p->des1 |= cpu_to_le32(RDES1_SECOND_ADDRESS_CHAINED);
}

static inline void ndesc_tx_set_on_chain(struct dma_desc *p)
{
	p->des1 |= cpu_to_le32(TDES1_SECOND_ADDRESS_CHAINED);
}

static inline void norm_set_tx_desc_len_on_chain(struct dma_desc *p, int len)
{
	p->des1 |= cpu_to_le32(len & TDES1_BUFFER1_SIZE_MASK);
}
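
/*
 * Usage sketch (illustrative only, not the driver's exact code): the
 * descriptor-setup paths pick the ring or chain variant according to how
 * the descriptor list was laid out, e.g. for an enhanced RX descriptor:
 *
 *	if (chained)
 *		ehn_desc_rx_set_on_chain(p);
 *	else
 *		ehn_desc_rx_set_on_ring(p, end);
 *
 * where "chained" stands for whatever flag or build option the caller
 * uses to track the configured descriptor layout.
 */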
#endif /* __DESC_COM_H__ */