// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-21 Intel Corporation.
 */

#include "iosm_ipc_imem.h"
#include "iosm_ipc_task_queue.h"

/* Actual tasklet function, will be called whenever the tasklet is scheduled.
 * Calls the event handler callback for each element in the message queue.
 */
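/* Note: elements queued while the handler is running are drained in the
 * same pass, since ipc_task->q_wpos is re-read on every loop iteration.
 */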
static void ipc_task_queue_handler(unsigned long data)
{
	struct ipc_task_queue *ipc_task = (struct ipc_task_queue *)data;
	unsigned int q_rpos = ipc_task->q_rpos;

	/* Loop over the input queue contents. */
	while (q_rpos != ipc_task->q_wpos) {
		/* Get the current first queue element. */
		struct ipc_task_queue_args *args = &ipc_task->args[q_rpos];

		/* Process the input message. */
		if (args->func)
			args->response = args->func(args->ipc_imem, args->arg,
						    args->msg, args->size);

		/* Signal completion for synchronous calls */
		if (args->completion)
			complete(args->completion);

		/* Free message if copy was allocated. */
		if (args->is_copy)
			kfree(args->msg);

		/* Invalidate the queue element. Technically,
		 * spin_lock_irqsave is not required here, as the
		 * element has already been processed, and the queue
		 * will not wrap around to this same element again
		 * within such a short time.
		 */
		args->completion = NULL;
		args->func = NULL;
		args->msg = NULL;
		args->size = 0;
		args->is_copy = false;

		/* Calculate the new read position and update the shared
		 * read pointer.
		 */
		q_rpos = (q_rpos + 1) % IPC_THREAD_QUEUE_SIZE;
		ipc_task->q_rpos = q_rpos;
	}
}

/* Free allocated message copies and complete any outstanding waiters left
 * in the queue during deallocation.
 */
static void ipc_task_queue_cleanup(struct ipc_task_queue *ipc_task)
{
	unsigned int q_rpos = ipc_task->q_rpos;

	while (q_rpos != ipc_task->q_wpos) {
		struct ipc_task_queue_args *args = &ipc_task->args[q_rpos];

		if (args->completion)
			complete(args->completion);

		if (args->is_copy)
			kfree(args->msg);

		q_rpos = (q_rpos + 1) % IPC_THREAD_QUEUE_SIZE;
		ipc_task->q_rpos = q_rpos;
	}
}

/* Add a message to the queue and trigger the ipc_task. */
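/* Queue discipline: in normal operation the tasklet handler is the single
 * consumer and the only writer of q_rpos; producers serialize on q_lock
 * and are the only writers of q_wpos.
 */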
static int
ipc_task_queue_add_task(struct iosm_imem *ipc_imem,
			int arg, void *msg,
			int (*func)(struct iosm_imem *ipc_imem, int arg,
				    void *msg, size_t size),
			size_t size, bool is_copy, bool wait)
{
	struct tasklet_struct *ipc_tasklet = ipc_imem->ipc_task->ipc_tasklet;
	struct ipc_task_queue *ipc_task = &ipc_imem->ipc_task->ipc_queue;
	struct completion completion;
	unsigned int pos, nextpos;
	unsigned long flags;
	int result = -EIO;

	init_completion(&completion);

	/* The tasklet send may be called from either interrupt or thread
	 * context, therefore protect the queue operation with a spinlock.
	 */
	spin_lock_irqsave(&ipc_task->q_lock, flags);

	pos = ipc_task->q_wpos;
	nextpos = (pos + 1) % IPC_THREAD_QUEUE_SIZE;

	/* Proceed only if there is a free queue slot; one slot is always
	 * left empty to distinguish a full queue from an empty one.
	 */
	if (nextpos != ipc_task->q_rpos) {
		/* Get the reference to the queue element and save the passed
		 * values.
		 */
		ipc_task->args[pos].arg = arg;
		ipc_task->args[pos].msg = msg;
		ipc_task->args[pos].func = func;
		ipc_task->args[pos].ipc_imem = ipc_imem;
		ipc_task->args[pos].size = size;
		ipc_task->args[pos].is_copy = is_copy;
		ipc_task->args[pos].completion = wait ? &completion : NULL;
		ipc_task->args[pos].response = -1;

		/* Apply a write barrier so that the args[pos] element is
		 * fully updated before ipc_task->q_wpos is updated.
		 */
		smp_wmb();

		/* Update the status of the free queue space. */
		ipc_task->q_wpos = nextpos;
		result = 0;
	}

	spin_unlock_irqrestore(&ipc_task->q_lock, flags);

	if (result == 0) {
		tasklet_schedule(ipc_tasklet);

		if (wait) {
			wait_for_completion(&completion);
			result = ipc_task->args[pos].response;
		}
	} else {
		dev_err(ipc_imem->ipc_task->dev, "queue is full\n");
	}

	return result;
}

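/* Example (illustrative only; my_handler and cfg are hypothetical): run a
 * handler in tasklet context and wait for its return value:
 *
 *	ret = ipc_task_queue_send_task(ipc_imem, my_handler, 0, &cfg,
 *				       sizeof(cfg), true);
 */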
int ipc_task_queue_send_task(struct iosm_imem *imem,
			     int (*func)(struct iosm_imem *ipc_imem, int arg,
					 void *msg, size_t size),
			     int arg, void *msg, size_t size, bool wait)
{
	bool is_copy = false;
	void *copy = msg;
	int ret = -ENOMEM;

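	/* Duplicate the payload so the caller's buffer need not remain
	 * valid until the tasklet runs; GFP_ATOMIC because this path may
	 * be entered from atomic context.
	 */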
	if (size > 0) {
		copy = kmemdup(msg, size, GFP_ATOMIC);
		if (!copy)
			goto out;

		is_copy = true;
	}

	ret = ipc_task_queue_add_task(imem, arg, copy, func,
				      size, is_copy, wait);
	if (ret < 0) {
		dev_err(imem->ipc_task->dev,
			"add task failed for %ps %d, %p, %zu, %d\n", func,
			arg, copy, size, is_copy);
		if (is_copy)
			kfree(copy);
		goto out;
	}

	ret = 0;
out:
	return ret;
}

int ipc_task_init(struct ipc_task *ipc_task)
{
	struct ipc_task_queue *ipc_queue = &ipc_task->ipc_queue;

	ipc_task->ipc_tasklet = kzalloc(sizeof(*ipc_task->ipc_tasklet),
					GFP_KERNEL);
	if (!ipc_task->ipc_tasklet)
		return -ENOMEM;

	/* Initialize the spinlock needed to protect the message queue of the
	 * ipc_task.
	 */
	spin_lock_init(&ipc_queue->q_lock);

	tasklet_init(ipc_task->ipc_tasklet, ipc_task_queue_handler,
		     (unsigned long)ipc_queue);
	return 0;
}

void ipc_task_deinit(struct ipc_task *ipc_task)
{
	tasklet_kill(ipc_task->ipc_tasklet);

	kfree(ipc_task->ipc_tasklet);
	/* This will free/complete any outstanding messages,
	 * without calling the actual handler.
	 */
	ipc_task_queue_cleanup(&ipc_task->ipc_queue);
}