// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * AMD SFH Client Layer
 * Copyright 2020-2021 Advanced Micro Devices, Inc.
 * Authors: Nehal Bakulchandra Shah <Nehal-Bakulchandra.Shah@amd.com>
 *	    Sandeep Singh <Sandeep.singh@amd.com>
 *	    Basavaraj Natikar <Basavaraj.Natikar@amd.com>
 */

#include <linux/dma-mapping.h>
#include <linux/hid.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/errno.h>

#include "hid_descriptor/amd_sfh_hid_desc.h"
#include "amd_sfh_pcie.h"
#include "amd_sfh_hid.h"

/*
 * Pending report request, queued by amd_sfh_get_report() and serviced
 * by amd_sfh_work().
 */
struct request_list {
	struct hid_device *hid;
	struct list_head list;
	u8 report_id;
	u8 sensor_idx;
	u8 report_type;
	u8 current_index;
};

static struct request_list req_list;

void amd_sfh_set_report(struct hid_device *hid, int report_id,
			int report_type)
{
	struct amdtp_hid_data *hid_data = hid->driver_data;
	struct amdtp_cl_data *cli_data = hid_data->cli_data;
	int i;

	for (i = 0; i < cli_data->num_hid_devices; i++) {
		if (cli_data->hid_sensor_hubs[i] == hid) {
			cli_data->cur_hid_dev = i;
			break;
		}
	}
	amdtp_hid_wakeup(hid);
}

int amd_sfh_get_report(struct hid_device *hid, int report_id, int report_type)
{
	struct amdtp_hid_data *hid_data = hid->driver_data;
	struct amdtp_cl_data *cli_data = hid_data->cli_data;
	int i;

	for (i = 0; i < cli_data->num_hid_devices; i++) {
		if (cli_data->hid_sensor_hubs[i] == hid) {
			struct request_list *new = kzalloc(sizeof(*new), GFP_KERNEL);

			if (!new)
				return -ENOMEM;

			new->current_index = i;
			new->sensor_idx = cli_data->sensor_idx[i];
			new->hid = hid;
			new->report_type = report_type;
			new->report_id = report_id;
			cli_data->report_id[i] = report_id;
			cli_data->request_done[i] = false;
			list_add(&new->list, &req_list.list);
			break;
		}
	}
	schedule_delayed_work(&cli_data->work, 0);
	return 0;
}

/* Service one queued report request and forward the result to HID core. */
static void amd_sfh_work(struct work_struct *work)
{
	struct amdtp_cl_data *cli_data = container_of(work, struct amdtp_cl_data, work.work);
	struct amd_input_data *in_data = cli_data->in_data;
	struct request_list *req_node;
	u8 current_index, sensor_index;
	u8 report_id, node_type;
	u8 report_size = 0;

	req_node = list_last_entry(&req_list.list, struct request_list, list);
	list_del(&req_node->list);
	current_index = req_node->current_index;
	sensor_index = req_node->sensor_idx;
	report_id = req_node->report_id;
	node_type = req_node->report_type;
	kfree(req_node);

	if (node_type == HID_FEATURE_REPORT) {
		report_size = get_feature_report(sensor_index, report_id,
						 cli_data->feature_report[current_index]);
		if (report_size)
			hid_input_report(cli_data->hid_sensor_hubs[current_index],
					 cli_data->report_type[current_index],
					 cli_data->feature_report[current_index], report_size, 0);
		else
			pr_err("AMDSFH: Invalid report size\n");

	} else if (node_type == HID_INPUT_REPORT) {
		report_size = get_input_report(current_index, sensor_index, report_id, in_data);
		if (report_size)
			hid_input_report(cli_data->hid_sensor_hubs[current_index],
					 cli_data->report_type[current_index],
					 in_data->input_report[current_index], report_size, 0);
		else
			pr_err("AMDSFH: Invalid report size\n");
	}
	cli_data->cur_hid_dev = current_index;
	cli_data->sensor_requested_cnt[current_index] = 0;
	amdtp_hid_wakeup(cli_data->hid_sensor_hubs[current_index]);
}

/* Periodic work: poll every enabled sensor and push its latest input report. */
static void amd_sfh_work_buffer(struct work_struct *work)
{
	struct amdtp_cl_data *cli_data = container_of(work, struct amdtp_cl_data, work_buffer.work);
	struct amd_input_data *in_data = cli_data->in_data;
	u8 report_size;
	int i;

	for (i = 0; i < cli_data->num_hid_devices; i++) {
		if (cli_data->sensor_sts[i] == SENSOR_ENABLED) {
			report_size = get_input_report
				(i, cli_data->sensor_idx[i], cli_data->report_id[i], in_data);
			hid_input_report(cli_data->hid_sensor_hubs[i], HID_INPUT_REPORT,
					 in_data->input_report[i], report_size, 0);
		}
	}
	schedule_delayed_work(&cli_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP));
}

u32 amd_sfh_wait_for_response(struct amd_mp2_dev *mp2, u8 sid, u32 sensor_sts)
{
	if (mp2->mp2_ops->response)
		sensor_sts = mp2->mp2_ops->response(mp2, sid, sensor_sts);

	return sensor_sts;
}

int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
{
	struct amd_input_data *in_data = &privdata->in_data;
	struct amdtp_cl_data *cl_data = privdata->cl_data;
	struct amd_mp2_sensor_info info;
	struct device *dev;
	u32 feature_report_size;
	u32 input_report_size;
	int rc, i, status;
	u8 cl_idx;

	dev = &privdata->pdev->dev;

	cl_data->num_hid_devices = amd_mp2_get_sensor_num(privdata, &cl_data->sensor_idx[0]);

	INIT_DELAYED_WORK(&cl_data->work, amd_sfh_work);
	INIT_DELAYED_WORK(&cl_data->work_buffer, amd_sfh_work_buffer);
	INIT_LIST_HEAD(&req_list.list);
	cl_data->in_data = in_data;

	for (i = 0; i < cl_data->num_hid_devices; i++) {
		in_data->sensor_virt_addr[i] = dma_alloc_coherent(dev, sizeof(int) * 8,
								  &cl_data->sensor_dma_addr[i],
								  GFP_KERNEL);
		if (!in_data->sensor_virt_addr[i]) {
			rc = -ENOMEM;
			goto cleanup;
		}
		cl_data->sensor_sts[i] = SENSOR_DISABLED;
		cl_data->sensor_requested_cnt[i] = 0;
		cl_data->cur_hid_dev = i;
		cl_idx = cl_data->sensor_idx[i];
		cl_data->report_descr_sz[i] = get_descr_sz(cl_idx, descr_size);
		if (!cl_data->report_descr_sz[i]) {
			rc = -EINVAL;
			goto cleanup;
		}
		feature_report_size = get_descr_sz(cl_idx, feature_size);
		if (!feature_report_size) {
			rc = -EINVAL;
			goto cleanup;
		}
		input_report_size = get_descr_sz(cl_idx, input_size);
		if (!input_report_size) {
			rc = -EINVAL;
			goto cleanup;
		}
		cl_data->feature_report[i] = devm_kzalloc(dev, feature_report_size, GFP_KERNEL);
		if (!cl_data->feature_report[i]) {
			rc = -ENOMEM;
			goto cleanup;
		}
		in_data->input_report[i] = devm_kzalloc(dev, input_report_size, GFP_KERNEL);
		if (!in_data->input_report[i]) {
			rc = -ENOMEM;
			goto cleanup;
		}
		info.period = AMD_SFH_IDLE_LOOP;
		info.sensor_idx = cl_idx;
		info.dma_address = cl_data->sensor_dma_addr[i];

		cl_data->report_descr[i] =
			devm_kzalloc(dev, cl_data->report_descr_sz[i], GFP_KERNEL);
		if (!cl_data->report_descr[i]) {
			rc = -ENOMEM;
			goto cleanup;
		}
		rc = get_report_descriptor(cl_idx, cl_data->report_descr[i]);
		if (rc)
			goto cleanup;
		privdata->mp2_ops->start(privdata, info);
		status = amd_sfh_wait_for_response
				(privdata, cl_data->sensor_idx[i], SENSOR_ENABLED);
		if (status == SENSOR_ENABLED) {
			cl_data->sensor_sts[i] = SENSOR_ENABLED;
			rc = amdtp_hid_probe(cl_data->cur_hid_dev, cl_data);
			if (rc) {
				privdata->mp2_ops->stop(privdata, cl_data->sensor_idx[i]);
				status = amd_sfh_wait_for_response
						(privdata, cl_data->sensor_idx[i], SENSOR_DISABLED);
				if (status != SENSOR_ENABLED)
					cl_data->sensor_sts[i] = SENSOR_DISABLED;
				dev_dbg(dev, "sid 0x%x status 0x%x\n",
					cl_data->sensor_idx[i], cl_data->sensor_sts[i]);
				goto cleanup;
			}
		}
		dev_dbg(dev, "sid 0x%x status 0x%x\n",
			cl_data->sensor_idx[i], cl_data->sensor_sts[i]);
	}
	schedule_delayed_work(&cl_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP));
	return 0;

cleanup:
	for (i = 0; i < cl_data->num_hid_devices; i++) {
		if (in_data->sensor_virt_addr[i]) {
			dma_free_coherent(&privdata->pdev->dev, 8 * sizeof(int),
					  in_data->sensor_virt_addr[i],
					  cl_data->sensor_dma_addr[i]);
		}
		devm_kfree(dev, cl_data->feature_report[i]);
		devm_kfree(dev, in_data->input_report[i]);
		devm_kfree(dev, cl_data->report_descr[i]);
	}
	return rc;
}

int amd_sfh_hid_client_deinit(struct amd_mp2_dev *privdata)
{
	struct amdtp_cl_data *cl_data = privdata->cl_data;
	struct amd_input_data *in_data = cl_data->in_data;
	int i, status;

	for (i = 0; i < cl_data->num_hid_devices; i++) {
		if (cl_data->sensor_sts[i] == SENSOR_ENABLED) {
			privdata->mp2_ops->stop(privdata, cl_data->sensor_idx[i]);
			status = amd_sfh_wait_for_response
					(privdata, cl_data->sensor_idx[i], SENSOR_DISABLED);
			if (status != SENSOR_ENABLED)
				cl_data->sensor_sts[i] = SENSOR_DISABLED;
			dev_dbg(&privdata->pdev->dev, "stopping sid 0x%x status 0x%x\n",
				cl_data->sensor_idx[i], cl_data->sensor_sts[i]);
		}
	}

	cancel_delayed_work_sync(&cl_data->work);
	cancel_delayed_work_sync(&cl_data->work_buffer);
	amdtp_hid_remove(cl_data);

	for (i = 0; i < cl_data->num_hid_devices; i++) {
		if (in_data->sensor_virt_addr[i]) {
			dma_free_coherent(&privdata->pdev->dev, 8 * sizeof(int),
					  in_data->sensor_virt_addr[i],
					  cl_data->sensor_dma_addr[i]);
		}
	}
	return 0;
}