// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * AMD SFH Client Layer
 * Copyright 2020 Advanced Micro Devices, Inc.
 * Authors: Nehal Bakulchandra Shah <Nehal-Bakulchandra.Shah@amd.com>
 *	    Sandeep Singh <Sandeep.singh@amd.com>
 */

#include <linux/dma-mapping.h>
#include <linux/hid.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/errno.h>

#include "hid_descriptor/amd_sfh_hid_desc.h"
#include "amd_sfh_pcie.h"
#include "amd_sfh_hid.h"

struct request_list {
	struct hid_device *hid;
	struct list_head list;
	u8 report_id;
	u8 sensor_idx;
	u8 report_type;
	u8 current_index;
};

static struct request_list req_list;

/* Record which sensor hub the request targets and wake up the waiting HID transport. */
void amd_sfh_set_report(struct hid_device *hid, int report_id,
			int report_type)
{
	struct amdtp_hid_data *hid_data = hid->driver_data;
	struct amdtp_cl_data *cli_data = hid_data->cli_data;
	int i;

	for (i = 0; i < cli_data->num_hid_devices; i++) {
		if (cli_data->hid_sensor_hubs[i] == hid) {
			cli_data->cur_hid_dev = i;
			break;
		}
	}
	amdtp_hid_wakeup(hid);
}

/* Queue a report request for the matching sensor hub and schedule the worker to service it. */
int amd_sfh_get_report(struct hid_device *hid, int report_id, int report_type)
{
	struct amdtp_hid_data *hid_data = hid->driver_data;
	struct amdtp_cl_data *cli_data = hid_data->cli_data;
	int i;

	for (i = 0; i < cli_data->num_hid_devices; i++) {
		if (cli_data->hid_sensor_hubs[i] == hid) {
			struct request_list *new = kzalloc(sizeof(*new), GFP_KERNEL);

			if (!new)
				return -ENOMEM;

			new->current_index = i;
			new->sensor_idx = cli_data->sensor_idx[i];
			new->hid = hid;
			new->report_type = report_type;
			new->report_id = report_id;
			cli_data->report_id[i] = report_id;
			cli_data->request_done[i] = false;
			list_add(&new->list, &req_list.list);
			break;
		}
	}
	schedule_delayed_work(&cli_data->work, 0);
	return 0;
}

/*
 * Worker: pull the newest queued request, fetch the feature or input report,
 * pass it to the HID core and wake up the waiter.
 */
static void amd_sfh_work(struct work_struct *work)
{
	struct amdtp_cl_data *cli_data = container_of(work, struct amdtp_cl_data, work.work);
	struct amd_input_data *in_data = cli_data->in_data;
	struct request_list *req_node;
	u8 current_index, sensor_index;
	u8 report_id, node_type;
	u8 report_size = 0;

	req_node = list_last_entry(&req_list.list, struct request_list, list);
	list_del(&req_node->list);
	current_index = req_node->current_index;
	sensor_index = req_node->sensor_idx;
	report_id = req_node->report_id;
	node_type = req_node->report_type;
	kfree(req_node);

	if (node_type == HID_FEATURE_REPORT) {
		report_size = get_feature_report(sensor_index, report_id,
						 cli_data->feature_report[current_index]);
		if (report_size)
			hid_input_report(cli_data->hid_sensor_hubs[current_index],
					 cli_data->report_type[current_index],
					 cli_data->feature_report[current_index], report_size, 0);
		else
			pr_err("AMDSFH: Invalid report size\n");

	} else if (node_type == HID_INPUT_REPORT) {
		report_size = get_input_report(current_index, sensor_index, report_id, in_data);
		if (report_size)
			hid_input_report(cli_data->hid_sensor_hubs[current_index],
					 cli_data->report_type[current_index],
					 in_data->input_report[current_index], report_size, 0);
		else
			pr_err("AMDSFH: Invalid report size\n");
	}
	cli_data->cur_hid_dev = current_index;
	cli_data->sensor_requested_cnt[current_index] = 0;
	amdtp_hid_wakeup(cli_data->hid_sensor_hubs[current_index]);
}

/* Periodic worker: forward the latest input report for every enabled sensor, then re-arm itself. */
static void amd_sfh_work_buffer(struct work_struct *work)
{
	struct amdtp_cl_data *cli_data = container_of(work, struct amdtp_cl_data, work_buffer.work);
	struct amd_input_data *in_data = cli_data->in_data;
	u8 report_size;
	int i;

	for (i = 0; i < cli_data->num_hid_devices; i++) {
		if (cli_data->sensor_sts[i] == SENSOR_ENABLED) {
			report_size = get_input_report(i, cli_data->sensor_idx[i],
						       cli_data->report_id[i], in_data);
			hid_input_report(cli_data->hid_sensor_hubs[i], HID_INPUT_REPORT,
					 in_data->input_report[i], report_size, 0);
		}
	}
	schedule_delayed_work(&cli_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP));
}

u32 amd_sfh_wait_for_response(struct amd_mp2_dev *mp2, u8 sid, u32 sensor_sts)
{
	if (mp2->mp2_ops->response)
		sensor_sts = mp2->mp2_ops->response(mp2, sid, sensor_sts);

	return sensor_sts;
}

/*
 * Discover the sensors exposed by the MP2 firmware, allocate DMA and report
 * buffers, start each sensor and register a HID device for it.
 */
int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
{
	struct amd_input_data *in_data = &privdata->in_data;
	struct amdtp_cl_data *cl_data = privdata->cl_data;
	struct amd_mp2_sensor_info info;
	struct device *dev;
	u32 feature_report_size;
	u32 input_report_size;
	int rc, i, status;
	u8 cl_idx;

	dev = &privdata->pdev->dev;

	cl_data->num_hid_devices = amd_mp2_get_sensor_num(privdata, &cl_data->sensor_idx[0]);

	INIT_DELAYED_WORK(&cl_data->work, amd_sfh_work);
	INIT_DELAYED_WORK(&cl_data->work_buffer, amd_sfh_work_buffer);
	INIT_LIST_HEAD(&req_list.list);
	cl_data->in_data = in_data;

	for (i = 0; i < cl_data->num_hid_devices; i++) {
		in_data->sensor_virt_addr[i] = dma_alloc_coherent(dev, sizeof(int) * 8,
								  &cl_data->sensor_dma_addr[i],
								  GFP_KERNEL);
		cl_data->sensor_sts[i] = SENSOR_DISABLED;
		cl_data->sensor_requested_cnt[i] = 0;
		cl_data->cur_hid_dev = i;
		cl_idx = cl_data->sensor_idx[i];
		cl_data->report_descr_sz[i] = get_descr_sz(cl_idx, descr_size);
		if (!cl_data->report_descr_sz[i]) {
			rc = -EINVAL;
			goto cleanup;
		}
		feature_report_size = get_descr_sz(cl_idx, feature_size);
		if (!feature_report_size) {
			rc = -EINVAL;
			goto cleanup;
		}
		input_report_size = get_descr_sz(cl_idx, input_size);
		if (!input_report_size) {
			rc = -EINVAL;
			goto cleanup;
		}
		cl_data->feature_report[i] = devm_kzalloc(dev, feature_report_size, GFP_KERNEL);
		if (!cl_data->feature_report[i]) {
			rc = -ENOMEM;
			goto cleanup;
		}
		in_data->input_report[i] = devm_kzalloc(dev, input_report_size, GFP_KERNEL);
		if (!in_data->input_report[i]) {
			rc = -ENOMEM;
			goto cleanup;
		}
		info.period = AMD_SFH_IDLE_LOOP;
		info.sensor_idx = cl_idx;
		info.dma_address = cl_data->sensor_dma_addr[i];

		cl_data->report_descr[i] =
			devm_kzalloc(dev, cl_data->report_descr_sz[i], GFP_KERNEL);
		if (!cl_data->report_descr[i]) {
			rc = -ENOMEM;
			goto cleanup;
		}
		rc = get_report_descriptor(cl_idx, cl_data->report_descr[i]);
		if (rc)
			goto cleanup;
		privdata->mp2_ops->start(privdata, info);
		status = amd_sfh_wait_for_response
				(privdata, cl_data->sensor_idx[i], SENSOR_ENABLED);
		if (status == SENSOR_ENABLED) {
			cl_data->sensor_sts[i] = SENSOR_ENABLED;
			rc = amdtp_hid_probe(cl_data->cur_hid_dev, cl_data);
			if (rc) {
				privdata->mp2_ops->stop(privdata, cl_data->sensor_idx[i]);
				status = amd_sfh_wait_for_response
						(privdata, cl_data->sensor_idx[i], SENSOR_DISABLED);
				if (status != SENSOR_ENABLED)
					cl_data->sensor_sts[i] = SENSOR_DISABLED;
				dev_dbg(dev, "sid 0x%x status 0x%x\n",
					cl_data->sensor_idx[i], cl_data->sensor_sts[i]);
				goto cleanup;
			}
		}
		dev_dbg(dev, "sid 0x%x status 0x%x\n",
			cl_data->sensor_idx[i], cl_data->sensor_sts[i]);
	}
	schedule_delayed_work(&cl_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP));
	return 0;

cleanup:
	for (i = 0; i < cl_data->num_hid_devices; i++) {
		if (in_data->sensor_virt_addr[i]) {
			dma_free_coherent(&privdata->pdev->dev, 8 * sizeof(int),
					  in_data->sensor_virt_addr[i],
					  cl_data->sensor_dma_addr[i]);
		}
		devm_kfree(dev, cl_data->feature_report[i]);
		devm_kfree(dev, in_data->input_report[i]);
		devm_kfree(dev, cl_data->report_descr[i]);
	}
	return rc;
}

int amd_sfh_hid_client_deinit(struct amd_mp2_dev *privdata)
{
	struct amdtp_cl_data *cl_data = privdata->cl_data;
	struct amd_input_data *in_data = cl_data->in_data;
	int i, status;

	for (i = 0; i < cl_data->num_hid_devices; i++) {
		if (cl_data->sensor_sts[i] == SENSOR_ENABLED) {
			privdata->mp2_ops->stop(privdata, cl_data->sensor_idx[i]);
			status = amd_sfh_wait_for_response
					(privdata, cl_data->sensor_idx[i], SENSOR_DISABLED);
			if (status != SENSOR_ENABLED)
				cl_data->sensor_sts[i] = SENSOR_DISABLED;
			dev_dbg(&privdata->pdev->dev, "stopping sid 0x%x status 0x%x\n",
				cl_data->sensor_idx[i], cl_data->sensor_sts[i]);
		}
	}

	cancel_delayed_work_sync(&cl_data->work);
	cancel_delayed_work_sync(&cl_data->work_buffer);
	amdtp_hid_remove(cl_data);

	for (i = 0; i < cl_data->num_hid_devices; i++) {
		if (in_data->sensor_virt_addr[i]) {
			dma_free_coherent(&privdata->pdev->dev, 8 * sizeof(int),
					  in_data->sensor_virt_addr[i],
					  cl_data->sensor_dma_addr[i]);
		}
	}
	return 0;
}