// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Clock Protocol
 *
 * Copyright (C) 2018-2021 ARM Ltd.
 */

#include <linux/module.h>
#include <linux/sort.h>

#include "common.h"

enum scmi_clock_protocol_cmd {
	CLOCK_ATTRIBUTES = 0x3,
	CLOCK_DESCRIBE_RATES = 0x4,
	CLOCK_RATE_SET = 0x5,
	CLOCK_RATE_GET = 0x6,
	CLOCK_CONFIG_SET = 0x7,
};

struct scmi_msg_resp_clock_protocol_attributes {
	__le16 num_clocks;
	u8 max_async_req;
	u8 reserved;
};

struct scmi_msg_resp_clock_attributes {
	__le32 attributes;
#define	CLOCK_ENABLE	BIT(0)
	u8 name[SCMI_MAX_STR_SIZE];
};

struct scmi_clock_set_config {
	__le32 id;
	__le32 attributes;
};

struct scmi_msg_clock_describe_rates {
	__le32 id;
	__le32 rate_index;
};

struct scmi_msg_resp_clock_describe_rates {
	__le32 num_rates_flags;
#define NUM_RETURNED(x)		((x) & 0xfff)
#define RATE_DISCRETE(x)	!((x) & BIT(12))
#define NUM_REMAINING(x)	((x) >> 16)
	struct {
		__le32 value_low;
		__le32 value_high;
	} rate[];
#define RATE_TO_U64(X)		\
({				\
	typeof(X) x = (X);	\
	le32_to_cpu((x).value_low) | (u64)le32_to_cpu((x).value_high) << 32; \
})
};

struct scmi_clock_set_rate {
	__le32 flags;
#define CLOCK_SET_ASYNC		BIT(0)
#define CLOCK_SET_IGNORE_RESP	BIT(1)
#define CLOCK_SET_ROUND_UP	BIT(2)
#define CLOCK_SET_ROUND_AUTO	BIT(3)
	__le32 id;
	__le32 value_low;
	__le32 value_high;
};

struct clock_info {
	u32 version;
	int num_clocks;
	int max_async_req;
	atomic_t cur_async_req;
	struct scmi_clock_info *clk;
};

static int
scmi_clock_protocol_attributes_get(const struct scmi_protocol_handle *ph,
				   struct clock_info *ci)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_clock_protocol_attributes *attr;

	ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES,
				      0, sizeof(*attr), &t);
	if (ret)
		return ret;

	attr = t->rx.buf;

	ret = ph->xops->do_xfer(ph, t);
	if (!ret) {
		ci->num_clocks = le16_to_cpu(attr->num_clocks);
		ci->max_async_req = attr->max_async_req;
	}

	ph->xops->xfer_put(ph, t);
	return ret;
}

static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph,
				     u32 clk_id, struct scmi_clock_info *clk)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_clock_attributes *attr;

	ret = ph->xops->xfer_get_init(ph, CLOCK_ATTRIBUTES,
				      sizeof(clk_id), sizeof(*attr), &t);
	if (ret)
		return ret;

	put_unaligned_le32(clk_id, t->tx.buf);
	attr = t->rx.buf;

	ret = ph->xops->do_xfer(ph, t);
	if (!ret)
		strlcpy(clk->name, attr->name, SCMI_MAX_STR_SIZE);
	else
		clk->name[0] = '\0';

	ph->xops->xfer_put(ph, t);
	return ret;
}

static int rate_cmp_func(const void *_r1, const void *_r2)
{
	const u64 *r1 = _r1, *r2 = _r2;

	if (*r1 < *r2)
		return -1;
	else if (*r1 == *r2)
		return 0;
	else
		return 1;
}
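/*
 * Worked example of the num_rates_flags decoding used below (illustrative
 * values, not from any real platform): a returned word of 0x00050003 gives
 * NUM_RETURNED() == 3, NUM_REMAINING() == 5 and RATE_DISCRETE() == true
 * (bit 12 clear), i.e. this reply carries three discrete rates and five
 * more must be fetched by re-issuing CLOCK_DESCRIBE_RATES with rate_index
 * advanced to 3. With bit 12 set (e.g. 0x00001003) the clock is instead
 * described as a linear range and rate[0..2] hold the (min, max, step)
 * triplet in Hz.
 */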
static int
scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id,
			      struct scmi_clock_info *clk)
{
	u64 *rate = NULL;
	int ret, cnt;
	bool rate_discrete = false;
	u32 tot_rate_cnt = 0, rates_flag;
	u16 num_returned, num_remaining;
	struct scmi_xfer *t;
	struct scmi_msg_clock_describe_rates *clk_desc;
	struct scmi_msg_resp_clock_describe_rates *rlist;

	ret = ph->xops->xfer_get_init(ph, CLOCK_DESCRIBE_RATES,
				      sizeof(*clk_desc), 0, &t);
	if (ret)
		return ret;

	clk_desc = t->tx.buf;
	rlist = t->rx.buf;

	do {
		clk_desc->id = cpu_to_le32(clk_id);
		/* Set the number of rates to be skipped/already read */
		clk_desc->rate_index = cpu_to_le32(tot_rate_cnt);

		ret = ph->xops->do_xfer(ph, t);
		if (ret)
			goto err;

		rates_flag = le32_to_cpu(rlist->num_rates_flags);
		num_remaining = NUM_REMAINING(rates_flag);
		rate_discrete = RATE_DISCRETE(rates_flag);
		num_returned = NUM_RETURNED(rates_flag);

		if (tot_rate_cnt + num_returned > SCMI_MAX_NUM_RATES) {
			dev_err(ph->dev, "No. of rates > MAX_NUM_RATES\n");
			break;
		}

		if (!rate_discrete) {
			/* Non-discrete clocks report a (min, max, step) triplet */
			clk->range.min_rate = RATE_TO_U64(rlist->rate[0]);
			clk->range.max_rate = RATE_TO_U64(rlist->rate[1]);
			clk->range.step_size = RATE_TO_U64(rlist->rate[2]);
			dev_dbg(ph->dev, "Min %llu Max %llu Step %llu Hz\n",
				clk->range.min_rate, clk->range.max_rate,
				clk->range.step_size);
			break;
		}

		rate = &clk->list.rates[tot_rate_cnt];
		for (cnt = 0; cnt < num_returned; cnt++, rate++) {
			*rate = RATE_TO_U64(rlist->rate[cnt]);
			dev_dbg(ph->dev, "Rate %llu Hz\n", *rate);
		}

		tot_rate_cnt += num_returned;

		ph->xops->reset_rx_to_maxsz(ph, t);
		/*
		 * check for both returned and remaining to avoid infinite
		 * loop due to buggy firmware
		 */
	} while (num_returned && num_remaining);

	if (rate_discrete && rate) {
		clk->list.num_rates = tot_rate_cnt;
		/*
		 * Sort the full list from its start: after the loop above,
		 * rate points one past the last retrieved element.
		 */
		sort(clk->list.rates, tot_rate_cnt, sizeof(*rate),
		     rate_cmp_func, NULL);
	}

	clk->rate_discrete = rate_discrete;

err:
	ph->xops->xfer_put(ph, t);
	return ret;
}

static int
scmi_clock_rate_get(const struct scmi_protocol_handle *ph,
		    u32 clk_id, u64 *value)
{
	int ret;
	struct scmi_xfer *t;

	ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_GET,
				      sizeof(__le32), sizeof(u64), &t);
	if (ret)
		return ret;

	put_unaligned_le32(clk_id, t->tx.buf);

	ret = ph->xops->do_xfer(ph, t);
	if (!ret)
		*value = get_unaligned_le64(t->rx.buf);

	ph->xops->xfer_put(ph, t);
	return ret;
}

static int scmi_clock_rate_set(const struct scmi_protocol_handle *ph,
			       u32 clk_id, u64 rate)
{
	int ret;
	u32 flags = 0;
	struct scmi_xfer *t;
	struct scmi_clock_set_rate *cfg;
	struct clock_info *ci = ph->get_priv(ph);

	ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_SET, sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	if (ci->max_async_req &&
	    atomic_inc_return(&ci->cur_async_req) < ci->max_async_req)
		flags |= CLOCK_SET_ASYNC;

	cfg = t->tx.buf;
	cfg->flags = cpu_to_le32(flags);
	cfg->id = cpu_to_le32(clk_id);
	cfg->value_low = cpu_to_le32(rate & 0xffffffff);
	cfg->value_high = cpu_to_le32(rate >> 32);

	if (flags & CLOCK_SET_ASYNC)
		ret = ph->xops->do_xfer_with_response(ph, t);
	else
		ret = ph->xops->do_xfer(ph, t);

	if (ci->max_async_req)
		atomic_dec(&ci->cur_async_req);

	ph->xops->xfer_put(ph, t);
	return ret;
}
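/*
 * Note on the async path above: CLOCK_RATE_SET requests are issued with
 * CLOCK_SET_ASYNC while the in-flight count stays below the
 * platform-advertised max_async_req limit, and are then completed via the
 * delayed response handled by do_xfer_with_response(); requests beyond the
 * limit silently fall back to the synchronous path. Callers are agnostic
 * to this, e.g. (hypothetical caller, assuming a 100 MHz target):
 *
 *	ret = clk_ops->rate_set(ph, clk_id, 100000000);
 *
 * returns only once the rate change has been acknowledged, whichever path
 * was taken.
 */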
static int
scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id,
		      u32 config)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_clock_set_config *cfg;

	ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_SET,
				      sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	cfg = t->tx.buf;
	cfg->id = cpu_to_le32(clk_id);
	cfg->attributes = cpu_to_le32(config);

	ret = ph->xops->do_xfer(ph, t);

	ph->xops->xfer_put(ph, t);
	return ret;
}

static int scmi_clock_enable(const struct scmi_protocol_handle *ph, u32 clk_id)
{
	return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE);
}

static int scmi_clock_disable(const struct scmi_protocol_handle *ph, u32 clk_id)
{
	return scmi_clock_config_set(ph, clk_id, 0);
}

static int scmi_clock_count_get(const struct scmi_protocol_handle *ph)
{
	struct clock_info *ci = ph->get_priv(ph);

	return ci->num_clocks;
}

static const struct scmi_clock_info *
scmi_clock_info_get(const struct scmi_protocol_handle *ph, u32 clk_id)
{
	struct scmi_clock_info *clk;
	struct clock_info *ci = ph->get_priv(ph);

	if (clk_id >= ci->num_clocks)
		return NULL;

	clk = ci->clk + clk_id;
	if (!clk->name[0])
		return NULL;

	return clk;
}

static const struct scmi_clk_proto_ops clk_proto_ops = {
	.count_get = scmi_clock_count_get,
	.info_get = scmi_clock_info_get,
	.rate_get = scmi_clock_rate_get,
	.rate_set = scmi_clock_rate_set,
	.enable = scmi_clock_enable,
	.disable = scmi_clock_disable,
};

static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph)
{
	u32 version;
	int clkid, ret;
	struct clock_info *cinfo;

	ph->xops->version_get(ph, &version);

	dev_dbg(ph->dev, "Clock Version %d.%d\n",
		PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));

	cinfo = devm_kzalloc(ph->dev, sizeof(*cinfo), GFP_KERNEL);
	if (!cinfo)
		return -ENOMEM;

	ret = scmi_clock_protocol_attributes_get(ph, cinfo);
	if (ret)
		return ret;

	cinfo->clk = devm_kcalloc(ph->dev, cinfo->num_clocks,
				  sizeof(*cinfo->clk), GFP_KERNEL);
	if (!cinfo->clk)
		return -ENOMEM;

	for (clkid = 0; clkid < cinfo->num_clocks; clkid++) {
		struct scmi_clock_info *clk = cinfo->clk + clkid;

		ret = scmi_clock_attributes_get(ph, clkid, clk);
		if (!ret)
			scmi_clock_describe_rates_get(ph, clkid, clk);
	}

	cinfo->version = version;
	return ph->set_priv(ph, cinfo);
}

static const struct scmi_protocol scmi_clock = {
	.id = SCMI_PROTOCOL_CLOCK,
	.owner = THIS_MODULE,
	.instance_init = &scmi_clock_protocol_init,
	.ops = &clk_proto_ops,
};

DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(clock, scmi_clock)
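/*
 * Usage sketch (illustrative only, not part of this driver): a consumer
 * such as drivers/clk/clk-scmi.c binds to SCMI_PROTOCOL_CLOCK and obtains
 * the operations above through the SCMI core, along the lines of:
 *
 *	static int scmi_clocks_probe(struct scmi_device *sdev)
 *	{
 *		struct scmi_protocol_handle *ph;
 *		const struct scmi_clk_proto_ops *ops;
 *
 *		ops = sdev->handle->devm_protocol_get(sdev,
 *						      SCMI_PROTOCOL_CLOCK,
 *						      &ph);
 *		if (IS_ERR(ops))
 *			return PTR_ERR(ops);
 *		...
 *	}
 *
 * after which ops->count_get(ph), ops->info_get(ph, clk_id) and friends
 * drive the protocol implementation in this file.
 */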