/*
 * Copyright (C) ST-Ericsson AB 2010
 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
 * License terms: GNU General Public License (GPL) version 2
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__

#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfcnfg.h>
#include <net/caif/cfctrl.h>
#include <net/caif/cfmuxl.h>
#include <net/caif/cffrml.h>
#include <net/caif/cfserl.h>
#include <net/caif/cfsrvl.h>

#include <linux/module.h>
#include <asm/atomic.h>

#define MAX_PHY_LAYERS 7

#define container_obj(layr) container_of(layr, struct cfcnfg, layer)

/* Information about CAIF physical interfaces held by Config Module in order
 * to manage physical interfaces
 */
struct cfcnfg_phyinfo {
	/* Pointer to the layer below the MUX (framing layer) */
	struct cflayer *frm_layer;
	/* Pointer to the lowest actual physical layer */
	struct cflayer *phy_layer;
	/* Unique identifier of the physical interface */
	unsigned int id;
	/* Preference of the physical interface */
	enum cfcnfg_phy_preference pref;

	/* Reference count, number of channels using the device */
	int phy_ref_count;

	/* Information about the physical device */
	struct dev_info dev_info;

	/* Interface index */
	int ifindex;

	/* Use Start of frame extension */
	bool use_stx;

	/* Use Start of frame checksum */
	bool use_fcs;
};

struct cfcnfg {
	struct cflayer layer;
	struct cflayer *ctrl;
	struct cflayer *mux;
	u8 last_phyid;
	struct cfcnfg_phyinfo phy_layers[MAX_PHY_LAYERS];
};

static void cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id,
			      enum cfctrl_srv serv, u8 phyid,
			      struct cflayer *adapt_layer);
static void cfcnfg_linkdestroy_rsp(struct cflayer *layer, u8 channel_id);
static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id,
			      struct cflayer *adapt_layer);
static void cfctrl_resp_func(void);
static void cfctrl_enum_resp(void);

struct cfcnfg *cfcnfg_create(void)
{
	struct cfcnfg *this;
	struct cfctrl_rsp *resp;
	/* Initiate this layer */
	this = kzalloc(sizeof(struct cfcnfg), GFP_ATOMIC);
	if (!this) {
		pr_warn("Out of memory\n");
		return NULL;
	}
	this->mux = cfmuxl_create();
	if (!this->mux)
		goto out_of_mem;
	this->ctrl = cfctrl_create();
	if (!this->ctrl)
		goto out_of_mem;
	/* Initiate response functions */
	resp = cfctrl_get_respfuncs(this->ctrl);
	resp->enum_rsp = cfctrl_enum_resp;
	resp->linkerror_ind = cfctrl_resp_func;
	resp->linkdestroy_rsp = cfcnfg_linkdestroy_rsp;
	resp->sleep_rsp = cfctrl_resp_func;
	resp->wake_rsp = cfctrl_resp_func;
	resp->restart_rsp = cfctrl_resp_func;
	resp->radioset_rsp = cfctrl_resp_func;
	resp->linksetup_rsp = cfcnfg_linkup_rsp;
	resp->reject_rsp = cfcnfg_reject_rsp;

	this->last_phyid = 1;

	cfmuxl_set_uplayer(this->mux, this->ctrl, 0);
	layer_set_dn(this->ctrl, this->mux);
	layer_set_up(this->ctrl, this);
	return this;
out_of_mem:
	pr_warn("Out of memory\n");
	kfree(this->mux);
	kfree(this->ctrl);
	kfree(this);
	return NULL;
}
EXPORT_SYMBOL(cfcnfg_create);

void cfcnfg_remove(struct cfcnfg *cfg)
{
	if (cfg) {
		kfree(cfg->mux);
		kfree(cfg->ctrl);
		kfree(cfg);
	}
}
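/*
 * Stub response handlers: the control-plane responses and indications
 * routed here (enum, link-error, sleep, wake, restart, radio-set) need
 * no action in the configuration layer.
 */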
static void cfctrl_resp_func(void)
{
}

static void cfctrl_enum_resp(void)
{
}

struct dev_info *cfcnfg_get_phyid(struct cfcnfg *cnfg,
				  enum cfcnfg_phy_preference phy_pref)
{
	u16 i;

	/* Try to match with specified preference */
	for (i = 1; i < MAX_PHY_LAYERS; i++) {
		if (cnfg->phy_layers[i].id == i &&
		    cnfg->phy_layers[i].pref == phy_pref &&
		    cnfg->phy_layers[i].frm_layer != NULL) {
			caif_assert(cnfg->phy_layers != NULL);
			caif_assert(cnfg->phy_layers[i].id == i);
			return &cnfg->phy_layers[i].dev_info;
		}
	}
	/* Otherwise just return something */
	for (i = 1; i < MAX_PHY_LAYERS; i++) {
		if (cnfg->phy_layers[i].id == i) {
			caif_assert(cnfg->phy_layers != NULL);
			caif_assert(cnfg->phy_layers[i].id == i);
			return &cnfg->phy_layers[i].dev_info;
		}
	}

	return NULL;
}

static struct cfcnfg_phyinfo *cfcnfg_get_phyinfo(struct cfcnfg *cnfg,
						 u8 phyid)
{
	int i;
	/* Match on physical interface id */
	for (i = 0; i < MAX_PHY_LAYERS; i++)
		if (cnfg->phy_layers[i].frm_layer != NULL &&
		    cnfg->phy_layers[i].id == phyid)
			return &cnfg->phy_layers[i];
	return NULL;
}

int cfcnfg_get_id_from_ifi(struct cfcnfg *cnfg, int ifi)
{
	int i;
	for (i = 0; i < MAX_PHY_LAYERS; i++)
		if (cnfg->phy_layers[i].frm_layer != NULL &&
		    cnfg->phy_layers[i].ifindex == ifi)
			return i;
	return -ENODEV;
}

int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer)
{
	u8 channel_id = 0;
	int ret = 0;
	struct cflayer *servl = NULL;
	struct cfcnfg_phyinfo *phyinfo = NULL;
	u8 phyid = 0;

	caif_assert(adap_layer != NULL);
	channel_id = adap_layer->id;
	if (adap_layer->dn == NULL || channel_id == 0) {
		pr_err("adap_layer->dn == NULL or adap_layer->id is 0\n");
		ret = -ENOTCONN;
		goto end;
	}
	servl = cfmuxl_remove_uplayer(cnfg->mux, channel_id);
	if (servl == NULL) {
		pr_err("PROTOCOL ERROR - Error removing service_layer Channel_Id(%d)",
		       channel_id);
		ret = -EINVAL;
		goto end;
	}
	layer_set_up(servl, NULL);
	ret = cfctrl_linkdown_req(cnfg->ctrl, channel_id, adap_layer);
	if (ret)
		goto end;
	caif_assert(channel_id == servl->id);
	if (adap_layer->dn != NULL) {
		phyid = cfsrvl_getphyid(adap_layer->dn);

		phyinfo = cfcnfg_get_phyinfo(cnfg, phyid);
		if (phyinfo == NULL) {
			pr_warn("No interface to send disconnect to\n");
			ret = -ENODEV;
			goto end;
		}
		if (phyinfo->id != phyid ||
		    phyinfo->phy_layer->id != phyid ||
		    phyinfo->frm_layer->id != phyid) {
			pr_err("Inconsistency in phy registration\n");
			ret = -EINVAL;
			goto end;
		}
	}
	if (phyinfo != NULL && --phyinfo->phy_ref_count == 0 &&
	    phyinfo->phy_layer != NULL &&
	    phyinfo->phy_layer->modemcmd != NULL) {
		phyinfo->phy_layer->modemcmd(phyinfo->phy_layer,
					     _CAIF_MODEMCMD_PHYIF_USELESS);
	}
end:
	cfsrvl_put(servl);
	cfctrl_cancel_req(cnfg->ctrl, adap_layer);
	if (adap_layer->ctrlcmd != NULL)
		adap_layer->ctrlcmd(adap_layer, CAIF_CTRLCMD_DEINIT_RSP, 0);
	return ret;
}
EXPORT_SYMBOL(cfcnfg_disconn_adapt_layer);

void cfcnfg_release_adap_layer(struct cflayer *adap_layer)
{
	if (adap_layer->dn)
		cfsrvl_put(adap_layer->dn);
}
EXPORT_SYMBOL(cfcnfg_release_adap_layer);

static void cfcnfg_linkdestroy_rsp(struct cflayer *layer, u8 channel_id)
{
}
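/*
 * Per-service-type CAIF protocol header length (bytes). Used when a link
 * is set up to report how much protocol head room the adaptation layer
 * must reserve (plus one byte if the start-of-frame extension is in use).
 */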
int protohead[CFCTRL_SRV_MASK] = {
	[CFCTRL_SRV_VEI] = 4,
	[CFCTRL_SRV_DATAGRAM] = 7,
	[CFCTRL_SRV_UTIL] = 4,
	[CFCTRL_SRV_RFM] = 3,
	[CFCTRL_SRV_DBG] = 3,
};

int cfcnfg_add_adaptation_layer(struct cfcnfg *cnfg,
				struct cfctrl_link_param *param,
				struct cflayer *adap_layer,
				int *ifindex,
				int *proto_head,
				int *proto_tail)
{
	struct cflayer *frml;
	if (adap_layer == NULL) {
		pr_err("adap_layer is NULL\n");
		return -EINVAL;
	}
	if (adap_layer->receive == NULL) {
		pr_err("adap_layer->receive is NULL\n");
		return -EINVAL;
	}
	if (adap_layer->ctrlcmd == NULL) {
		pr_err("adap_layer->ctrlcmd == NULL\n");
		return -EINVAL;
	}
	frml = cnfg->phy_layers[param->phyid].frm_layer;
	if (frml == NULL) {
		pr_err("Specified PHY type does not exist!\n");
		return -ENODEV;
	}
	caif_assert(param->phyid == cnfg->phy_layers[param->phyid].id);
	caif_assert(cnfg->phy_layers[param->phyid].frm_layer->id ==
		    param->phyid);
	caif_assert(cnfg->phy_layers[param->phyid].phy_layer->id ==
		    param->phyid);

	*ifindex = cnfg->phy_layers[param->phyid].ifindex;
	*proto_head =
		protohead[param->linktype] +
		(cnfg->phy_layers[param->phyid].use_stx ? 1 : 0);

	*proto_tail = 2;

	/* FIXME: ENUMERATE INITIALLY WHEN ACTIVATING PHYSICAL INTERFACE */
	cfctrl_enum_req(cnfg->ctrl, param->phyid);
	return cfctrl_linkup_request(cnfg->ctrl, param, adap_layer);
}
EXPORT_SYMBOL(cfcnfg_add_adaptation_layer);

static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id,
			      struct cflayer *adapt_layer)
{
	if (adapt_layer != NULL && adapt_layer->ctrlcmd != NULL)
		adapt_layer->ctrlcmd(adapt_layer,
				     CAIF_CTRLCMD_INIT_FAIL_RSP, 0);
}

static void
cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
		  u8 phyid, struct cflayer *adapt_layer)
{
	struct cfcnfg *cnfg = container_obj(layer);
	struct cflayer *servicel = NULL;
	struct cfcnfg_phyinfo *phyinfo;
	struct net_device *netdev;

	if (adapt_layer == NULL) {
		pr_debug("link setup response but no client exists, sending linkdown back\n");
		cfctrl_linkdown_req(cnfg->ctrl, channel_id, NULL);
		return;
	}

	caif_assert(cnfg != NULL);
	caif_assert(phyid != 0);
	phyinfo = &cnfg->phy_layers[phyid];
	caif_assert(phyinfo->id == phyid);
	caif_assert(phyinfo->phy_layer != NULL);
	caif_assert(phyinfo->phy_layer->id == phyid);

	phyinfo->phy_ref_count++;
	if (phyinfo->phy_ref_count == 1 &&
	    phyinfo->phy_layer->modemcmd != NULL) {
		phyinfo->phy_layer->modemcmd(phyinfo->phy_layer,
					     _CAIF_MODEMCMD_PHYIF_USEFULL);
	}
	adapt_layer->id = channel_id;

	/* Create the service layer matching the requested channel type */
	switch (serv) {
	case CFCTRL_SRV_VEI:
		servicel = cfvei_create(channel_id, &phyinfo->dev_info);
		break;
	case CFCTRL_SRV_DATAGRAM:
		servicel = cfdgml_create(channel_id, &phyinfo->dev_info);
		break;
	case CFCTRL_SRV_RFM:
		netdev = phyinfo->dev_info.dev;
		servicel = cfrfml_create(channel_id, &phyinfo->dev_info,
					 netdev->mtu);
		break;
	case CFCTRL_SRV_UTIL:
		servicel = cfutill_create(channel_id, &phyinfo->dev_info);
		break;
	case CFCTRL_SRV_VIDEO:
		servicel = cfvidl_create(channel_id, &phyinfo->dev_info);
		break;
	case CFCTRL_SRV_DBG:
		servicel = cfdbgl_create(channel_id, &phyinfo->dev_info);
		break;
	default:
		pr_err("Protocol error. Link setup response - unknown channel type\n");
		return;
	}
	if (!servicel) {
		pr_warn("Out of memory\n");
		return;
	}
	layer_set_dn(servicel, cnfg->mux);
	cfmuxl_set_uplayer(cnfg->mux, servicel, channel_id);
	layer_set_up(servicel, adapt_layer);
	layer_set_dn(adapt_layer, servicel);
	cfsrvl_get(servicel);
	servicel->ctrlcmd(servicel, CAIF_CTRLCMD_INIT_RSP, 0);
}

void
cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
		     struct net_device *dev, struct cflayer *phy_layer,
		     u16 *phyid, enum cfcnfg_phy_preference pref,
		     bool fcs, bool stx)
{
	struct cflayer *frml;
	struct cflayer *phy_driver = NULL;
	int i;

	/* Allocate a free physical id, preferring the last hinted slot */
	if (cnfg->phy_layers[cnfg->last_phyid].frm_layer == NULL) {
		*phyid = cnfg->last_phyid;

		/* range: 1..(MAX_PHY_LAYERS-1) */
		cnfg->last_phyid =
			(cnfg->last_phyid % (MAX_PHY_LAYERS - 1)) + 1;
	} else {
		*phyid = 0;
		for (i = 1; i < MAX_PHY_LAYERS; i++) {
			if (cnfg->phy_layers[i].frm_layer == NULL) {
				*phyid = i;
				break;
			}
		}
	}
	if (*phyid == 0) {
		pr_err("No available PHY ID\n");
		return;
	}

	switch (phy_type) {
	case CFPHYTYPE_FRAG:
		phy_driver =
			cfserl_create(CFPHYTYPE_FRAG, *phyid, stx);
		if (!phy_driver) {
			pr_warn("Out of memory\n");
			return;
		}
		break;
	case CFPHYTYPE_CAIF:
		phy_driver = NULL;
		break;
	default:
		pr_err("unknown PHY type: %d\n", phy_type);
		return;
	}

	phy_layer->id = *phyid;
	cnfg->phy_layers[*phyid].pref = pref;
	cnfg->phy_layers[*phyid].id = *phyid;
	cnfg->phy_layers[*phyid].dev_info.id = *phyid;
	cnfg->phy_layers[*phyid].dev_info.dev = dev;
	cnfg->phy_layers[*phyid].phy_layer = phy_layer;
	cnfg->phy_layers[*phyid].phy_ref_count = 0;
	cnfg->phy_layers[*phyid].ifindex = dev->ifindex;
	cnfg->phy_layers[*phyid].use_stx = stx;
	cnfg->phy_layers[*phyid].use_fcs = fcs;

	phy_layer->type = phy_type;
	frml = cffrml_create(*phyid, fcs);
	if (!frml) {
		pr_warn("Out of memory\n");
		return;
	}
	cnfg->phy_layers[*phyid].frm_layer = frml;
	cfmuxl_set_dnlayer(cnfg->mux, frml, *phyid);
	layer_set_up(frml, cnfg->mux);

	if (phy_driver != NULL) {
		phy_driver->id = *phyid;
		layer_set_dn(frml, phy_driver);
		layer_set_up(phy_driver, frml);
		layer_set_dn(phy_driver, phy_layer);
		layer_set_up(phy_layer, phy_driver);
	} else {
		layer_set_dn(frml, phy_layer);
		layer_set_up(phy_layer, frml);
	}
}
EXPORT_SYMBOL(cfcnfg_add_phy_layer);

int cfcnfg_del_phy_layer(struct cfcnfg *cnfg, struct cflayer *phy_layer)
{
	struct cflayer *frml, *frml_dn;
	u16 phyid;
	phyid = phy_layer->id;
	caif_assert(phyid == cnfg->phy_layers[phyid].id);
	caif_assert(phy_layer == cnfg->phy_layers[phyid].phy_layer);
	caif_assert(phy_layer->id == phyid);
	caif_assert(cnfg->phy_layers[phyid].frm_layer->id == phyid);

	memset(&cnfg->phy_layers[phy_layer->id], 0,
	       sizeof(struct cfcnfg_phyinfo));
	frml = cfmuxl_remove_dnlayer(cnfg->mux, phy_layer->id);
	frml_dn = frml->dn;
	cffrml_set_uplayer(frml, NULL);
	cffrml_set_dnlayer(frml, NULL);
	kfree(frml);

	if (phy_layer != frml_dn) {
		layer_set_up(frml_dn, NULL);
		layer_set_dn(frml_dn, NULL);
		kfree(frml_dn);
	}
	layer_set_up(phy_layer, NULL);
	return 0;
}
EXPORT_SYMBOL(cfcnfg_del_phy_layer);