/*
 * Copyright (C) ST-Ericsson AB 2010
 * Author:	Sjur Brendeland/sjur.brandeland@stericsson.com
 * License terms: GNU General Public License (GPL) version 2
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__

#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfcnfg.h>
#include <net/caif/cfctrl.h>
#include <net/caif/cfmuxl.h>
#include <net/caif/cffrml.h>
#include <net/caif/cfserl.h>
#include <net/caif/cfsrvl.h>

#include <linux/module.h>
#include <asm/atomic.h>

#define MAX_PHY_LAYERS 7
#define PHY_NAME_LEN 20

#define container_obj(layr) container_of(layr, struct cfcnfg, layer)
#define RFM_FRAGMENT_SIZE 4030

/* Information about CAIF physical interfaces held by Config Module in order
 * to manage physical interfaces
 */
struct cfcnfg_phyinfo {
	/* Pointer to the layer below the MUX (framing layer) */
	struct cflayer *frm_layer;
	/* Pointer to the lowest actual physical layer */
	struct cflayer *phy_layer;
	/* Unique identifier of the physical interface */
	unsigned int id;
	/* Preference of the physical interface */
	enum cfcnfg_phy_preference pref;

	/* Reference count, number of channels using the device */
	int phy_ref_count;

	/* Information about the physical device */
	struct dev_info dev_info;

	/* Interface index */
	int ifindex;

	/* Use Start of frame extension */
	bool use_stx;

	/* Use Start of frame checksum */
	bool use_fcs;
};

struct cfcnfg {
	struct cflayer layer;
	struct cflayer *ctrl;
	struct cflayer *mux;
	u8 last_phyid;
	struct cfcnfg_phyinfo phy_layers[MAX_PHY_LAYERS];
};

static void cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id,
			      enum cfctrl_srv serv, u8 phyid,
			      struct cflayer *adapt_layer);
static void cfcnfg_linkdestroy_rsp(struct cflayer *layer, u8 channel_id);
static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id,
			      struct cflayer *adapt_layer);
static void cfctrl_resp_func(void);
static void cfctrl_enum_resp(void);

struct cfcnfg *cfcnfg_create(void)
{
	struct cfcnfg *this;
	struct cfctrl_rsp *resp;

	/* Initialize this layer */
	this = kzalloc(sizeof(struct cfcnfg), GFP_ATOMIC);
	if (!this) {
		pr_warn("Out of memory\n");
		return NULL;
	}
	this->mux = cfmuxl_create();
	if (!this->mux)
		goto out_of_mem;
	this->ctrl = cfctrl_create();
	if (!this->ctrl)
		goto out_of_mem;

	/* Initialize response functions */
	resp = cfctrl_get_respfuncs(this->ctrl);
	resp->enum_rsp = cfctrl_enum_resp;
	resp->linkerror_ind = cfctrl_resp_func;
	resp->linkdestroy_rsp = cfcnfg_linkdestroy_rsp;
	resp->sleep_rsp = cfctrl_resp_func;
	resp->wake_rsp = cfctrl_resp_func;
	resp->restart_rsp = cfctrl_resp_func;
	resp->radioset_rsp = cfctrl_resp_func;
	resp->linksetup_rsp = cfcnfg_linkup_rsp;
	resp->reject_rsp = cfcnfg_reject_rsp;

	this->last_phyid = 1;

	cfmuxl_set_uplayer(this->mux, this->ctrl, 0);
	layer_set_dn(this->ctrl, this->mux);
	layer_set_up(this->ctrl, this);
	return this;

out_of_mem:
	pr_warn("Out of memory\n");
	kfree(this->mux);
	kfree(this->ctrl);
	kfree(this);
	return NULL;
}
EXPORT_SYMBOL(cfcnfg_create);

void cfcnfg_remove(struct cfcnfg *cfg)
{
	if (cfg) {
		kfree(cfg->mux);
		kfree(cfg->ctrl);
		kfree(cfg);
	}
}

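/*
 * No-op handlers for the cfctrl responses this configuration layer does
 * not need to act on (enumerate, link-error, sleep, wake, restart and
 * radio-set responses are simply ignored here).
 */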
static void cfctrl_resp_func(void)
{
}

static void cfctrl_enum_resp(void)
{
}

struct dev_info *cfcnfg_get_phyid(struct cfcnfg *cnfg,
				  enum cfcnfg_phy_preference phy_pref)
{
	u16 i;

	/* Try to match with specified preference */
	for (i = 1; i < MAX_PHY_LAYERS; i++) {
		if (cnfg->phy_layers[i].id == i &&
		    cnfg->phy_layers[i].pref == phy_pref &&
		    cnfg->phy_layers[i].frm_layer != NULL) {
			caif_assert(cnfg->phy_layers != NULL);
			caif_assert(cnfg->phy_layers[i].id == i);
			return &cnfg->phy_layers[i].dev_info;
		}
	}
	/* Otherwise fall back to any registered interface */
	for (i = 1; i < MAX_PHY_LAYERS; i++) {
		if (cnfg->phy_layers[i].id == i) {
			caif_assert(cnfg->phy_layers != NULL);
			caif_assert(cnfg->phy_layers[i].id == i);
			return &cnfg->phy_layers[i].dev_info;
		}
	}

	return NULL;
}

static struct cfcnfg_phyinfo *cfcnfg_get_phyinfo(struct cfcnfg *cnfg,
						 u8 phyid)
{
	int i;

	/* Look up the interface with the given physical id */
	for (i = 0; i < MAX_PHY_LAYERS; i++)
		if (cnfg->phy_layers[i].frm_layer != NULL &&
		    cnfg->phy_layers[i].id == phyid)
			return &cnfg->phy_layers[i];
	return NULL;
}

int cfcnfg_get_id_from_ifi(struct cfcnfg *cnfg, int ifi)
{
	int i;

	for (i = 0; i < MAX_PHY_LAYERS; i++)
		if (cnfg->phy_layers[i].frm_layer != NULL &&
		    cnfg->phy_layers[i].ifindex == ifi)
			return i;
	return -ENODEV;
}

int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer)
{
	u8 channel_id = 0;
	int ret = 0;
	struct cflayer *servl = NULL;
	struct cfcnfg_phyinfo *phyinfo = NULL;
	u8 phyid = 0;

	caif_assert(adap_layer != NULL);
	channel_id = adap_layer->id;
	if (adap_layer->dn == NULL || channel_id == 0) {
		pr_err("adap_layer->dn == NULL or adap_layer->id is 0\n");
		ret = -ENOTCONN;
		goto end;
	}
	servl = cfmuxl_remove_uplayer(cnfg->mux, channel_id);
	if (servl == NULL) {
		pr_err("PROTOCOL ERROR - Error removing service_layer Channel_Id(%d)\n",
		       channel_id);
		ret = -EINVAL;
		goto end;
	}
	layer_set_up(servl, NULL);
	ret = cfctrl_linkdown_req(cnfg->ctrl, channel_id, adap_layer);
	if (ret)
		goto end;
	caif_assert(channel_id == servl->id);
	if (adap_layer->dn != NULL) {
		phyid = cfsrvl_getphyid(adap_layer->dn);

		phyinfo = cfcnfg_get_phyinfo(cnfg, phyid);
		if (phyinfo == NULL) {
			pr_warn("No interface to send disconnect to\n");
			ret = -ENODEV;
			goto end;
		}
		if (phyinfo->id != phyid ||
		    phyinfo->phy_layer->id != phyid ||
		    phyinfo->frm_layer->id != phyid) {
			pr_err("Inconsistency in phy registration\n");
			ret = -EINVAL;
			goto end;
		}
	}
	if (phyinfo != NULL && --phyinfo->phy_ref_count == 0 &&
	    phyinfo->phy_layer != NULL &&
	    phyinfo->phy_layer->modemcmd != NULL) {
		phyinfo->phy_layer->modemcmd(phyinfo->phy_layer,
					     _CAIF_MODEMCMD_PHYIF_USELESS);
	}
end:
	cfsrvl_put(servl);
	cfctrl_cancel_req(cnfg->ctrl, adap_layer);
	if (adap_layer->ctrlcmd != NULL)
		adap_layer->ctrlcmd(adap_layer, CAIF_CTRLCMD_DEINIT_RSP, 0);
	return ret;
}
EXPORT_SYMBOL(cfcnfg_disconn_adapt_layer);

void cfcnfg_release_adap_layer(struct cflayer *adap_layer)
{
	if (adap_layer->dn)
		cfsrvl_put(adap_layer->dn);
}
EXPORT_SYMBOL(cfcnfg_release_adap_layer);

static void cfcnfg_linkdestroy_rsp(struct cflayer *layer, u8 channel_id)
{
}

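/*
 * Protocol header size, in bytes, that each CAIF service type prepends
 * to its payload. cfcnfg_add_adaptation_layer() adds one extra byte when
 * the start-of-frame extension is in use and reports the sum through
 * *proto_head.
 */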
int protohead[CFCTRL_SRV_MASK] = {
	[CFCTRL_SRV_VEI] = 4,
	[CFCTRL_SRV_DATAGRAM] = 7,
	[CFCTRL_SRV_UTIL] = 4,
	[CFCTRL_SRV_RFM] = 3,
	[CFCTRL_SRV_DBG] = 3,
};

int cfcnfg_add_adaptation_layer(struct cfcnfg *cnfg,
				struct cfctrl_link_param *param,
				struct cflayer *adap_layer,
				int *ifindex,
				int *proto_head,
				int *proto_tail)
{
	struct cflayer *frml;

	if (adap_layer == NULL) {
		pr_err("adap_layer is NULL\n");
		return -EINVAL;
	}
	if (adap_layer->receive == NULL) {
		pr_err("adap_layer->receive is NULL\n");
		return -EINVAL;
	}
	if (adap_layer->ctrlcmd == NULL) {
		pr_err("adap_layer->ctrlcmd == NULL\n");
		return -EINVAL;
	}
	frml = cnfg->phy_layers[param->phyid].frm_layer;
	if (frml == NULL) {
		pr_err("Specified PHY does not exist\n");
		return -ENODEV;
	}
	caif_assert(param->phyid == cnfg->phy_layers[param->phyid].id);
	caif_assert(cnfg->phy_layers[param->phyid].frm_layer->id ==
		    param->phyid);
	caif_assert(cnfg->phy_layers[param->phyid].phy_layer->id ==
		    param->phyid);

	*ifindex = cnfg->phy_layers[param->phyid].ifindex;
	*proto_head = protohead[param->linktype] +
		(cnfg->phy_layers[param->phyid].use_stx ? 1 : 0);

	*proto_tail = 2;

	/* FIXME: ENUMERATE INITIALLY WHEN ACTIVATING PHYSICAL INTERFACE */
	cfctrl_enum_req(cnfg->ctrl, param->phyid);
	return cfctrl_linkup_request(cnfg->ctrl, param, adap_layer);
}
EXPORT_SYMBOL(cfcnfg_add_adaptation_layer);

static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id,
			      struct cflayer *adapt_layer)
{
	if (adapt_layer != NULL && adapt_layer->ctrlcmd != NULL)
		adapt_layer->ctrlcmd(adapt_layer,
				     CAIF_CTRLCMD_INIT_FAIL_RSP, 0);
}

static void
cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
		  u8 phyid, struct cflayer *adapt_layer)
{
	struct cfcnfg *cnfg = container_obj(layer);
	struct cflayer *servicel = NULL;
	struct cfcnfg_phyinfo *phyinfo;
	struct net_device *netdev;

	if (adapt_layer == NULL) {
		pr_debug("link setup response but no client exists, send linkdown back\n");
		cfctrl_linkdown_req(cnfg->ctrl, channel_id, NULL);
		return;
	}

	caif_assert(cnfg != NULL);
	caif_assert(phyid != 0);
	phyinfo = &cnfg->phy_layers[phyid];
	caif_assert(phyinfo->id == phyid);
	caif_assert(phyinfo->phy_layer != NULL);
	caif_assert(phyinfo->phy_layer->id == phyid);

	phyinfo->phy_ref_count++;
	if (phyinfo->phy_ref_count == 1 &&
	    phyinfo->phy_layer->modemcmd != NULL) {
		phyinfo->phy_layer->modemcmd(phyinfo->phy_layer,
					     _CAIF_MODEMCMD_PHYIF_USEFULL);
	}
	adapt_layer->id = channel_id;

	switch (serv) {
	case CFCTRL_SRV_VEI:
		servicel = cfvei_create(channel_id, &phyinfo->dev_info);
		break;
	case CFCTRL_SRV_DATAGRAM:
		servicel = cfdgml_create(channel_id, &phyinfo->dev_info);
		break;
	case CFCTRL_SRV_RFM:
		netdev = phyinfo->dev_info.dev;
		servicel = cfrfml_create(channel_id, &phyinfo->dev_info,
					 netdev->mtu);
		break;
	case CFCTRL_SRV_UTIL:
		servicel = cfutill_create(channel_id, &phyinfo->dev_info);
		break;
	case CFCTRL_SRV_VIDEO:
		servicel = cfvidl_create(channel_id, &phyinfo->dev_info);
		break;
	case CFCTRL_SRV_DBG:
		servicel = cfdbgl_create(channel_id, &phyinfo->dev_info);
		break;
	default:
		pr_err("Protocol error. Link setup response - unknown channel type\n");
		return;
	}
	if (!servicel) {
		pr_warn("Out of memory\n");
		return;
	}
	layer_set_dn(servicel, cnfg->mux);
	cfmuxl_set_uplayer(cnfg->mux, servicel, channel_id);
	layer_set_up(servicel, adapt_layer);
	layer_set_dn(adapt_layer, servicel);
	cfsrvl_get(servicel);
	servicel->ctrlcmd(servicel, CAIF_CTRLCMD_INIT_RSP, 0);
}

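/*
 * cfcnfg_add_phy_layer() wires up the per-interface part of the stack.
 * For a fragmenting interface (CFPHYTYPE_FRAG) the layering becomes
 *
 *	mux <-> framing (cffrml) <-> serialization (cfserl) <-> phy_layer
 *
 * while a native CAIF interface (CFPHYTYPE_CAIF) omits the
 * serialization layer:
 *
 *	mux <-> framing (cffrml) <-> phy_layer
 */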
void
cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
		     struct net_device *dev, struct cflayer *phy_layer,
		     u16 *phyid, enum cfcnfg_phy_preference pref,
		     bool fcs, bool stx)
{
	struct cflayer *frml;
	struct cflayer *phy_driver = NULL;
	int i;

	if (cnfg->phy_layers[cnfg->last_phyid].frm_layer == NULL) {
		*phyid = cnfg->last_phyid;

		/* range: 1..(MAX_PHY_LAYERS-1) */
		cnfg->last_phyid =
		    (cnfg->last_phyid % (MAX_PHY_LAYERS - 1)) + 1;
	} else {
		*phyid = 0;
		for (i = 1; i < MAX_PHY_LAYERS; i++) {
			if (cnfg->phy_layers[i].frm_layer == NULL) {
				*phyid = i;
				break;
			}
		}
	}
	if (*phyid == 0) {
		pr_err("No available PHY ID\n");
		return;
	}

	switch (phy_type) {
	case CFPHYTYPE_FRAG:
		phy_driver =
		    cfserl_create(CFPHYTYPE_FRAG, *phyid, stx);
		if (!phy_driver) {
			pr_warn("Out of memory\n");
			return;
		}
		break;
	case CFPHYTYPE_CAIF:
		phy_driver = NULL;
		break;
	default:
		pr_err("Unknown PHY type %d\n", phy_type);
		return;
	}

	phy_layer->id = *phyid;
	cnfg->phy_layers[*phyid].pref = pref;
	cnfg->phy_layers[*phyid].id = *phyid;
	cnfg->phy_layers[*phyid].dev_info.id = *phyid;
	cnfg->phy_layers[*phyid].dev_info.dev = dev;
	cnfg->phy_layers[*phyid].phy_layer = phy_layer;
	cnfg->phy_layers[*phyid].phy_ref_count = 0;
	cnfg->phy_layers[*phyid].ifindex = dev->ifindex;
	cnfg->phy_layers[*phyid].use_stx = stx;
	cnfg->phy_layers[*phyid].use_fcs = fcs;

	phy_layer->type = phy_type;
	frml = cffrml_create(*phyid, fcs);
	if (!frml) {
		pr_warn("Out of memory\n");
		return;
	}
	cnfg->phy_layers[*phyid].frm_layer = frml;
	cfmuxl_set_dnlayer(cnfg->mux, frml, *phyid);
	layer_set_up(frml, cnfg->mux);

	if (phy_driver != NULL) {
		phy_driver->id = *phyid;
		layer_set_dn(frml, phy_driver);
		layer_set_up(phy_driver, frml);
		layer_set_dn(phy_driver, phy_layer);
		layer_set_up(phy_layer, phy_driver);
	} else {
		layer_set_dn(frml, phy_layer);
		layer_set_up(phy_layer, frml);
	}
}
EXPORT_SYMBOL(cfcnfg_add_phy_layer);

int cfcnfg_del_phy_layer(struct cfcnfg *cnfg, struct cflayer *phy_layer)
{
	struct cflayer *frml, *frml_dn;
	u16 phyid;

	phyid = phy_layer->id;
	caif_assert(phyid == cnfg->phy_layers[phyid].id);
	caif_assert(phy_layer == cnfg->phy_layers[phyid].phy_layer);
	caif_assert(phy_layer->id == phyid);
	caif_assert(cnfg->phy_layers[phyid].frm_layer->id == phyid);

	memset(&cnfg->phy_layers[phy_layer->id], 0,
	       sizeof(struct cfcnfg_phyinfo));
	frml = cfmuxl_remove_dnlayer(cnfg->mux, phy_layer->id);
	frml_dn = frml->dn;
	cffrml_set_uplayer(frml, NULL);
	cffrml_set_dnlayer(frml, NULL);
	kfree(frml);

	if (phy_layer != frml_dn) {
		layer_set_up(frml_dn, NULL);
		layer_set_dn(frml_dn, NULL);
		kfree(frml_dn);
	}
	layer_set_up(phy_layer, NULL);
	return 0;
}
EXPORT_SYMBOL(cfcnfg_del_phy_layer);