Lines Matching refs:pp
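
The identifier being traced, pp, is the per-open-file client state used by the Apple SMU driver's userland interface (struct smu_private in drivers/macintosh/smu.c); every match below reads or writes one of its fields. A rough sketch of the structure those accesses imply (the exact field types and ordering are assumptions inferred from usage, not copied from the driver source):

struct smu_private {
	struct list_head	list;		/* link on the global smu_clist */
	spinlock_t		lock;		/* protects busy and cmd.status */
	wait_queue_head_t	wait;		/* readers/pollers sleep here */
	int			mode;		/* smu_file_commands, smu_file_events, smu_file_closing */
	int			busy;		/* a command is currently in flight */
	struct smu_cmd		cmd;		/* descriptor for that command */
	u8			buffer[SMU_MAX_DATA];	/* shared request/reply buffer */
};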

1085 struct smu_private *pp; in smu_open() local
1088 pp = kzalloc(sizeof(struct smu_private), GFP_KERNEL); in smu_open()
1089 if (!pp) in smu_open()
1091 spin_lock_init(&pp->lock); in smu_open()
1092 pp->mode = smu_file_commands; in smu_open()
1093 init_waitqueue_head(&pp->wait); in smu_open()
1097 list_add(&pp->list, &smu_clist); in smu_open()
1099 file->private_data = pp; in smu_open()
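
Read in order, the smu_open() matches are the standard character-device open sequence: allocate the per-client smu_private, initialise its lock, mode and wait queue, link it onto the global client list, and stash it in file->private_data. A minimal sketch filling in the elided lines (the error return and the lock around smu_clist are assumptions):

static int smu_open(struct inode *inode, struct file *file)
{
	struct smu_private *pp;
	unsigned long flags;

	pp = kzalloc(sizeof(struct smu_private), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;			/* assumed error path */
	spin_lock_init(&pp->lock);
	pp->mode = smu_file_commands;		/* new clients start in command mode */
	init_waitqueue_head(&pp->wait);

	spin_lock_irqsave(&smu_clist_lock, flags);	/* assumed list lock */
	list_add(&pp->list, &smu_clist);
	spin_unlock_irqrestore(&smu_clist_lock, flags);

	file->private_data = pp;
	return 0;
}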
1108 struct smu_private *pp = misc; in smu_user_cmd_done() local
1110 wake_up_all(&pp->wait); in smu_user_cmd_done()
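
smu_user_cmd_done() is the command-completion callback: the smu_private pointer comes back through the command's misc field (set at line 1164 below), and the callback just wakes anyone sleeping on pp->wait. A sketch, assuming the driver's usual completion signature:

static void smu_user_cmd_done(struct smu_cmd *cmd, void *misc)
{
	struct smu_private *pp = misc;	/* stored in pp->cmd.misc by smu_write() */

	wake_up_all(&pp->wait);		/* unblock smu_read_command() / smu_fpoll() */
}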
1117 struct smu_private *pp = file->private_data; in smu_write() local
1122 if (pp->busy) in smu_write()
1127 pp->mode = smu_file_events; in smu_write()
1139 else if (pp->mode != smu_file_commands) in smu_write()
1144 spin_lock_irqsave(&pp->lock, flags); in smu_write()
1145 if (pp->busy) { in smu_write()
1146 spin_unlock_irqrestore(&pp->lock, flags); in smu_write()
1149 pp->busy = 1; in smu_write()
1150 pp->cmd.status = 1; in smu_write()
1151 spin_unlock_irqrestore(&pp->lock, flags); in smu_write()
1153 if (copy_from_user(pp->buffer, buf + sizeof(hdr), hdr.data_len)) { in smu_write()
1154 pp->busy = 0; in smu_write()
1158 pp->cmd.cmd = hdr.cmd; in smu_write()
1159 pp->cmd.data_len = hdr.data_len; in smu_write()
1160 pp->cmd.reply_len = SMU_MAX_DATA; in smu_write()
1161 pp->cmd.data_buf = pp->buffer; in smu_write()
1162 pp->cmd.reply_buf = pp->buffer; in smu_write()
1163 pp->cmd.done = smu_user_cmd_done; in smu_write()
1164 pp->cmd.misc = pp; in smu_write()
1165 rc = smu_queue_cmd(&pp->cmd); in smu_write()
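
Together, the smu_write() matches form the submit path: reject a write while a command is already in flight, optionally switch the file into event mode, then, under pp->lock, mark the client busy with cmd.status == 1 (pending), copy the payload into pp->buffer, point the smu_cmd descriptor at that same buffer for the reply, and queue it. A condensed sketch; the user header struct name, the error codes and the skipped special command types are assumptions:

static ssize_t smu_write(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	struct smu_private *pp = file->private_data;
	struct smu_user_cmd_hdr hdr;		/* assumed header carrying cmd + data_len */
	unsigned long flags;
	int rc;

	if (pp->busy)
		return -EBUSY;
	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;
	/* the real function also handles a "switch to events" header here,
	 * which is where pp->mode = smu_file_events (line 1127) comes from */
	if (pp->mode != smu_file_commands || hdr.data_len > SMU_MAX_DATA)
		return -EINVAL;

	spin_lock_irqsave(&pp->lock, flags);
	if (pp->busy) {				/* re-check under the lock */
		spin_unlock_irqrestore(&pp->lock, flags);
		return -EBUSY;
	}
	pp->busy = 1;
	pp->cmd.status = 1;			/* 1 == still pending */
	spin_unlock_irqrestore(&pp->lock, flags);

	if (copy_from_user(pp->buffer, buf + sizeof(hdr), hdr.data_len)) {
		pp->busy = 0;
		return -EFAULT;
	}

	pp->cmd.cmd = hdr.cmd;
	pp->cmd.data_len = hdr.data_len;
	pp->cmd.reply_len = SMU_MAX_DATA;	/* reply reuses pp->buffer */
	pp->cmd.data_buf = pp->buffer;
	pp->cmd.reply_buf = pp->buffer;
	pp->cmd.done = smu_user_cmd_done;	/* wakes pp->wait when finished */
	pp->cmd.misc = pp;
	rc = smu_queue_cmd(&pp->cmd);
	return rc < 0 ? rc : count;
}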
1172 static ssize_t smu_read_command(struct file *file, struct smu_private *pp, in smu_read_command() argument
1180 if (!pp->busy) in smu_read_command()
1184 spin_lock_irqsave(&pp->lock, flags); in smu_read_command()
1185 if (pp->cmd.status == 1) { in smu_read_command()
1187 spin_unlock_irqrestore(&pp->lock, flags); in smu_read_command()
1190 add_wait_queue(&pp->wait, &wait); in smu_read_command()
1194 if (pp->cmd.status != 1) in smu_read_command()
1199 spin_unlock_irqrestore(&pp->lock, flags); in smu_read_command()
1201 spin_lock_irqsave(&pp->lock, flags); in smu_read_command()
1204 remove_wait_queue(&pp->wait, &wait); in smu_read_command()
1206 spin_unlock_irqrestore(&pp->lock, flags); in smu_read_command()
1209 if (pp->cmd.status != 0) in smu_read_command()
1210 pp->cmd.reply_len = 0; in smu_read_command()
1211 size = sizeof(hdr) + pp->cmd.reply_len; in smu_read_command()
1215 hdr.status = pp->cmd.status; in smu_read_command()
1216 hdr.reply_len = pp->cmd.reply_len; in smu_read_command()
1220 if (size && copy_to_user(buf + sizeof(hdr), pp->buffer, size)) in smu_read_command()
1222 pp->busy = 0; in smu_read_command()
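
smu_read_command() is the matching receive path: nothing to read if no command was submitted, a sleep on pp->wait while cmd.status is still 1, then a reply header plus the reply bytes from pp->buffer are copied out and pp->busy is cleared so the next write can go through. A sketch of the core wait-and-copy sequence (function-body excerpt; O_NONBLOCK, signal handling and count clamping are left out, and the reply header struct name is an assumption):

	DECLARE_WAITQUEUE(wait, current);
	struct smu_user_reply_hdr hdr;		/* assumed reply header: status + reply_len */
	unsigned long flags;
	size_t size;

	if (!pp->busy)			/* no command ever submitted on this fd */
		return 0;

	spin_lock_irqsave(&pp->lock, flags);
	if (pp->cmd.status == 1) {	/* still pending: wait for the done callback */
		add_wait_queue(&pp->wait, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (pp->cmd.status != 1)
				break;
			/* the real code also bails out on signal_pending() here */
			spin_unlock_irqrestore(&pp->lock, flags);
			schedule();
			spin_lock_irqsave(&pp->lock, flags);
		}
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&pp->wait, &wait);
	}
	spin_unlock_irqrestore(&pp->lock, flags);

	if (pp->cmd.status != 0)	/* command failed: suppress the payload */
		pp->cmd.reply_len = 0;
	size = sizeof(hdr) + pp->cmd.reply_len;
	hdr.status = pp->cmd.status;
	hdr.reply_len = pp->cmd.reply_len;
	if (copy_to_user(buf, &hdr, sizeof(hdr)))
		return -EFAULT;
	size -= sizeof(hdr);
	if (size && copy_to_user(buf + sizeof(hdr), pp->buffer, size))
		return -EFAULT;
	pp->busy = 0;			/* ready for the next smu_write() */
	return sizeof(hdr) + size;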
1228 static ssize_t smu_read_events(struct file *file, struct smu_private *pp, in smu_read_events() argument
1240 struct smu_private *pp = file->private_data; in smu_read() local
1242 if (pp->mode == smu_file_commands) in smu_read()
1243 return smu_read_command(file, pp, buf, count); in smu_read()
1244 if (pp->mode == smu_file_events) in smu_read()
1245 return smu_read_events(file, pp, buf, count); in smu_read()
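
smu_read() itself just dispatches on the file's mode: command-mode clients go to smu_read_command(), event-mode clients to smu_read_events() (which, going by the single match at line 1228, takes the same arguments and is essentially a stub here). A sketch, with the fallback error code assumed:

static ssize_t smu_read(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	struct smu_private *pp = file->private_data;

	if (pp->mode == smu_file_commands)
		return smu_read_command(file, pp, buf, count);
	if (pp->mode == smu_file_events)
		return smu_read_events(file, pp, buf, count);
	return -EBADFD;			/* assumed: file is closing / invalid mode */
}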
1252 struct smu_private *pp = file->private_data; in smu_fpoll() local
1256 if (!pp) in smu_fpoll()
1259 if (pp->mode == smu_file_commands) { in smu_fpoll()
1260 poll_wait(file, &pp->wait, wait); in smu_fpoll()
1262 spin_lock_irqsave(&pp->lock, flags); in smu_fpoll()
1263 if (pp->busy && pp->cmd.status != 1) in smu_fpoll()
1265 spin_unlock_irqrestore(&pp->lock, flags); in smu_fpoll()
1267 if (pp->mode == smu_file_events) { in smu_fpoll()
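
For poll(), a command-mode client becomes readable once its in-flight command has completed, that is, pp->busy is set but pp->cmd.status has left the pending state; the check is made under pp->lock after registering on pp->wait. The event-mode branch has no further matches, so it apparently reports nothing. A sketch (the return type and mask bits are assumptions that vary between kernel versions):

static __poll_t smu_fpoll(struct file *file, poll_table *wait)
{
	struct smu_private *pp = file->private_data;
	__poll_t mask = 0;
	unsigned long flags;

	if (!pp)
		return 0;

	if (pp->mode == smu_file_commands) {
		poll_wait(file, &pp->wait, wait);

		spin_lock_irqsave(&pp->lock, flags);
		if (pp->busy && pp->cmd.status != 1)
			mask |= EPOLLIN;	/* a reply is ready for smu_read() */
		spin_unlock_irqrestore(&pp->lock, flags);
	}
	/* pp->mode == smu_file_events: nothing readable to report */
	return mask;
}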
1275 struct smu_private *pp = file->private_data; in smu_release() local
1279 if (!pp) in smu_release()
1285 spin_lock_irqsave(&pp->lock, flags); in smu_release()
1286 pp->mode = smu_file_closing; in smu_release()
1287 busy = pp->busy; in smu_release()
1290 if (busy && pp->cmd.status == 1) { in smu_release()
1293 add_wait_queue(&pp->wait, &wait); in smu_release()
1296 if (pp->cmd.status != 1) in smu_release()
1298 spin_unlock_irqrestore(&pp->lock, flags); in smu_release()
1300 spin_lock_irqsave(&pp->lock, flags); in smu_release()
1303 remove_wait_queue(&pp->wait, &wait); in smu_release()
1305 spin_unlock_irqrestore(&pp->lock, flags); in smu_release()
1308 list_del(&pp->list); in smu_release()
1310 kfree(pp); in smu_release()
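
Finally, smu_release() tears the client down without racing the hardware: the mode flips to smu_file_closing under pp->lock so no new command can be queued, and if one is still pending (busy with cmd.status == 1) the code sleeps on pp->wait until the completion callback fires, before unlinking pp from smu_clist and freeing it. A sketch of that shutdown wait (function-body excerpt; the uninterruptible sleep and the lock around list_del() are assumptions):

	spin_lock_irqsave(&pp->lock, flags);
	pp->mode = smu_file_closing;		/* refuse any new command */
	busy = pp->busy;

	if (busy && pp->cmd.status == 1) {	/* a command is still in flight */
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue(&pp->wait, &wait);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (pp->cmd.status != 1)
				break;		/* completion callback has run */
			spin_unlock_irqrestore(&pp->lock, flags);
			schedule();
			spin_lock_irqsave(&pp->lock, flags);
		}
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&pp->wait, &wait);
	}
	spin_unlock_irqrestore(&pp->lock, flags);

	spin_lock_irqsave(&smu_clist_lock, flags);	/* assumed list lock */
	list_del(&pp->list);
	spin_unlock_irqrestore(&smu_clist_lock, flags);
	kfree(pp);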