// SPDX-License-Identifier: GPL-2.0
/* XDP user-space packet buffer
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

#include "xdp_umem.h"
#include "xsk_queue.h"

#define XDP_UMEM_MIN_CHUNK_SIZE 2048

void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
{
	unsigned long flags;

	spin_lock_irqsave(&umem->xsk_list_lock, flags);
	list_add_rcu(&xs->list, &umem->xsk_list);
	spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
}

void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
{
	unsigned long flags;

	if (xs->dev) {
		spin_lock_irqsave(&umem->xsk_list_lock, flags);
		list_del_rcu(&xs->list);
		spin_unlock_irqrestore(&umem->xsk_list_lock, flags);

		if (umem->zc)
			synchronize_net();
	}
}

int xdp_umem_query(struct net_device *dev, u16 queue_id)
{
	struct netdev_bpf bpf;

	ASSERT_RTNL();

	memset(&bpf, 0, sizeof(bpf));
	bpf.command = XDP_QUERY_XSK_UMEM;
	bpf.xsk.queue_id = queue_id;

	if (!dev->netdev_ops->ndo_bpf)
		return 0;
	return dev->netdev_ops->ndo_bpf(dev, &bpf) ?: !!bpf.xsk.umem;
}

int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
			u32 queue_id, u16 flags)
{
	bool force_zc, force_copy;
	struct netdev_bpf bpf;
	int err;

	force_zc = flags & XDP_ZEROCOPY;
	force_copy = flags & XDP_COPY;

	if (force_zc && force_copy)
		return -EINVAL;

	if (force_copy)
		return 0;

	if (!dev->netdev_ops->ndo_bpf || !dev->netdev_ops->ndo_xsk_async_xmit)
		return force_zc ? -EOPNOTSUPP : 0; /* fail or fallback */

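	/* With the rtnl lock held, check whether the driver already has a
	 * umem bound to this queue; an existing umem must not be replaced.
	 */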
	rtnl_lock();
	err = xdp_umem_query(dev, queue_id);
	if (err) {
		err = err < 0 ? -EOPNOTSUPP : -EBUSY;
		goto err_rtnl_unlock;
	}

	bpf.command = XDP_SETUP_XSK_UMEM;
	bpf.xsk.umem = umem;
	bpf.xsk.queue_id = queue_id;

	err = dev->netdev_ops->ndo_bpf(dev, &bpf);
	if (err)
		goto err_rtnl_unlock;
	rtnl_unlock();

	dev_hold(dev);
	umem->dev = dev;
	umem->queue_id = queue_id;
	umem->zc = true;
	return 0;

err_rtnl_unlock:
	rtnl_unlock();
	return force_zc ? err : 0; /* fail or fallback */
}

static void xdp_umem_clear_dev(struct xdp_umem *umem)
{
	struct netdev_bpf bpf;
	int err;

	if (umem->dev) {
		bpf.command = XDP_SETUP_XSK_UMEM;
		bpf.xsk.umem = NULL;
		bpf.xsk.queue_id = umem->queue_id;

		rtnl_lock();
		err = umem->dev->netdev_ops->ndo_bpf(umem->dev, &bpf);
		rtnl_unlock();

		if (err)
			WARN(1, "failed to disable umem!\n");

		dev_put(umem->dev);
		umem->dev = NULL;
	}
}

static void xdp_umem_unpin_pages(struct xdp_umem *umem)
{
	unsigned int i;

	for (i = 0; i < umem->npgs; i++) {
		struct page *page = umem->pgs[i];

		set_page_dirty_lock(page);
		put_page(page);
	}

	kfree(umem->pgs);
	umem->pgs = NULL;
}

static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
{
	if (umem->user) {
		atomic_long_sub(umem->npgs, &umem->user->locked_vm);
		free_uid(umem->user);
	}
}

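/* Tear down a umem: detach it from the device, destroy the fill and
 * completion rings, unpin the user pages and drop the memlock accounting.
 */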
static void xdp_umem_release(struct xdp_umem *umem)
{
	struct task_struct *task;
	struct mm_struct *mm;

	xdp_umem_clear_dev(umem);

	if (umem->fq) {
		xskq_destroy(umem->fq);
		umem->fq = NULL;
	}

	if (umem->cq) {
		xskq_destroy(umem->cq);
		umem->cq = NULL;
	}

	xdp_umem_unpin_pages(umem);

	task = get_pid_task(umem->pid, PIDTYPE_PID);
	put_pid(umem->pid);
	if (!task)
		goto out;
	mm = get_task_mm(task);
	put_task_struct(task);
	if (!mm)
		goto out;

	mmput(mm);
	kfree(umem->pages);
	umem->pages = NULL;

	xdp_umem_unaccount_pages(umem);
out:
	kfree(umem);
}

static void xdp_umem_release_deferred(struct work_struct *work)
{
	struct xdp_umem *umem = container_of(work, struct xdp_umem, work);

	xdp_umem_release(umem);
}

void xdp_get_umem(struct xdp_umem *umem)
{
	refcount_inc(&umem->users);
}

void xdp_put_umem(struct xdp_umem *umem)
{
	if (!umem)
		return;

	if (refcount_dec_and_test(&umem->users)) {
		INIT_WORK(&umem->work, xdp_umem_release_deferred);
		schedule_work(&umem->work);
	}
}

static int xdp_umem_pin_pages(struct xdp_umem *umem)
{
	unsigned int gup_flags = FOLL_WRITE;
	long npgs;
	int err;

	umem->pgs = kcalloc(umem->npgs, sizeof(*umem->pgs),
			    GFP_KERNEL | __GFP_NOWARN);
	if (!umem->pgs)
		return -ENOMEM;

	down_write(&current->mm->mmap_sem);
	npgs = get_user_pages(umem->address, umem->npgs,
			      gup_flags, &umem->pgs[0], NULL);
	up_write(&current->mm->mmap_sem);

	if (npgs != umem->npgs) {
		if (npgs >= 0) {
			umem->npgs = npgs;
			err = -ENOMEM;
			goto out_pin;
		}
		err = npgs;
		goto out_pgs;
	}
	return 0;

out_pin:
	xdp_umem_unpin_pages(umem);
out_pgs:
	kfree(umem->pgs);
	umem->pgs = NULL;
	return err;
}

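/* Charge the pinned pages against the user's RLIMIT_MEMLOCK; callers with
 * CAP_IPC_LOCK are exempt. The cmpxchg loop updates locked_vm without
 * taking a lock.
 */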
static int xdp_umem_account_pages(struct xdp_umem *umem)
{
	unsigned long lock_limit, new_npgs, old_npgs;

	if (capable(CAP_IPC_LOCK))
		return 0;

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	umem->user = get_uid(current_user());

	do {
		old_npgs = atomic_long_read(&umem->user->locked_vm);
		new_npgs = old_npgs + umem->npgs;
		if (new_npgs > lock_limit) {
			free_uid(umem->user);
			umem->user = NULL;
			return -ENOBUFS;
		}
	} while (atomic_long_cmpxchg(&umem->user->locked_vm, old_npgs,
				     new_npgs) != old_npgs);
	return 0;
}

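/* Validate a registration request from user space and set up the umem:
 * check chunk size, alignment and headroom, account and pin the pages, and
 * build the kernel-side page address array.
 */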
static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
{
	u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
	unsigned int chunks, chunks_per_page;
	u64 addr = mr->addr, size = mr->len;
	int size_chk, err, i;

	if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
		/* Strictly speaking we could support this, if:
		 * - huge pages, or
		 * - using an IOMMU, or
		 * - making sure the memory area is consecutive
		 * but for now, we simply say "computer says no".
		 */
		return -EINVAL;
	}

	if (!is_power_of_2(chunk_size))
		return -EINVAL;

	if (!PAGE_ALIGNED(addr)) {
		/* Memory area has to be page size aligned. For
		 * simplicity, this might change.
		 */
		return -EINVAL;
	}

	if ((addr + size) < addr)
		return -EINVAL;

	chunks = (unsigned int)div_u64(size, chunk_size);
	if (chunks == 0)
		return -EINVAL;

	chunks_per_page = PAGE_SIZE / chunk_size;
	if (chunks < chunks_per_page || chunks % chunks_per_page)
		return -EINVAL;

	headroom = ALIGN(headroom, 64);

	size_chk = chunk_size - headroom - XDP_PACKET_HEADROOM;
	if (size_chk < 0)
		return -EINVAL;

	umem->pid = get_task_pid(current, PIDTYPE_PID);
	umem->address = (unsigned long)addr;
	umem->chunk_mask = ~((u64)chunk_size - 1);
	umem->size = size;
	umem->headroom = headroom;
	umem->chunk_size_nohr = chunk_size - headroom;
	umem->npgs = size / PAGE_SIZE;
	umem->pgs = NULL;
	umem->user = NULL;
	INIT_LIST_HEAD(&umem->xsk_list);
	spin_lock_init(&umem->xsk_list_lock);

	refcount_set(&umem->users, 1);

	err = xdp_umem_account_pages(umem);
	if (err)
		goto out;

	err = xdp_umem_pin_pages(umem);
	if (err)
		goto out_account;

	umem->pages = kcalloc(umem->npgs, sizeof(*umem->pages), GFP_KERNEL);
	if (!umem->pages) {
		err = -ENOMEM;
		goto out_account;
	}

	for (i = 0; i < umem->npgs; i++)
		umem->pages[i].addr = page_address(umem->pgs[i]);

	return 0;

out_account:
	xdp_umem_unaccount_pages(umem);
out:
	put_pid(umem->pid);
	return err;
}

struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr)
{
	struct xdp_umem *umem;
	int err;

	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);

	err = xdp_umem_reg(umem, mr);
	if (err) {
		kfree(umem);
		return ERR_PTR(err);
	}

	return umem;
}

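/* A umem can be bound to a queue only once both the fill ring (fq) and the
 * completion ring (cq) have been created.
 */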
bool xdp_umem_validate_queues(struct xdp_umem *umem)
{
	return umem->fq && umem->cq;
}