// SPDX-License-Identifier: GPL-2.0-only
/*
 * Ptrace support for Hexagon
 *
 * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 */
739e89c9fSRichard Kuo
839e89c9fSRichard Kuo #include <linux/kernel.h>
939e89c9fSRichard Kuo #include <linux/sched.h>
1068db0cf1SIngo Molnar #include <linux/sched/task_stack.h>
1139e89c9fSRichard Kuo #include <linux/mm.h>
1239e89c9fSRichard Kuo #include <linux/smp.h>
1339e89c9fSRichard Kuo #include <linux/errno.h>
1439e89c9fSRichard Kuo #include <linux/ptrace.h>
1539e89c9fSRichard Kuo #include <linux/regset.h>
1639e89c9fSRichard Kuo #include <linux/user.h>
176bbbc30cSRichard Kuo #include <linux/elf.h>
1839e89c9fSRichard Kuo
1939e89c9fSRichard Kuo #include <asm/user.h>
2039e89c9fSRichard Kuo
#if arch_has_single_step()
/* Both called from ptrace_resume */

/*
 * Arm hardware single-step for @child: set the single-step state in
 * the child's saved pt_regs and mark the task with TIF_SINGLESTEP so
 * the rest of the kernel knows a step is pending.
 */
void user_enable_single_step(struct task_struct *child)
{
	pt_set_singlestep(task_pt_regs(child));
	set_tsk_thread_flag(child, TIF_SINGLESTEP);
}

/*
 * Disarm single-step for @child: exactly undoes both actions of
 * user_enable_single_step().
 */
void user_disable_single_step(struct task_struct *child)
{
	pt_clr_singlestep(task_pt_regs(child));
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}
#endif
357777746cSRichard Kuo
/*
 * genregs_get - copy out @target's general registers (ptrace/coredump).
 * @target: thread whose saved register state is read
 * @regset: regset description (not consulted; hexagon has one regset)
 * @to:     membuf cursor that receives the data
 *
 * The data written must have exactly the layout of the userspace
 * struct user_regs_struct.  Returns the value of the final
 * membuf_zero() call (0 once the buffer is satisfied).
 */
static int genregs_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	struct pt_regs *regs = task_pt_regs(target);

	/* The general idea here is that the copyout must happen in
	 * exactly the same order in which the userspace expects these
	 * regs. Now, the sequence in userspace does not match the
	 * sequence in the kernel, so everything past the 32 gprs
	 * happens one at a time.
	 */
	membuf_write(&to, &regs->r00, 32*sizeof(unsigned long));
	/* Must be exactly same sequence as struct user_regs_struct */
	membuf_store(&to, regs->sa0);
	membuf_store(&to, regs->lc0);
	membuf_store(&to, regs->sa1);
	membuf_store(&to, regs->lc1);
	membuf_store(&to, regs->m0);
	membuf_store(&to, regs->m1);
	membuf_store(&to, regs->usr);
	membuf_store(&to, regs->preds);
	membuf_store(&to, regs->gp);
	membuf_store(&to, regs->ugp);
	membuf_store(&to, pt_elr(regs)); // pc
	membuf_store(&to, (unsigned long)pt_cause(regs)); // cause
	membuf_store(&to, pt_badva(regs)); // badva
	/*
	 * v4+ has cs0/cs1; pad with zeroes either way so the total
	 * regset size is the same on all architecture versions.
	 */
#if CONFIG_HEXAGON_ARCH_VERSION >=4
	membuf_store(&to, regs->cs0);
	membuf_store(&to, regs->cs1);
	return membuf_zero(&to, sizeof(unsigned long));
#else
	return membuf_zero(&to, 3 * sizeof(unsigned long));
#endif
}
7139e89c9fSRichard Kuo
/*
 * genregs_set - update @target's saved registers from tracer data.
 * @target:     thread whose saved register state is written
 * @regset:     regset description (not consulted)
 * @pos/@count: byte range of struct user_regs_struct being written
 * @kbuf/@ubuf: source data, kernel- or user-space (one is non-NULL)
 *
 * Mirror image of genregs_get(): consumes data laid out exactly like
 * struct user_regs_struct.  Returns 0 on success or a -ve errno from
 * user_regset_copyin().
 */
static int genregs_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	unsigned long bucket;	/* throwaway destination for read-only fields */
	struct pt_regs *regs = task_pt_regs(target);

	if (!regs)
		return -EIO;

	/* The 32 GPRs are contiguous at the start of both layouts. */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &regs->r00, 0, 32*sizeof(unsigned long));

	/*
	 * Copy the next user_regs_struct field (named USR_REG) into the
	 * kernel-side location KPT_REG, but only while no earlier copy
	 * has failed (ret stays 0 on the happy path).
	 */
#define INEXT(KPT_REG, USR_REG) \
	if (!ret) \
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, \
			KPT_REG, offsetof(struct user_regs_struct, USR_REG), \
			offsetof(struct user_regs_struct, USR_REG) + \
				sizeof(unsigned long));

	/* Must be exactly same sequence as struct user_regs_struct */
	INEXT(&regs->sa0, sa0);
	INEXT(&regs->lc0, lc0);
	INEXT(&regs->sa1, sa1);
	INEXT(&regs->lc1, lc1);
	INEXT(&regs->m0, m0);
	INEXT(&regs->m1, m1);
	INEXT(&regs->usr, usr);
	INEXT(&regs->preds, p3_0);
	INEXT(&regs->gp, gp);
	INEXT(&regs->ugp, ugp);
	INEXT(&pt_elr(regs), pc);

	/* CAUSE and BADVA aren't writeable. */
	INEXT(&bucket, cause);
	INEXT(&bucket, badva);

#if CONFIG_HEXAGON_ARCH_VERSION >=4
	INEXT(&regs->cs0, cs0);
	INEXT(&regs->cs1, cs1);
#endif

	/* Ignore the rest, if needed */
	if (!ret)
		user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
			offsetof(struct user_regs_struct, pad1), -1);
	else
		return ret;

	/*
	 * This is special; SP is actually restored by the VM via the
	 * special event record which is set by the special trap.
	 */
	regs->hvmer.vmpsp = regs->r29;
	return 0;
}
13039e89c9fSRichard Kuo
/* Indices into hexagon_regsets[]. */
enum hexagon_regset {
	REGSET_GENERAL,
};
13439e89c9fSRichard Kuo
/*
 * Regset descriptions: hexagon exposes a single regset, the general
 * registers, sized/aligned as an array of ELF_NGREG unsigned longs.
 */
static const struct user_regset hexagon_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(unsigned long),
		.align = sizeof(unsigned long),
		.regset_get = genregs_get,
		.set = genregs_set,
	},
};
14539e89c9fSRichard Kuo
14639e89c9fSRichard Kuo static const struct user_regset_view hexagon_user_view = {
147f231e433SMasahiro Yamada .name = "hexagon",
14839e89c9fSRichard Kuo .e_machine = ELF_ARCH,
14939e89c9fSRichard Kuo .ei_osabi = ELF_OSABI,
15039e89c9fSRichard Kuo .regsets = hexagon_regsets,
151446b6cb8SRichard Kuo .e_flags = ELF_CORE_EFLAGS,
15239e89c9fSRichard Kuo .n = ARRAY_SIZE(hexagon_regsets)
15339e89c9fSRichard Kuo };
15439e89c9fSRichard Kuo
/*
 * Return the regset view for @task.  Hexagon has a single view, so
 * @task is not examined.
 */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &hexagon_user_view;
}
15939e89c9fSRichard Kuo
/*
 * Detach-time cleanup: make sure @child is no longer single-stepping
 * once the tracer goes away.
 */
void ptrace_disable(struct task_struct *child)
{
	/* Boilerplate - resolves to null inline if no HW single-step */
	user_disable_single_step(child);
}
16539e89c9fSRichard Kuo
/*
 * Architecture hook for ptrace(2).  Hexagon implements no
 * arch-specific requests; everything is delegated to the generic
 * ptrace_request().
 */
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	return ptrace_request(child, request, addr, data);
}
171