kernel/trace/trace_event_perf.c
// SPDX-License-Identifier: GPL-2.0
/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"
#include "trace_probe.h"

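/*
 * One scratch buffer per perf recursion context (task, softirq, hardirq,
 * NMI). The buffers are allocated by perf_trace_event_reg() when the first
 * event is registered and freed again once total_ref_count drops back to
 * zero.
 */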
static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];

/*
 * Force it to be aligned to unsigned long to avoid misaligned access
 * surprises.
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
        perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int      total_ref_count;

static int perf_trace_event_perm(struct trace_event_call *tp_event,
                                 struct perf_event *p_event)
{
        if (tp_event->perf_perm) {
                int ret = tp_event->perf_perm(tp_event, p_event);
                if (ret)
                        return ret;
        }

        /*
         * We checked and allowed the parent to be created,
         * so allow children without checking.
         */
        if (p_event->parent)
                return 0;

        /*
         * It's ok to check current process (owner) permissions in here,
         * because code below is called only via perf_event_open syscall.
         */

        /* The ftrace function trace is allowed only for root. */
        if (ftrace_event_is_function(tp_event)) {
                if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
                        return -EPERM;

                if (!is_sampling_event(p_event))
                        return 0;

                /*
                 * We don't allow user space callchains for the function
                 * trace event, due to issues with page faults while tracing
                 * the page fault handler and its overall trickiness.
                 */
                if (!p_event->attr.exclude_callchain_user)
                        return -EINVAL;

                /*
                 * Same reason to disable user stack dump as for user space
                 * callchains above.
                 */
                if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER)
                        return -EINVAL;
        }

        /* No tracing, just counting, so no obvious leak */
        if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
                return 0;

        /* Some events are ok to be traced by non-root users... */
        if (p_event->attach_state == PERF_ATTACH_TASK) {
                if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
                        return 0;
        }

        /*
         * ...otherwise raw tracepoint data can be a severe data leak,
         * only allow root to have these.
         */
        if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
                return -EPERM;

        return 0;
}

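/*
 * Hook up @p_event to @tp_event. The first perf event attached to a given
 * trace event allocates the per-cpu hlist of active events for it; the
 * first event system-wide also allocates the global perf_trace_buf
 * scratch buffers. All allocations are rolled back on failure.
 */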
static int perf_trace_event_reg(struct trace_event_call *tp_event,
                                struct perf_event *p_event)
{
        struct hlist_head __percpu *list;
        int ret = -ENOMEM;
        int cpu;

        p_event->tp_event = tp_event;
        if (tp_event->perf_refcount++ > 0)
                return 0;

        list = alloc_percpu(struct hlist_head);
        if (!list)
                goto fail;

        for_each_possible_cpu(cpu)
                INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

        tp_event->perf_events = list;

        if (!total_ref_count) {
                char __percpu *buf;
                int i;

                for (i = 0; i < PERF_NR_CONTEXTS; i++) {
                        buf = (char __percpu *)alloc_percpu(perf_trace_t);
                        if (!buf)
                                goto fail;

                        perf_trace_buf[i] = buf;
                }
        }

        ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
        if (ret)
                goto fail;

        total_ref_count++;
        return 0;

fail:
        if (!total_ref_count) {
                int i;

                for (i = 0; i < PERF_NR_CONTEXTS; i++) {
                        free_percpu(perf_trace_buf[i]);
                        perf_trace_buf[i] = NULL;
                }
        }

        if (!--tp_event->perf_refcount) {
                free_percpu(tp_event->perf_events);
                tp_event->perf_events = NULL;
        }

        return ret;
}

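/*
 * Drop the reference taken by perf_trace_event_reg(). The last event on a
 * trace event unregisters the perf callback, waits for in-flight handlers
 * via tracepoint_synchronize_unregister() and frees the per-cpu hlist;
 * the last event system-wide frees the global buffers as well.
 */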
static void perf_trace_event_unreg(struct perf_event *p_event)
{
        struct trace_event_call *tp_event = p_event->tp_event;
        int i;

        if (--tp_event->perf_refcount > 0)
                goto out;

        tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);

        /*
         * Ensure our callback won't be called anymore. The buffers
         * will be freed after that.
         */
        tracepoint_synchronize_unregister();

        free_percpu(tp_event->perf_events);
        tp_event->perf_events = NULL;

        if (!--total_ref_count) {
                for (i = 0; i < PERF_NR_CONTEXTS; i++) {
                        free_percpu(perf_trace_buf[i]);
                        perf_trace_buf[i] = NULL;
                }
        }
out:
        module_put(tp_event->mod);
}

static int perf_trace_event_open(struct perf_event *p_event)
{
        struct trace_event_call *tp_event = p_event->tp_event;
        return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
}

static void perf_trace_event_close(struct perf_event *p_event)
{
        struct trace_event_call *tp_event = p_event->tp_event;
        tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
}

static int perf_trace_event_init(struct trace_event_call *tp_event,
                                 struct perf_event *p_event)
{
        int ret;

        ret = perf_trace_event_perm(tp_event, p_event);
        if (ret)
                return ret;

        ret = perf_trace_event_reg(tp_event, p_event);
        if (ret)
                return ret;

        ret = perf_trace_event_open(p_event);
        if (ret) {
                perf_trace_event_unreg(p_event);
                return ret;
        }

        return 0;
}

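/*
 * perf_event_open() entry point for tracepoint events: find the trace
 * event whose id matches attr.config, pin its module and initialize the
 * perf event on top of it.
 */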
int perf_trace_init(struct perf_event *p_event)
{
        struct trace_event_call *tp_event;
        u64 event_id = p_event->attr.config;
        int ret = -EINVAL;

        mutex_lock(&event_mutex);
        list_for_each_entry(tp_event, &ftrace_events, list) {
                if (tp_event->event.type == event_id &&
                    tp_event->class && tp_event->class->reg &&
                    try_module_get(tp_event->mod)) {
                        ret = perf_trace_event_init(tp_event, p_event);
                        if (ret)
                                module_put(tp_event->mod);
                        break;
                }
        }
        mutex_unlock(&event_mutex);

        return ret;
}

void perf_trace_destroy(struct perf_event *p_event)
{
        mutex_lock(&event_mutex);
        perf_trace_event_close(p_event);
        perf_trace_event_unreg(p_event);
        mutex_unlock(&event_mutex);
}

#ifdef CONFIG_KPROBE_EVENTS
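/*
 * Entry point for kprobe events created via perf_event_open(): copy the
 * optional symbol name from user space, create a local (perf-only)
 * trace_kprobe and attach @p_event to it.
 */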
int perf_kprobe_init(struct perf_event *p_event, bool is_retprobe)
{
        int ret;
        char *func = NULL;
        struct trace_event_call *tp_event;

        if (p_event->attr.kprobe_func) {
                func = kzalloc(KSYM_NAME_LEN, GFP_KERNEL);
                if (!func)
                        return -ENOMEM;
                ret = strncpy_from_user(
                        func, u64_to_user_ptr(p_event->attr.kprobe_func),
                        KSYM_NAME_LEN);
                if (ret == KSYM_NAME_LEN)
                        ret = -E2BIG;
                if (ret < 0)
                        goto out;

                if (func[0] == '\0') {
                        kfree(func);
                        func = NULL;
                }
        }

        tp_event = create_local_trace_kprobe(
                func, (void *)(unsigned long)(p_event->attr.kprobe_addr),
                p_event->attr.probe_offset, is_retprobe);
        if (IS_ERR(tp_event)) {
                ret = PTR_ERR(tp_event);
                goto out;
        }

        ret = perf_trace_event_init(tp_event, p_event);
        if (ret)
                destroy_local_trace_kprobe(tp_event);
out:
        kfree(func);
        return ret;
}

void perf_kprobe_destroy(struct perf_event *p_event)
{
        perf_trace_event_close(p_event);
        perf_trace_event_unreg(p_event);

        destroy_local_trace_kprobe(p_event->tp_event);
}
#endif /* CONFIG_KPROBE_EVENTS */

#ifdef CONFIG_UPROBE_EVENTS
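/*
 * Entry point for uprobe events created via perf_event_open(): copy the
 * target path from user space, create a local (perf-only) trace_uprobe
 * for it and attach @p_event under event_mutex.
 */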
int perf_uprobe_init(struct perf_event *p_event, bool is_retprobe)
{
        int ret;
        char *path = NULL;
        struct trace_event_call *tp_event;

        if (!p_event->attr.uprobe_path)
                return -EINVAL;
        path = kzalloc(PATH_MAX, GFP_KERNEL);
        if (!path)
                return -ENOMEM;
        ret = strncpy_from_user(
                path, u64_to_user_ptr(p_event->attr.uprobe_path), PATH_MAX);
        if (ret == PATH_MAX) {
                ret = -E2BIG;
                goto out;
        }
        if (ret < 0)
                goto out;
        if (path[0] == '\0') {
                ret = -EINVAL;
                goto out;
        }

        tp_event = create_local_trace_uprobe(
                path, p_event->attr.probe_offset, is_retprobe);
        if (IS_ERR(tp_event)) {
                ret = PTR_ERR(tp_event);
                goto out;
        }

        /*
         * A local trace_uprobe needs to hold event_mutex to call
         * uprobe_buffer_enable() and uprobe_buffer_disable().
         * event_mutex is not required for local trace_kprobes.
         */
        mutex_lock(&event_mutex);
        ret = perf_trace_event_init(tp_event, p_event);
        if (ret)
                destroy_local_trace_uprobe(tp_event);
        mutex_unlock(&event_mutex);
out:
        kfree(path);
        return ret;
}

void perf_uprobe_destroy(struct perf_event *p_event)
{
        mutex_lock(&event_mutex);
        perf_trace_event_close(p_event);
        perf_trace_event_unreg(p_event);
        mutex_unlock(&event_mutex);
        destroy_local_trace_uprobe(p_event->tp_event);
}
#endif /* CONFIG_UPROBE_EVENTS */

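/*
 * pmu ->add()/->del() callbacks for trace events: enqueue or dequeue the
 * event on this CPU's hlist unless the event class handles
 * TRACE_REG_PERF_ADD/DEL itself.
 */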
int perf_trace_add(struct perf_event *p_event, int flags)
{
        struct trace_event_call *tp_event = p_event->tp_event;

        if (!(flags & PERF_EF_START))
                p_event->hw.state = PERF_HES_STOPPED;

        /*
         * If TRACE_REG_PERF_ADD returns false, no custom action was performed
         * and we need to take the default action of enqueueing our event on
         * the right per-cpu hlist.
         */
        if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event)) {
                struct hlist_head __percpu *pcpu_list;
                struct hlist_head *list;

                pcpu_list = tp_event->perf_events;
                if (WARN_ON_ONCE(!pcpu_list))
                        return -EINVAL;

                list = this_cpu_ptr(pcpu_list);
                hlist_add_head_rcu(&p_event->hlist_entry, list);
        }

        return 0;
}

void perf_trace_del(struct perf_event *p_event, int flags)
{
        struct trace_event_call *tp_event = p_event->tp_event;

        /*
         * If TRACE_REG_PERF_DEL returns false, no custom action was performed
         * and we need to take the default action of dequeueing our event from
         * the right per-cpu hlist.
         */
        if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event))
                hlist_del_rcu(&p_event->hlist_entry);
}

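/*
 * Reserve space in this CPU's scratch buffer for the current perf
 * recursion context. Returns NULL if @size exceeds PERF_MAX_TRACE_SIZE
 * or no recursion context is available; otherwise *rctxp holds the
 * context, which must later be released via perf_trace_buf_submit() or
 * perf_swevent_put_recursion_context().
 */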
void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp)
{
        char *raw_data;
        int rctx;

        BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

        if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
                      "perf buffer not large enough"))
                return NULL;

        *rctxp = rctx = perf_swevent_get_recursion_context();
        if (rctx < 0)
                return NULL;

        if (regs)
                *regs = this_cpu_ptr(&__perf_regs[rctx]);
        raw_data = this_cpu_ptr(perf_trace_buf[rctx]);

        /* zero the dead bytes from alignment so we don't leak stack to user space */
        memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));
        return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_alloc);
NOKPROBE_SYMBOL(perf_trace_buf_alloc);

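/*
 * Fill in the common trace_entry header (irq flags, preempt count, pid)
 * and the event type for a record obtained from perf_trace_buf_alloc().
 */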
void perf_trace_buf_update(void *record, u16 type)
{
        struct trace_entry *entry = record;
        int pc = preempt_count();
        unsigned long flags;

        local_save_flags(flags);
        tracing_generic_entry_update(entry, flags, pc);
        entry->type = type;
}
NOKPROBE_SYMBOL(perf_trace_buf_update);

#ifdef CONFIG_FUNCTION_TRACER
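/*
 * ftrace callback for function-trace perf events. It only fires on the
 * CPU the event is currently scheduled in on (ops->private carries that
 * CPU, see TRACE_REG_PERF_ADD below), builds a struct ftrace_entry in the
 * per-cpu scratch buffer and submits it to perf.
 */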
static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *ops, struct pt_regs *pt_regs)
{
        struct ftrace_entry *entry;
        struct perf_event *event;
        struct hlist_head head;
        struct pt_regs regs;
        int rctx;

        if ((unsigned long)ops->private != smp_processor_id())
                return;

        event = container_of(ops, struct perf_event, ftrace_ops);

        /*
         * @event->hlist entry is NULL (per INIT_HLIST_NODE), and all
         * the perf code does is hlist_for_each_entry_rcu(), so we can
         * get away with simply setting the @head.first pointer in order
         * to create a singular list.
         */
        head.first = &event->hlist_entry;

#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
                    sizeof(u64)) - sizeof(u32))

        BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);

        memset(&regs, 0, sizeof(regs));
        perf_fetch_caller_regs(&regs);

        entry = perf_trace_buf_alloc(ENTRY_SIZE, NULL, &rctx);
        if (!entry)
                return;

        entry->ip = ip;
        entry->parent_ip = parent_ip;
        perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, TRACE_FN,
                              1, &regs, &head, NULL);

#undef ENTRY_SIZE
}

static int perf_ftrace_function_register(struct perf_event *event)
{
        struct ftrace_ops *ops = &event->ftrace_ops;

        ops->flags   = FTRACE_OPS_FL_RCU;
        ops->func    = perf_ftrace_function_call;
        ops->private = (void *)(unsigned long)nr_cpu_ids;

        return register_ftrace_function(ops);
}

static int perf_ftrace_function_unregister(struct perf_event *event)
{
        struct ftrace_ops *ops = &event->ftrace_ops;
        int ret = unregister_ftrace_function(ops);
        ftrace_free_filter(ops);
        return ret;
}

int perf_ftrace_event_register(struct trace_event_call *call,
                               enum trace_reg type, void *data)
{
        struct perf_event *event = data;

        switch (type) {
        case TRACE_REG_REGISTER:
        case TRACE_REG_UNREGISTER:
                break;
        case TRACE_REG_PERF_REGISTER:
        case TRACE_REG_PERF_UNREGISTER:
                return 0;
        case TRACE_REG_PERF_OPEN:
                return perf_ftrace_function_register(data);
        case TRACE_REG_PERF_CLOSE:
                return perf_ftrace_function_unregister(data);
        case TRACE_REG_PERF_ADD:
                event->ftrace_ops.private = (void *)(unsigned long)smp_processor_id();
                return 1;
        case TRACE_REG_PERF_DEL:
                event->ftrace_ops.private = (void *)(unsigned long)nr_cpu_ids;
                return 1;
        }

        return -EINVAL;
}
#endif /* CONFIG_FUNCTION_TRACER */