int i, count;
unsigned long cookie = 0;
off_t offset;
+ struct op_entry entry;
struct op_sample *sample;
- sample = op_cpu_buffer_read_entry(cpu);
+ sample = op_cpu_buffer_read_entry(&entry, cpu);
if (!sample)
return;
pc = sample->eip;
count = IBS_OP_CODE_SIZE; /*IBS OP is 5 int64s*/
for (i = 0; i < count; i++) {
- sample = op_cpu_buffer_read_entry(cpu);
+ sample = op_cpu_buffer_read_entry(&entry, cpu);
if (!sample)
return;
add_event_entry(sample->eip);
sync_buffer_state state = sb_buffer_start;
unsigned int i;
unsigned long available;
+ struct op_entry entry;
+ struct op_sample *sample;
mutex_lock(&buffer_mutex);
available = op_cpu_buffer_entries(cpu);
for (i = 0; i < available; ++i) {
- struct op_sample *s = op_cpu_buffer_read_entry(cpu);
- if (!s)
+ sample = op_cpu_buffer_read_entry(&entry, cpu);
+ if (!sample)
break;
- if (is_code(s->eip)) {
- switch (s->event) {
+ if (is_code(sample->eip)) {
+ switch (sample->event) {
case 0:
case CPU_IS_KERNEL:
/* kernel/userspace switch */
- in_kernel = s->event;
+ in_kernel = sample->event;
if (state == sb_buffer_start)
state = sb_sample_start;
- add_kernel_ctx_switch(s->event);
+ add_kernel_ctx_switch(sample->event);
break;
case CPU_TRACE_BEGIN:
state = sb_bt_start;
default:
/* userspace context switch */
oldmm = mm;
- new = (struct task_struct *)s->event;
+ new = (struct task_struct *)sample->event;
release_mm(oldmm);
mm = take_tasks_mm(new);
if (mm != oldmm)
/* ignore sample */
continue;
- if (add_sample(mm, s, in_kernel))
+ if (add_sample(mm, sample, in_kernel))
continue;
/* ignore backtraces if failed to add a sample */
entry->irq_flags);
}
-struct op_sample *op_cpu_buffer_read_entry(int cpu)
+/*
+ * Consume the next sample for @cpu from the oprofile read ring buffer
+ * and describe it in @entry (event handle, sample payload, and the
+ * count of trailing data words).
+ *
+ * Returns entry->sample on success, or NULL when no event is available
+ * even after swapping the per-cpu read/write buffers.
+ */
+struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
{
struct ring_buffer_event *e;
e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
if (e)
-return ring_buffer_event_data(e);
+goto event;
+/* Read buffer empty: swap it with the write buffer and retry once. */
if (ring_buffer_swap_cpu(op_ring_buffer_read,
op_ring_buffer_write,
cpu))
return NULL;
e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
if (e)
-return ring_buffer_event_data(e);
+goto event;
return NULL;
+
+event:
+entry->event = e;
+entry->sample = ring_buffer_event_data(e);
+/* size = number of data words following the op_sample header. */
+entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample))
+/ sizeof(entry->sample->data[0]);
+entry->data = entry->sample->data;
+return entry->sample;
}
unsigned long op_cpu_buffer_entries(int cpu)
struct op_sample
*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size);
int op_cpu_buffer_write_commit(struct op_entry *entry);
-struct op_sample *op_cpu_buffer_read_entry(int cpu);
+struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu);
unsigned long op_cpu_buffer_entries(int cpu);
/* transient events for the CPU buffer -> event buffer */