linux/tools/perf/util/header.h
Xiao Guangrong bcf6edcd6f perf kvm: Events analysis tool
Add 'perf kvm stat' support to analyze kvm vmexit/mmio/ioport smartly

Usage:
- kvm stat
  run a command and gather performance counter statistics; it is an alias of
  perf stat

- trace kvm events:
  perf kvm stat record, or, if other tracepoints are also of interest, append
  them like this:
  perf kvm stat record -e timer:* -a

  If many guests are running, a specific guest can be tracked with -p or
  --pid; -a is used to track events generated by all guests.

- show the result:
  perf kvm stat report

An example of the output follows:
13005
13059

total 2 guests are running on the host

Then, track the guest whose pid is 13059:
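
For example (stop the recording with Ctrl-C):

  perf kvm stat record -p 13059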
^C[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.253 MB perf.data.guest (~11065 samples) ]

See the vmexit events:

Analyze events for all VCPUs:

             VM-EXIT    Samples  Samples%     Time%         Avg time

         APIC_ACCESS        460    70.55%     0.01%     22.44us ( +-   1.75% )
                 HLT         93    14.26%    99.98% 832077.26us ( +-  10.42% )
  EXTERNAL_INTERRUPT         64     9.82%     0.00%     35.35us ( +-  14.21% )
   PENDING_INTERRUPT         24     3.68%     0.00%      9.29us ( +-  31.39% )
           CR_ACCESS          7     1.07%     0.00%      8.12us ( +-   5.76% )
      IO_INSTRUCTION          3     0.46%     0.00%     18.00us ( +-  11.79% )
       EXCEPTION_NMI          1     0.15%     0.00%      5.83us ( +-   -nan% )

Total Samples:652, Total events handled time:77396109.80us.
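
(The Time% column is each exit reason's share of the total handled time; e.g.
for HLT, 93 samples * 832077.26us ~= 77383185us, which is 99.98% of the
77396109.80us total.)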

See the mmio events:

Analyze events for all VCPUs:

         MMIO Access    Samples  Samples%     Time%         Avg time

        0xfee00380:W        387    84.31%    79.28%      8.29us ( +-   3.32% )
        0xfee00300:W         24     5.23%     9.96%     16.79us ( +-   1.97% )
        0xfee00300:R         24     5.23%     7.83%     13.20us ( +-   3.00% )
        0xfee00310:W         24     5.23%     2.93%      4.94us ( +-   3.84% )

Total Samples:459, Total events handled time:4044.59us.

See the ioport event:

Analyze events for all VCPUs:

      IO Port Access    Samples  Samples%     Time%         Avg time

         0xc050:POUT          3   100.00%   100.00%     13.75us ( +-  10.83% )

Total Samples:3, Total events handled time:41.26us.

In addition, --vcpu is used to track a specific vcpu and --key is used to sort
the result:
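
For example, to analyze only VCPU 0 and sort by average handling time (the
'time' key is assumed here; the default key sorts by sample count):

  perf kvm stat report --vcpu=0 --key=time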

Analyze events for VCPU 0:

             VM-EXIT    Samples  Samples%     Time%         Avg time

                 HLT         27    13.85%    99.97% 405790.24us ( +-  12.70% )
  EXTERNAL_INTERRUPT         13     6.67%     0.00%     27.94us ( +-  22.26% )
         APIC_ACCESS        146    74.87%     0.03%     21.69us ( +-   2.91% )
      IO_INSTRUCTION          2     1.03%     0.00%     17.77us ( +-  20.56% )
           CR_ACCESS          2     1.03%     0.00%      8.55us ( +-   6.47% )
   PENDING_INTERRUPT          5     2.56%     0.00%      6.27us ( +-   3.94% )

Total Samples:195, Total events handled time:10959950.90us.

Signed-off-by: Dong Hao <haodong@linux.vnet.ibm.com>
Signed-off-by: Runzhen Wang <runzhen@linux.vnet.ibm.com>
[ Dong Hao <haodong@linux.vnet.ibm.com>
  Runzhen Wang <runzhen@linux.vnet.ibm.com>:
     - rebase it on current acme's tree
     - fix the compiling-error on i386 ]
Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Acked-by: David Ahern <dsahern@gmail.com>
Cc: Avi Kivity <avi@redhat.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: kvm@vger.kernel.org
Cc: Runzhen Wang <runzhen@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/1347870675-31495-4-git-send-email-haodong@linux.vnet.ibm.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2012-09-21 12:51:22 -03:00


#ifndef __PERF_HEADER_H
#define __PERF_HEADER_H

#include "../../../include/linux/perf_event.h"
#include <sys/types.h>
#include <stdbool.h>
#include "types.h"
#include "event.h"
#include <linux/bitmap.h>

enum {
        HEADER_RESERVED         = 0,    /* always cleared */
        HEADER_FIRST_FEATURE    = 1,
        HEADER_TRACING_DATA     = 1,
        HEADER_BUILD_ID,

        HEADER_HOSTNAME,
        HEADER_OSRELEASE,
        HEADER_VERSION,
        HEADER_ARCH,
        HEADER_NRCPUS,
        HEADER_CPUDESC,
        HEADER_CPUID,
        HEADER_TOTAL_MEM,
        HEADER_CMDLINE,
        HEADER_EVENT_DESC,
        HEADER_CPU_TOPOLOGY,
        HEADER_NUMA_TOPOLOGY,
        HEADER_BRANCH_STACK,
        HEADER_PMU_MAPPINGS,

        HEADER_LAST_FEATURE,
        HEADER_FEAT_BITS        = 256,
};

struct perf_file_section {
        u64 offset;
        u64 size;
};

struct perf_file_header {
        u64                             magic;
        u64                             size;
        u64                             attr_size;
        struct perf_file_section        attrs;
        struct perf_file_section        data;
        struct perf_file_section        event_types;
        DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS);
};

struct perf_pipe_file_header {
        u64                             magic;
        u64                             size;
};

struct perf_header;

int perf_file_header__read(struct perf_file_header *header,
                           struct perf_header *ph, int fd);

struct perf_header {
        int                     frozen;
        bool                    needs_swap;
        s64                     attr_offset;
        u64                     data_offset;
        u64                     data_size;
        u64                     event_offset;
        u64                     event_size;
        DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS);
};

struct perf_evlist;
struct perf_session;

int perf_session__read_header(struct perf_session *session, int fd);

int perf_session__write_header(struct perf_session *session,
                               struct perf_evlist *evlist,
                               int fd, bool at_exit);
int perf_header__write_pipe(int fd);

int perf_header__push_event(u64 id, const char *name);
char *perf_header__find_event(u64 id);

void perf_header__set_feat(struct perf_header *header, int feat);
void perf_header__clear_feat(struct perf_header *header, int feat);
bool perf_header__has_feat(const struct perf_header *header, int feat);

int perf_header__set_cmdline(int argc, const char **argv);

int perf_header__process_sections(struct perf_header *header, int fd,
                                  void *data,
                                  int (*process)(struct perf_file_section *section,
                                                 struct perf_header *ph,
                                                 int feat, int fd, void *data));

int perf_header__fprintf_info(struct perf_session *s, FILE *fp, bool full);

char *perf_header__read_feature(struct perf_session *session, int feat);

int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
                          const char *name, bool is_kallsyms, bool is_vdso);
int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir);

int perf_event__synthesize_attr(struct perf_tool *tool,
                                struct perf_event_attr *attr, u32 ids, u64 *id,
                                perf_event__handler_t process);
int perf_event__synthesize_attrs(struct perf_tool *tool,
                                 struct perf_session *session,
                                 perf_event__handler_t process);
int perf_event__process_attr(union perf_event *event, struct perf_evlist **pevlist);

int perf_event__synthesize_event_type(struct perf_tool *tool,
                                      u64 event_id, char *name,
                                      perf_event__handler_t process,
                                      struct machine *machine);
int perf_event__synthesize_event_types(struct perf_tool *tool,
                                       perf_event__handler_t process,
                                       struct machine *machine);
int perf_event__process_event_type(struct perf_tool *tool,
                                   union perf_event *event);

int perf_event__synthesize_tracing_data(struct perf_tool *tool,
                                        int fd, struct perf_evlist *evlist,
                                        perf_event__handler_t process);
int perf_event__process_tracing_data(union perf_event *event,
                                     struct perf_session *session);

int perf_event__synthesize_build_id(struct perf_tool *tool,
                                    struct dso *pos, u16 misc,
                                    perf_event__handler_t process,
                                    struct machine *machine);
int perf_event__process_build_id(struct perf_tool *tool,
                                 union perf_event *event,
                                 struct perf_session *session);

/*
 * arch specific callback
 */
int get_cpuid(char *buffer, size_t sz);

#endif /* __PERF_HEADER_H */
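
For illustration only, a minimal standalone sketch of the on-disk layout that
struct perf_file_section and struct perf_file_header describe. It uses local
mirror structs with stdint types and skips the magic check, feature bitmap and
endianness handling that perf_file_header__read() performs in the real code:

#include <stdio.h>
#include <stdint.h>

/* local mirrors of the structs above, illustration only */
struct file_section {
        uint64_t offset;
        uint64_t size;
};

struct file_header {
        uint64_t magic;                 /* identifies a perf.data file */
        uint64_t size;                  /* size of this header on disk */
        uint64_t attr_size;             /* size of one attribute entry */
        struct file_section attrs;      /* where the event attrs live */
        struct file_section data;       /* where the sample data lives */
        struct file_section event_types;
        /* the adds_features bitmap (HEADER_FEAT_BITS bits) follows on disk */
};

int main(void)
{
        struct file_header h;
        FILE *f = fopen("perf.data", "rb");

        if (!f) {
                perror("perf.data");
                return 1;
        }
        if (fread(&h, sizeof(h), 1, f) != 1) {
                fprintf(stderr, "perf.data: short read\n");
                fclose(f);
                return 1;
        }

        printf("header size : %llu\n", (unsigned long long)h.size);
        printf("attr size   : %llu\n", (unsigned long long)h.attr_size);
        printf("attrs       : offset %llu, size %llu\n",
               (unsigned long long)h.attrs.offset,
               (unsigned long long)h.attrs.size);
        printf("data        : offset %llu, size %llu\n",
               (unsigned long long)h.data.offset,
               (unsigned long long)h.data.size);

        fclose(f);
        return 0;
}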