commit 6d692179cb44e68a3cfaeac213e3244f858676b8
Author: Andi Kleen <ak@linux.intel.com>
Date:   Wed Jul 16 08:03:54 2014 -0500

    Add oprofile support for Broadwell microarchitecture

    This patch adds the event list for the Intel Broadwell microarchitecture.
    Hopefully this can still make 1.0.

    The patch is very straightforward: just add the model numbers and
    type in the usual places and add the event list.

    Passes make check.

    Some notes:
    - Haswell included one Broadwell model number by mistake. I moved
      that to Broadwell now.
    - oprofile doesn't support umask sub-events with different counter
      constraints than other events. This affects a few events on Broadwell.
      However, it's not a problem when oprofile uses perf as a backend,
      as perf will know how to schedule these events (once it gets
      Broadwell support). It won't work correctly with the old driver.
      Most of these events are not too useful for sampling, so in practice
      it's not a real problem.
    - As usual, PEBS events, events with an offcore mask, and uncore
      events are missing.

    Signed-off-by: Andi Kleen <ak@linux.intel.com>
diff --git a/events/Makefile.am b/events/Makefile.am
index 3e43d10..f6fd3d7 100644
--- a/events/Makefile.am
+++ b/events/Makefile.am
@@ -21,6 +21,7 @@ event_files = \
 i386/sandybridge/events i386/sandybridge/unit_masks \
 i386/ivybridge/events i386/ivybridge/unit_masks \
 i386/haswell/events i386/haswell/unit_masks \
+ i386/broadwell/events i386/broadwell/unit_masks \
 i386/silvermont/events i386/silvermont/unit_masks \
 ia64/ia64/events ia64/ia64/unit_masks \
 ia64/itanium2/events ia64/itanium2/unit_masks \
diff --git a/events/i386/broadwell/events b/events/i386/broadwell/events
new file mode 100644
index 0000000..6a4b388
--- /dev/null
+++ b/events/i386/broadwell/events
@@ -0,0 +1,65 @@
+#
+# Intel "Broadwell" microarchitecture core events.
+#
+# See http://ark.intel.com/ for help in identifying Broadwell based CPUs
+#
+# Note the minimum counts are not discovered experimentally and could be likely
+# lowered in many cases without ill effect.
+#
+include:i386/arch_perfmon
+event:0x03 counters:cpuid um:ld_blocks minimum:100003 name:ld_blocks :
+event:0x05 counters:cpuid um:misalign_mem_ref minimum:2000003 name:misalign_mem_ref :
+event:0x07 counters:cpuid um:one minimum:100003 name:ld_blocks_partial_address_alias :
+event:0x08 counters:cpuid um:dtlb_load_misses minimum:2000003 name:dtlb_load_misses :
+event:0x0d counters:cpuid um:x03 minimum:2000003 name:int_misc_recovery_cycles :
+event:0x0e counters:cpuid um:uops_issued minimum:2000003 name:uops_issued :
+event:0x14 counters:cpuid um:one minimum:2000003 name:arith_fpu_div_active :
+event:0x24 counters:cpuid um:l2_rqsts minimum:200003 name:l2_rqsts :
+event:0x27 counters:cpuid um:x50 minimum:200003 name:l2_demand_rqsts_wb_hit :
+event:0x48 counters:2 um:l1d_pend_miss minimum:2000003 name:l1d_pend_miss :
+event:0x49 counters:cpuid um:dtlb_store_misses minimum:100003 name:dtlb_store_misses :
+event:0x4c counters:cpuid um:x02 minimum:100003 name:load_hit_pre_hw_pf :
+event:0x4f counters:cpuid um:x10 minimum:2000003 name:ept_walk_cycles :
+event:0x51 counters:cpuid um:one minimum:2000003 name:l1d_replacement :
+event:0x54 counters:cpuid um:tx_mem minimum:2000003 name:tx_mem :
+event:0x58 counters:cpuid um:move_elimination minimum:1000003 name:move_elimination :
+event:0x5c counters:cpuid um:cpl_cycles minimum:2000003 name:cpl_cycles :
+event:0x5d counters:cpuid um:tx_exec minimum:2000003 name:tx_exec :
+event:0x5e counters:cpuid um:rs_events minimum:2000003 name:rs_events :
+event:0x60 counters:cpuid um:offcore_requests_outstanding minimum:2000003 name:offcore_requests_outstanding :
+event:0x63 counters:cpuid um:lock_cycles minimum:2000003 name:lock_cycles :
+event:0x79 counters:0,1,2,3 um:idq minimum:2000003 name:idq :
+event:0x80 counters:cpuid um:x02 minimum:200003 name:icache_misses :
+event:0x85 counters:cpuid um:itlb_misses minimum:100003 name:itlb_misses :
+event:0x87 counters:cpuid um:one minimum:2000003 name:ild_stall_lcp :
+event:0x88 counters:cpuid um:br_inst_exec minimum:200003 name:br_inst_exec :
+event:0x89 counters:cpuid um:br_misp_exec minimum:200003 name:br_misp_exec :
+event:0x9c counters:0,1,2,3 um:idq_uops_not_delivered minimum:2000003 name:idq_uops_not_delivered :
+event:0xa1 counters:cpuid um:uops_executed_port minimum:2000003 name:uops_executed_port :
+event:0xa1 counters:cpuid um:uops_dispatched_port minimum:2000003 name:uops_dispatched_port :
+event:0xa2 counters:cpuid um:resource_stalls minimum:2000003 name:resource_stalls :
+event:0xa3 counters:2 um:cycle_activity minimum:2000003 name:cycle_activity :
+event:0xa8 counters:cpuid um:lsd minimum:2000003 name:lsd :
+event:0xab counters:cpuid um:x02 minimum:2000003 name:dsb2mite_switches_penalty_cycles :
+event:0xae counters:cpuid um:one minimum:100007 name:itlb_itlb_flush :
+event:0xb0 counters:cpuid um:offcore_requests minimum:100003 name:offcore_requests :
+event:0xb1 counters:cpuid um:uops_executed minimum:2000003 name:uops_executed :
+event:0xbc counters:0,1,2,3 um:page_walker_loads minimum:2000003 name:page_walker_loads :
+event:0xc0 counters:1 um:inst_retired minimum:2000003 name:inst_retired :
+event:0xc1 counters:cpuid um:other_assists minimum:100003 name:other_assists :
+event:0xc2 counters:cpuid um:uops_retired minimum:2000003 name:uops_retired :
+event:0xc3 counters:cpuid um:machine_clears minimum:2000003 name:machine_clears :
+event:0xc4 counters:cpuid um:br_inst_retired minimum:400009 name:br_inst_retired :
+event:0xc5 counters:cpuid um:br_misp_retired minimum:400009 name:br_misp_retired :
+event:0xc8 counters:cpuid um:hle_retired minimum:2000003 name:hle_retired :
+event:0xc9 counters:0,1,2,3 um:rtm_retired minimum:2000003 name:rtm_retired :
+event:0xca counters:cpuid um:fp_assist minimum:100003 name:fp_assist :
+event:0xcc counters:cpuid um:x20 minimum:2000003 name:rob_misc_events_lbr_inserts :
+event:0xd0 counters:0,1,2,3 um:mem_uops_retired minimum:2000003 name:mem_uops_retired :
+event:0xd1 counters:0,1,2,3 um:mem_load_uops_retired minimum:2000003 name:mem_load_uops_retired :
+event:0xd2 counters:0,1,2,3 um:mem_load_uops_l3_hit_retired minimum:100003 name:mem_load_uops_l3_hit_retired :
+event:0xd3 counters:0,1,2,3 um:one minimum:100007 name:mem_load_uops_l3_miss_retired_local_dram :
+event:0xe6 counters:cpuid um:x1f minimum:100003 name:baclears_any :
+event:0xf0 counters:cpuid um:l2_trans minimum:200003 name:l2_trans :
+event:0xf1 counters:cpuid um:l2_lines_in minimum:100003 name:l2_lines_in :
+event:0xf2 counters:cpuid um:x05 minimum:100003 name:l2_lines_out_demand_clean :
diff --git a/events/i386/broadwell/unit_masks b/events/i386/broadwell/unit_masks
new file mode 100644
index 0000000..470e9e9
--- /dev/null
+++ b/events/i386/broadwell/unit_masks
@@ -0,0 +1,316 @@
+#
+# Unit masks for the Intel "Broadwell" micro architecture
+#
+# See http://ark.intel.com/ for help in identifying Broadwell based CPUs
+#
+include:i386/arch_perfmon
+name:x02 type:mandatory default:0x2
+ 0x2 No unit mask
+name:x03 type:mandatory default:0x3
+ 0x3 No unit mask
+name:x05 type:mandatory default:0x5
+ 0x5 No unit mask
+name:x10 type:mandatory default:0x10
+ 0x10 No unit mask
+name:x1f type:mandatory default:0x1f
+ 0x1f No unit mask
+name:x20 type:mandatory default:0x20
+ 0x20 No unit mask
+name:x50 type:mandatory default:0x50
+ 0x50 No unit mask
+name:ld_blocks type:exclusive default:0x2
|
|
|
d5df0a |
+ 0x2 extra: store_forward This event counts how many times the load operation got the true Block-on-Store blocking code preventing store forwarding. This includes cases when: - preceding store conflicts with the load (incomplete overlap); - store forwarding is impossible due to u-arch limitations; - preceding lock RMW operations are not forwarded; - store has the no-forward bit set (uncacheable/page-split/masked stores); - all-blocking stores are used (mostly, fences and port I/O); and others. The most common case is a load blocked due to its address range overlapping with a preceding smaller uncompleted store. Note: This event does not take into account cases of out-of-SW-control (for example, SbTailHit), unknown physical STA, and cases of blocking loads on store due to being non-WB memory type or a lock. These cases are covered by other events. See the table of not supported store forwards in the Optimization Guide.
|
|
|
d5df0a |
+ 0x8 extra: no_sr This event counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.
|
|
|
d5df0a |
+name:misalign_mem_ref type:exclusive default:0x1
|
|
|
d5df0a |
+ 0x1 extra: loads This event counts speculative cache-line split load uops dispatched to the L1 cache.
|
|
|
d5df0a |
+ 0x2 extra: stores This event counts speculative cache line split store-address (STA) uops dispatched to the L1 cache.
|
|
|
d5df0a |
+name:dtlb_load_misses type:exclusive default:0x1
|
|
|
d5df0a |
+ 0x1 extra: miss_causes_a_walk This event counts load misses in all DTLB levels that cause page walks of any page size (4K/2M/4M/1G).
|
|
|
d5df0a |
+ 0x2 extra: walk_completed_4k This event counts load misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.
|
|
|
d5df0a |
+ 0x10 extra: walk_duration This event counts the number of cycles while PMH is busy with the page walk.
|
|
|
d5df0a |
+ 0x20 extra: stlb_hit_4k Load misses that miss the DTLB and hit the STLB (4K)
|
|
|
d5df0a |
+ 0xe extra: walk_completed Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.
|
|
|
d5df0a |
+ 0x60 extra: stlb_hit Load operations that miss the first DTLB level but hit the second and do not cause page walks
|
|
|
d5df0a |
+name:uops_issued type:exclusive default:0x1
|
|
|
d5df0a |
+ 0x1 extra: any This event counts the number of Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS).
|
|
|
d5df0a |
+ 0x10 extra: flags_merge Number of flags-merge uops being allocated. Such uops considered perf sensitive; added by GSR u-arch.
|
|
|
d5df0a |
+ 0x20 extra: slow_lea Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.
|
|
|
d5df0a |
+ 0x40 extra: single_mul Number of Multiply packed/scalar single precision uops allocated
|
|
|
d5df0a |
+ 0x1 extra:inv stall_cycles This event counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.
|
|
|
d5df0a |
+name:l2_rqsts type:exclusive default:0x21
|
|
|
d5df0a |
+ 0x21 extra: demand_data_rd_miss This event counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.
|
|
|
d5df0a |
+ 0x41 extra: demand_data_rd_hit This event counts the number of demand Data Read requests that hit L2 cache. Only not rejected loads are counted.
|
|
|
d5df0a |
+ 0x30 extra: l2_pf_miss This event counts the number of requests from the L2 hardware prefetchers that miss L2 cache.
|
|
|
d5df0a |
+ 0x50 extra: l2_pf_hit This event counts the number of requests from the L2 hardware prefetchers that hit L2 cache. L3 prefetch new types
|
|
|
d5df0a |
+ 0xe1 extra: all_demand_data_rd This event counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.
|
|
|
d5df0a |
+ 0xe2 extra: all_rfo This event counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.
|
|
|
d5df0a |
+ 0xe4 extra: all_code_rd This event counts the total number of L2 code requests.
|
|
|
d5df0a |
+ 0xf8 extra: all_pf This event counts the total number of requests from the L2 hardware prefetchers.
|
|
|
d5df0a |
+ 0x42 extra: rfo_hit RFO requests that hit L2 cache
|
|
|
d5df0a |
+ 0x22 extra: rfo_miss RFO requests that miss L2 cache
|
|
|
d5df0a |
+ 0x44 extra: code_rd_hit L2 cache hits when fetching instructions, code reads.
|
|
|
d5df0a |
+ 0x24 extra: code_rd_miss L2 cache misses when fetching instructions
|
|
|
d5df0a |
+ 0x27 extra: all_demand_miss Demand requests that miss L2 cache
|
|
|
d5df0a |
+ 0xe7 extra: all_demand_references Demand requests to L2 cache
|
|
|
d5df0a |
+ 0x3f extra: miss All requests that miss L2 cache
|
|
|
d5df0a |
+ 0xff extra: references All L2 requests
|
|
|
d5df0a |
+name:l1d_pend_miss type:exclusive default:0x1
|
|
|
d5df0a |
+ 0x1 extra: pending This event counts duration of L1D miss outstanding, that is each cycle number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand; from the demand Hit FB, if it is allocated by hardware or software prefetch. Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.
|
|
|
d5df0a |
+ 0x1 extra: pending_cycles This event counts duration of L1D miss outstanding in cycles.
|
|
|
d5df0a |
+name:dtlb_store_misses type:exclusive default:0x1
|
|
|
d5df0a |
+ 0x1 extra: miss_causes_a_walk This event counts store misses in all DTLB levels that cause page walks of any page size (4K/2M/4M/1G).
|
|
|
d5df0a |
+ 0x2 extra: walk_completed_4k This event counts store misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.
|
|
|
d5df0a |
+ 0x10 extra: walk_duration This event counts the number of cycles while PMH is busy with the page walk.
|
|
|
d5df0a |
+ 0x20 extra: stlb_hit_4k Store misses that miss the DTLB and hit the STLB (4K)
|
|
|
d5df0a |
+ 0xe extra: walk_completed Store misses in all DTLB levels that cause completed page walks
|
|
|
d5df0a |
+ 0x60 extra: stlb_hit Store operations that miss the first TLB level but hit the second and do not cause page walks
|
|
|
d5df0a |
+name:tx_mem type:exclusive default:0x1
|
|
|
d5df0a |
+ 0x1 extra: abort_conflict Number of times a TSX line had a cache conflict
|
|
|
d5df0a |
+ 0x2 extra: abort_capacity_write Number of times a TSX Abort was triggered due to an evicted line caused by a transaction overflow
|
|
|
d5df0a |
+ 0x4 extra: abort_hle_store_to_elided_lock Number of times a TSX Abort was triggered due to a non-release/commit store to lock
|
|
|
d5df0a |
+ 0x8 extra: abort_hle_elision_buffer_not_empty Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty
|
|
|
d5df0a |
+ 0x10 extra: abort_hle_elision_buffer_mismatch Number of times a TSX Abort was triggered due to release/commit but data and address mismatch
|
|
|
d5df0a |
+ 0x20 extra: abort_hle_elision_buffer_unsupported_alignment Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer
|
|
|
d5df0a |
+ 0x40 extra: hle_elision_buffer_full Number of times we could not allocate Lock Buffer
|
|
|
d5df0a |
+name:move_elimination type:exclusive default:0x1
|
|
|
d5df0a |
+ 0x1 extra: int_eliminated Number of integer Move Elimination candidate uops that were eliminated.
|
|
|
d5df0a |
+ 0x2 extra: simd_eliminated Number of SIMD Move Elimination candidate uops that were eliminated.
|
|
|
d5df0a |
+ 0x4 extra: int_not_eliminated Number of integer Move Elimination candidate uops that were not eliminated.
|
|
|
d5df0a |
+ 0x8 extra: simd_not_eliminated Number of SIMD Move Elimination candidate uops that were not eliminated.
|
|
|
d5df0a |
+name:cpl_cycles type:exclusive default:0x1
|
|
|
d5df0a |
+ 0x1 extra: ring0 This event counts the unhalted core cycles during which the thread is in the ring 0 privileged mode.
|
|
|
d5df0a |
+ 0x2 extra: ring123 This event counts unhalted core cycles during which the thread is in rings 1, 2, or 3.
|
|
|
d5df0a |
+ 0x1 extra:edge ring0_trans This event counts when there is a transition from ring 1,2 or 3 to ring0.
|
|
|
d5df0a |
+name:tx_exec type:exclusive default:0x1
|
|
|
d5df0a |
+ 0x1 extra: misc1 Unfriendly TSX abort triggered by a flowmarker
|
|
|
d5df0a |
+ 0x2 extra: misc2 Unfriendly TSX abort triggered by a vzeroupper instruction
|
|
|
d5df0a |
+ 0x4 extra: misc3 Unfriendly TSX abort triggered by a nest count that is too deep
|
|
|
d5df0a |
+ 0x8 extra: misc4 RTM region detected inside HLE
|
|
|
d5df0a |
+ 0x10 extra: misc5 # HLE inside HLE+
|
|
|
d5df0a |
+name:rs_events type:exclusive default:0x1
|
|
|
d5df0a |
+ 0x1 extra: empty_cycles This event counts cycles during which the reservation station (RS) is empty for the thread. Note: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.
|
|
|
d5df0a |
+ 0x1 extra:inv,edge empty_end Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.
|
|
|
d5df0a |
+name:offcore_requests_outstanding type:exclusive default:0x1
|
|
|
d5df0a |
+ 0x1 extra: demand_data_rd This event counts the number of offcore outstanding Demand Data Read transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor. See the corresponding Umask under OFFCORE_REQUESTS. Note: A prefetch promoted to Demand is counted from the promotion point.
|
|
|
d5df0a |
+ 0x2 extra: demand_code_rd This event counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The "Offcore outstanding" state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.
|
|
|
d5df0a |
+ 0x4 extra: demand_rfo This event counts the number of offcore outstanding RFO (store) transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.
|
|
|
d5df0a |
+ 0x8 extra: all_data_rd This event counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.
|
|
|
d5df0a |
+ 0x1 extra: cycles_with_demand_data_rd This event counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).
|
|
|
d5df0a |
+ 0x8 extra: cycles_with_data_rd This event counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.
|
|
|
d5df0a |
+name:lock_cycles type:exclusive default:0x1
|
|
|
d5df0a |
+ 0x1 extra: split_lock_uc_lock_duration This event counts cycles in which the L1 and L2 are locked due to a UC lock or split lock. A lock is asserted in case of locked memory access, due to noncacheable memory, locked operation that spans two cache lines, or a page walk from the noncacheable page table. L1D and L2 locks have a very high performance penalty and it is highly recommended to avoid such access.
|
|
|
d5df0a |
+ 0x2 extra: cache_lock_duration This event counts the number of cycles when the L1D is locked. It is a superset of the 0x1 mask (BUS_LOCK_CLOCKS.BUS_LOCK_DURATION).
|
|
|
d5df0a |
+name:idq type:exclusive default:0x2
|
|
|
d5df0a |
+ 0x2 extra: empty This counts the number of cycles that the instruction decoder queue is empty and can indicate that the application may be bound in the front end. It does not determine whether there are uops being delivered to the Alloc stage since uops can be delivered by bypass skipping the Instruction Decode Queue (IDQ) when it is empty.
|
|
|
d5df0a |
+ 0x4 extra: mite_uops This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may "bypass" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).
|
|
|
d5df0a |
+ 0x8 extra: dsb_uops This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may "bypass" the IDQ.
|
|
|
d5df0a |
+ 0x10 extra: ms_dsb_uops This event counts the number of uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may "bypass" the IDQ.
|
|
|
d5df0a |
+ 0x20 extra: ms_mite_uops This event counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may "bypass" the IDQ.
+ 0x30 extra: ms_uops This event counts the total number of uops delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may "bypass" the IDQ. Uops may be initiated by Decode Stream Buffer (DSB) or MITE.
+ 0x30 extra: ms_cycles This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may "bypass" the IDQ. Uops may be initiated by Decode Stream Buffer (DSB) or MITE.
+ 0x4 extra: mite_cycles This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may "bypass" the IDQ.
|
|
|
d5df0a |
+ 0x8 extra: dsb_cycles This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may "bypass" the IDQ.
|
|
|
d5df0a |
+ 0x10 extra: ms_dsb_cycles This event counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may "bypass" the IDQ.
|
|
|
d5df0a |
+ 0x10 extra:edge ms_dsb_occur This event counts the number of deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while the Microcode Sequencer (MS) is busy. Counting includes uops that may "bypass" the IDQ.
|
|
|
d5df0a |
+ 0x18 extra: all_dsb_cycles_4_uops This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may "bypass" the IDQ.
|
|
|
d5df0a |
+ 0x18 extra: all_dsb_cycles_any_uops This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may "bypass" the IDQ.
|
|
|
d5df0a |
+ 0x24 extra: all_mite_cycles_4_uops This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may "bypass" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).
|
|
|
d5df0a |
+ 0x24 extra: all_mite_cycles_any_uops This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may "bypass" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).
|
|
|
d5df0a |
+ 0x3c extra: mite_all_uops This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may "bypass" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).
|
|
|
d5df0a |
+ 0x30 extra:edge ms_switches Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer
|
|
|
d5df0a |
+name:itlb_misses type:exclusive default:0x1
|
|
|
d5df0a |
+ 0x1 extra: miss_causes_a_walk This event counts store misses in all DTLB levels that cause page walks of any page size (4K/2M/4M/1G).
|
|
|
d5df0a |
+ 0x2 extra: walk_completed_4k This event counts store misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.
|
|
|
d5df0a |
+ 0x10 extra: walk_duration This event counts the number of cycles while PMH is busy with the page walk.
|
|
|
d5df0a |
+ 0x20 extra: stlb_hit_4k Core misses that miss the DTLB and hit the STLB (4K)
|
|
|
d5df0a |
+ 0xe extra: walk_completed Misses in all ITLB levels that cause completed page walks
|
|
|
d5df0a |
+ 0x60 extra: stlb_hit Operations that miss the first ITLB level but hit the second and do not cause any page walks
|
|
|
d5df0a |
+name:br_inst_exec type:exclusive default:0xff
|
|
|
d5df0a |
+ 0xff extra: all_branches This event counts both taken and not taken speculative and retired branch instructions.
|
|
|
d5df0a |
+ 0x41 extra: nontaken_conditional This event counts not taken macro-conditional branch instructions.
|
|
|
d5df0a |
+ 0x81 extra: taken_conditional This event counts taken speculative and retired macro-conditional branch instructions.
|
|
|
d5df0a |
+ 0x82 extra: taken_direct_jump This event counts taken speculative and retired macro-conditional branch instructions excluding calls and indirect branches.
|
|
|
d5df0a |
+ 0x84 extra: taken_indirect_jump_non_call_ret This event counts taken speculative and retired indirect branches excluding calls and return branches.
|
|
|
d5df0a |
+ 0x88 extra: taken_indirect_near_return This event counts taken speculative and retired indirect branches that have a return mnemonic.
|
|
|
d5df0a |
+ 0x90 extra: taken_direct_near_call This event counts taken speculative and retired direct near calls.
|
|
|
d5df0a |
+ 0xa0 extra: taken_indirect_near_call This event counts taken speculative and retired indirect calls including both register and memory indirect.
|
|
|
d5df0a |
+ 0xc1 extra: all_conditional This event counts both taken and not taken speculative and retired macro-conditional branch instructions.
|
|
|
d5df0a |
+ 0xc2 extra: all_direct_jmp This event counts both taken and not taken speculative and retired macro-unconditional branch instructions, excluding calls and indirects.
|
|
|
d5df0a |
+ 0xc4 extra: all_indirect_jump_non_call_ret This event counts both taken and not taken speculative and retired indirect branches excluding calls and return branches.
|
|
|
d5df0a |
+ 0xc8 extra: all_indirect_near_return This event counts both taken and not taken speculative and retired indirect branches that have a return mnemonic.
|
|
|
d5df0a |
+ 0xd0 extra: all_direct_near_call This event counts both taken and not taken speculative and retired direct near calls.
|
|
|
d5df0a |
+name:br_misp_exec type:exclusive default:0xff
|
|
|
d5df0a |
+ 0xff extra: all_branches This event counts both taken and not taken speculative and retired mispredicted branch instructions.
|
|
|
d5df0a |
+ 0x41 extra: nontaken_conditional This event counts not taken speculative and retired mispredicted macro conditional branch instructions.
|
|
|
d5df0a |
+ 0x81 extra: taken_conditional This event counts taken speculative and retired mispredicted macro conditional branch instructions.
|
|
|
d5df0a |
+ 0x84 extra: taken_indirect_jump_non_call_ret This event counts taken speculative and retired mispredicted indirect branches excluding calls and returns.
|
|
|
d5df0a |
+ 0xc1 extra: all_conditional This event counts both taken and not taken speculative and retired mispredicted macro conditional branch instructions.
|
|
|
d5df0a |
+ 0xc4 extra: all_indirect_jump_non_call_ret This event counts both taken and not taken mispredicted indirect branches excluding calls and returns.
|
|
|
d5df0a |
+ 0xa0 extra: taken_indirect_near_call Taken speculative and retired mispredicted indirect calls
|
|
|
d5df0a |
+name:idq_uops_not_delivered type:exclusive default:0x1
+ 0x1 extra: core This event counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding "4 - x" when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when: a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread; b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions); c. Instruction Decode Queue (IDQ) delivers four uops.
+ 0x1 extra: cycles_0_uops_deliv_core This event counts, on the per-thread basis, cycles when no uops are delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core =4.
+ 0x1 extra: cycles_le_1_uop_deliv_core This event counts, on the per-thread basis, cycles when less than 1 uop is delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core >=3.
+ 0x1 extra: cycles_le_2_uop_deliv_core Cycles with less than 2 uops delivered by the front end
+ 0x1 extra: cycles_le_3_uop_deliv_core Cycles with less than 3 uops delivered by the front end
+ 0x1 extra:inv cycles_fe_was_ok Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.
+name:uops_executed_port type:exclusive default:0x1
+ 0x1 extra:any port_0_core Cycles per core when uops are executed in port 0
+ 0x2 extra:any port_1_core Cycles per core when uops are executed in port 1
+ 0x4 extra:any port_2_core Cycles per core when uops are dispatched to port 2
+ 0x8 extra:any port_3_core Cycles per core when uops are dispatched to port 3
+ 0x10 extra:any port_4_core Cycles per core when uops are executed in port 4
+ 0x20 extra:any port_5_core Cycles per core when uops are executed in port 5
+ 0x40 extra:any port_6_core Cycles per core when uops are executed in port 6
+ 0x80 extra:any port_7_core Cycles per core when uops are dispatched to port 7
+ 0x1 extra: port_0 This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.
|
|
|
d5df0a |
+ 0x2 extra: port_1 This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.
|
|
|
d5df0a |
+ 0x4 extra: port_2 This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.
|
|
|
d5df0a |
+ 0x8 extra: port_3 This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.
|
|
|
d5df0a |
+ 0x10 extra: port_4 This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.
|
|
|
d5df0a |
+ 0x20 extra: port_5 This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.
|
|
|
d5df0a |
+ 0x40 extra: port_6 This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.
|
|
|
d5df0a |
+ 0x80 extra: port_7 This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.
|
|
|
d5df0a |
+name:uops_dispatched_port type:exclusive default:0x1
|
|
|
d5df0a |
+ 0x1 extra: port_0 This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.
|
|
|
d5df0a |
+ 0x2 extra: port_1 This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.
|
|
|
d5df0a |
+ 0x4 extra: port_2 This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.
|
|
|
d5df0a |
+ 0x8 extra: port_3 This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.
|
|
|
d5df0a |
+ 0x10 extra: port_4 This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.
|
|
|
d5df0a |
+ 0x20 extra: port_5 This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.
|
|
|
d5df0a |
+ 0x40 extra: port_6 This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.
|
|
|
d5df0a |
+ 0x80 extra: port_7 This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.
|
|
|
d5df0a |
+name:resource_stalls type:exclusive default:0x1
|
|
|
d5df0a |
+ 0x1 extra: any This event counts resource-related stall cycles. Reasons for stalls can be as follows: - *any* u-arch structure got full (LB, SB, RS, ROB, BOB, LM, Physical Register Reclaim Table (PRRT), or Physical History Table (PHT) slots) - *any* u-arch structure got empty (like INT/SIMD FreeLists) - FPU control word (FPCW), MXCSR and others. This counts cycles that the pipeline backend blocked uop delivery from the front end.
|
|
|
d5df0a |
+ 0x4 extra: rs This event counts stall cycles caused by absence of eligible entries in the reservation station (RS). This may result from RS overflow, or from RS deallocation because of the RS array Write Port allocation scheme (each RS entry has two write ports instead of four. As a result, empty entries could not be used, although RS is not really full). This counts cycles that the pipeline backend blocked uop delivery from the front end.
|
|
|
d5df0a |
+ 0x8 extra: sb This event counts stall cycles caused by the store buffer (SB) overflow (excluding draining from synch). This counts cycles that the pipeline backend blocked uop delivery from the front end.
|
|
|
d5df0a |
+ 0x10 extra: rob This event counts ROB full stall cycles. This counts cycles that the pipeline backend blocked uop delivery from the front end.
|
|
|
d5df0a |
+name:cycle_activity type:exclusive default:0x1
|
|
|
d5df0a |
+ 0x1 extra: cycles_l2_pending Counts number of cycles the CPU has at least one pending demand* load request missing the L2 cache.
|
|
|
d5df0a |
+ 0x8 extra: cycles_l1d_pending Counts number of cycles the CPU has at least one pending demand load request missing the L1 data cache.
|
|
|
d5df0a |
+ 0x2 extra: cycles_ldm_pending Counts number of cycles the CPU has at least one pending demand load request (that is cycles with non-completed load waiting for its data from memory subsystem)
|
|
|
d5df0a |
+ 0x4 extra: cycles_no_execute Counts number of cycles nothing is executed on any execution port.
|
|
|
d5df0a |
+ 0x5 extra: stalls_l2_pending Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand* load request missing the L2 cache. (as a footprint) * includes also L1 HW prefetch requests that may or may not be required by demands
|
|
|
d5df0a |
+ 0x6 extra: stalls_ldm_pending Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand load request.
|
|
|
d5df0a |
+ 0xc extra: stalls_l1d_pending Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand load request missing the L1 data cache.
|
|
|
d5df0a |
+ 0x8 extra: cycles_l1d_miss Cycles while L1 cache miss demand load is outstanding.
|
|
|
d5df0a |
+ 0x1 extra: cycles_l2_miss Cycles while L2 cache miss demand load is outstanding.
|
|
|
d5df0a |
+ 0x2 extra: cycles_mem_any Cycles while memory subsystem has an outstanding load.
|
|
|
d5df0a |
+ 0x4 extra: stalls_total Total execution stalls.
|
|
|
d5df0a |
+ 0xc extra: stalls_l1d_miss Execution stalls while L1 cache miss demand load is outstanding.
|
|
|
d5df0a |
+ 0x5 extra: stalls_l2_miss Execution stalls while L2 cache miss demand load is outstanding.
|
|
|
d5df0a |
+ 0x6 extra: stalls_mem_any Execution stalls while memory subsystem has an outstanding load.
|
|
|
d5df0a |
+name:lsd type:exclusive default:0x1
|
|
|
d5df0a |
+ 0x1 extra: uops Number of Uops delivered by the LSD. Read more on LSD under LSD_REPLAY.REPLAY
|
|
|
d5df0a |
+ 0x1 extra: cycles_4_uops Cycles 4 Uops delivered by the LSD, but didn't come from the decoder
|
|
|
d5df0a |
+ 0x1 extra: cycles_active Cycles Uops delivered by the LSD, but didn't come from the decoder
|
|
|
d5df0a |
+name:offcore_requests type:exclusive default:0x1
|
|
|
d5df0a |
+ 0x1 extra: demand_data_rd This event counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.
|
|
|
d5df0a |
+ 0x2 extra: demand_code_rd This event counts both cacheable and noncacheable code read requests.
+ 0x4 extra: demand_rfo This event counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.
|
|
|
d5df0a |
+ 0x8 extra: all_data_rd This event counts the demand and prefetch data reads. All Core Data Reads include cacheable "Demands" and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.
|
|
|
d5df0a |
+name:uops_executed type:exclusive default:0x1
|
|
|
d5df0a |
+ 0x1 extra: thread Number of uops to be executed per-thread each cycle.
|
|
|
d5df0a |
+ 0x2 extra: core Number of uops executed from any thread
|
|
|
d5df0a |
+ 0x1 extra:inv stall_cycles This event counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.
|
|
|
d5df0a |
+ 0x1 extra: cycles_ge_1_uop_exec Cycles where at least 1 uop was executed per-thread
|
|
|
d5df0a |
+ 0x1 extra: cycles_ge_2_uops_exec Cycles where at least 2 uops were executed per-thread
|
|
|
d5df0a |
+ 0x1 extra: cycles_ge_3_uops_exec Cycles where at least 3 uops were executed per-thread
|
|
|
d5df0a |
+ 0x1 extra: cycles_ge_4_uops_exec Cycles where at least 4 uops were executed per-thread
|
|
|
d5df0a |
+name:page_walker_loads type:exclusive default:0x11
|
|
|
d5df0a |
+ 0x11 extra: dtlb_l1 Number of DTLB page walker hits in the L1+FB
|
|
|
d5df0a |
+ 0x21 extra: itlb_l1 Number of ITLB page walker hits in the L1+FB
|
|
|
d5df0a |
+ 0x12 extra: dtlb_l2 Number of DTLB page walker hits in the L2
|
|
|
d5df0a |
+ 0x22 extra: itlb_l2 Number of ITLB page walker hits in the L2
|
|
|
d5df0a |
+ 0x14 extra: dtlb_l3 Number of DTLB page walker hits in the L3 + XSNP
|
|
|
d5df0a |
+ 0x24 extra: itlb_l3 Number of ITLB page walker hits in the L3 + XSNP
|
|
|
d5df0a |
+ 0x18 extra: dtlb_memory Number of DTLB page walker hits in Memory
|
|
|
d5df0a |
+name:inst_retired type:exclusive default:0x2
|
|
|
d5df0a |
+ 0x2 extra: x87 This is a non-precise version (that is, does not use PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.
|
|
|
d5df0a |
+ 0x1 extra: prec_dist This is a precise version (that is, uses PEBS) of the event that counts instructions retired.
|
|
|
d5df0a |
+name:other_assists type:exclusive default:0x8
|
|
|
d5df0a |
+ 0x8 extra: avx_to_sse This is a non-precise version (that is, does not use PEBS) of the event that counts the number of transitions from AVX-256 to legacy SSE when penalty is applicable.
|
|
|
d5df0a |
+ 0x10 extra: sse_to_avx This is a non-precise version (that is, does not use PEBS) of the event that counts the number of transitions from legacy SSE to AVX-256 when penalty is applicable.
|
|
|
d5df0a |
+ 0x40 extra: any_wb_assist Number of times any microcode assist is invoked by HW upon uop writeback.
|
|
|
d5df0a |
+name:uops_retired type:exclusive default:0x1
|
|
|
d5df0a |
+ 0x1 extra: all This is a non-precise version (that is, does not use PEBS) of the event that counts all actually retired uops. Counting increments by two for micro-fused uops, and by one for macro-fused and other uops. Maximal increment value for one cycle is eight.
|
|
|
d5df0a |
+ 0x2 extra: retire_slots This is a non-precise version (that is, does not use PEBS) of the event that counts the number of retirement slots used.
|
|
|
d5df0a |
+ 0x1 extra:inv stall_cycles This is a non-precise version (that is, does not use PEBS) of the event that counts cycles without actually retired uops.
|
|
|
d5df0a |
+ 0x1 extra:inv total_cycles Number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.
|
|
|
d5df0a |
+name:machine_clears type:exclusive default:0x1
|
|
|
d5df0a |
+ 0x1 extra: cycles This event counts both thread-specific (TS) and all-thread (AT) nukes.
|
|
|
d5df0a |
+ 0x2 extra: memory_ordering This event counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from one of the following: 1. memory disambiguation, 2. external snoop, or 3. cross SMT-HW-thread snoop (stores) hitting load buffer.
|
|
|
d5df0a |
+ 0x4 extra: smc This event counts self-modifying code (SMC) detected, which causes a machine clear.
|
|
|
d5df0a |
+ 0x20 extra: maskmov Maskmov false fault - counts number of time ucode passes through Maskmov flow due to instruction's mask being 0 while the flow was completed without raising a fault.
|
|
|
d5df0a |
+ 0x1 extra:edge count Number of machine clears (nukes) of any type.
|
|
|
d5df0a |
+name:br_inst_retired type:exclusive default:0x1
|
|
|
d5df0a |
+ 0x1 extra: conditional This is a non-precise version (that is, does not use PEBS) of the event that counts conditional branch instructions retired.
|
|
|
d5df0a |
+ 0x2 extra: near_call This is a non-precise version (that is, does not use PEBS) of the event that counts both direct and indirect near call instructions retired.
|
|
|
d5df0a |
+ 0x8 extra: near_return This is a non-precise version (that is, does not use PEBS) of the event that counts return instructions retired.
|
|
|
d5df0a |
+ 0x10 extra: not_taken This is a non-precise version (that is, does not use PEBS) of the event that counts not taken branch instructions retired.
|
|
|
d5df0a |
+ 0x20 extra: near_taken This is a non-precise version (that is, does not use PEBS) of the event that counts taken branch instructions retired.
|
|
|
d5df0a |
+ 0x40 extra: far_branch This is a non-precise version (that is, does not use PEBS) of the event that counts far branch instructions retired.
|
|
|
d5df0a |
+ 0x4 extra: all_branches_pebs This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.
|
|
|
d5df0a |
+name:br_misp_retired type:exclusive default:0x1
|
|
|
d5df0a |
+ 0x1 extra: conditional This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted conditional branch instructions retired.
|
|
|
d5df0a |
+ 0x4 extra: all_branches_pebs This is a precise version of BR_MISP_RETIRED.ALL_BRANCHES that counts all mispredicted macro branch instructions retired.
|
|
|
d5df0a |
+ 0x20 extra: near_taken number of near branch instructions retired that were mispredicted and taken.
|
|
|
d5df0a |
+name:hle_retired type:exclusive default:0x1
|
|
|
d5df0a |
+ 0x1 extra: start Number of times we entered an HLE region; does not count nested transactions
|
|
|
d5df0a |
+ 0x2 extra: commit Number of times HLE commit succeeded
|
|
|
d5df0a |
+ 0x4 extra: aborted Number of times HLE abort was triggered
|
|
|
d5df0a |
+ 0x8 extra: aborted_misc1 Number of times an HLE abort was attributed to a Memory condition (See TSX_Memory event for additional details)
|
|
|
d5df0a |
+ 0x10 extra: aborted_misc2 Number of times the TSX watchdog signaled an HLE abort
|
|
|
d5df0a |
+ 0x20 extra: aborted_misc3 Number of times a disallowed operation caused an HLE abort
|
|
|
d5df0a |
+ 0x40 extra: aborted_misc4 Number of times HLE caused a fault
|
|
|
d5df0a |
+ 0x80 extra: aborted_misc5 Number of times HLE aborted and was not due to the abort conditions in subevents 3-6
|
|
|
d5df0a |
+name:rtm_retired type:exclusive default:0x1
|
|
|
d5df0a |
+ 0x1 extra: start Number of times we entered an RTM region; does not count nested transactions
|
|
|
d5df0a |
+ 0x2 extra: commit Number of times RTM commit succeeded
|
|
|
d5df0a |
+ 0x4 extra: aborted Number of times RTM abort was triggered
|
|
|
d5df0a |
+ 0x8 extra: aborted_misc1 Number of times an RTM abort was attributed to a Memory condition (See TSX_Memory event for additional details)
|
|
|
d5df0a |
+ 0x10 extra: aborted_misc2 Number of times the TSX watchdog signaled an RTM abort
|
|
|
d5df0a |
+ 0x20 extra: aborted_misc3 Number of times a disallowed operation caused an RTM abort
|
|
|
d5df0a |
+ 0x40 extra: aborted_misc4 Number of times a RTM caused a fault
|
|
|
d5df0a |
+ 0x80 extra: aborted_misc5 Number of times RTM aborted and was not due to the abort conditions in subevents 3-6
|
|
|
d5df0a |
+name:fp_assist type:exclusive default:0x1e
|
|
|
d5df0a |
+ 0x1e extra: any This event counts cycles with any input and output SSE or x87 FP assist. If an input and output assist are detected on the same cycle the event increments by 1.
|
|
|
d5df0a |
+ 0x2 extra: x87_output This is a non-precise version (that is, does not use PEBS) of the event that counts the number of x87 floating point (FP) micro-code assist (numeric overflow/underflow, inexact result) when the output value (destination register) is invalid.
|
|
|
d5df0a |
+ 0x4 extra: x87_input This is a non-precise version (that is, does not use PEBS) of the event that counts x87 floating point (FP) micro-code assist (invalid operation, denormal operand, SNaN operand) when the input value (one of the source operands to an FP instruction) is invalid.
|
|
|
d5df0a |
+ 0x8 extra: simd_output This is a non-precise version (that is, does not use PEBS) of the event that counts the number of SSE* floating point (FP) micro-code assist (numeric overflow/underflow) when the output value (destination register) is invalid. Counting covers only cases involving penalties that require micro-code assist intervention.
|
|
|
d5df0a |
+ 0x10 extra: simd_input This is a non-precise version (that is, does not use PEBS) of the event that counts any input SSE* FP assist - invalid operation, denormal operand, dividing by zero, SNaN operand. Counting includes only cases involving penalties that required micro-code assist intervention.
|
|
|
d5df0a |
+name:mem_uops_retired type:exclusive default:0x11
|
|
|
d5df0a |
+ 0x11 extra: stlb_miss_loads This is a non-precise version (that is, does not use PEBS) of the event that counts load uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.
|
|
|
d5df0a |
+ 0x12 extra: stlb_miss_stores This is a non-precise version (that is, does not use PEBS) of the event that counts store uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.
|
|
|
d5df0a |
+ 0x21 extra: lock_loads This is a non-precise version (that is, does not use PEBS) of the event that counts load uops with locked access retired to the architected path.
|
|
|
d5df0a |
+ 0x41 extra: split_loads This is a non-precise version (that is, does not use PEBS) of the event that counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).
|
|
|
d5df0a |
+ 0x42 extra: split_stores This is a non-precise version (that is, does not use PEBS) of the event that counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).
|
|
|
d5df0a |
+ 0x81 extra: all_loads This is a non-precise version (that is, does not use PEBS) of the event that counts load uops retired to the architected path with a filter on bits 0 and 1 applied. Note: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement. This event also counts SW prefetches.
+ 0x82 extra: all_stores This is a non-precise version (that is, does not use PEBS) of the event that counts store uops retired to the architected path with a filter on bits 0 and 1 applied. Note: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement.
+name:mem_load_uops_retired type:exclusive default:0x1
|
|
|
d5df0a |
+ 0x1 extra: l1_hit This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the nearest-level (L1) cache. Note: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load. This event also counts SW prefetches independent of the actual data source
|
|
|
d5df0a |
+ 0x2 extra: l2_hit This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the mid-level (L2) cache.
|
|
|
d5df0a |
+ 0x4 extra: l3_hit This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were data hits in the last-level (L3) cache without snoops required.
|
|
|
d5df0a |
+ 0x8 extra: l1_miss This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were misses in the nearest-level (L1) cache. Counting excludes unknown and UC data source.
|
|
|
d5df0a |
+ 0x10 extra: l2_miss This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were misses in the mid-level (L2) cache. Counting excludes unknown and UC data source.
|
|
|
d5df0a |
+ 0x20 extra: l3_miss Miss in last-level (L3) cache. Excludes Unknown data-source.
|
|
|
d5df0a |
+ 0x40 extra: hit_lfb This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were load uops missed L1 but hit a fill buffer due to a preceding miss to the same cache line with the data not ready. Note: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load.
|
|
|
d5df0a |
+name:mem_load_uops_l3_hit_retired type:exclusive default:0x1
|
|
|
d5df0a |
+ 0x1 extra: xsnp_miss This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were L3 Hit and a cross-core snoop missed in the on-pkg core cache.
|
|
|
d5df0a |
+ 0x2 extra: xsnp_hit This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were L3 hit and a cross-core snoop hit in the on-pkg core cache.
|
|
|
d5df0a |
+ 0x4 extra: xsnp_hitm This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were HitM responses from a core on same socket (shared L3).
|
|
|
d5df0a |
+ 0x8 extra: xsnp_none This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the last-level (L3) cache without snoops required.
|
|
|
d5df0a |
+name:l2_trans type:exclusive default:0x80
|
|
|
d5df0a |
+ 0x80 extra: all_requests This event counts transactions that access the L2 pipe including snoops, pagewalks, and so on.
|
|
|
d5df0a |
+ 0x1 extra: demand_data_rd This event counts Demand Data Read requests that access L2 cache, including rejects.
|
|
|
d5df0a |
+ 0x2 extra: rfo This event counts Read for Ownership (RFO) requests that access L2 cache.
|
|
|
d5df0a |
+ 0x4 extra: code_rd This event counts the number of L2 cache accesses when fetching instructions.
|
|
|
d5df0a |
+ 0x8 extra: all_pf This event counts L2 or L3 HW prefetches that access L2 cache including rejects.
|
|
|
d5df0a |
+ 0x10 extra: l1d_wb This event counts L1D writebacks that access L2 cache.
|
|
|
d5df0a |
+ 0x20 extra: l2_fill This event counts L2 fill requests that access L2 cache.
|
|
|
d5df0a |
+ 0x40 extra: l2_wb This event counts L2 writebacks that access L2 cache.
|
|
|
d5df0a |
+name:l2_lines_in type:exclusive default:0x7
|
|
|
d5df0a |
+ 0x7 extra: all This event counts the number of L2 cache lines filling the L2. Counting does not cover rejects.
|
|
|
d5df0a |
+ 0x1 extra: i This event counts the number of L2 cache lines in the Invalidate state filling the L2. Counting does not cover rejects.
|
|
|
d5df0a |
+ 0x2 extra: s This event counts the number of L2 cache lines in the Shared state filling the L2. Counting does not cover rejects.
|
|
|
d5df0a |
+ 0x4 extra: e This event counts the number of L2 cache lines in the Exclusive state filling the L2. Counting does not cover rejects.
|
|
|
d5df0a |
diff --git a/libop/op_cpu_type.c b/libop/op_cpu_type.c
|
|
|
d5df0a |
index 0cfb4ea..bce230a 100644
|
|
|
d5df0a |
--- a/libop/op_cpu_type.c
|
|
|
d5df0a |
+++ b/libop/op_cpu_type.c
|
|
|
d5df0a |
@@ -130,6 +130,7 @@ static struct cpu_descr const cpu_descrs[MAX_CPU_TYPE] = {
|
|
|
d5df0a |
{ "IBM Power Architected Events V1", "ppc64/architected_events_v1", CPU_PPC64_ARCH_V1, 6 },
|
|
|
d5df0a |
{ "ppc64 POWER8", "ppc64/power8", CPU_PPC64_POWER8, 6 },
|
|
|
d5df0a |
{ "Intel Silvermont microarchitecture", "i386/silvermont", CPU_SILVERMONT, 2 },
|
|
|
d5df0a |
+ { "Intel Broadwell microarchitecture", "i386/broadwell", CPU_BROADWELL, 4 },
|
|
|
d5df0a |
};
|
|
|
d5df0a |
|
|
|
d5df0a |
static size_t const nr_cpu_descrs = sizeof(cpu_descrs) / sizeof(struct cpu_descr);
|
|
|
d5df0a |
@@ -670,6 +671,7 @@ op_cpu op_cpu_base_type(op_cpu cpu_type)
|
|
|
d5df0a |
case CPU_ATOM:
|
|
|
d5df0a |
case CPU_NEHALEM:
|
|
|
d5df0a |
case CPU_HASWELL:
|
|
|
d5df0a |
+ case CPU_BROADWELL:
|
|
|
d5df0a |
case CPU_SILVERMONT:
|
|
|
d5df0a |
case CPU_WESTMERE:
|
|
|
d5df0a |
case CPU_SANDYBRIDGE:
|
|
|
d5df0a |
diff --git a/libop/op_cpu_type.h b/libop/op_cpu_type.h
|
|
|
d5df0a |
index 7c478ad..3754156 100644
|
|
|
d5df0a |
--- a/libop/op_cpu_type.h
|
|
|
d5df0a |
+++ b/libop/op_cpu_type.h
|
|
|
d5df0a |
@@ -110,6 +110,7 @@ typedef enum {
|
|
|
d5df0a |
CPU_PPC64_ARCH_V1, /** < IBM Power architected events version 1 */
|
|
|
d5df0a |
CPU_PPC64_POWER8, /**< ppc64 POWER8 family */
|
|
|
d5df0a |
CPU_SILVERMONT, /** < Intel Silvermont microarchitecture */
|
|
|
d5df0a |
+ CPU_BROADWELL, /** < Intel Broadwell (Core-M) microarchitecture */
|
|
|
d5df0a |
MAX_CPU_TYPE
|
|
|
d5df0a |
} op_cpu;
|
|
|
d5df0a |
|
|
|
d5df0a |
diff --git a/libop/op_events.c b/libop/op_events.c
|
|
|
d5df0a |
index 968ff04..9c27e6c 100644
|
|
|
d5df0a |
--- a/libop/op_events.c
|
|
|
d5df0a |
+++ b/libop/op_events.c
|
|
|
d5df0a |
@@ -1201,6 +1201,7 @@ void op_default_event(op_cpu cpu_type, struct op_default_event_descr * descr)
|
|
|
d5df0a |
case CPU_CORE_I7:
|
|
|
d5df0a |
case CPU_NEHALEM:
|
|
|
d5df0a |
case CPU_HASWELL:
|
|
|
d5df0a |
+ case CPU_BROADWELL:
|
|
|
d5df0a |
case CPU_SILVERMONT:
|
|
|
d5df0a |
case CPU_WESTMERE:
|
|
|
d5df0a |
case CPU_SANDYBRIDGE:
|
|
|
d5df0a |
diff --git a/libop/op_hw_specific.h b/libop/op_hw_specific.h
|
|
|
d5df0a |
index e86dcae..1d39692 100644
|
|
|
d5df0a |
--- a/libop/op_hw_specific.h
|
|
|
d5df0a |
+++ b/libop/op_hw_specific.h
|
|
|
d5df0a |
@@ -148,8 +148,11 @@ static inline op_cpu op_cpu_specific_type(op_cpu cpu_type)
|
|
|
d5df0a |
case 0x3f:
|
|
|
d5df0a |
case 0x45:
|
|
|
d5df0a |
case 0x46:
|
|
|
d5df0a |
- case 0x47:
|
|
|
d5df0a |
return CPU_HASWELL;
|
|
|
d5df0a |
+ case 0x3d:
|
|
|
d5df0a |
+ case 0x47:
|
|
|
d5df0a |
+ case 0x4f:
|
|
|
d5df0a |
+ return CPU_BROADWELL;
|
|
|
d5df0a |
case 0x37:
|
|
|
d5df0a |
case 0x4d:
|
|
|
d5df0a |
return CPU_SILVERMONT;
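
The model numbers added here are matched against the CPUID display model that op_cpu_specific_type() switches on. As a rough, self-contained illustration — the helper below is hypothetical, not oprofile code; it only follows the documented CPUID leaf-1 field layout — the three Broadwell models would be recognized like this:

#include <stdio.h>
#include <cpuid.h>		/* GCC/clang __get_cpuid() wrapper */

/* Hypothetical helper: compute the display model (model bits 4-7 plus
 * extended model bits 16-19 for family 6/15). */
static unsigned broadwell_display_model(void)
{
	unsigned eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 0;

	unsigned family = (eax >> 8) & 0xf;
	unsigned model = (eax >> 4) & 0xf;

	if (family == 0x6 || family == 0xf)
		model += ((eax >> 16) & 0xf) << 4;

	return model;
}

int main(void)
{
	switch (broadwell_display_model()) {
	case 0x3d:	/* Broadwell Core-M class parts */
	case 0x47:	/* the model previously listed under Haswell */
	case 0x4f:	/* assumed to be the server variant */
		puts("i386/broadwell event files would be used");
		break;
	default:
		puts("not a Broadwell model");
	}
	return 0;
}
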
|
|
|
d5df0a |
diff --git a/utils/ophelp.c b/utils/ophelp.c
|
|
|
d5df0a |
index 35f47bc..bf3fbcb 100644
|
|
|
d5df0a |
--- a/utils/ophelp.c
|
|
|
d5df0a |
+++ b/utils/ophelp.c
|
|
|
d5df0a |
@@ -555,6 +555,7 @@ int main(int argc, char const * argv[])
|
|
|
d5df0a |
case CPU_CORE_I7:
|
|
|
d5df0a |
case CPU_NEHALEM:
|
|
|
d5df0a |
case CPU_HASWELL:
|
|
|
d5df0a |
+ case CPU_BROADWELL:
|
|
|
d5df0a |
case CPU_SILVERMONT:
|
|
|
d5df0a |
case CPU_WESTMERE:
|
|
|
d5df0a |
case CPU_SANDYBRIDGE:
|
|
|
d5df0a |
commit 5ce12ed9d20a91f19cba6e8ecadc478fcd57db6c
|
|
|
d5df0a |
Author: Andi Kleen <ak@linux.intel.com>
|
|
|
d5df0a |
Date: Thu Jul 17 12:45:09 2014 -0500
|
|
|
d5df0a |
|
|
|
d5df0a |
Fix some problems in the Broadwell events
|
|
|
d5df0a |
|
|
|
d5df0a |
Fix some problems in the previous commit of the Broadwell events.
|
|
|
d5df0a |
Most flags were missing due to a bug in the generation script.
|
|
|
d5df0a |
This patch also re-adds proper PEBS events.
|
|
|
d5df0a |
|
|
|
d5df0a |
Signed-off-by: Andi Kleen <ak@linux.intel.com>
|
|
|
d5df0a |
|
|
|
d5df0a |
diff --git a/events/i386/broadwell/events b/events/i386/broadwell/events
|
|
|
d5df0a |
index 6a4b388..ec55836 100644
|
|
|
d5df0a |
--- a/events/i386/broadwell/events
|
|
|
d5df0a |
+++ b/events/i386/broadwell/events
|
|
|
d5df0a |
@@ -58,7 +58,7 @@ event:0xcc counters:cpuid um:x20 minimum:2000003 name:rob_misc_events_lbr_insert
|
|
|
d5df0a |
event:0xd0 counters:0,1,2,3 um:mem_uops_retired minimum:2000003 name:mem_uops_retired :
|
|
|
d5df0a |
event:0xd1 counters:0,1,2,3 um:mem_load_uops_retired minimum:2000003 name:mem_load_uops_retired :
|
|
|
d5df0a |
event:0xd2 counters:0,1,2,3 um:mem_load_uops_l3_hit_retired minimum:100003 name:mem_load_uops_l3_hit_retired :
|
|
|
d5df0a |
-event:0xd3 counters:0,1,2,3 um:one minimum:100007 name:mem_load_uops_l3_miss_retired_local_dram :
|
|
|
d5df0a |
+event:0xd3 counters:0,1,2,3 um:mem_load_uops_l3_miss_retired minimum:100007 name:mem_load_uops_l3_miss_retired :
|
|
|
d5df0a |
event:0xe6 counters:cpuid um:x1f minimum:100003 name:baclears_any :
|
|
|
d5df0a |
event:0xf0 counters:cpuid um:l2_trans minimum:200003 name:l2_trans :
|
|
|
d5df0a |
event:0xf1 counters:cpuid um:l2_lines_in minimum:100003 name:l2_lines_in :
|
|
|
d5df0a |
diff --git a/events/i386/broadwell/unit_masks b/events/i386/broadwell/unit_masks
|
|
|
d5df0a |
index 470e9e9..0d6ccd5 100644
|
|
|
d5df0a |
--- a/events/i386/broadwell/unit_masks
|
|
|
d5df0a |
+++ b/events/i386/broadwell/unit_masks
|
|
|
d5df0a |
@@ -36,7 +36,7 @@ name:uops_issued type:exclusive default:0x1
|
|
|
d5df0a |
0x10 extra: flags_merge Number of flags-merge uops being allocated. Such uops considered perf sensitive; added by GSR u-arch.
|
|
|
d5df0a |
0x20 extra: slow_lea Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.
|
|
|
d5df0a |
0x40 extra: single_mul Number of Multiply packed/scalar single precision uops allocated
|
|
|
d5df0a |
- 0x1 extra:inv stall_cycles This event counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.
|
|
|
d5df0a |
+ 0x1 extra:cmask=1,inv stall_cycles This event counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.
|
|
|
d5df0a |
name:l2_rqsts type:exclusive default:0x21
|
|
|
d5df0a |
0x21 extra: demand_data_rd_miss This event counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.
|
|
|
d5df0a |
0x41 extra: demand_data_rd_hit This event counts the number of demand Data Read requests that hit L2 cache. Only not rejected loads are counted.
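
The flags restored in this commit (cmask=..., inv, edge) are not cosmetic: they select the counter-mask, invert and edge-detect fields of the hardware event-select encoding, so without them a derived sub-event such as stall_cycles silently degenerates into the base count. A minimal sketch of that encoding, assuming the architectural IA32_PERFEVTSELx layout from the Intel SDM — this is an illustration, not oprofile's own programming path:

#include <stdint.h>

/* Hedged sketch: raw PERFEVTSEL-style encoding.  Field positions are the
 * architectural ones (event 0-7, umask 8-15, edge 18, inv 23, cmask 24-31). */
static uint64_t evtsel(uint8_t event, uint8_t umask,
		       int edge, int inv, uint8_t cmask)
{
	uint64_t v = event;

	v |= (uint64_t)umask << 8;
	if (edge)
		v |= 1ULL << 18;
	if (inv)
		v |= 1ULL << 23;
	v |= (uint64_t)cmask << 24;
	return v;
}

/* uops_issued.stall_cycles from the hunk above: event 0x0e, umask 0x1,
 * cmask=1 with inv set, i.e. cycles in which fewer than one uop issued.
 * Dropping cmask=1 disables the threshold compare entirely, leaving a
 * plain uop count instead of a stall-cycle count. */
static uint64_t uops_issued_stall_cycles(void)
{
	return evtsel(0x0e, 0x01, /*edge=*/0, /*inv=*/1, /*cmask=*/1);
}
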
|
|
|
d5df0a |
@@ -56,7 +56,7 @@ name:l2_rqsts type:exclusive default:0x21
|
|
|
d5df0a |
0xff extra: references All L2 requests
|
|
|
d5df0a |
name:l1d_pend_miss type:exclusive default:0x1
|
|
|
d5df0a |
0x1 extra: pending This event counts duration of L1D miss outstanding, that is each cycle number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand; from the demand Hit FB, if it is allocated by hardware or software prefetch. Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.
|
|
|
d5df0a |
- 0x1 extra: pending_cycles This event counts duration of L1D miss outstanding in cycles.
|
|
|
d5df0a |
+ 0x1 extra:cmask=1 pending_cycles This event counts duration of L1D miss outstanding in cycles.
|
|
|
d5df0a |
name:dtlb_store_misses type:exclusive default:0x1
|
|
|
d5df0a |
0x1 extra: miss_causes_a_walk This event counts store misses in all DTLB levels that cause page walks of any page size (4K/2M/4M/1G).
|
|
|
d5df0a |
0x2 extra: walk_completed_4k This event counts store misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.
|
|
|
d5df0a |
@@ -80,7 +80,7 @@ name:move_elimination type:exclusive default:0x1
|
|
|
d5df0a |
name:cpl_cycles type:exclusive default:0x1
|
|
|
d5df0a |
0x1 extra: ring0 This event counts the unhalted core cycles during which the thread is in the ring 0 privileged mode.
|
|
|
d5df0a |
0x2 extra: ring123 This event counts unhalted core cycles during which the thread is in rings 1, 2, or 3.
|
|
|
d5df0a |
- 0x1 extra:edge ring0_trans This event counts when there is a transition from ring 1,2 or 3 to ring0.
|
|
|
d5df0a |
+ 0x1 extra:cmask=1,edge ring0_trans This event counts when there is a transition from ring 1,2 or 3 to ring0.
|
|
|
d5df0a |
name:tx_exec type:exclusive default:0x1
|
|
|
d5df0a |
0x1 extra: misc1 Unfriendly TSX abort triggered by a flowmarker
|
|
|
d5df0a |
0x2 extra: misc2 Unfriendly TSX abort triggered by a vzeroupper instruction
|
|
|
d5df0a |
@@ -89,14 +89,14 @@ name:tx_exec type:exclusive default:0x1
|
|
|
d5df0a |
0x10 extra: misc5 # HLE inside HLE+
|
|
|
d5df0a |
name:rs_events type:exclusive default:0x1
|
|
|
d5df0a |
0x1 extra: empty_cycles This event counts cycles during which the reservation station (RS) is empty for the thread. Note: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.
|
|
|
d5df0a |
- 0x1 extra:inv,edge empty_end Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.
|
|
|
d5df0a |
+ 0x1 extra:cmask=1,inv,edge empty_end Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.
|
|
|
d5df0a |
name:offcore_requests_outstanding type:exclusive default:0x1
|
|
|
d5df0a |
0x1 extra: demand_data_rd This event counts the number of offcore outstanding Demand Data Read transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor. See the corresponding Umask under OFFCORE_REQUESTS. Note: A prefetch promoted to Demand is counted from the promotion point.
|
|
|
d5df0a |
0x2 extra: demand_code_rd This event counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The "Offcore outstanding" state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.
|
|
|
d5df0a |
0x4 extra: demand_rfo This event counts the number of offcore outstanding RFO (store) transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.
|
|
|
d5df0a |
0x8 extra: all_data_rd This event counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.
|
|
|
d5df0a |
- 0x1 extra: cycles_with_demand_data_rd This event counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).
|
|
|
d5df0a |
- 0x8 extra: cycles_with_data_rd This event counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.
|
|
|
d5df0a |
+ 0x1 extra:cmask=1 cycles_with_demand_data_rd This event counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).
|
|
|
d5df0a |
+ 0x8 extra:cmask=1 cycles_with_data_rd This event counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.
|
|
|
d5df0a |
name:lock_cycles type:exclusive default:0x1
|
|
|
d5df0a |
0x1 extra: split_lock_uc_lock_duration This event counts cycles in which the L1 and L2 are locked due to a UC lock or split lock. A lock is asserted in case of locked memory access, due to noncacheable memory, locked operation that spans two cache lines, or a page walk from the noncacheable page table. L1D and L2 locks have a very high performance penalty and it is highly recommended to avoid such access.
|
|
|
d5df0a |
0x2 extra: cache_lock_duration This event counts the number of cycles when the L1D is locked. It is a superset of the 0x1 mask (BUS_LOCK_CLOCKS.BUS_LOCK_DURATION).
|
|
|
d5df0a |
@@ -107,17 +107,17 @@ name:idq type:exclusive default:0x2
|
|
|
d5df0a |
0x10 extra: ms_dsb_uops This event counts the number of uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may "bypass" the IDQ.
|
|
|
d5df0a |
0x20 extra: ms_mite_uops This event counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may "bypass" the IDQ.
|
|
|
d5df0a |
0x30 extra: ms_uops This event counts the total number of uops delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may "bypass" the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.
|
|
|
d5df0a |
- 0x30 extra: ms_cycles This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may "bypass" the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.
|
|
|
d5df0a |
- 0x4 extra: mite_cycles This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may "bypass" the IDQ.
|
|
|
d5df0a |
- 0x8 extra: dsb_cycles This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may "bypass" the IDQ.
|
|
|
d5df0a |
- 0x10 extra: ms_dsb_cycles This event counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may "bypass" the IDQ.
|
|
|
d5df0a |
- 0x10 extra:edge ms_dsb_occur This event counts the number of deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while the Microcode Sequencer (MS) is busy. Counting includes uops that may "bypass" the IDQ.
|
|
|
d5df0a |
- 0x18 extra: all_dsb_cycles_4_uops This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may "bypass" the IDQ.
|
|
|
d5df0a |
- 0x18 extra: all_dsb_cycles_any_uops This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may "bypass" the IDQ.
|
|
|
d5df0a |
- 0x24 extra: all_mite_cycles_4_uops This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may "bypass" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).
|
|
|
d5df0a |
- 0x24 extra: all_mite_cycles_any_uops This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may "bypass" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).
|
|
|
d5df0a |
+ 0x30 extra:cmask=1 ms_cycles This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may "bypass" the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.
|
|
|
d5df0a |
+ 0x4 extra:cmask=1 mite_cycles This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may "bypass" the IDQ.
|
|
|
d5df0a |
+ 0x8 extra:cmask=1 dsb_cycles This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may "bypass" the IDQ.
|
|
|
d5df0a |
+ 0x10 extra:cmask=1 ms_dsb_cycles This event counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may "bypass" the IDQ.
|
|
|
d5df0a |
+ 0x10 extra:cmask=1,edge ms_dsb_occur This event counts the number of deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while the Microcode Sequencer (MS) is busy. Counting includes uops that may "bypass" the IDQ.
|
|
|
d5df0a |
+ 0x18 extra:cmask=4 all_dsb_cycles_4_uops This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may "bypass" the IDQ.
|
|
|
d5df0a |
+ 0x18 extra:cmask=1 all_dsb_cycles_any_uops This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may "bypass" the IDQ.
|
|
|
d5df0a |
+ 0x24 extra:cmask=4 all_mite_cycles_4_uops This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may "bypass" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).
|
|
|
d5df0a |
+ 0x24 extra:cmask=1 all_mite_cycles_any_uops This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may "bypass" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).
|
|
|
d5df0a |
0x3c extra: mite_all_uops This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may "bypass" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).
|
|
|
d5df0a |
- 0x30 extra:edge ms_switches Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer
|
|
|
d5df0a |
+ 0x30 extra:cmask=1,edge ms_switches Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer
|
|
|
d5df0a |
name:itlb_misses type:exclusive default:0x1
|
|
|
d5df0a |
0x1 extra: miss_causes_a_walk This event counts store misses in all DTLB levels that cause page walks of any page size (4K/2M/4M/1G).
|
|
|
d5df0a |
0x2 extra: walk_completed_4k This event counts store misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.
|
|
|
d5df0a |
@@ -149,11 +149,11 @@ name:br_misp_exec type:exclusive default:0xff
|
|
|
d5df0a |
0xa0 extra: taken_indirect_near_call Taken speculative and retired mispredicted indirect calls
|
|
|
d5df0a |
name:idq_uops_not_delivered type:exclusive default:0x1
|
|
|
d5df0a |
0x1 extra: core This event counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding "4 - x" when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when: a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread; b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions); c. Instruction Decode Queue (IDQ) delivers four uops.
|
|
|
d5df0a |
- 0x1 extra: cycles_0_uops_deliv_core This event counts, on the per-thread basis, cycles when no uops are delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core =4.
|
|
|
d5df0a |
- 0x1 extra: cycles_le_1_uop_deliv_core This event counts, on the per-thread basis, cycles when less than 1 uop is delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core >=3.
|
|
|
d5df0a |
- 0x1 extra: cycles_le_2_uop_deliv_core Cycles with less than 2 uops delivered by the front end
|
|
|
d5df0a |
- 0x1 extra: cycles_le_3_uop_deliv_core Cycles with less than 3 uops delivered by the front end
|
|
|
d5df0a |
- 0x1 extra:inv cycles_fe_was_ok Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.
|
|
|
d5df0a |
+ 0x1 extra:cmask=4 cycles_0_uops_deliv_core This event counts, on the per-thread basis, cycles when no uops are delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core =4.
|
|
|
d5df0a |
+ 0x1 extra:cmask=3 cycles_le_1_uop_deliv_core This event counts, on the per-thread basis, cycles when less than 1 uop is delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core >=3.
|
|
|
d5df0a |
+ 0x1 extra:cmask=2 cycles_le_2_uop_deliv_core Cycles with less than 2 uops delivered by the front end
|
|
|
d5df0a |
+ 0x1 extra:cmask=1 cycles_le_3_uop_deliv_core Cycles with less than 3 uops delivered by the front end
|
|
|
d5df0a |
+ 0x1 extra:cmask=1,inv cycles_fe_was_ok Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.
|
|
|
d5df0a |
name:uops_executed_port type:exclusive default:0x1
|
|
|
d5df0a |
0x1 extra:any port_0_core Cycles per core when uops are exectuted in port 0
|
|
|
d5df0a |
0x2 extra:any port_1_core Cycles per core when uops are exectuted in port 1
|
|
|
d5df0a |
@@ -186,24 +186,24 @@ name:resource_stalls type:exclusive default:0x1
|
|
|
d5df0a |
0x8 extra: sb This event counts stall cycles caused by the store buffer (SB) overflow (excluding draining from synch). This counts cycles that the pipeline backend blocked uop delivery from the front end.
|
|
|
d5df0a |
0x10 extra: rob This event counts ROB full stall cycles. This counts cycles that the pipeline backend blocked uop delivery from the front end.
|
|
|
d5df0a |
name:cycle_activity type:exclusive default:0x1
|
|
|
d5df0a |
- 0x1 extra: cycles_l2_pending Counts number of cycles the CPU has at least one pending demand* load request missing the L2 cache.
|
|
|
d5df0a |
- 0x8 extra: cycles_l1d_pending Counts number of cycles the CPU has at least one pending demand load request missing the L1 data cache.
|
|
|
d5df0a |
- 0x2 extra: cycles_ldm_pending Counts number of cycles the CPU has at least one pending demand load request (that is cycles with non-completed load waiting for its data from memory subsystem)
|
|
|
d5df0a |
- 0x4 extra: cycles_no_execute Counts number of cycles nothing is executed on any execution port.
|
|
|
d5df0a |
- 0x5 extra: stalls_l2_pending Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand* load request missing the L2 cache. (as a footprint) * includes also L1 HW prefetch requests that may or may not be required by demands
|
|
|
d5df0a |
- 0x6 extra: stalls_ldm_pending Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand load request.
|
|
|
d5df0a |
- 0xc extra: stalls_l1d_pending Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand load request missing the L1 data cache.
|
|
|
d5df0a |
- 0x8 extra: cycles_l1d_miss Cycles while L1 cache miss demand load is outstanding.
|
|
|
d5df0a |
- 0x1 extra: cycles_l2_miss Cycles while L2 cache miss demand load is outstanding.
|
|
|
d5df0a |
- 0x2 extra: cycles_mem_any Cycles while memory subsystem has an outstanding load.
|
|
|
d5df0a |
- 0x4 extra: stalls_total Total execution stalls.
|
|
|
d5df0a |
- 0xc extra: stalls_l1d_miss Execution stalls while L1 cache miss demand load is outstanding.
|
|
|
d5df0a |
- 0x5 extra: stalls_l2_miss Execution stalls while L2 cache miss demand load is outstanding.
|
|
|
d5df0a |
- 0x6 extra: stalls_mem_any Execution stalls while memory subsystem has an outstanding load.
|
|
|
d5df0a |
+ 0x1 extra:cmask=1 cycles_l2_pending Counts number of cycles the CPU has at least one pending demand* load request missing the L2 cache.
|
|
|
d5df0a |
+ 0x8 extra:cmask=8 cycles_l1d_pending Counts number of cycles the CPU has at least one pending demand load request missing the L1 data cache.
|
|
|
d5df0a |
+ 0x2 extra:cmask=2 cycles_ldm_pending Counts number of cycles the CPU has at least one pending demand load request (that is cycles with non-completed load waiting for its data from memory subsystem)
|
|
|
d5df0a |
+ 0x4 extra:cmask=4 cycles_no_execute Counts number of cycles nothing is executed on any execution port.
|
|
|
d5df0a |
+ 0x5 extra:cmask=5 stalls_l2_pending Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand* load request missing the L2 cache. (as a footprint) * includes also L1 HW prefetch requests that may or may not be required by demands
|
|
|
d5df0a |
+ 0x6 extra:cmask=6 stalls_ldm_pending Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand load request.
|
|
|
d5df0a |
+ 0xc extra:cmask=c stalls_l1d_pending Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand load request missing the L1 data cache.
|
|
|
d5df0a |
+ 0x8 extra:cmask=8 cycles_l1d_miss Cycles while L1 cache miss demand load is outstanding.
|
|
|
d5df0a |
+ 0x1 extra:cmask=1 cycles_l2_miss Cycles while L2 cache miss demand load is outstanding.
|
|
|
d5df0a |
+ 0x2 extra:cmask=2 cycles_mem_any Cycles while memory subsystem has an outstanding load.
|
|
|
d5df0a |
+ 0x4 extra:cmask=4 stalls_total Total execution stalls.
|
|
|
d5df0a |
+ 0xc extra:cmask=c stalls_l1d_miss Execution stalls while L1 cache miss demand load is outstanding.
|
|
|
d5df0a |
+ 0x5 extra:cmask=5 stalls_l2_miss Execution stalls while L2 cache miss demand load is outstanding.
|
|
|
d5df0a |
+ 0x6 extra:cmask=6 stalls_mem_any Execution stalls while memory subsystem has an outstanding load.
|
|
|
d5df0a |
name:lsd type:exclusive default:0x1
|
|
|
d5df0a |
0x1 extra: uops Number of Uops delivered by the LSD. Read more on LSD under LSD_REPLAY.REPLAY
|
|
|
d5df0a |
- 0x1 extra: cycles_4_uops Cycles 4 Uops delivered by the LSD, but didn't come from the decoder
|
|
|
d5df0a |
- 0x1 extra: cycles_active Cycles Uops delivered by the LSD, but didn't come from the decoder
|
|
|
d5df0a |
+ 0x1 extra:cmask=4 cycles_4_uops Cycles 4 Uops delivered by the LSD, but didn't come from the decoder
|
|
|
d5df0a |
+ 0x1 extra:cmask=1 cycles_active Cycles Uops delivered by the LSD, but didn't come from the decoder
|
|
|
d5df0a |
name:offcore_requests type:exclusive default:0x1
|
|
|
d5df0a |
0x1 extra: demand_data_rd This event counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.
|
|
|
d5df0a |
0x2 extra: demand_code_rd This event counts both cacheable and noncachaeble code read requests.
|
|
|
d5df0a |
@@ -212,11 +212,11 @@ name:offcore_requests type:exclusive default:0x1
|
|
|
d5df0a |
name:uops_executed type:exclusive default:0x1
|
|
|
d5df0a |
0x1 extra: thread Number of uops to be executed per-thread each cycle.
|
|
|
d5df0a |
0x2 extra: core Number of uops executed from any thread
|
|
|
d5df0a |
- 0x1 extra:inv stall_cycles This event counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.
|
|
|
d5df0a |
- 0x1 extra: cycles_ge_1_uop_exec Cycles where at least 1 uop was executed per-thread
|
|
|
d5df0a |
- 0x1 extra: cycles_ge_2_uops_exec Cycles where at least 2 uops were executed per-thread
|
|
|
d5df0a |
- 0x1 extra: cycles_ge_3_uops_exec Cycles where at least 3 uops were executed per-thread
|
|
|
d5df0a |
- 0x1 extra: cycles_ge_4_uops_exec Cycles where at least 4 uops were executed per-thread
|
|
|
d5df0a |
+ 0x1 extra:cmask=1,inv stall_cycles This event counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.
|
|
|
d5df0a |
+ 0x1 extra:cmask=1 cycles_ge_1_uop_exec Cycles where at least 1 uop was executed per-thread
|
|
|
d5df0a |
+ 0x1 extra:cmask=2 cycles_ge_2_uops_exec Cycles where at least 2 uops were executed per-thread
|
|
|
d5df0a |
+ 0x1 extra:cmask=3 cycles_ge_3_uops_exec Cycles where at least 3 uops were executed per-thread
|
|
|
d5df0a |
+ 0x1 extra:cmask=4 cycles_ge_4_uops_exec Cycles where at least 4 uops were executed per-thread
|
|
|
d5df0a |
name:page_walker_loads type:exclusive default:0x11
|
|
|
d5df0a |
0x11 extra: dtlb_l1 Number of DTLB page walker hits in the L1+FB
|
|
|
d5df0a |
0x21 extra: itlb_l1 Number of ITLB page walker hits in the L1+FB
|
|
|
d5df0a |
@@ -227,38 +227,47 @@ name:page_walker_loads type:exclusive default:0x11
|
|
|
d5df0a |
0x18 extra: dtlb_memory Number of DTLB page walker hits in Memory
|
|
|
d5df0a |
name:inst_retired type:exclusive default:0x2
|
|
|
d5df0a |
0x2 extra: x87 This is a non-precise version (that is, does not use PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.
|
|
|
d5df0a |
- 0x1 extra: prec_dist This is a precise version (that is, uses PEBS) of the event that counts instructions retired.
|
|
|
d5df0a |
+ 0x1 extra:pebs prec_dist This is a precise version (that is, uses PEBS) of the event that counts instructions retired.
|
|
|
d5df0a |
name:other_assists type:exclusive default:0x8
|
|
|
d5df0a |
0x8 extra: avx_to_sse This is a non-precise version (that is, does not use PEBS) of the event that counts the number of transitions from AVX-256 to legacy SSE when penalty is applicable.
|
|
|
d5df0a |
0x10 extra: sse_to_avx This is a non-precise version (that is, does not use PEBS) of the event that counts the number of transitions from legacy SSE to AVX-256 when penalty is applicable.
|
|
|
d5df0a |
0x40 extra: any_wb_assist Number of times any microcode assist is invoked by HW upon uop writeback.
|
|
|
d5df0a |
name:uops_retired type:exclusive default:0x1
|
|
|
d5df0a |
0x1 extra: all This is a non-precise version (that is, does not use PEBS) of the event that counts all actually retired uops. Counting increments by two for micro-fused uops, and by one for macro-fused and other uops. Maximal increment value for one cycle is eight.
|
|
|
d5df0a |
+ 0x1 extra: all_pebs Counts all actually retired uops. Counting increments by two for micro-fused uops, and by one for macro-fused and other uops. Maximal increment value for one cycle is eight.
|
|
|
d5df0a |
0x2 extra: retire_slots This is a non-precise version (that is, does not use PEBS) of the event that counts the number of retirement slots used.
|
|
|
d5df0a |
- 0x1 extra:inv stall_cycles This is a non-precise version (that is, does not use PEBS) of the event that counts cycles without actually retired uops.
|
|
|
d5df0a |
- 0x1 extra:inv total_cycles Number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.
|
|
|
d5df0a |
+ 0x2 extra: retire_slots_pebs Counts the number of retirement slots used.
|
|
|
d5df0a |
+ 0x1 extra:cmask=1,inv stall_cycles This is a non-precise version (that is, does not use PEBS) of the event that counts cycles without actually retired uops.
|
|
|
d5df0a |
+ 0x1 extra:cmask=a,inv total_cycles Number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.
|
|
|
d5df0a |
name:machine_clears type:exclusive default:0x1
|
|
|
d5df0a |
0x1 extra: cycles This event counts both thread-specific (TS) and all-thread (AT) nukes.
|
|
|
d5df0a |
0x2 extra: memory_ordering This event counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from one of the following: 1. memory disambiguation, 2. external snoop, or 3. cross SMT-HW-thread snoop (stores) hitting load buffer.
|
|
|
d5df0a |
0x4 extra: smc This event counts self-modifying code (SMC) detected, which causes a machine clear.
|
|
|
d5df0a |
0x20 extra: maskmov Maskmov false fault - counts number of time ucode passes through Maskmov flow due to instruction's mask being 0 while the flow was completed without raising a fault.
|
|
|
d5df0a |
- 0x1 extra:edge count Number of machine clears (nukes) of any type.
|
|
|
d5df0a |
+ 0x1 extra:cmask=1,edge count Number of machine clears (nukes) of any type.
|
|
|
d5df0a |
name:br_inst_retired type:exclusive default:0x1
|
|
|
d5df0a |
0x1 extra: conditional This is a non-precise version (that is, does not use PEBS) of the event that counts conditional branch instructions retired.
|
|
|
d5df0a |
+ 0x1 extra: conditional_pebs Counts conditional branch instructions retired.
|
|
|
d5df0a |
0x2 extra: near_call This is a non-precise version (that is, does not use PEBS) of the event that counts both direct and indirect near call instructions retired.
|
|
|
d5df0a |
+ 0x2 extra: near_call_pebs Counts both direct and indirect near call instructions retired.
|
|
|
d5df0a |
0x8 extra: near_return This is a non-precise version (that is, does not use PEBS) of the event that counts return instructions retired.
|
|
|
d5df0a |
+ 0x8 extra: near_return_pebs Counts return instructions retired.
|
|
|
d5df0a |
0x10 extra: not_taken This is a non-precise version (that is, does not use PEBS) of the event that counts not taken branch instructions retired.
|
|
|
d5df0a |
0x20 extra: near_taken This is a non-precise version (that is, does not use PEBS) of the event that counts taken branch instructions retired.
|
|
|
d5df0a |
+ 0x20 extra: near_taken_pebs Counts taken branch instructions retired.
|
|
|
d5df0a |
0x40 extra: far_branch This is a non-precise version (that is, does not use PEBS) of the event that counts far branch instructions retired.
|
|
|
d5df0a |
- 0x4 extra: all_branches_pebs This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.
|
|
|
d5df0a |
+ 0x4 extra:pebs all_branches_pebs This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.
|
|
|
d5df0a |
name:br_misp_retired type:exclusive default:0x1
|
|
|
d5df0a |
0x1 extra: conditional This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted conditional branch instructions retired.
|
|
|
d5df0a |
- 0x4 extra: all_branches_pebs This is a precise version of BR_MISP_RETIRED.ALL_BRANCHES that counts all mispredicted macro branch instructions retired.
|
|
|
d5df0a |
+ 0x1 extra: conditional_pebs Counts mispredicted conditional branch instructions retired.
|
|
|
d5df0a |
+ 0x4 extra:pebs all_branches_pebs This is a precise version of BR_MISP_RETIRED.ALL_BRANCHES that counts all mispredicted macro branch instructions retired.
|
|
|
d5df0a |
0x20 extra: near_taken number of near branch instructions retired that were mispredicted and taken.
|
|
|
d5df0a |
+ 0x20 extra: near_taken_pebs number of near branch instructions retired that were mispredicted and taken.
|
|
|
d5df0a |
name:hle_retired type:exclusive default:0x1
|
|
|
d5df0a |
0x1 extra: start Number of times we entered an HLE region; does not count nested transactions
|
|
|
d5df0a |
0x2 extra: commit Number of times HLE commit succeeded
|
|
|
d5df0a |
0x4 extra: aborted Number of times HLE abort was triggered
|
|
|
d5df0a |
+ 0x4 extra: aborted_pebs Number of times HLE abort was triggered
|
|
|
d5df0a |
0x8 extra: aborted_misc1 Number of times an HLE abort was attributed to a Memory condition (See TSX_Memory event for additional details)
|
|
|
d5df0a |
0x10 extra: aborted_misc2 Number of times the TSX watchdog signaled an HLE abort
|
|
|
d5df0a |
0x20 extra: aborted_misc3 Number of times a disallowed operation caused an HLE abort
|
|
|
d5df0a |
@@ -268,38 +277,60 @@ name:rtm_retired type:exclusive default:0x1
|
|
|
d5df0a |
0x1 extra: start Number of times we entered an RTM region; does not count nested transactions
|
|
|
d5df0a |
0x2 extra: commit Number of times RTM commit succeeded
|
|
|
d5df0a |
0x4 extra: aborted Number of times RTM abort was triggered
|
|
|
d5df0a |
+ 0x4 extra: aborted_pebs Number of times RTM abort was triggered
|
|
|
d5df0a |
0x8 extra: aborted_misc1 Number of times an RTM abort was attributed to a Memory condition (See TSX_Memory event for additional details)
|
|
|
d5df0a |
0x10 extra: aborted_misc2 Number of times the TSX watchdog signaled an RTM abort
|
|
|
d5df0a |
0x20 extra: aborted_misc3 Number of times a disallowed operation caused an RTM abort
|
|
|
d5df0a |
0x40 extra: aborted_misc4 Number of times a RTM caused a fault
|
|
|
d5df0a |
0x80 extra: aborted_misc5 Number of times RTM aborted and was not due to the abort conditions in subevents 3-6
|
|
|
d5df0a |
name:fp_assist type:exclusive default:0x1e
|
|
|
d5df0a |
- 0x1e extra: any This event counts cycles with any input and output SSE or x87 FP assist. If an input and output assist are detected on the same cycle the event increments by 1.
|
|
|
d5df0a |
+ 0x1e extra:cmask=1 any This event counts cycles with any input and output SSE or x87 FP assist. If an input and output assist are detected on the same cycle the event increments by 1.
|
|
|
d5df0a |
0x2 extra: x87_output This is a non-precise version (that is, does not use PEBS) of the event that counts the number of x87 floating point (FP) micro-code assist (numeric overflow/underflow, inexact result) when the output value (destination register) is invalid.
|
|
|
d5df0a |
0x4 extra: x87_input This is a non-precise version (that is, does not use PEBS) of the event that counts x87 floating point (FP) micro-code assist (invalid operation, denormal operand, SNaN operand) when the input value (one of the source operands to an FP instruction) is invalid.
|
|
|
d5df0a |
0x8 extra: simd_output This is a non-precise version (that is, does not use PEBS) of the event that counts the number of SSE* floating point (FP) micro-code assist (numeric overflow/underflow) when the output value (destination register) is invalid. Counting covers only cases involving penalties that require micro-code assist intervention.
|
|
|
d5df0a |
0x10 extra: simd_input This is a non-precise version (that is, does not use PEBS) of the event that counts any input SSE* FP assist - invalid operation, denormal operand, dividing by zero, SNaN operand. Counting includes only cases involving penalties that required micro-code assist intervention.
|
|
|
d5df0a |
name:mem_uops_retired type:exclusive default:0x11
|
|
|
d5df0a |
0x11 extra: stlb_miss_loads This is a non-precise version (that is, does not use PEBS) of the event that counts load uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.
|
|
|
d5df0a |
+ 0x11 extra: stlb_miss_loads_pebs Counts load uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.
|
|
|
d5df0a |
0x12 extra: stlb_miss_stores This is a non-precise version (that is, does not use PEBS) of the event that counts store uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.
|
|
|
d5df0a |
+ 0x12 extra: stlb_miss_stores_pebs Counts store uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.
|
|
|
d5df0a |
0x21 extra: lock_loads This is a non-precise version (that is, does not use PEBS) of the event that counts load uops with locked access retired to the architected path.
|
|
|
d5df0a |
+ 0x21 extra: lock_loads_pebs Counts load uops with locked access retired to the architected path.
|
|
|
d5df0a |
0x41 extra: split_loads This is a non-precise version (that is, does not use PEBS) of the event that counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).
|
|
|
d5df0a |
+ 0x41 extra: split_loads_pebs Counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).
|
|
|
d5df0a |
0x42 extra: split_stores This is a non-precise version (that is, does not use PEBS) of the event that counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).
|
|
|
d5df0a |
+ 0x42 extra: split_stores_pebs Counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).
|
|
|
d5df0a |
0x81 extra: all_loads This is a non-precise version (that is, does not use PEBS) of the event that counts load uops retired to the architected path with a filter on bits 0 and 1 applied. Note: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement. This event also counts SW prefetches.
|
|
|
d5df0a |
+ 0x81 extra: all_loads_pebs Counts load uops retired to the architected path with a filter on bits 0 and 1 applied. Note: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement. This event also counts SW prefetches.
|
|
|
d5df0a |
0x82 extra: all_stores This is a non-precise version (that is, does not use PEBS) of the event that counts store uops retired to the architected path with a filter on bits 0 and 1 applied. Note: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement.
|
|
|
d5df0a |
+ 0x82 extra: all_stores_pebs Counts store uops retired to the architected path with a filter on bits 0 and 1 applied. Note: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement.
|
|
|
d5df0a |
name:mem_load_uops_retired type:exclusive default:0x1
|
|
|
d5df0a |
0x1 extra: l1_hit This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the nearest-level (L1) cache. Note: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load. This event also counts SW prefetches independent of the actual data source
|
|
|
d5df0a |
+ 0x1 extra: l1_hit_pebs Counts retired load uops which data sources were hits in the nearest-level (L1) cache. Note: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load. This event also counts SW prefetches independent of the actual data source
|
|
|
d5df0a |
0x2 extra: l2_hit This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the mid-level (L2) cache.
|
|
|
d5df0a |
+ 0x2 extra: l2_hit_pebs Counts retired load uops which data sources were hits in the mid-level (L2) cache.
|
|
|
d5df0a |
0x4 extra: l3_hit This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were data hits in the last-level (L3) cache without snoops required.
|
|
|
d5df0a |
+ 0x4 extra: l3_hit_pebs Counts retired load uops which data sources were data hits in the last-level (L3) cache without snoops required.
|
|
|
d5df0a |
0x8 extra: l1_miss This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were misses in the nearest-level (L1) cache. Counting excludes unknown and UC data source.
|
|
|
d5df0a |
+ 0x8 extra: l1_miss_pebs Counts retired load uops which data sources were misses in the nearest-level (L1) cache. Counting excludes unknown and UC data source.
|
|
|
d5df0a |
0x10 extra: l2_miss This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were misses in the mid-level (L2) cache. Counting excludes unknown and UC data source.
|
|
|
d5df0a |
+ 0x10 extra: l2_miss_pebs Counts retired load uops which data sources were misses in the mid-level (L2) cache. Counting excludes unknown and UC data source.
|
|
|
d5df0a |
0x20 extra: l3_miss Miss in last-level (L3) cache. Excludes Unknown data-source.
|
|
|
d5df0a |
+ 0x20 extra: l3_miss_pebs Miss in last-level (L3) cache. Excludes Unknown data-source.
|
|
|
d5df0a |
0x40 extra: hit_lfb This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were load uops missed L1 but hit a fill buffer due to a preceding miss to the same cache line with the data not ready. Note: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load.
|
|
|
d5df0a |
+ 0x40 extra: hit_lfb_pebs Counts retired load uops which data sources were load uops missed L1 but hit a fill buffer due to a preceding miss to the same cache line with the data not ready. Note: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load.
|
|
|
d5df0a |
name:mem_load_uops_l3_hit_retired type:exclusive default:0x1
|
|
|
d5df0a |
0x1 extra: xsnp_miss This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were L3 Hit and a cross-core snoop missed in the on-pkg core cache.
|
|
|
d5df0a |
+ 0x1 extra: xsnp_miss_pebs Counts retired load uops which data sources were L3 Hit and a cross-core snoop missed in the on-pkg core cache.
|
|
|
d5df0a |
0x2 extra: xsnp_hit This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were L3 hit and a cross-core snoop hit in the on-pkg core cache.
|
|
|
d5df0a |
+ 0x2 extra: xsnp_hit_pebs Counts retired load uops which data sources were L3 hit and a cross-core snoop hit in the on-pkg core cache.
|
|
|
d5df0a |
0x4 extra: xsnp_hitm This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were HitM responses from a core on same socket (shared L3).
|
|
|
d5df0a |
+ 0x4 extra: xsnp_hitm_pebs Counts retired load uops which data sources were HitM responses from a core on same socket (shared L3).
|
|
|
d5df0a |
0x8 extra: xsnp_none This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the last-level (L3) cache without snoops required.
|
|
|
d5df0a |
+ 0x8 extra: xsnp_none_pebs Counts retired load uops which data sources were hits in the last-level (L3) cache without snoops required.
|
|
|
d5df0a |
+name:mem_load_uops_l3_miss_retired type:exclusive default:0x1
|
|
|
d5df0a |
+ 0x1 extra: local_dram Retired load uop whose Data Source was: local DRAM either Snoop not needed or Snoop Miss (RspI)
|
|
|
d5df0a |
+ 0x1 extra: local_dram_pebs Retired load uop whose Data Source was: local DRAM either Snoop not needed or Snoop Miss (RspI)
|
|
|
d5df0a |
name:l2_trans type:exclusive default:0x80
|
|
|
d5df0a |
0x80 extra: all_requests This event counts transactions that access the L2 pipe including snoops, pagewalks, and so on.
|
|
|
d5df0a |
0x1 extra: demand_data_rd This event counts Demand Data Read requests that access L2 cache, including rejects.
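
The re-added precise events (the extra:pebs flag and the *_pebs aliases above) only pay off when oprofile sits on top of the kernel perf_events backend, which can arm PEBS for the sampled event. A hedged sketch of what that translation could look like — the helper name and the raw-config layout are assumptions for illustration, not operf's actual code:

#include <stdint.h>
#include <string.h>
#include <linux/perf_event.h>

/* Illustrative only: request a raw Intel event and, when the unit mask was
 * flagged "pebs", ask for a precise sample via precise_ip.  The kernel then
 * uses PEBS if the event supports it; the old in-kernel oprofile driver has
 * no equivalent. */
static void fill_raw_attr(struct perf_event_attr *attr,
			  unsigned event, unsigned umask, int want_pebs)
{
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);
	attr->type = PERF_TYPE_RAW;
	attr->config = (uint64_t)event | ((uint64_t)umask << 8);
	attr->sample_period = 100003;	/* one of the minimum counts used in the events file */
	if (want_pebs)
		attr->precise_ip = 1;	/* 0 = arbitrary skid, larger values request less skid */
}
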
|
|
|
d5df0a |
commit 893c18c2a2ba955bc77140bbd7696cc2d3f6e1dc
|
|
|
d5df0a |
Author: Andi Kleen <ak@linux.intel.com>
|
|
|
d5df0a |
Date: Thu Jul 17 12:55:42 2014 -0500
|
|
|
d5df0a |
|
|
|
d5df0a |
Improve error message for non-unique unit mask
|
|
|
d5df0a |
|
|
|
d5df0a |
For the case where the user does not specify a UM and the default UM
|
|
|
d5df0a |
is a non-unique hex value, the error message printed is the following:
|
|
|
d5df0a |
|
|
|
d5df0a |
Default unit mask not supported for this event.
|
|
|
d5df0a |
Please specify a unit mask by name, using the first word of the unit mask description.
|
|
|
d5df0a |
|
|
|
d5df0a |
For cases where the user wrongly specifies a non-unique hex value for a UM
|
|
|
d5df0a |
when they should have specified it by name, the message will be like the
|
|
|
d5df0a |
following example:
|
|
|
d5df0a |
|
|
|
d5df0a |
Unit mask (0x1) is non unique.
|
|
|
d5df0a |
Please specify a unit mask by name, using the first word of the unit mask description.
|
|
|
d5df0a |
|
|
|
d5df0a |
Signed-off-by: Andi Kleen <ak@linux.intel.com>
|
|
|
d5df0a |
|
|
|
d5df0a |
diff --git a/libop/op_events.c b/libop/op_events.c
|
|
|
d5df0a |
index 9c27e6c..b8900a5 100644
|
|
|
d5df0a |
--- a/libop/op_events.c
|
|
|
d5df0a |
+++ b/libop/op_events.c
|
|
|
d5df0a |
@@ -1389,6 +1389,7 @@ static void do_resolve_unit_mask(struct op_event *e,
|
|
|
d5df0a |
if (pe->unit_mask_name == NULL) {
|
|
|
d5df0a |
/* For numerical unit mask */
|
|
|
d5df0a |
int found = 0;
|
|
|
d5df0a |
+ int old_um_valid = pe->unit_mask_valid;
|
|
|
d5df0a |
|
|
|
d5df0a |
/* Use default unitmask if not specified */
|
|
|
d5df0a |
if (!pe->unit_mask_valid) {
|
|
|
d5df0a |
@@ -1404,9 +1405,16 @@ static void do_resolve_unit_mask(struct op_event *e,
|
|
|
d5df0a |
found++;
|
|
|
d5df0a |
}
|
|
|
d5df0a |
if (found > 1) {
|
|
|
d5df0a |
- fprintf(stderr, "Unit mask (0x%x) is non unique.\n"
|
|
|
d5df0a |
- "Please specify the unit mask using the first "
|
|
|
d5df0a |
- "word of the description\n",
|
|
|
d5df0a |
+ if (!old_um_valid)
|
|
|
d5df0a |
+ fprintf(stderr,
|
|
|
d5df0a |
+ "Default unit mask not supported for this event.\n"
|
|
|
d5df0a |
+ "Please speicfy a unit mask by name, using the first "
|
|
|
d5df0a |
+ "word of the unit mask description\n");
|
|
|
d5df0a |
+ else
|
|
|
d5df0a |
+ fprintf(stderr,
|
|
|
d5df0a |
+ "Unit mask (0x%x) is non unique.\n"
|
|
|
d5df0a |
+ "Please specify the unit mask using the first "
|
|
|
d5df0a |
+ "word of the description\n",
|
|
|
d5df0a |
pe->unit_mask);
|
|
|
d5df0a |
exit(EXIT_FAILURE);
|
|
|
d5df0a |
}
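
Condensed, the patched disambiguation amounts to remembering whether the numeric value came from the user or from the event's default before complaining about it. A minimal standalone sketch — the names and types here are illustrative, not libop's:

#include <stdio.h>
#include <stdlib.h>

struct um_choice { unsigned value; const char *name; };

/* Count how many unit masks share the requested value; on ambiguity pick
 * the message according to whether the value was user-supplied. */
static int resolve_numeric_um(const struct um_choice *um, int n,
			      unsigned value, int user_supplied)
{
	int i, found = 0, idx = -1;

	for (i = 0; i < n; i++) {
		if (um[i].value == value) {
			found++;
			idx = i;
		}
	}

	if (found > 1) {
		if (!user_supplied)
			fprintf(stderr, "Default unit mask not supported for this event.\n"
				"Please specify a unit mask by name, using the first "
				"word of the unit mask description\n");
		else
			fprintf(stderr, "Unit mask (0x%x) is non unique.\n"
				"Please specify the unit mask using the first "
				"word of the description\n", value);
		exit(EXIT_FAILURE);
	}
	return idx;
}
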
|
|
|
d5df0a |
commit 62e7814e8467230d8e283992ee6532d5f794359a
|
|
|
d5df0a |
Author: Michael Petlan <mpetlan@redhat.com>
|
|
|
d5df0a |
Date: Thu Jun 11 11:24:51 2015 -0400
|
|
|
d5df0a |
|
|
|
d5df0a |
Fix default unit masks for Intel Broadwell
|
|
|
d5df0a |
|
|
|
d5df0a |
Since some of the default unit masks for Intel Broadwell events cannot be
|
|
|
d5df0a |
uniquely specified by numbers, the defaults have had to be replaced
|
|
|
d5df0a |
by the named ones. When the affected events are used on Broadwell without
|
|
|
d5df0a |
specifying unit masks after applying this patch, the default masks
|
|
|
d5df0a |
are chosen correctly.
|
|
|
d5df0a |
|
|
|
d5df0a |
Signed-off-by: William Cohen <wcohen@redhat.com>
|
|
|
d5df0a |
|
|
|
d5df0a |
diff --git a/events/i386/broadwell/unit_masks b/events/i386/broadwell/unit_masks
|
|
|
d5df0a |
index 0d6ccd5..4e69363 100644
|
|
|
d5df0a |
--- a/events/i386/broadwell/unit_masks
|
|
|
d5df0a |
+++ b/events/i386/broadwell/unit_masks
|
|
|
d5df0a |
@@ -31,7 +31,7 @@ name:dtlb_load_misses type:exclusive default:0x1
|
|
|
d5df0a |
0x20 extra: stlb_hit_4k Load misses that miss the DTLB and hit the STLB (4K)
|
|
|
d5df0a |
0xe extra: walk_completed Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.
|
|
|
d5df0a |
0x60 extra: stlb_hit Load operations that miss the first DTLB level but hit the second and do not cause page walks
|
|
|
d5df0a |
-name:uops_issued type:exclusive default:0x1
|
|
|
d5df0a |
+name:uops_issued type:exclusive default:any
|
|
|
d5df0a |
0x1 extra: any This event counts the number of Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS).
|
|
|
d5df0a |
0x10 extra: flags_merge Number of flags-merge uops being allocated. Such uops considered perf sensitive; added by GSR u-arch.
|
|
|
d5df0a |
0x20 extra: slow_lea Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.
|
|
|
d5df0a |
@@ -54,7 +54,7 @@ name:l2_rqsts type:exclusive default:0x21
|
|
|
d5df0a |
0xe7 extra: all_demand_references Demand requests to L2 cache
|
|
|
d5df0a |
0x3f extra: miss All requests that miss L2 cache
|
|
|
d5df0a |
0xff extra: references All L2 requests
|
|
|
d5df0a |
-name:l1d_pend_miss type:exclusive default:0x1
|
|
|
d5df0a |
+name:l1d_pend_miss type:exclusive default:pending
|
|
|
d5df0a |
0x1 extra: pending This event counts duration of L1D miss outstanding, that is each cycle number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand; from the demand Hit FB, if it is allocated by hardware or software prefetch. Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.
|
|
|
d5df0a |
0x1 extra:cmask=1 pending_cycles This event counts duration of L1D miss outstanding in cycles.
|
|
|
d5df0a |
name:dtlb_store_misses type:exclusive default:0x1
|
|
|
d5df0a |
@@ -77,7 +77,7 @@ name:move_elimination type:exclusive default:0x1
|
|
|
d5df0a |
0x2 extra: simd_eliminated Number of SIMD Move Elimination candidate uops that were eliminated.
|
|
|
d5df0a |
0x4 extra: int_not_eliminated Number of integer Move Elimination candidate uops that were not eliminated.
|
|
|
d5df0a |
0x8 extra: simd_not_eliminated Number of SIMD Move Elimination candidate uops that were not eliminated.
|
|
|
d5df0a |
-name:cpl_cycles type:exclusive default:0x1
|
|
|
d5df0a |
+name:cpl_cycles type:exclusive default:ring0
|
|
|
d5df0a |
0x1 extra: ring0 This event counts the unhalted core cycles during which the thread is in the ring 0 privileged mode.
|
|
|
d5df0a |
0x2 extra: ring123 This event counts unhalted core cycles during which the thread is in rings 1, 2, or 3.
|
|
|
d5df0a |
0x1 extra:cmask=1,edge ring0_trans This event counts when there is a transition from ring 1,2 or 3 to ring0.
|
|
|
d5df0a |
@@ -87,10 +87,10 @@ name:tx_exec type:exclusive default:0x1
|
|
|
d5df0a |
0x4 extra: misc3 Unfriendly TSX abort triggered by a nest count that is too deep
|
|
|
d5df0a |
0x8 extra: misc4 RTM region detected inside HLE
|
|
|
d5df0a |
0x10 extra: misc5 # HLE inside HLE+
|
|
|
d5df0a |
-name:rs_events type:exclusive default:0x1
|
|
|
d5df0a |
+name:rs_events type:exclusive default:empty_cycles
|
|
|
d5df0a |
0x1 extra: empty_cycles This event counts cycles during which the reservation station (RS) is empty for the thread. Note: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.
|
|
|
d5df0a |
0x1 extra:cmask=1,inv,edge empty_end Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.
|
|
|
d5df0a |
-name:offcore_requests_outstanding type:exclusive default:0x1
|
|
|
d5df0a |
+name:offcore_requests_outstanding type:exclusive default:demand_data_rd
|
|
|
d5df0a |
0x1 extra: demand_data_rd This event counts the number of offcore outstanding Demand Data Read transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor. See the corresponding Umask under OFFCORE_REQUESTS. Note: A prefetch promoted to Demand is counted from the promotion point.
|
|
|
d5df0a |
0x2 extra: demand_code_rd This event counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The "Offcore outstanding" state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.
0x4 extra: demand_rfo This event counts the number of offcore outstanding RFO (store) transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.
@@ -147,14 +147,14 @@ name:br_misp_exec type:exclusive default:0xff
0xc1 extra: all_conditional This event counts both taken and not taken speculative and retired mispredicted macro conditional branch instructions.
0xc4 extra: all_indirect_jump_non_call_ret This event counts both taken and not taken mispredicted indirect branches excluding calls and returns.
0xa0 extra: taken_indirect_near_call Taken speculative and retired mispredicted indirect calls
-name:idq_uops_not_delivered type:exclusive default:0x1
+name:idq_uops_not_delivered type:exclusive default:core
0x1 extra: core This event counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding "4 - x" when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when: a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread; b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions); c. Instruction Decode Queue (IDQ) delivers four uops.
0x1 extra:cmask=4 cycles_0_uops_deliv_core This event counts, on the per-thread basis, cycles when no uops are delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core =4.
0x1 extra:cmask=3 cycles_le_1_uop_deliv_core This event counts, on the per-thread basis, cycles when less than 1 uop is delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core >=3.
0x1 extra:cmask=2 cycles_le_2_uop_deliv_core Cycles with less than 2 uops delivered by the front end
0x1 extra:cmask=1 cycles_le_3_uop_deliv_core Cycles with less than 3 uops delivered by the front end
0x1 extra:cmask=1,inv cycles_fe_was_ok Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.
-name:uops_executed_port type:exclusive default:0x1
+name:uops_executed_port type:exclusive default:port_0
0x1 extra:any port_0_core Cycles per core when uops are executed in port 0
0x2 extra:any port_1_core Cycles per core when uops are executed in port 1
0x4 extra:any port_2_core Cycles per core when uops are dispatched to port 2
@@ -200,7 +200,7 @@ name:cycle_activity type:exclusive default:0x1
0xc extra:cmask=c stalls_l1d_miss Execution stalls while L1 cache miss demand load is outstanding.
0x5 extra:cmask=5 stalls_l2_miss Execution stalls while L2 cache miss demand load is outstanding.
0x6 extra:cmask=6 stalls_mem_any Execution stalls while memory subsystem has an outstanding load.
-name:lsd type:exclusive default:0x1
+name:lsd type:exclusive default:uops
0x1 extra: uops Number of Uops delivered by the LSD. Read more on LSD under LSD_REPLAY.REPLAY
0x1 extra:cmask=4 cycles_4_uops Cycles 4 Uops delivered by the LSD, but didn't come from the decoder
0x1 extra:cmask=1 cycles_active Cycles Uops delivered by the LSD, but didn't come from the decoder
@@ -209,7 +209,7 @@ name:offcore_requests type:exclusive default:0x1
0x2 extra: demand_code_rd This event counts both cacheable and noncacheable code read requests.
0x4 extra: demand_rfo This event counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.
0x8 extra: all_data_rd This event counts the demand and prefetch data reads. All Core Data Reads include cacheable "Demands" and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.
-name:uops_executed type:exclusive default:0x1
+name:uops_executed type:exclusive default:thread
0x1 extra: thread Number of uops to be executed per-thread each cycle.
0x2 extra: core Number of uops executed from any thread
0x1 extra:cmask=1,inv stall_cycles This event counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.
@@ -232,20 +232,20 @@ name:other_assists type:exclusive default:0x8
0x8 extra: avx_to_sse This is a non-precise version (that is, does not use PEBS) of the event that counts the number of transitions from AVX-256 to legacy SSE when penalty is applicable.
0x10 extra: sse_to_avx This is a non-precise version (that is, does not use PEBS) of the event that counts the number of transitions from legacy SSE to AVX-256 when penalty is applicable.
0x40 extra: any_wb_assist Number of times any microcode assist is invoked by HW upon uop writeback.
-name:uops_retired type:exclusive default:0x1
+name:uops_retired type:exclusive default:all
0x1 extra: all This is a non-precise version (that is, does not use PEBS) of the event that counts all actually retired uops. Counting increments by two for micro-fused uops, and by one for macro-fused and other uops. Maximal increment value for one cycle is eight.
0x1 extra: all_pebs Counts all actually retired uops. Counting increments by two for micro-fused uops, and by one for macro-fused and other uops. Maximal increment value for one cycle is eight.
0x2 extra: retire_slots This is a non-precise version (that is, does not use PEBS) of the event that counts the number of retirement slots used.
0x2 extra: retire_slots_pebs Counts the number of retirement slots used.
0x1 extra:cmask=1,inv stall_cycles This is a non-precise version (that is, does not use PEBS) of the event that counts cycles without actually retired uops.
0x1 extra:cmask=a,inv total_cycles Number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.
-name:machine_clears type:exclusive default:0x1
+name:machine_clears type:exclusive default:cycles
0x1 extra: cycles This event counts both thread-specific (TS) and all-thread (AT) nukes.
0x2 extra: memory_ordering This event counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from one of the following: 1. memory disambiguation, 2. external snoop, or 3. cross SMT-HW-thread snoop (stores) hitting load buffer.
0x4 extra: smc This event counts self-modifying code (SMC) detected, which causes a machine clear.
0x20 extra: maskmov Maskmov false fault - counts number of time ucode passes through Maskmov flow due to instruction's mask being 0 while the flow was completed without raising a fault.
0x1 extra:cmask=1,edge count Number of machine clears (nukes) of any type.
-name:br_inst_retired type:exclusive default:0x1
+name:br_inst_retired type:exclusive default:conditional
0x1 extra: conditional This is a non-precise version (that is, does not use PEBS) of the event that counts conditional branch instructions retired.
0x1 extra: conditional_pebs Counts conditional branch instructions retired.
0x2 extra: near_call This is a non-precise version (that is, does not use PEBS) of the event that counts both direct and indirect near call instructions retired.
@@ -257,7 +257,7 @@ name:br_inst_retired type:exclusive default:0x1
0x20 extra: near_taken_pebs Counts taken branch instructions retired.
0x40 extra: far_branch This is a non-precise version (that is, does not use PEBS) of the event that counts far branch instructions retired.
0x4 extra:pebs all_branches_pebs This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.
-name:br_misp_retired type:exclusive default:0x1
+name:br_misp_retired type:exclusive default:conditional
0x1 extra: conditional This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted conditional branch instructions retired.
0x1 extra: conditional_pebs Counts mispredicted conditional branch instructions retired.
0x4 extra:pebs all_branches_pebs This is a precise version of BR_MISP_RETIRED.ALL_BRANCHES that counts all mispredicted macro branch instructions retired.
@@ -289,7 +289,7 @@ name:fp_assist type:exclusive default:0x1e
0x4 extra: x87_input This is a non-precise version (that is, does not use PEBS) of the event that counts x87 floating point (FP) micro-code assist (invalid operation, denormal operand, SNaN operand) when the input value (one of the source operands to an FP instruction) is invalid.
0x8 extra: simd_output This is a non-precise version (that is, does not use PEBS) of the event that counts the number of SSE* floating point (FP) micro-code assist (numeric overflow/underflow) when the output value (destination register) is invalid. Counting covers only cases involving penalties that require micro-code assist intervention.
0x10 extra: simd_input This is a non-precise version (that is, does not use PEBS) of the event that counts any input SSE* FP assist - invalid operation, denormal operand, dividing by zero, SNaN operand. Counting includes only cases involving penalties that required micro-code assist intervention.
-name:mem_uops_retired type:exclusive default:0x11
+name:mem_uops_retired type:exclusive default:stlb_miss_loads
0x11 extra: stlb_miss_loads This is a non-precise version (that is, does not use PEBS) of the event that counts load uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.
0x11 extra: stlb_miss_loads_pebs Counts load uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.
0x12 extra: stlb_miss_stores This is a non-precise version (that is, does not use PEBS) of the event that counts store uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.
@@ -304,7 +304,7 @@ name:mem_uops_retired type:exclusive default:0x11
0x81 extra: all_loads_pebs Counts load uops retired to the architected path with a filter on bits 0 and 1 applied. Note: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement. This event also counts SW prefetches.
0x82 extra: all_stores This is a non-precise version (that is, does not use PEBS) of the event that counts store uops retired to the architected path with a filter on bits 0 and 1 applied. Note: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement.
0x82 extra: all_stores_pebs Counts store uops retired to the architected path with a filter on bits 0 and 1 applied. Note: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement.
-name:mem_load_uops_retired type:exclusive default:0x1
+name:mem_load_uops_retired type:exclusive default:l1_hit
0x1 extra: l1_hit This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the nearest-level (L1) cache. Note: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load. This event also counts SW prefetches independent of the actual data source
0x1 extra: l1_hit_pebs Counts retired load uops which data sources were hits in the nearest-level (L1) cache. Note: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load. This event also counts SW prefetches independent of the actual data source
0x2 extra: l2_hit This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the mid-level (L2) cache.
@@ -319,7 +319,7 @@ name:mem_load_uops_retired type:exclusive default:0x1
0x20 extra: l3_miss_pebs Miss in last-level (L3) cache. Excludes Unknown data-source.
0x40 extra: hit_lfb This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were load uops missed L1 but hit a fill buffer due to a preceding miss to the same cache line with the data not ready. Note: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load.
0x40 extra: hit_lfb_pebs Counts retired load uops which data sources were load uops missed L1 but hit a fill buffer due to a preceding miss to the same cache line with the data not ready. Note: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load.
-name:mem_load_uops_l3_hit_retired type:exclusive default:0x1
+name:mem_load_uops_l3_hit_retired type:exclusive default:xsnp_miss
0x1 extra: xsnp_miss This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were L3 Hit and a cross-core snoop missed in the on-pkg core cache.
0x1 extra: xsnp_miss_pebs Counts retired load uops which data sources were L3 Hit and a cross-core snoop missed in the on-pkg core cache.
0x2 extra: xsnp_hit This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were L3 hit and a cross-core snoop hit in the on-pkg core cache.
@@ -328,7 +328,7 @@ name:mem_load_uops_l3_hit_retired type:exclusive default:0x1
0x4 extra: xsnp_hitm_pebs Counts retired load uops which data sources were HitM responses from a core on same socket (shared L3).
0x8 extra: xsnp_none This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the last-level (L3) cache without snoops required.
0x8 extra: xsnp_none_pebs Counts retired load uops which data sources were hits in the last-level (L3) cache without snoops required.
-name:mem_load_uops_l3_miss_retired type:exclusive default:0x1
+name:mem_load_uops_l3_miss_retired type:exclusive default:local_dram
0x1 extra: local_dram Retired load uop whose Data Source was: local DRAM either Snoop not needed or Snoop Miss (RspI)
0x1 extra: local_dram_pebs Retired load uop whose Data Source was: local DRAM either Snoop not needed or Snoop Miss (RspI)
name:l2_trans type:exclusive default:0x80
commit 723a3042bd23deca01a36f6d99cdf10fe935c0d0
Author: William Cohen <wcohen@redhat.com>
Date: Thu Jun 11 16:56:16 2015 -0400
Use a named default for the Intel Broadwell cycle_activity default unit_mask
Since the default unit mask for the Intel Broadwell cycle_activity event cannot be
uniquely specified by numbers, the default has to be replaced by a
named one.
Signed-off-by: William Cohen <wcohen@redhat.com>
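As an illustration of the change in the diff below: with a named default, the unit-mask header line reads

  name:cycle_activity type:exclusive default:cycles_l2_pending

so the default now refers to exactly one mask line by name, where the old numeric default:0x1 could not be resolved unambiguously. A named mask can also be requested explicitly from the command line; roughly like this (the sample count and workload name are placeholders, and the exact operf option syntax may differ between oprofile releases):

  ophelp                                                              # lists events and their named unit masks
  operf --events=cycle_activity:2000003:cycles_l2_pending ./workload  # placeholder count and workload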
diff --git a/events/i386/broadwell/unit_masks b/events/i386/broadwell/unit_masks
index 4e69363..505ba21 100644
--- a/events/i386/broadwell/unit_masks
+++ b/events/i386/broadwell/unit_masks
@@ -185,7 +185,7 @@ name:resource_stalls type:exclusive default:0x1
0x4 extra: rs This event counts stall cycles caused by absence of eligible entries in the reservation station (RS). This may result from RS overflow, or from RS deallocation because of the RS array Write Port allocation scheme (each RS entry has two write ports instead of four. As a result, empty entries could not be used, although RS is not really full). This counts cycles that the pipeline backend blocked uop delivery from the front end.
0x8 extra: sb This event counts stall cycles caused by the store buffer (SB) overflow (excluding draining from synch). This counts cycles that the pipeline backend blocked uop delivery from the front end.
0x10 extra: rob This event counts ROB full stall cycles. This counts cycles that the pipeline backend blocked uop delivery from the front end.
-name:cycle_activity type:exclusive default:0x1
+name:cycle_activity type:exclusive default:cycles_l2_pending
0x1 extra:cmask=1 cycles_l2_pending Counts number of cycles the CPU has at least one pending demand* load request missing the L2 cache.
0x8 extra:cmask=8 cycles_l1d_pending Counts number of cycles the CPU has at least one pending demand load request missing the L1 data cache.
0x2 extra:cmask=2 cycles_ldm_pending Counts number of cycles the CPU has at least one pending demand load request (that is cycles with non-completed load waiting for its data from memory subsystem)