-rw-r--r--  MAINTAINERS                                            7
-rw-r--r--  drivers/dma/edma.c                                     2
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c                2
-rw-r--r--  drivers/target/target_core_pscsi.c                     8
-rw-r--r--  drivers/target/target_core_sbc.c                       5
-rw-r--r--  drivers/target/target_core_xcopy.c                    53
-rw-r--r--  drivers/vhost/scsi.c                                   2
-rw-r--r--  include/trace/events/target.h                          4
-rw-r--r--  kernel/events/core.c                                   4
-rw-r--r--  kernel/mutex.c                                        32
-rw-r--r--  kernel/time/clockevents.c                             65
-rw-r--r--  tools/perf/util/event.c                               30
-rw-r--r--  tools/perf/util/evsel.c                                1
-rw-r--r--  tools/perf/util/probe-finder.c                         2
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-perl.c   2
15 files changed, 143 insertions, 76 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index c8d60c45c22..3438384d270 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2728,6 +2728,8 @@ T: git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git
DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
M: Vinod Koul <vinod.koul@intel.com>
M: Dan Williams <dan.j.williams@intel.com>
+L: dmaengine@vger.kernel.org
+Q: https://patchwork.kernel.org/project/linux-dmaengine/list/
S: Supported
F: drivers/dma/
F: include/linux/dma*
@@ -4367,7 +4369,10 @@ F: arch/x86/kernel/microcode_intel.c
INTEL I/OAT DMA DRIVER
M: Dan Williams <dan.j.williams@intel.com>
-S: Maintained
+M: Dave Jiang <dave.jiang@intel.com>
+L: dmaengine@vger.kernel.org
+Q: https://patchwork.kernel.org/project/linux-dmaengine/list/
+S: Supported
F: drivers/dma/ioat*
INTEL IOMMU (VT-d)
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 3519111c566..10b577fcf48 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -305,6 +305,7 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
edma_alloc_slot(EDMA_CTLR(echan->ch_num),
EDMA_SLOT_ANY);
if (echan->slot[i] < 0) {
+ kfree(edesc);
dev_err(dev, "Failed to allocate slot\n");
return NULL;
}
@@ -346,6 +347,7 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
ccnt = sg_dma_len(sg) / (acnt * bcnt);
if (ccnt > (SZ_64K - 1)) {
dev_err(dev, "Exceeded max SG segment size\n");
+ kfree(edesc);
return NULL;
}
cidx = acnt * bcnt;
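Both edma hunks plug the same leak: edesc is allocated once at the top of edma_prep_slave_sg() and was abandoned on these early error returns. A minimal user-space sketch of the pattern being enforced (hypothetical names, not the driver code):

    #include <stdlib.h>

    struct desc { int nslots; };

    /* Sketch: once the descriptor is allocated, every early error
     * return must free it first, or the allocation leaks. */
    static struct desc *prep_desc(int nslots)
    {
            struct desc *d = calloc(1, sizeof(*d));

            if (!d)
                    return NULL;
            if (nslots > 64) {      /* analogous to the failed-slot check */
                    free(d);        /* the fix: release before bailing out */
                    return NULL;
            }
            d->nslots = nslots;
            return d;
    }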
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 3591855cc5b..6df23502059 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -594,7 +594,7 @@ isert_connect_release(struct isert_conn *isert_conn)
pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
- if (device->use_frwr)
+ if (device && device->use_frwr)
isert_conn_free_frwr_pool(isert_conn);
if (isert_conn->conn_qp) {
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 551c96ca60a..0f199f6a073 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -134,10 +134,10 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
* pSCSI Host ID and enable for phba mode
*/
sh = scsi_host_lookup(phv->phv_host_id);
- if (IS_ERR(sh)) {
+ if (!sh) {
pr_err("pSCSI: Unable to locate SCSI Host for"
" phv_host_id: %d\n", phv->phv_host_id);
- return PTR_ERR(sh);
+ return -EINVAL;
}
phv->phv_lld_host = sh;
@@ -515,10 +515,10 @@ static int pscsi_configure_device(struct se_device *dev)
sh = phv->phv_lld_host;
} else {
sh = scsi_host_lookup(pdv->pdv_host_id);
- if (IS_ERR(sh)) {
+ if (!sh) {
pr_err("pSCSI: Unable to locate"
" pdv_host_id: %d\n", pdv->pdv_host_id);
- return PTR_ERR(sh);
+ return -EINVAL;
}
}
} else {
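Both pscsi hunks fix the same dead check: scsi_host_lookup() signals failure with NULL, not with an ERR_PTR()-encoded pointer, so IS_ERR(sh) could never fire and PTR_ERR(sh) would have returned 0 as if the lookup succeeded. A user-space sketch of the distinction (stand-in macro):

    #include <errno.h>
    #include <stddef.h>

    #define MAX_ERRNO 4095
    #define IS_ERR(p) ((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)

    static void *lookup(int id)     /* NULL-on-failure style API */
    {
            return NULL;            /* pretend the lookup failed */
    }

    static int caller(int id)
    {
            void *sh = lookup(id);

            /* IS_ERR(sh) is always false here: NULL is not in the
             * ERR_PTR range, so failure must be tested with !sh. */
            if (!sh)
                    return -EINVAL;
            return 0;
    }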
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 4714c6f8da4..d9b92b2c524 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -263,6 +263,11 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
sectors, cmd->se_dev->dev_attrib.max_write_same_len);
return TCM_INVALID_CDB_FIELD;
}
+ /* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
+ if (flags[0] & 0x10) {
+ pr_warn("WRITE SAME with ANCHOR not supported\n");
+ return TCM_INVALID_CDB_FIELD;
+ }
/*
* Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
* translated into block discard requests within backend code.
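The added check follows SBC-3: a device reporting ANC_SUP=0 must reject WRITE SAME when the ANCHOR bit (bit 4 of the flags byte) is set. A small sketch of the bit test, assuming a raw flags byte from the CDB:

    #include <stdbool.h>
    #include <stdint.h>

    #define WRITE_SAME_ANCHOR 0x10  /* bit 4 of the WRITE SAME flags byte */

    /* Sketch: with ANC_SUP == 0, ANCHOR=1 is an invalid CDB field. */
    static bool write_same_flags_ok(uint8_t flags)
    {
            return !(flags & WRITE_SAME_ANCHOR);
    }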
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 3da4fd10b9f..474cd44fac1 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -82,6 +82,9 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
mutex_lock(&g_device_mutex);
list_for_each_entry(se_dev, &g_device_list, g_dev_node) {
+ if (!se_dev->dev_attrib.emulate_3pc)
+ continue;
+
memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);
@@ -357,6 +360,7 @@ struct xcopy_pt_cmd {
struct se_cmd se_cmd;
struct xcopy_op *xcopy_op;
struct completion xpt_passthrough_sem;
+ unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
};
static struct se_port xcopy_pt_port;
@@ -675,7 +679,8 @@ static int target_xcopy_issue_pt_cmd(struct xcopy_pt_cmd *xpt_cmd)
pr_debug("target_xcopy_issue_pt_cmd(): SCSI status: 0x%02x\n",
se_cmd->scsi_status);
- return 0;
+
+ return (se_cmd->scsi_status) ? -EINVAL : 0;
}
static int target_xcopy_read_source(
@@ -708,7 +713,7 @@ static int target_xcopy_read_source(
(unsigned long long)src_lba, src_sectors, length);
transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
- DMA_FROM_DEVICE, 0, NULL);
+ DMA_FROM_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
xop->src_pt_cmd = xpt_cmd;
rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0],
@@ -768,7 +773,7 @@ static int target_xcopy_write_destination(
(unsigned long long)dst_lba, dst_sectors, length);
transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
- DMA_TO_DEVICE, 0, NULL);
+ DMA_TO_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
xop->dst_pt_cmd = xpt_cmd;
rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, dst_dev, &cdb[0],
@@ -884,30 +889,42 @@ out:
sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
{
+ struct se_device *dev = se_cmd->se_dev;
struct xcopy_op *xop = NULL;
unsigned char *p = NULL, *seg_desc;
unsigned int list_id, list_id_usage, sdll, inline_dl, sa;
+ sense_reason_t ret = TCM_INVALID_PARAMETER_LIST;
int rc;
unsigned short tdll;
+ if (!dev->dev_attrib.emulate_3pc) {
+ pr_err("EXTENDED_COPY operation explicitly disabled\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
sa = se_cmd->t_task_cdb[1] & 0x1f;
if (sa != 0x00) {
pr_err("EXTENDED_COPY(LID4) not supported\n");
return TCM_UNSUPPORTED_SCSI_OPCODE;
}
+ xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
+ if (!xop) {
+ pr_err("Unable to allocate xcopy_op\n");
+ return TCM_OUT_OF_RESOURCES;
+ }
+ xop->xop_se_cmd = se_cmd;
+
p = transport_kmap_data_sg(se_cmd);
if (!p) {
pr_err("transport_kmap_data_sg() failed in target_do_xcopy\n");
+ kfree(xop);
return TCM_OUT_OF_RESOURCES;
}
list_id = p[0];
- if (list_id != 0x00) {
- pr_err("XCOPY with non zero list_id: 0x%02x\n", list_id);
- goto out;
- }
- list_id_usage = (p[1] & 0x18);
+ list_id_usage = (p[1] & 0x18) >> 3;
+
/*
* Determine TARGET DESCRIPTOR LIST LENGTH + SEGMENT DESCRIPTOR LIST LENGTH
*/
@@ -920,13 +937,6 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
goto out;
}
- xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
- if (!xop) {
- pr_err("Unable to allocate xcopy_op\n");
- goto out;
- }
- xop->xop_se_cmd = se_cmd;
-
pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x"
" tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
tdll, sdll, inline_dl);
@@ -935,6 +945,17 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
if (rc <= 0)
goto out;
+ if (xop->src_dev->dev_attrib.block_size !=
+ xop->dst_dev->dev_attrib.block_size) {
+ pr_err("XCOPY: Non matching src_dev block_size: %u + dst_dev"
+ " block_size: %u currently unsupported\n",
+ xop->src_dev->dev_attrib.block_size,
+ xop->dst_dev->dev_attrib.block_size);
+ xcopy_pt_undepend_remotedev(xop);
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto out;
+ }
+
pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc,
rc * XCOPY_TARGET_DESC_LEN);
seg_desc = &p[16];
@@ -957,7 +978,7 @@ out:
if (p)
transport_kunmap_data_sg(se_cmd);
kfree(xop);
- return TCM_INVALID_CDB_FIELD;
+ return ret;
}
static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd)
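Two of the xcopy changes work together: each passthrough command now carries its own sense_buffer (previously transport_init_se_cmd() was handed NULL, leaving nowhere to store sense data when a command failed), and a nonzero SCSI status is folded into the return value so the copy aborts instead of silently continuing. A sketch of that fold, with stand-in types and an assumed buffer size:

    #include <errno.h>
    #include <stdint.h>

    #define SENSE_BUF_LEN 96        /* assumed TRANSPORT_SENSE_BUFFER size */

    struct pt_cmd {
            uint8_t scsi_status;                    /* 0 == GOOD */
            unsigned char sense_buffer[SENSE_BUF_LEN];
    };

    /* Sketch: any nonzero status (e.g. CHECK CONDITION, 0x02) becomes
     * -EINVAL so the caller stops issuing further copy segments. */
    static int issue_pt_cmd(struct pt_cmd *cmd)
    {
            /* ... submit, wait for completion ... */
            return cmd->scsi_status ? -EINVAL : 0;
    }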
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index ce5221fa393..e663921eebb 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1056,7 +1056,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
if (data_direction != DMA_NONE) {
ret = vhost_scsi_map_iov_to_sgl(cmd,
&vq->iov[data_first], data_num,
- data_direction == DMA_TO_DEVICE);
+ data_direction == DMA_FROM_DEVICE);
if (unlikely(ret)) {
vq_err(vq, "Failed to map iov to sgl\n");
goto err_free;
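The single-token vhost change inverts a write flag that ultimately reaches get_user_pages_fast(): DMA_FROM_DEVICE means the device's data lands in guest memory, so that is the direction needing writable page mappings. A sketch of the rule with a stand-in enum:

    #include <stdbool.h>

    enum dma_dir { DMA_NONE, DMA_TO_DEVICE, DMA_FROM_DEVICE };

    /* Sketch: a get_user_pages()-style "write" flag must be true when
     * the kernel will store into the pages. Transfers FROM the device
     * (SCSI reads) write guest memory; transfers TO the device only
     * read it. */
    static bool need_writable_pages(enum dma_dir dir)
    {
            return dir == DMA_FROM_DEVICE;
    }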
diff --git a/include/trace/events/target.h b/include/trace/events/target.h
index aef8fc35402..da9cc0f05c9 100644
--- a/include/trace/events/target.h
+++ b/include/trace/events/target.h
@@ -144,7 +144,7 @@ TRACE_EVENT(target_sequencer_start,
),
TP_fast_assign(
- __entry->unpacked_lun = cmd->se_lun->unpacked_lun;
+ __entry->unpacked_lun = cmd->orig_fe_lun;
__entry->opcode = cmd->t_task_cdb[0];
__entry->data_length = cmd->data_length;
__entry->task_attribute = cmd->sam_task_attr;
@@ -182,7 +182,7 @@ TRACE_EVENT(target_cmd_complete,
),
TP_fast_assign(
- __entry->unpacked_lun = cmd->se_lun->unpacked_lun;
+ __entry->unpacked_lun = cmd->orig_fe_lun;
__entry->opcode = cmd->t_task_cdb[0];
__entry->data_length = cmd->data_length;
__entry->task_attribute = cmd->sam_task_attr;
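cmd->se_lun is a pointer that can still be NULL when these tracepoints fire, e.g. for commands that never resolved a LUN, while orig_fe_lun is a plain scalar recorded on every command; tracing the scalar avoids the dereference. A sketch with stand-in types:

    #include <stdint.h>

    struct se_lun_sketch { uint32_t unpacked_lun; };

    struct se_cmd_sketch {
            struct se_lun_sketch *se_lun;   /* may be NULL at trace time */
            uint32_t orig_fe_lun;           /* always-valid scalar copy */
    };

    /* Sketch: record the scalar, never chase the maybe-NULL pointer. */
    static uint32_t trace_lun(const struct se_cmd_sketch *cmd)
    {
            return cmd->orig_fe_lun;
    }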
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d49a9d29334..953c1434837 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6767,6 +6767,10 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
if (ret)
return -EFAULT;
+ /* disabled for now */
+ if (attr->mmap2)
+ return -EINVAL;
+
if (attr->__reserved_1)
return -EINVAL;
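Rejecting attr->mmap2 with -EINVAL is also what lets userspace probe the feature safely: a tool can set the bit, and on EINVAL clear it and retry. A hedged user-space sketch of that probe loop (stand-in struct and syscall wrapper):

    #include <errno.h>
    #include <stdbool.h>

    struct attr_sketch { bool mmap2; };     /* stand-in for perf_event_attr */

    static int try_open(struct attr_sketch *a)      /* stand-in syscall */
    {
            return a->mmap2 ? -EINVAL : 3;  /* kernel now rejects mmap2 */
    }

    /* Sketch: probe an attr capability by attempting the open and
     * falling back when the kernel answers -EINVAL. */
    static int open_with_fallback(struct attr_sketch *a)
    {
            int fd = try_open(a);

            if (fd == -EINVAL && a->mmap2) {
                    a->mmap2 = false;       /* feature unavailable */
                    fd = try_open(a);
            }
            return fd;
    }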
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 6d647aedffe..d24105b1b79 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -410,7 +410,7 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock,
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
struct lockdep_map *nest_lock, unsigned long ip,
- struct ww_acquire_ctx *ww_ctx)
+ struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
struct task_struct *task = current;
struct mutex_waiter waiter;
@@ -450,7 +450,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
struct task_struct *owner;
struct mspin_node node;
- if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
+ if (use_ww_ctx && ww_ctx->acquired > 0) {
struct ww_mutex *ww;
ww = container_of(lock, struct ww_mutex, base);
@@ -480,7 +480,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
if ((atomic_read(&lock->count) == 1) &&
(atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
lock_acquired(&lock->dep_map, ip);
- if (!__builtin_constant_p(ww_ctx == NULL)) {
+ if (use_ww_ctx) {
struct ww_mutex *ww;
ww = container_of(lock, struct ww_mutex, base);
@@ -551,7 +551,7 @@ slowpath:
goto err;
}
- if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
+ if (use_ww_ctx && ww_ctx->acquired > 0) {
ret = __mutex_lock_check_stamp(lock, ww_ctx);
if (ret)
goto err;
@@ -575,7 +575,7 @@ skip_wait:
lock_acquired(&lock->dep_map, ip);
mutex_set_owner(lock);
- if (!__builtin_constant_p(ww_ctx == NULL)) {
+ if (use_ww_ctx) {
struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
struct mutex_waiter *cur;
@@ -615,7 +615,7 @@ mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
might_sleep();
__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
- subclass, NULL, _RET_IP_, NULL);
+ subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_nested);
@@ -625,7 +625,7 @@ _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
might_sleep();
__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
- 0, nest, _RET_IP_, NULL);
+ 0, nest, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
@@ -635,7 +635,7 @@ mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
might_sleep();
return __mutex_lock_common(lock, TASK_KILLABLE,
- subclass, NULL, _RET_IP_, NULL);
+ subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
@@ -644,7 +644,7 @@ mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
might_sleep();
return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
- subclass, NULL, _RET_IP_, NULL);
+ subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
@@ -682,7 +682,7 @@ __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
might_sleep();
ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
- 0, &ctx->dep_map, _RET_IP_, ctx);
+ 0, &ctx->dep_map, _RET_IP_, ctx, 1);
if (!ret && ctx->acquired > 1)
return ww_mutex_deadlock_injection(lock, ctx);
@@ -697,7 +697,7 @@ __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
might_sleep();
ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
- 0, &ctx->dep_map, _RET_IP_, ctx);
+ 0, &ctx->dep_map, _RET_IP_, ctx, 1);
if (!ret && ctx->acquired > 1)
return ww_mutex_deadlock_injection(lock, ctx);
@@ -809,28 +809,28 @@ __mutex_lock_slowpath(atomic_t *lock_count)
struct mutex *lock = container_of(lock_count, struct mutex, count);
__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
- NULL, _RET_IP_, NULL);
+ NULL, _RET_IP_, NULL, 0);
}
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
return __mutex_lock_common(lock, TASK_KILLABLE, 0,
- NULL, _RET_IP_, NULL);
+ NULL, _RET_IP_, NULL, 0);
}
static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
- NULL, _RET_IP_, NULL);
+ NULL, _RET_IP_, NULL, 0);
}
static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
- NULL, _RET_IP_, ctx);
+ NULL, _RET_IP_, ctx, 1);
}
static noinline int __sched
@@ -838,7 +838,7 @@ __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
struct ww_acquire_ctx *ctx)
{
return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
- NULL, _RET_IP_, ctx);
+ NULL, _RET_IP_, ctx, 1);
}
#endif
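The mutex change swaps a gcc-version-sensitive __builtin_constant_p(ww_ctx == NULL) test for an explicit const bool parameter: because __mutex_lock_common() is __always_inline, each call site passes a literal 0 or 1 and the compiler deletes the untaken ww_ctx branches outright. A compilable sketch of the idiom:

    /* Sketch: a constant bool parameter to an always-inline function is
     * folded per call site, so the dead branch costs nothing -- without
     * depending on how a given gcc evaluates __builtin_constant_p(). */
    static inline __attribute__((always_inline))
    int lock_common(int *lock, void *ctx, const int use_ctx)
    {
            if (use_ctx && !ctx)    /* compiled out when use_ctx == 0 */
                    return -1;
            *lock = 1;
            return 0;
    }

    static int plain_lock(int *lock)
    {
            return lock_common(lock, (void *)0, 0);
    }

    static int ctx_lock(int *lock, void *ctx)
    {
            return lock_common(lock, ctx, 1);
    }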
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 38959c86678..662c5798a68 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -33,29 +33,64 @@ struct ce_unbind {
int res;
};
-/**
- * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds
- * @latch: value to convert
- * @evt: pointer to clock event device descriptor
- *
- * Math helper, returns latch value converted to nanoseconds (bound checked)
- */
-u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
+static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt,
+ bool ismax)
{
u64 clc = (u64) latch << evt->shift;
+ u64 rnd;
if (unlikely(!evt->mult)) {
evt->mult = 1;
WARN_ON(1);
}
+ rnd = (u64) evt->mult - 1;
+
+ /*
+ * Upper bound sanity check. If the backwards conversion does
+ * not equal latch, we know that the above shift overflowed.
+ */
+ if ((clc >> evt->shift) != (u64)latch)
+ clc = ~0ULL;
+
+ /*
+ * Scaled math oddities:
+ *
+ * For mult <= (1 << shift) we can safely add mult - 1 to
+ * prevent integer rounding loss. So the backwards conversion
+ * from nsec to device ticks will be correct.
+ *
+ * For mult > (1 << shift), i.e. device frequency is > 1GHz we
+ * need to be careful. Adding mult - 1 will result in a value
+ * which when converted back to device ticks can be larger
+ * than latch by up to (mult - 1) >> shift. For the min_delta
+ * calculation we still want to apply this in order to stay
+ * above the minimum device ticks limit. For the upper limit
+ * we would end up with a latch value larger than the upper
+ * limit of the device, so we omit the add to stay below the
+ * device upper boundary.
+ *
+ * Also omit the add if it would overflow the u64 boundary.
+ */
+ if ((~0ULL - clc > rnd) &&
+ (!ismax || evt->mult <= (1U << evt->shift)))
+ clc += rnd;
do_div(clc, evt->mult);
- if (clc < 1000)
- clc = 1000;
- if (clc > KTIME_MAX)
- clc = KTIME_MAX;
- return clc;
+ /* Deltas less than 1usec are pointless noise */
+ return clc > 1000 ? clc : 1000;
+}
+
+/**
+ * clockevent_delta2ns - Convert a latch value (device ticks) to nanoseconds
+ * @latch: value to convert
+ * @evt: pointer to clock event device descriptor
+ *
+ * Math helper, returns latch value converted to nanoseconds (bound checked)
+ */
+u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
+{
+ return cev_delta2ns(latch, evt, false);
}
EXPORT_SYMBOL_GPL(clockevent_delta2ns);
@@ -380,8 +415,8 @@ void clockevents_config(struct clock_event_device *dev, u32 freq)
sec = 600;
clockevents_calc_mult_shift(dev, freq, sec);
- dev->min_delta_ns = clockevent_delta2ns(dev->min_delta_ticks, dev);
- dev->max_delta_ns = clockevent_delta2ns(dev->max_delta_ticks, dev);
+ dev->min_delta_ns = cev_delta2ns(dev->min_delta_ticks, dev, false);
+ dev->max_delta_ns = cev_delta2ns(dev->max_delta_ticks, dev, true);
}
/**
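The rounding subtlety is easiest to see with numbers: assume mult=4, shift=1 (a faster-than-1GHz device, since mult > 1 << shift) and latch=9 ticks. Without rounding, (9<<1)/4 = 4ns, which converts back to 4*4>>1 = 8 ticks, below the 9-tick minimum; adding mult-1 gives 21/4 = 5ns, converting back to 10 ticks, safely above the minimum but past the requested value, which is exactly why the max path skips the add. A sketch of the core arithmetic (1µs floor and overflow clamp omitted):

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of cev_delta2ns() rounding; toy device with mult=4, shift=1. */
    static uint64_t delta2ns(uint64_t latch, uint32_t mult, uint32_t shift,
                             int ismax)
    {
            uint64_t clc = latch << shift;
            uint64_t rnd = (uint64_t)mult - 1;

            if ((~0ULL - clc > rnd) && (!ismax || mult <= (1U << shift)))
                    clc += rnd;
            return clc / mult;
    }

    int main(void)
    {
            /* latch=9: min rounds up (5ns -> 10 ticks >= 9), max
             * truncates (4ns -> 8 ticks <= 9) to stay within bounds. */
            printf("min: %llu ns\n", (unsigned long long)delta2ns(9, 4, 1, 0));
            printf("max: %llu ns\n", (unsigned long long)delta2ns(9, 4, 1, 1));
            return 0;
    }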
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 9b393e7dca6..63df031fc9c 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -187,7 +187,7 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool,
return -1;
}
- event->header.type = PERF_RECORD_MMAP2;
+ event->header.type = PERF_RECORD_MMAP;
/*
* Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
*/
@@ -198,7 +198,6 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool,
char prot[5];
char execname[PATH_MAX];
char anonstr[] = "//anon";
- unsigned int ino;
size_t size;
ssize_t n;
@@ -209,13 +208,10 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool,
strcpy(execname, "");
/* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
- n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %s\n",
- &event->mmap2.start, &event->mmap2.len, prot,
- &event->mmap2.pgoff, &event->mmap2.maj,
- &event->mmap2.min,
- &ino, execname);
-
- event->mmap2.ino = (u64)ino;
+ n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %*x:%*x %*u %s\n",
+ &event->mmap.start, &event->mmap.len, prot,
+ &event->mmap.pgoff,
+ execname);
- if (n != 8)
+ if (n != 5)
continue;
@@ -227,15 +223,15 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool,
strcpy(execname, anonstr);
size = strlen(execname) + 1;
- memcpy(event->mmap2.filename, execname, size);
+ memcpy(event->mmap.filename, execname, size);
size = PERF_ALIGN(size, sizeof(u64));
- event->mmap2.len -= event->mmap.start;
- event->mmap2.header.size = (sizeof(event->mmap2) -
- (sizeof(event->mmap2.filename) - size));
- memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
- event->mmap2.header.size += machine->id_hdr_size;
- event->mmap2.pid = tgid;
- event->mmap2.tid = pid;
+ event->mmap.len -= event->mmap.start;
+ event->mmap.header.size = (sizeof(event->mmap) -
+ (sizeof(event->mmap.filename) - size));
+ memset(event->mmap.filename + size, 0, machine->id_hdr_size);
+ event->mmap.header.size += machine->id_hdr_size;
+ event->mmap.pid = tgid;
+ event->mmap.tid = pid;
if (process(tool, event, &synth_sample, machine) != 0) {
rc = -1;
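The restored sscanf drops maj:min and ino via '*'-suppressed conversions, which consume input but do not count toward the return value; only the five assigned fields do, hence the n != 5 check. A user-space sketch of the same parse, with a sample /proc/pid/maps line:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
            const char *bf =
                    "00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat\n";
            uint64_t start, end, pgoff;
            char prot[5], execname[4096];

            /* %*x and %*x and %*u match without assigning, so they are
             * not counted in n: exactly five conversions store results. */
            int n = sscanf(bf,
                           "%"SCNx64"-%"SCNx64" %s %"SCNx64" %*x:%*x %*u %s\n",
                           &start, &end, prot, &pgoff, execname);

            printf("n=%d start=%#"PRIx64" file=%s\n", n, start, execname);
            return 0;
    }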
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 0ce9febf1ba..9f1ef9bee2d 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -678,7 +678,6 @@ void perf_evsel__config(struct perf_evsel *evsel,
attr->sample_type |= PERF_SAMPLE_WEIGHT;
attr->mmap = track;
- attr->mmap2 = track && !perf_missing_features.mmap2;
attr->comm = track;
/*
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index c09e0a9fdf4..f0692737ebf 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -1357,10 +1357,10 @@ int debuginfo__find_probe_point(struct debuginfo *self, unsigned long addr,
goto post;
}
+ fname = dwarf_decl_file(&spdie);
if (addr == (unsigned long)baseaddr) {
/* Function entry - Relative line number is 0 */
lineno = baseline;
- fname = dwarf_decl_file(&spdie);
goto post;
}
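Hoisting dwarf_decl_file() above the branch means fname is populated on the non-entry path too; before, a probe address past the function entry produced a line number with a NULL file name. A sketch of the shape of the fix (hypothetical stand-ins for the libdw calls):

    struct die_sketch { const char *decl_file; int baseline; };

    /* Sketch: resolve the file before branching so *both* exits below
     * return with fname set, not just the function-entry case. */
    static const char *find_point(struct die_sketch *sp, unsigned long addr,
                                  unsigned long baseaddr, int *lineno)
    {
            const char *fname = sp->decl_file;      /* hoisted lookup */

            if (addr == baseaddr) {
                    *lineno = sp->baseline;         /* entry: relative 0 */
                    return fname;
            }
            *lineno = -1;                           /* caller searches lines */
            return fname;
    }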
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index a85e4ae5f3a..c0c9795c4f0 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -282,7 +282,7 @@ static void perl_process_tracepoint(union perf_event *perf_event __maybe_unused,
event = find_cache_event(evsel);
if (!event)
- die("ug! no event found for type %" PRIu64, evsel->attr.config);
+ die("ug! no event found for type %" PRIu64, (u64)evsel->attr.config);
pid = raw_field_value(event, "common_pid", data);
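The added cast cures a printf-format mismatch: depending on the toolchain, the 64-bit type of attr.config and the type PRIu64 expands for (unsigned long vs unsigned long long) can differ, and -Werror builds then fail on -Wformat. Pinning the vararg with an explicit cast keeps them in sync; a small illustration:

    #include <inttypes.h>
    #include <stdio.h>

    typedef unsigned long u64_like; /* some ABIs define __u64 this way */

    int main(void)
    {
            u64_like config = 42;

            /* The cast guarantees the vararg matches what PRIu64
             * describes, whichever 64-bit type the libc chose. */
            printf("config: %" PRIu64 "\n", (uint64_t)config);
            return 0;
    }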