On 02/27/2012 06:27 PM, Ming Lei wrote:
> After some check, I just found there is another patch you missed.
> Please try the attached patch from Shilimkar, Santosh.
> If it doesn't work, I can send my uImage for your test.
No effect, so please send an uImage if possible.
I'm re-sending the cumulative patch against
500dd2370e77c9551ba298bdeeb91b02d8402199.
It's possible that I missed something else, so it would be great if you could
look through it one more time.
Thanks,
Dmitry
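
The key change to armpmu_event_update() below is that the delta no longer
depends on an explicit overflow argument: with a power-of-two counter width,
the unsigned subtraction masked with max_period gives the right count across a
single wrap, as long as the counter advances by no more than max_period between
two reads (which is what halving the default sample_period is for). A rough
stand-alone sketch of that arithmetic, with made-up values and not part of the
patch itself:

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative only: delta between two reads of a counter whose width is
 * max_period + 1 counts (max_period is an all-ones mask, e.g. 0xffffffff
 * for a 32-bit counter). The masked unsigned subtraction is correct across
 * a single wrap-around, so no separate "overflow" flag is needed, provided
 * the counter moves by at most max_period between reads.
 */
static uint64_t counter_delta(uint64_t prev, uint64_t now, uint64_t max_period)
{
	return (now - prev) & max_period;
}

int main(void)
{
	const uint64_t max_period = 0xffffffffULL;

	/* No wrap: 0x1000 -> 0x1080 gives 0x80. */
	printf("0x%llx\n",
	       (unsigned long long)counter_delta(0x1000, 0x1080, max_period));

	/* Wrap: 0xfffffff0 -> 0x10 still gives 0x20. */
	printf("0x%llx\n",
	       (unsigned long long)counter_delta(0xfffffff0, 0x10, max_period));

	return 0;
}
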
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index b5a5be2..90114fa 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -134,7 +134,7 @@ int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type);
u64 armpmu_event_update(struct perf_event *event,
struct hw_perf_event *hwc,
- int idx, int overflow);
+ int idx);
int armpmu_event_set_period(struct perf_event *event,
struct hw_perf_event *hwc,
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 5bb91bf..56173ae 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -180,7 +180,7 @@ armpmu_event_set_period(struct perf_event *event,
u64
armpmu_event_update(struct perf_event *event,
struct hw_perf_event *hwc,
- int idx, int overflow)
+ int idx)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
u64 delta, prev_raw_count, new_raw_count;
@@ -193,13 +193,7 @@ again:
new_raw_count) != prev_raw_count)
goto again;
- new_raw_count &= armpmu->max_period;
- prev_raw_count &= armpmu->max_period;
-
- if (overflow)
- delta = armpmu->max_period - prev_raw_count + new_raw_count + 1;
- else
- delta = new_raw_count - prev_raw_count;
+ delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
local64_add(delta, &event->count);
local64_sub(delta, &hwc->period_left);
@@ -216,7 +210,7 @@ armpmu_read(struct perf_event *event)
if (hwc->idx < 0)
return;
- armpmu_event_update(event, hwc, hwc->idx, 0);
+ armpmu_event_update(event, hwc, hwc->idx);
}
static void
@@ -232,7 +226,7 @@ armpmu_stop(struct perf_event *event, int flags)
if (!(hwc->state & PERF_HES_STOPPED)) {
armpmu->disable(hwc, hwc->idx);
barrier(); /* why? */
- armpmu_event_update(event, hwc, hwc->idx, 0);
+ armpmu_event_update(event, hwc, hwc->idx);
hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}
}
@@ -518,7 +512,13 @@ __hw_perf_event_init(struct perf_event *event)
hwc->config_base |= (unsigned long)mapping;
if (!hwc->sample_period) {
- hwc->sample_period = armpmu->max_period;
+ /*
+ * For non-sampling runs, limit the sample_period to half
+ * of the counter width. That way, the new counter value
+ * is far less likely to overtake the previous one unless
+ * you have some serious IRQ latency issues.
+ */
+ hwc->sample_period = armpmu->max_period >> 1;
hwc->last_period = hwc->sample_period;
local64_set(&hwc->period_left, hwc->sample_period);
}
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 533be99..b78af0c 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -467,23 +467,6 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
-static int counter_is_active(unsigned long pmcr, int idx)
-{
- unsigned long mask = 0;
- if (idx == ARMV6_CYCLE_COUNTER)
- mask = ARMV6_PMCR_CCOUNT_IEN;
- else if (idx == ARMV6_COUNTER0)
- mask = ARMV6_PMCR_COUNT0_IEN;
- else if (idx == ARMV6_COUNTER1)
- mask = ARMV6_PMCR_COUNT1_IEN;
-
- if (mask)
- return pmcr & mask;
-
- WARN_ONCE(1, "invalid counter number (%d)\n", idx);
- return 0;
-}
-
static irqreturn_t
armv6pmu_handle_irq(int irq_num,
void *dev)
@@ -513,7 +496,8 @@ armv6pmu_handle_irq(int irq_num,
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
- if (!counter_is_active(pmcr, idx))
+ /* Ignore if we don't have an event. */
+ if (!event)
continue;
/*
@@ -524,7 +508,7 @@ armv6pmu_handle_irq(int irq_num,
continue;
hwc = &event->hw;
- armpmu_event_update(event, hwc, idx, 1);
+ armpmu_event_update(event, hwc, idx);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 6933244..4d7095a 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -809,6 +809,11 @@ static inline int armv7_pmnc_disable_intens(int idx)
counter = ARMV7_IDX_TO_COUNTER(idx);
asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
+ isb();
+ /* Clear the overflow flag in case an interrupt is pending. */
+ asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
+ isb();
+
return idx;
}
@@ -955,6 +960,10 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
+ /* Ignore if we don't have an event. */
+ if (!event)
+ continue;
+
/*
* We have a single interrupt for all counters. Check that
* each counter has overflowed before we process it.
@@ -963,7 +972,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
continue;
hwc = &event->hw;
- armpmu_event_update(event, hwc, idx, 1);
+ armpmu_event_update(event, hwc, idx);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 3b99d82..71a21e6 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -255,11 +255,14 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
+ if (!event)
+ continue;
+
if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
continue;
hwc = &event->hw;
- armpmu_event_update(event, hwc, idx, 1);
+ armpmu_event_update(event, hwc, idx);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
@@ -592,11 +595,14 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
- if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx))
+ if (!event)
+ continue;
+
+ if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
continue;
hwc = &event->hw;
- armpmu_event_update(event, hwc, idx, 1);
+ armpmu_event_update(event, hwc, idx);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
@@ -663,7 +669,7 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
static void
xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
- unsigned long flags, ien, evtsel;
+ unsigned long flags, ien, evtsel, of_flags;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
ien = xscale2pmu_read_int_enable();
@@ -672,26 +678,31 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
switch (idx) {
case XSCALE_CYCLE_COUNTER:
ien &= ~XSCALE2_CCOUNT_INT_EN;
+ of_flags = XSCALE2_CCOUNT_OVERFLOW;
break;
case XSCALE_COUNTER0:
ien &= ~XSCALE2_COUNT0_INT_EN;
evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
+ of_flags = XSCALE2_COUNT0_OVERFLOW;
break;
case XSCALE_COUNTER1:
ien &= ~XSCALE2_COUNT1_INT_EN;
evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
+ of_flags = XSCALE2_COUNT1_OVERFLOW;
break;
case XSCALE_COUNTER2:
ien &= ~XSCALE2_COUNT2_INT_EN;
evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
+ of_flags = XSCALE2_COUNT2_OVERFLOW;
break;
case XSCALE_COUNTER3:
ien &= ~XSCALE2_COUNT3_INT_EN;
evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
+ of_flags = XSCALE2_COUNT3_OVERFLOW;
break;
default:
WARN_ONCE(1, "invalid counter number (%d)\n", idx);
@@ -701,6 +712,7 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
raw_spin_lock_irqsave(&events->pmu_lock, flags);
xscale2pmu_write_event_select(evtsel);
xscale2pmu_write_int_enable(ien);
+ xscale2pmu_write_overflow_flags(of_flags);
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
diff --git a/arch/arm/mach-omap2/clockdomains44xx_data.c b/arch/arm/mach-omap2/clockdomains44xx_data.c
index 9299ac2..41d2260 100644
--- a/arch/arm/mach-omap2/clockdomains44xx_data.c
+++ b/arch/arm/mach-omap2/clockdomains44xx_data.c
@@ -390,7 +390,7 @@ static struct clockdomain emu_sys_44xx_clkdm = {
.prcm_partition = OMAP4430_PRM_PARTITION,
.cm_inst = OMAP4430_PRM_EMU_CM_INST,
.clkdm_offs = OMAP4430_PRM_EMU_CM_EMU_CDOFFS,
- .flags = CLKDM_CAN_HWSUP,
+ .flags = CLKDM_CAN_SWSUP,
};
static struct clockdomain l3_dma_44xx_clkdm = {
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index 283d11e..91477f8 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -17,12 +17,14 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/of.h>
+#include <linux/pm_runtime.h>
#include <mach/hardware.h>
#include <mach/irqs.h>
#include <asm/mach-types.h>
#include <asm/mach/map.h>
#include <asm/pmu.h>
+#include <asm/cti.h>
#include <plat/tc.h>
#include <plat/board.h>
@@ -445,14 +447,130 @@ static struct platform_device omap_pmu_device = {
.num_resources = 1,
};
-static void omap_init_pmu(void)
+static struct arm_pmu_platdata omap4_pmu_data;
+static struct omap_device_pm_latency omap_pmu_latency[] = {
+ [0] = {
+ .deactivate_func = omap_device_idle_hwmods,
+ .activate_func = omap_device_enable_hwmods,
+ .flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST,
+ },
+};
+
+static struct cti omap4_cti[2];
+static struct platform_device *pmu_dev;
+
+static void omap4_enable_cti(int irq)
{
- if (cpu_is_omap24xx())
+ pm_runtime_get_sync(&pmu_dev->dev);
+ if (irq == OMAP44XX_IRQ_CTI0)
+ cti_enable(&omap4_cti[0]);
+ else if (irq == OMAP44XX_IRQ_CTI1)
+ cti_enable(&omap4_cti[1]);
+}
+
+static void omap4_disable_cti(int irq)
+{
+ if (irq == OMAP44XX_IRQ_CTI0)
+ cti_disable(&omap4_cti[0]);
+ else if (irq == OMAP44XX_IRQ_CTI1)
+ cti_disable(&omap4_cti[1]);
+ pm_runtime_put(&pmu_dev->dev);
+}
+
+static irqreturn_t omap4_pmu_handler(int irq, void *dev, irq_handler_t handler)
+{
+ if (irq == OMAP44XX_IRQ_CTI0)
+ cti_irq_ack(&omap4_cti[0]);
+ else if (irq == OMAP44XX_IRQ_CTI1)
+ cti_irq_ack(&omap4_cti[1]);
+
+ return handler(irq, dev);
+}
+
+static void __init omap4_configure_pmu_irq(void)
+{
+ void __iomem *base0;
+ void __iomem *base1;
+
+ base0 = ioremap(OMAP44XX_CTI0_BASE, SZ_4K);
+ base1 = ioremap(OMAP44XX_CTI1_BASE, SZ_4K);
+ if (!base0 || !base1) {
+ pr_err("ioremap for OMAP4 CTI failed\n");
+ return;
+ }
+
+ /* Configure CTI0 for PMU IRQ routing. */
+ cti_init(&omap4_cti[0], base0, OMAP44XX_IRQ_CTI0, 6);
+ cti_unlock(&omap4_cti[0]);
+ cti_map_trigger(&omap4_cti[0], 1, 6, 2);
+
+ /* Configure CTI1 for PMU IRQ routing. */
+ cti_init(&omap4_cti[1], base1, OMAP44XX_IRQ_CTI1, 6);
+ cti_unlock(&omap4_cti[1]);
+ cti_map_trigger(&omap4_cti[1], 1, 6, 2);
+}
+
+static struct platform_device* __init omap4_init_pmu(void)
+{
+ int id = -1;
+ const char *hw;
+ struct platform_device *pd;
+ struct omap_hwmod* oh[3];
+ char *dev_name = "arm-pmu";
+
+ hw = "l3_main_3";
+ oh[0] = omap_hwmod_lookup(hw);
+ if (!oh[0]) {
+ pr_err("Could not look up %s hwmod\n", hw);
+ return NULL;
+ }
+ hw = "l3_instr";
+ oh[1] = omap_hwmod_lookup(hw);
+ if (!oh[1]) {
+ pr_err("Could not look up %s hwmod\n", hw);
+ return NULL;
+ }
+ hw = "debugss";
+ oh[2] = omap_hwmod_lookup(hw);
+ if (!oh[2]) {
+ pr_err("Could not look up %s hwmod\n", hw);
+ return NULL;
+ }
+
+ omap4_pmu_data.handle_irq = omap4_pmu_handler;
+ omap4_pmu_data.enable_irq = omap4_enable_cti;
+ omap4_pmu_data.disable_irq = omap4_disable_cti;
+
+ pd = omap_device_build_ss(dev_name, id, oh, 3, &omap4_pmu_data,
+ sizeof(omap4_pmu_data),
+ omap_pmu_latency,
+ ARRAY_SIZE(omap_pmu_latency), 0);
+ WARN(IS_ERR(pd), "Can't build omap_device for %s.\n",
+ dev_name);
+ return pd;
+}
+static void __init omap_init_pmu(void)
+{
+ if (cpu_is_omap24xx()) {
omap_pmu_device.resource = &omap2_pmu_resource;
- else if (cpu_is_omap34xx())
+ } else if (cpu_is_omap34xx()) {
omap_pmu_device.resource = &omap3_pmu_resource;
- else
+ } else if (cpu_is_omap44xx()) {
+ struct platform_device *pd;
+
+ pd = omap4_init_pmu();
+ if (!pd)
+ return;
+
+ pmu_dev = pd;
+ pm_runtime_enable(&pd->dev);
+ pm_runtime_get_sync(&pd->dev);
+ omap4_configure_pmu_irq();
+ pm_runtime_put(&pd->dev);
+ return;
+ } else {
return;
+ }
platform_device_register(&omap_pmu_device);
}
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index ef0524c..0b7f528 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -48,6 +48,7 @@
/* Backward references (IPs with Bus Master capability) */
static struct omap_hwmod omap44xx_aess_hwmod;
+static struct omap_hwmod omap44xx_debugss_hwmod;
static struct omap_hwmod omap44xx_dma_system_hwmod;
static struct omap_hwmod omap44xx_dmm_hwmod;
static struct omap_hwmod omap44xx_dsp_hwmod;
@@ -340,6 +341,14 @@ static struct omap_hwmod omap44xx_l3_main_1_hwmod = {
};
/* l3_main_2 */
+/* debugss -> l3_main_2 */
+static struct omap_hwmod_ocp_if omap44xx_debugss__l3_main_2 = {
+ .master = &omap44xx_debugss_hwmod,
+ .slave = &omap44xx_l3_main_2_hwmod,
+ .clk = "dbgclk_mux_ck",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
/* dma_system -> l3_main_2 */
static struct omap_hwmod_ocp_if omap44xx_dma_system__l3_main_2 = {
.master = &omap44xx_dma_system_hwmod,
@@ -689,7 +698,6 @@ static struct omap_hwmod omap44xx_mpu_private_hwmod = {
* ctrl_module_pad_core
* ctrl_module_pad_wkup
* ctrl_module_wkup
- * debugss
* efuse_ctrl_cust
* efuse_ctrl_std
* elm
@@ -911,6 +919,168 @@ static struct omap_hwmod omap44xx_counter_32k_hwmod = {
};
/*
+ * 'debugss' class
+ * debug and emulation sub system
+ */
+
+static struct omap_hwmod_class_sysconfig omap44xx_debugss_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap44xx_debugss_hwmod_class = {
+ .name = "debugss",
+ .sysc = &omap44xx_debugss_sysc,
+};
+
+/* debugss */
+static struct omap_hwmod_irq_info omap44xx_debugss_irqs[] = {
+ { .name = "cti0", .irq = 1 + OMAP44XX_IRQ_GIC_START },
+ { .name = "cti1", .irq = 2 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
+};
+
+/* debugss master ports */
+static struct omap_hwmod_ocp_if *omap44xx_debugss_masters[] = {
+ &omap44xx_debugss__l3_main_2,
+};
+
+static struct omap_hwmod_addr_space omap44xx_debugss_addrs[] = {
+ {
+ .name = "mipi_stm_add_sp_0",
+ .pa_start = 0x54000000,
+ .pa_end = 0x540fffff,
+ },
+ {
+ .name = "mipi_stm_add_sp_1",
+ .pa_start = 0x54100000,
+ .pa_end = 0x5413ffff,
+ },
+ {
+ .name = "mpu_c0_debug",
+ .pa_start = 0x54140000,
+ .pa_end = 0x54141fff,
+ },
+ {
+ .name = "mpu_c1_debug",
+ .pa_start = 0x54142000,
+ .pa_end = 0x54143fff,
+ },
+ {
+ .name = "cti0_mpu",
+ .pa_start = 0x54148000,
+ .pa_end = 0x54148fff,
+ },
+ {
+ .name = "cti1_mpu",
+ .pa_start = 0x54149000,
+ .pa_end = 0x54149fff,
+ },
+ {
+ .name = "ptm0_mpu",
+ .pa_start = 0x5414c000,
+ .pa_end = 0x5414cfff,
+ },
+ {
+ .name = "ptm1_mpu",
+ .pa_start = 0x5414d000,
+ .pa_end = 0x5414dfff,
+ },
+ {
+ .name = "tf_mpu",
+ .pa_start = 0x54158000,
+ .pa_end = 0x54158fff,
+ },
+ {
+ .name = "dap_pc_mpu",
+ .pa_start = 0x54159000,
+ .pa_end = 0x54159fff,
+ },
+ {
+ .name = "apb_bridge_a_ctrl_time_out",
+ .pa_start = 0x5415f000,
+ .pa_end = 0x5415ffff,
+ },
+ {
+ .name = "drm",
+ .pa_start = 0x54160000,
+ .pa_end = 0x54160fff,
+ },
+ {
+ .name = "mipi_stm",
+ .pa_start = 0x54161000,
+ .pa_end = 0x54161fff,
+ .flags = ADDR_TYPE_RT
+ },
+ {
+ .name = "csetb",
+ .pa_start = 0x54162000,
+ .pa_end = 0x54162fff,
+ },
+ {
+ .name = "cstpiu",
+ .pa_start = 0x54163000,
+ .pa_end = 0x54163fff,
+ },
+ {
+ .name = "cstf1",
+ .pa_start = 0x54164000,
+ .pa_end = 0x54164fff,
+ },
+ {
+ .name = "cstf2",
+ .pa_start = 0x54165000,
+ .pa_end = 0x54165fff,
+ },
+ {
+ .name = "l4_cfg_emu_conf_regs",
+ .pa_start = 0x54167000,
+ .pa_end = 0x54167fff,
+ },
+ {
+ .name = "l3_instr_emu_conf_regs",
+ .pa_start = 0x54180000,
+ .pa_end = 0x54180fff,
+ },
+ { }
+};
+
+/* l3_instr -> debugss */
+static struct omap_hwmod_ocp_if omap44xx_l3_instr__debugss = {
+ .master = &omap44xx_l3_instr_hwmod,
+ .slave = &omap44xx_debugss_hwmod,
+ .clk = "l3_div_ck",
+ .addr = omap44xx_debugss_addrs,
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* debugss slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_debugss_slaves[] = {
+ &omap44xx_l3_instr__debugss,
+};
+
+static struct omap_hwmod omap44xx_debugss_hwmod = {
+ .name = "debugss",
+ .class = &omap44xx_debugss_hwmod_class,
+ .clkdm_name = "emu_sys_clkdm",
+ .mpu_irqs = omap44xx_debugss_irqs,
+ .main_clk = "trace_clk_div_ck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = OMAP4_CM_EMU_DEBUGSS_CLKCTRL_OFFSET,
+ .context_offs = OMAP4_RM_EMU_DEBUGSS_CONTEXT_OFFSET,
+ },
+ },
+ .slaves = omap44xx_debugss_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_debugss_slaves),
+ .masters = omap44xx_debugss_masters,
+ .masters_cnt = ARRAY_SIZE(omap44xx_debugss_masters),
+};
+
+/*
* 'dma' class
* dma controller for data exchange between memory to memory (i.e. internal or
* external memory) and gp peripherals to memory or memory to gp peripherals
@@ -3907,8 +4077,6 @@ static struct omap_hwmod_class omap44xx_mpu_hwmod_class = {
/* mpu */
static struct omap_hwmod_irq_info omap44xx_mpu_irqs[] = {
{ .name = "pl310", .irq = 0 + OMAP44XX_IRQ_GIC_START },
- { .name = "cti0", .irq = 1 + OMAP44XX_IRQ_GIC_START },
- { .name = "cti1", .irq = 2 + OMAP44XX_IRQ_GIC_START },
{ .irq = -1 }
};
@@ -5514,6 +5682,9 @@ static __initdata struct omap_hwmod *omap44xx_hwmods[] = {
/* counter class */
/* &omap44xx_counter_32k_hwmod, */
+ /* debugss class */
+ &omap44xx_debugss_hwmod,
+
/* dma class */
&omap44xx_dma_system_hwmod,
diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c
index c264ef7..d7bd020 100644
--- a/arch/arm/mach-omap2/pm44xx.c
+++ b/arch/arm/mach-omap2/pm44xx.c
@@ -197,7 +197,7 @@ static int __init omap4_pm_init(void)
{
int ret;
struct clockdomain *emif_clkdm, *mpuss_clkdm, *l3_1_clkdm;
- struct clockdomain *ducati_clkdm, *l3_2_clkdm, *l4_per_clkdm;
+ struct clockdomain *ducati_clkdm, *l3_2_clkdm, *l4_per_clkdm, *l4wkup;
if (!cpu_is_omap44xx())
return -ENODEV;
@@ -227,14 +227,16 @@ static int __init omap4_pm_init(void)
l3_2_clkdm = clkdm_lookup("l3_2_clkdm");
l4_per_clkdm = clkdm_lookup("l4_per_clkdm");
ducati_clkdm = clkdm_lookup("ducati_clkdm");
+ l4wkup = clkdm_lookup("l4_wkup_clkdm");
if ((!mpuss_clkdm) || (!emif_clkdm) || (!l3_1_clkdm) ||
- (!l3_2_clkdm) || (!ducati_clkdm) || (!l4_per_clkdm))
+ (!l3_2_clkdm) || (!ducati_clkdm) || (!l4_per_clkdm) || (!l4wkup))
goto err2;
ret = clkdm_add_wkdep(mpuss_clkdm, emif_clkdm);
ret |= clkdm_add_wkdep(mpuss_clkdm, l3_1_clkdm);
ret |= clkdm_add_wkdep(mpuss_clkdm, l3_2_clkdm);
ret |= clkdm_add_wkdep(mpuss_clkdm, l4_per_clkdm);
+ ret |= clkdm_add_wkdep(mpuss_clkdm, l4wkup);
ret |= clkdm_add_wkdep(ducati_clkdm, l3_1_clkdm);
ret |= clkdm_add_wkdep(ducati_clkdm, l3_2_clkdm);
if (ret) {
diff --git a/arch/arm/plat-omap/include/plat/omap44xx.h b/arch/arm/plat-omap/include/plat/omap44xx.h
index c0d478e..0b04969 100644
--- a/arch/arm/plat-omap/include/plat/omap44xx.h
+++ b/arch/arm/plat-omap/include/plat/omap44xx.h
@@ -58,5 +58,8 @@
#define OMAP44XX_HSUSB_OHCI_BASE (L4_44XX_BASE + 0x64800)
#define OMAP44XX_HSUSB_EHCI_BASE (L4_44XX_BASE + 0x64C00)
+#define OMAP44XX_CTI0_BASE 0x54148000
+#define OMAP44XX_CTI1_BASE 0x54149000
+
#endif /* __ASM_ARCH_OMAP44XX_H */
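
If the new uImage still shows nothing, it may be worth checking plain counting
before sampling, assuming a target build of perf is available; something along
the lines of:

    perf stat -e cycles,instructions sleep 1

exercises only the counting path, while

    perf record -e cycles -a sleep 5
    perf report

additionally needs the overflow interrupt to be delivered, so comparing the two
helps narrow down whether it is the CTI/IRQ routing that is still broken.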