mmc: Aggressive clock gating framework
This patch modifies the MMC core code to optionally call the set_ios() operation on the driver with the clock frequency set to 0 (gate) after a grace period of at least 8 MCLK cycles, then restore it (ungate) before any new request. This gives the driver the option to shut down the MCI clock to the MMC/SD card when the clock frequency is 0, i.e. the core has stated that the MCI clock does not need to be generated.

It is inspired by existing clock gating code found in the OMAP and Atmel drivers and brings this up to the host abstraction. Gating is performed before and after any MMC request.

This patchset implements this for the MMCI/PL180 MMC/SD host controller, but it should be simple to switch OMAP/Atmel over to using this instead.

mmc_set_{gated,ungated}() add variable protection to the state holders for the clock gating code. This is particularly important when ordinary .set_ios() calls would race with the .set_ios() call resulting from a delayed gate operation.

Signed-off-by: Linus Walleij <linus.walleij@stericsson.com>
Reviewed-by: Chris Ball <cjb@laptop.org>
Tested-by: Chris Ball <cjb@laptop.org>
Signed-off-by: Chris Ball <cjb@laptop.org>
This commit is contained in:
parent 26daa1ed40
commit 04566831a7
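To make the new contract concrete from the driver's side, here is a minimal sketch (not part of this patch, and not the MMCI/PL180 implementation) of how a host driver's .set_ios() callback could react to gating: ios->clock == 0 means the core no longer needs the MCI clock, and any non-zero value means it must be running again before the next request. The struct myhost layout, its field names and the divider handling are hypothetical; mmc_priv(), clk_enable() and clk_disable() are the ordinary kernel APIs.

#include <linux/clk.h>
#include <linux/mmc/host.h>

struct myhost {
        struct clk *mclk;       /* MCI block clock; hypothetical field names */
        bool mclk_enabled;
};

static void myhost_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct myhost *host = mmc_priv(mmc);

        if (ios->clock == 0) {
                /* The core has gated us: stop the block clock to cut power and bus noise */
                if (host->mclk_enabled) {
                        clk_disable(host->mclk);
                        host->mclk_enabled = false;
                }
        } else {
                /* The core is ungating before a request: restart the clock... */
                if (!host->mclk_enabled) {
                        clk_enable(host->mclk);
                        host->mclk_enabled = true;
                }
                /* ...then program the divider for ios->clock as usual */
        }

        /* power mode, bus width and timing handling stays exactly as before */
}

A driver that opts in therefore only needs to treat ios->clock == 0 as "stop the MCI clock"; everything else in its .set_ios() path is unchanged.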
@@ -16,3 +16,14 @@ config MMC_UNSAFE_RESUME
           This option sets a default which can be overridden by the
           module parameter "removable=0" or "removable=1".
 
+config MMC_CLKGATE
+        bool "MMC host clock gating (EXPERIMENTAL)"
+        depends on EXPERIMENTAL
+        help
+          This will attempt to aggressively gate the clock to the MMC card.
+          This is done to save power due to gating off the logic and bus
+          noise when the MMC card is not in use. Your host driver has to
+          support handling this in order for it to be of any use.
+
+          If unsure, say N.
+
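Enabling the new option is the usual Kconfig exercise; a .config fragment for a build with the feature turned on might look like the lines below (CONFIG_EXPERIMENTAL is required by the depends line above, and the rest of the MMC configuration is platform specific):

CONFIG_EXPERIMENTAL=y
CONFIG_MMC=y
CONFIG_MMC_CLKGATE=y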
@@ -130,6 +130,8 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
 
                 if (mrq->done)
                         mrq->done(mrq);
+
+                mmc_host_clk_gate(host);
         }
 }
 
@@ -190,6 +192,7 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
                         mrq->stop->mrq = mrq;
                 }
         }
+        mmc_host_clk_ungate(host);
         host->ops->request(host, mrq);
 }
 
@@ -296,7 +299,7 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
 
                 timeout_us = data->timeout_ns / 1000;
                 timeout_us += data->timeout_clks * 1000 /
-                        (card->host->ios.clock / 1000);
+                        (mmc_host_clk_rate(card->host) / 1000);
 
                 if (data->flags & MMC_DATA_WRITE)
                         /*
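The one-line change above matters because host->ios.clock is now 0 whenever the clock is gated, so the old expression could end up dividing by zero (or computing a meaningless timeout) if a data transfer is prepared while the host is still gated; mmc_host_clk_rate() returns the cached pre-gating frequency instead. As an illustrative calculation (numbers not taken from the patch): with timeout_clks = 100 and a 25 MHz card clock the second term is 100 * 1000 / (25000000 / 1000) = 4 µs, whereas the same expression evaluated with ios.clock == 0 would be a division by zero.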
@@ -614,6 +617,8 @@ static inline void mmc_set_ios(struct mmc_host *host)
                  ios->power_mode, ios->chip_select, ios->vdd,
                  ios->bus_width, ios->timing);
 
+        if (ios->clock > 0)
+                mmc_set_ungated(host);
         host->ops->set_ios(host, ios);
 }
 
@@ -641,6 +646,61 @@ void mmc_set_clock(struct mmc_host *host, unsigned int hz)
         mmc_set_ios(host);
 }
 
+#ifdef CONFIG_MMC_CLKGATE
+/*
+ * This gates the clock by setting it to 0 Hz.
+ */
+void mmc_gate_clock(struct mmc_host *host)
+{
+        unsigned long flags;
+
+        spin_lock_irqsave(&host->clk_lock, flags);
+        host->clk_old = host->ios.clock;
+        host->ios.clock = 0;
+        host->clk_gated = true;
+        spin_unlock_irqrestore(&host->clk_lock, flags);
+        mmc_set_ios(host);
+}
+
+/*
+ * This restores the clock from gating by using the cached
+ * clock value.
+ */
+void mmc_ungate_clock(struct mmc_host *host)
+{
+        /*
+         * We should previously have gated the clock, so the clock shall
+         * be 0 here! The clock may however be 0 during initialization,
+         * when some request operations are performed before setting
+         * the frequency. When ungate is requested in that situation
+         * we just ignore the call.
+         */
+        if (host->clk_old) {
+                BUG_ON(host->ios.clock);
+                /* This call will also set host->clk_gated to false */
+                mmc_set_clock(host, host->clk_old);
+        }
+}
+
+void mmc_set_ungated(struct mmc_host *host)
+{
+        unsigned long flags;
+
+        /*
+         * We've been given a new frequency while the clock is gated,
+         * so make sure we regard this as ungating it.
+         */
+        spin_lock_irqsave(&host->clk_lock, flags);
+        host->clk_gated = false;
+        spin_unlock_irqrestore(&host->clk_lock, flags);
+}
+
+#else
+void mmc_set_ungated(struct mmc_host *host)
+{
+}
+#endif
+
 /*
  * Change the bus mode (open drain/push-pull) of a host.
  */
@@ -33,6 +33,9 @@ void mmc_init_erase(struct mmc_card *card);
 
 void mmc_set_chip_select(struct mmc_host *host, int mode);
 void mmc_set_clock(struct mmc_host *host, unsigned int hz);
+void mmc_gate_clock(struct mmc_host *host);
+void mmc_ungate_clock(struct mmc_host *host);
+void mmc_set_ungated(struct mmc_host *host);
 void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode);
 void mmc_set_bus_width(struct mmc_host *host, unsigned int width);
 void mmc_set_bus_width_ddr(struct mmc_host *host, unsigned int width,
@@ -183,6 +183,11 @@ void mmc_add_host_debugfs(struct mmc_host *host)
                         &mmc_clock_fops))
                 goto err_node;
 
+#ifdef CONFIG_MMC_CLKGATE
+        if (!debugfs_create_u32("clk_delay", (S_IRUSR | S_IWUSR),
+                                root, &host->clk_delay))
+                goto err_node;
+#endif
         return;
 
 err_node:
@@ -3,6 +3,7 @@
  *
  * Copyright (C) 2003 Russell King, All Rights Reserved.
  * Copyright (C) 2007-2008 Pierre Ossman
+ * Copyright (C) 2010 Linus Walleij
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -20,6 +21,7 @@
 #include <linux/suspend.h>
 
 #include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
 
 #include "core.h"
 #include "host.h"
@@ -50,6 +52,204 @@ void mmc_unregister_host_class(void)
 static DEFINE_IDR(mmc_host_idr);
 static DEFINE_SPINLOCK(mmc_host_lock);
 
+#ifdef CONFIG_MMC_CLKGATE
+
+/*
+ * Enabling clock gating will make the core call out to the host
+ * once up and once down when it performs a request or card operation
+ * intermingled in any fashion. The driver will see this through
+ * set_ios() operations with ios.clock field set to 0 to gate (disable)
+ * the block clock, and to the old frequency to enable it again.
+ */
+static void mmc_host_clk_gate_delayed(struct mmc_host *host)
+{
+        unsigned long tick_ns;
+        unsigned long freq = host->ios.clock;
+        unsigned long flags;
+
+        if (!freq) {
+                pr_debug("%s: frequency set to 0 in disable function, "
+                         "this means the clock is already disabled.\n",
+                         mmc_hostname(host));
+                return;
+        }
+        /*
+         * New requests may have appeared while we were scheduling,
+         * then there is no reason to delay the check before
+         * clk_disable().
+         */
+        spin_lock_irqsave(&host->clk_lock, flags);
+
+        /*
+         * Delay n bus cycles (at least 8 from MMC spec) before attempting
+         * to disable the MCI block clock. The reference count may have
+         * gone up again after this delay due to rescheduling!
+         */
+        if (!host->clk_requests) {
+                spin_unlock_irqrestore(&host->clk_lock, flags);
+                tick_ns = DIV_ROUND_UP(1000000000, freq);
+                ndelay(host->clk_delay * tick_ns);
+        } else {
+                /* New users appeared while waiting for this work */
+                spin_unlock_irqrestore(&host->clk_lock, flags);
+                return;
+        }
+        mutex_lock(&host->clk_gate_mutex);
+        spin_lock_irqsave(&host->clk_lock, flags);
+        if (!host->clk_requests) {
+                spin_unlock_irqrestore(&host->clk_lock, flags);
+                /* This will set host->ios.clock to 0 */
+                mmc_gate_clock(host);
+                spin_lock_irqsave(&host->clk_lock, flags);
+                pr_debug("%s: gated MCI clock\n", mmc_hostname(host));
+        }
+        spin_unlock_irqrestore(&host->clk_lock, flags);
+        mutex_unlock(&host->clk_gate_mutex);
+}
+
+/*
+ * Internal work. Work to disable the clock at some later point.
+ */
+static void mmc_host_clk_gate_work(struct work_struct *work)
+{
+        struct mmc_host *host = container_of(work, struct mmc_host,
+                                                clk_gate_work);
+
+        mmc_host_clk_gate_delayed(host);
+}
+
+/**
+ * mmc_host_clk_ungate - ungate hardware MCI clocks
+ * @host: host to ungate.
+ *
+ * Makes sure the host ios.clock is restored to a non-zero value
+ * past this call. Increase clock reference count and ungate clock
+ * if we're the first user.
+ */
+void mmc_host_clk_ungate(struct mmc_host *host)
+{
+        unsigned long flags;
+
+        mutex_lock(&host->clk_gate_mutex);
+        spin_lock_irqsave(&host->clk_lock, flags);
+        if (host->clk_gated) {
+                spin_unlock_irqrestore(&host->clk_lock, flags);
+                mmc_ungate_clock(host);
+                spin_lock_irqsave(&host->clk_lock, flags);
+                pr_debug("%s: ungated MCI clock\n", mmc_hostname(host));
+        }
+        host->clk_requests++;
+        spin_unlock_irqrestore(&host->clk_lock, flags);
+        mutex_unlock(&host->clk_gate_mutex);
+}
+
+/**
+ * mmc_host_may_gate_card - check if this card may be gated
+ * @card: card to check.
+ */
+static bool mmc_host_may_gate_card(struct mmc_card *card)
+{
+        /* If there is no card we may gate it */
+        if (!card)
+                return true;
+        /*
+         * Don't gate SDIO cards! These need to be clocked at all times
+         * since they may be independent systems generating interrupts
+         * and other events. The clock requests counter from the core will
+         * go down to zero since the core does not need it, but we will not
+         * gate the clock, because there is somebody out there that may still
+         * be using it.
+         */
+        if (mmc_card_sdio(card))
+                return false;
+
+        return true;
+}
+
+/**
+ * mmc_host_clk_gate - gate off hardware MCI clocks
+ * @host: host to gate.
+ *
+ * Calls the host driver with ios.clock set to zero as often as possible
+ * in order to gate off hardware MCI clocks. Decrease clock reference
+ * count and schedule disabling of clock.
+ */
+void mmc_host_clk_gate(struct mmc_host *host)
+{
+        unsigned long flags;
+
+        spin_lock_irqsave(&host->clk_lock, flags);
+        host->clk_requests--;
+        if (mmc_host_may_gate_card(host->card) &&
+            !host->clk_requests)
+                schedule_work(&host->clk_gate_work);
+        spin_unlock_irqrestore(&host->clk_lock, flags);
+}
+
+/**
+ * mmc_host_clk_rate - get current clock frequency setting
+ * @host: host to get the clock frequency for.
+ *
+ * Returns current clock frequency regardless of gating.
+ */
+unsigned int mmc_host_clk_rate(struct mmc_host *host)
+{
+        unsigned long freq;
+        unsigned long flags;
+
+        spin_lock_irqsave(&host->clk_lock, flags);
+        if (host->clk_gated)
+                freq = host->clk_old;
+        else
+                freq = host->ios.clock;
+        spin_unlock_irqrestore(&host->clk_lock, flags);
+        return freq;
+}
+
+/**
+ * mmc_host_clk_init - set up clock gating code
+ * @host: host with potential clock to control
+ */
+static inline void mmc_host_clk_init(struct mmc_host *host)
+{
+        host->clk_requests = 0;
+        /* Hold MCI clock for 8 cycles by default */
+        host->clk_delay = 8;
+        host->clk_gated = false;
+        INIT_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
+        spin_lock_init(&host->clk_lock);
+        mutex_init(&host->clk_gate_mutex);
+}
+
+/**
+ * mmc_host_clk_exit - shut down clock gating code
+ * @host: host with potential clock to control
+ */
+static inline void mmc_host_clk_exit(struct mmc_host *host)
+{
+        /*
+         * Wait for any outstanding gate and then make sure we're
+         * ungated before exiting.
+         */
+        if (cancel_work_sync(&host->clk_gate_work))
+                mmc_host_clk_gate_delayed(host);
+        if (host->clk_gated)
+                mmc_host_clk_ungate(host);
+        BUG_ON(host->clk_requests > 0);
+}
+
+#else
+
+static inline void mmc_host_clk_init(struct mmc_host *host)
+{
+}
+
+static inline void mmc_host_clk_exit(struct mmc_host *host)
+{
+}
+
+#endif
+
 /**
  * mmc_alloc_host - initialise the per-host structure.
  * @extra: sizeof private data structure
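As a quick sanity check of the grace period implemented above (illustrative numbers only): tick_ns = DIV_ROUND_UP(1000000000, freq), so with the default clk_delay of 8 the busy-wait amounts to 8 * 2500 ns = 20 µs at a 400 kHz identification clock and just 8 * 20 ns = 160 ns at a 52 MHz high-speed clock, short enough that a plain ndelay() from the workqueue is a reasonable way to wait out the mandated 8 MCLK cycles.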
@@ -82,6 +282,8 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
         host->class_dev.class = &mmc_host_class;
         device_initialize(&host->class_dev);
 
+        mmc_host_clk_init(host);
+
         spin_lock_init(&host->lock);
         init_waitqueue_head(&host->wq);
         INIT_DELAYED_WORK(&host->detect, mmc_rescan);
@@ -163,6 +365,8 @@ void mmc_remove_host(struct mmc_host *host)
         device_del(&host->class_dev);
 
         led_trigger_unregister_simple(host->led);
+
+        mmc_host_clk_exit(host);
 }
 
 EXPORT_SYMBOL(mmc_remove_host);
@@ -183,4 +387,3 @@ void mmc_free_host(struct mmc_host *host)
 }
 
 EXPORT_SYMBOL(mmc_free_host);
-
@@ -10,10 +10,31 @@
  */
 #ifndef _MMC_CORE_HOST_H
 #define _MMC_CORE_HOST_H
+#include <linux/mmc/host.h>
 
 int mmc_register_host_class(void);
 void mmc_unregister_host_class(void);
 
+#ifdef CONFIG_MMC_CLKGATE
+void mmc_host_clk_ungate(struct mmc_host *host);
+void mmc_host_clk_gate(struct mmc_host *host);
+unsigned int mmc_host_clk_rate(struct mmc_host *host);
+
+#else
+static inline void mmc_host_clk_ungate(struct mmc_host *host)
+{
+}
+
+static inline void mmc_host_clk_gate(struct mmc_host *host)
+{
+}
+
+static inline unsigned int mmc_host_clk_rate(struct mmc_host *host)
+{
+        return host->ios.clock;
+}
+#endif
+
 void mmc_host_deeper_disable(struct work_struct *work);
 
 #endif
@@ -172,6 +172,16 @@ struct mmc_host {
 
         mmc_pm_flag_t           pm_caps;        /* supported pm features */
 
+#ifdef CONFIG_MMC_CLKGATE
+        int                     clk_requests;   /* internal reference counter */
+        unsigned int            clk_delay;      /* number of MCI clk hold cycles */
+        bool                    clk_gated;      /* clock gated */
+        struct work_struct      clk_gate_work;  /* delayed clock gate */
+        unsigned int            clk_old;        /* old clock value cache */
+        spinlock_t              clk_lock;       /* lock for clk fields */
+        struct mutex            clk_gate_mutex; /* mutex for clock gating */
+#endif
+
         /* host specific block data */
         unsigned int            max_seg_size;   /* see blk_queue_max_segment_size */
         unsigned short          max_segs;       /* see blk_queue_max_segments */