|  | @@ -895,3 +895,127 @@ bool clkdm_in_hwsup(struct clockdomain *clkdm)
 | 
	
		
			
				|  |  |  		return false;
 | 
	
		
			
				|  |  |  
 | 
	
		
			
				|  |  |  	spin_lock_irqsave(&clkdm->lock, flags);
 | 
	
		
			
				|  |  | +	ret = (clkdm->_flags & _CLKDM_FLAG_HWSUP_ENABLED) ? true : false;
 | 
	
		
			
				|  |  | +	spin_unlock_irqrestore(&clkdm->lock, flags);
 | 
	
		
			
				|  |  | +
 | 
	
		
			
				|  |  | +	return ret;
 | 
	
		
			
				|  |  | +}
 | 
	
		
			
				|  |  | +
 | 
	
		
			
				|  |  | +/**
 | 
	
		
			
				|  |  | + * clkdm_missing_idle_reporting - can @clkdm enter autoidle even if in use?
 | 
	
		
			
				|  |  | + * @clkdm: struct clockdomain *
 | 
	
		
			
				|  |  | + *
 | 
	
		
			
				|  |  | + * Returns true if clockdomain @clkdm has the
 | 
	
		
			
				|  |  | + * CLKDM_MISSING_IDLE_REPORTING flag set, or false if not or @clkdm is
 | 
	
		
			
				|  |  | + * null.  More information is available in the documentation for the
 | 
	
		
			
				|  |  | + * CLKDM_MISSING_IDLE_REPORTING macro.
 | 
	
		
			
				|  |  | + */
 | 
	
		
			
				|  |  | +bool clkdm_missing_idle_reporting(struct clockdomain *clkdm)
 | 
	
		
			
				|  |  | +{
 | 
	
		
			
				|  |  | +	if (!clkdm)
 | 
	
		
			
				|  |  | +		return false;
 | 
	
		
			
				|  |  | +
 | 
	
		
			
				|  |  | +	return (clkdm->flags & CLKDM_MISSING_IDLE_REPORTING) ? true : false;
 | 
	
		
			
				|  |  | +}
 | 
	
		
			
				|  |  | +
 | 
	
		
			
				|  |  | +/* Clockdomain-to-clock/hwmod framework interface code */
 | 
	
		
			
				|  |  | +
 | 
	
		
			
/*
 * _clkdm_clk_hwmod_enable - increment @clkdm's usecount, waking it if needed
 * @clkdm: struct clockdomain * to mark as in use
 *
 * Increments the usecount of @clkdm under its spinlock.  On the
 * 0 -> 1 transition -- or on every call when clockdomain
 * autodependencies are disabled -- invokes the platform
 * clkdm_clk_enable hook and resynchronizes the enclosing
 * powerdomain's state.  Returns -EINVAL if @clkdm is null or the
 * platform hooks are missing; returns 0 upon success.
 */
static int _clkdm_clk_hwmod_enable(struct clockdomain *clkdm)
{
	unsigned long flags;

	if (!clkdm || !arch_clkdm || !arch_clkdm->clkdm_clk_enable)
		return -EINVAL;

	spin_lock_irqsave(&clkdm->lock, flags);

	/*
	 * For arch's with no autodeps, clkdm_clk_enable
	 * should be called for every clock instance or hwmod that is
	 * enabled, so the clkdm can be force woken up.
	 */
	if ((atomic_inc_return(&clkdm->usecount) > 1) && autodeps) {
		/* Already active and autodeps handle wakeup: nothing to do */
		spin_unlock_irqrestore(&clkdm->lock, flags);
		return 0;
	}

	arch_clkdm->clkdm_clk_enable(clkdm);
	/* Resync the powerdomain state while still holding the lock */
	pwrdm_state_switch(clkdm->pwrdm.ptr);
	spin_unlock_irqrestore(&clkdm->lock, flags);

	pr_debug("clockdomain: %s: enabled\n", clkdm->name);

	return 0;
}
 | 
	
		
			
				|  |  | +
 | 
	
		
			
				|  |  | +/**
 | 
	
		
			
				|  |  | + * clkdm_clk_enable - add an enabled downstream clock to this clkdm
 | 
	
		
			
				|  |  | + * @clkdm: struct clockdomain *
 | 
	
		
			
				|  |  | + * @clk: struct clk * of the enabled downstream clock
 | 
	
		
			
				|  |  | + *
 | 
	
		
			
				|  |  | + * Increment the usecount of the clockdomain @clkdm and ensure that it
 | 
	
		
			
				|  |  | + * is awake before @clk is enabled.  Intended to be called by
 | 
	
		
			
				|  |  | + * clk_enable() code.  If the clockdomain is in software-supervised
 | 
	
		
			
				|  |  | + * idle mode, force the clockdomain to wake.  If the clockdomain is in
 | 
	
		
			
				|  |  | + * hardware-supervised idle mode, add clkdm-pwrdm autodependencies, to
 | 
	
		
			
				|  |  | + * ensure that devices in the clockdomain can be read from/written to
 | 
	
		
			
				|  |  | + * by on-chip processors.  Returns -EINVAL if passed null pointers;
 | 
	
		
			
				|  |  | + * returns 0 upon success or if the clockdomain is in hwsup idle mode.
 | 
	
		
			
				|  |  | + */
 | 
	
		
			
				|  |  | +int clkdm_clk_enable(struct clockdomain *clkdm, struct clk *clk)
 | 
	
		
			
				|  |  | +{
 | 
	
		
			
				|  |  | +	/*
 | 
	
		
			
				|  |  | +	 * XXX Rewrite this code to maintain a list of enabled
 | 
	
		
			
				|  |  | +	 * downstream clocks for debugging purposes?
 | 
	
		
			
				|  |  | +	 */
 | 
	
		
			
				|  |  | +
 | 
	
		
			
				|  |  | +	if (!clk)
 | 
	
		
			
				|  |  | +		return -EINVAL;
 | 
	
		
			
				|  |  | +
 | 
	
		
			
				|  |  | +	return _clkdm_clk_hwmod_enable(clkdm);
 | 
	
		
			
				|  |  | +}
 | 
	
		
			
				|  |  | +
 | 
	
		
			
				|  |  | +/**
 | 
	
		
			
				|  |  | + * clkdm_clk_disable - remove an enabled downstream clock from this clkdm
 | 
	
		
			
				|  |  | + * @clkdm: struct clockdomain *
 | 
	
		
			
				|  |  | + * @clk: struct clk * of the disabled downstream clock
 | 
	
		
			
				|  |  | + *
 | 
	
		
			
				|  |  | + * Decrement the usecount of this clockdomain @clkdm when @clk is
 | 
	
		
			
				|  |  | + * disabled.  Intended to be called by clk_disable() code.  If the
 | 
	
		
			
				|  |  | + * clockdomain usecount goes to 0, put the clockdomain to sleep
 | 
	
		
			
				|  |  | + * (software-supervised mode) or remove the clkdm autodependencies
 | 
	
		
			
				|  |  | + * (hardware-supervised mode).  Returns -EINVAL if passed null
 | 
	
		
			
				|  |  | + * pointers; -ERANGE if the @clkdm usecount underflows; or returns 0
 | 
	
		
			
				|  |  | + * upon success or if the clockdomain is in hwsup idle mode.
 | 
	
		
			
				|  |  | + */
 | 
	
		
			
				|  |  | +int clkdm_clk_disable(struct clockdomain *clkdm, struct clk *clk)
 | 
	
		
			
				|  |  | +{
 | 
	
		
			
				|  |  | +	unsigned long flags;
 | 
	
		
			
				|  |  | +
 | 
	
		
			
				|  |  | +	if (!clkdm || !clk || !arch_clkdm || !arch_clkdm->clkdm_clk_disable)
 | 
	
		
			
				|  |  | +		return -EINVAL;
 | 
	
		
			
				|  |  | +
 | 
	
		
			
				|  |  | +	spin_lock_irqsave(&clkdm->lock, flags);
 | 
	
		
			
				|  |  | +
 | 
	
		
			
				|  |  | +	/* corner case: disabling unused clocks */
 | 
	
		
			
				|  |  | +	if ((__clk_get_enable_count(clk) == 0) &&
 | 
	
		
			
				|  |  | +	    (atomic_read(&clkdm->usecount) == 0))
 | 
	
		
			
				|  |  | +		goto ccd_exit;
 | 
	
		
			
				|  |  | +
 | 
	
		
			
				|  |  | +	if (atomic_read(&clkdm->usecount) == 0) {
 | 
	
		
			
				|  |  | +		spin_unlock_irqrestore(&clkdm->lock, flags);
 | 
	
		
			
				|  |  | +		WARN_ON(1); /* underflow */
 | 
	
		
			
				|  |  | +		return -ERANGE;
 | 
	
		
			
				|  |  | +	}
 | 
	
		
			
				|  |  | +
 | 
	
		
			
				|  |  | +	if (atomic_dec_return(&clkdm->usecount) > 0) {
 | 
	
		
			
				|  |  | +		spin_unlock_irqrestore(&clkdm->lock, flags);
 | 
	
		
			
				|  |  | +		return 0;
 | 
	
		
			
				|  |  | +	}
 | 
	
		
			
				|  |  | +
 | 
	
		
			
				|  |  | +	arch_clkdm->clkdm_clk_disable(clkdm);
 | 
	
		
			
				|  |  | +	pwrdm_state_switch(clkdm->pwrdm.ptr);
 | 
	
		
			
				|  |  | +
 | 
	
		
			
				|  |  | +	pr_debug("clockdomain: %s: disabled\n", clkdm->name);
 | 
	
		
			
				|  |  | +
 | 
	
		
			
				|  |  | +ccd_exit:
 |