// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2023, Linaro Ltd. All rights reserved.
*/
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/usb/pd.h>
#include <linux/usb/tcpm.h>
#include "qcom_pmic_typec_pdphy.h"
struct pmic_typec_pdphy_irq_data {
int virq;
int irq;
struct pmic_typec_pdphy *pmic_typec_pdphy;
};
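
/* Driver state for one PMIC PD PHY block. */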
struct pmic_typec_pdphy {
struct device *dev;
struct tcpm_port *tcpm_port;
struct regmap *regmap;
u32 base;
unsigned int nr_irqs;
struct pmic_typec_pdphy_irq_data *irq_data;
struct work_struct reset_work;
struct work_struct receive_work;
struct regulator *vdd_pdphy;
spinlock_t lock; /* Register atomicity */
};
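
/*
 * Hold the PD PHY in reset: terminate any transmission in progress and
 * clear the frame filter so no further frames are accepted.
 */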
static void qcom_pmic_typec_pdphy_reset_on(struct pmic_typec_pdphy *pmic_typec_pdphy)
{
struct device *dev = pmic_typec_pdphy->dev;
int ret;
/* Terminate TX */
ret = regmap_write(pmic_typec_pdphy->regmap,
pmic_typec_pdphy->base + USB_PDPHY_TX_CONTROL_REG, 0);
if (ret)
goto err;
ret = regmap_write(pmic_typec_pdphy->regmap,
pmic_typec_pdphy->base + USB_PDPHY_FRAME_FILTER_REG, 0);
if (ret)
goto err;
return;
err:
dev_err(dev, "pd_reset_on error\n");
}
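
/* Take the PHY out of reset: accept SOP and Hard Reset frames again. */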
static void qcom_pmic_typec_pdphy_reset_off(struct pmic_typec_pdphy *pmic_typec_pdphy)
{
struct device *dev = pmic_typec_pdphy->dev;
int ret;
ret = regmap_write(pmic_typec_pdphy->regmap,
pmic_typec_pdphy->base + USB_PDPHY_FRAME_FILTER_REG,
FRAME_FILTER_EN_SOP | FRAME_FILTER_EN_HARD_RESET);
if (ret)
dev_err(dev, "pd_reset_off error\n");
}
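
/*
 * Work item scheduled from the SIG_RX interrupt: cycle the PHY through
 * reset and report a Hard Reset to TCPM.
 */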
static void qcom_pmic_typec_pdphy_sig_reset_work(struct work_struct *work)
{
struct pmic_typec_pdphy *pmic_typec_pdphy = container_of(work, struct pmic_typec_pdphy,
reset_work);
unsigned long flags;
spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);
qcom_pmic_typec_pdphy_reset_on(pmic_typec_pdphy);
qcom_pmic_typec_pdphy_reset_off(pmic_typec_pdphy);
spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);
tcpm_pd_hard_reset(pmic_typec_pdphy->tcpm_port);
}
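
/*
 * Clear the TX control register and read it back so the write has latched
 * before the next TX command is issued.
 */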
static int
qcom_pmic_typec_pdphy_clear_tx_control_reg(struct pmic_typec_pdphy *pmic_typec_pdphy)
{
struct device *dev = pmic_typec_pdphy->dev;
unsigned int val;
int ret;
/* Clear TX control register */
ret = regmap_write(pmic_typec_pdphy->regmap,
pmic_typec_pdphy->base + USB_PDPHY_TX_CONTROL_REG, 0);
if (ret)
goto done;
/* Perform readback to ensure sufficient delay for command to latch */
ret = regmap_read(pmic_typec_pdphy->regmap,
pmic_typec_pdphy->base + USB_PDPHY_TX_CONTROL_REG, &val);
done:
if (ret)
dev_err(dev, "pd_clear_tx_control_reg: clear tx flag\n");
return ret;
}
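
/*
 * Transmit a PD signal (Hard Reset / Cable Reset) rather than a full
 * message; used when no payload is supplied.
 */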
static int
qcom_pmic_typec_pdphy_pd_transmit_signal(struct pmic_typec_pdphy *pmic_typec_pdphy,
enum tcpm_transmit_type type,
unsigned int negotiated_rev)
{
struct device *dev = pmic_typec_pdphy->dev;
unsigned int val;
unsigned long flags;
int ret;
spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);
/* Clear TX control register */
ret = qcom_pmic_typec_pdphy_clear_tx_control_reg(pmic_typec_pdphy);
if (ret)
goto done;
val = TX_CONTROL_SEND_SIGNAL;
if (negotiated_rev == PD_REV30)
val |= TX_CONTROL_RETRY_COUNT(2);
else
val |= TX_CONTROL_RETRY_COUNT(3);
if (type == TCPC_TX_CABLE_RESET || type == TCPC_TX_HARD_RESET)
val |= TX_CONTROL_FRAME_TYPE(1);
ret = regmap_write(pmic_typec_pdphy->regmap,
pmic_typec_pdphy->base + USB_PDPHY_TX_CONTROL_REG, val);
done:
spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);
dev_vdbg(dev, "pd_transmit_signal: type %d negotiate_rev %d send %d\n",
type, negotiated_rev, ret);
return ret;
}
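
/*
 * Transmit a PD message: stage the header and payload in the TX buffer,
 * program the total size and trigger the send via the TX control register.
 */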
static int
qcom_pmic_typec_pdphy_pd_transmit_payload(struct pmic_typec_pdphy *pmic_typec_pdphy,
enum tcpm_transmit_type type,
const struct pd_message *msg,
unsigned int negotiated_rev)
{
struct device *dev = pmic_typec_pdphy->dev;
unsigned int val, hdr_len, txbuf_len, txsize_len;
unsigned long flags;
	int ret;

	/*
	 * Compute the lengths up front so they are always initialised for the
	 * error print in the done: path below.
	 */
	hdr_len = sizeof(msg->header);
	txbuf_len = pd_header_cnt_le(msg->header) * 4;
	txsize_len = hdr_len + txbuf_len - 1;
spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);
ret = regmap_read(pmic_typec_pdphy->regmap,
pmic_typec_pdphy->base + USB_PDPHY_RX_ACKNOWLEDGE_REG,
&val);
if (ret)
goto done;
if (val) {
dev_err(dev, "pd_transmit_payload: RX message pending\n");
ret = -EBUSY;
goto done;
}
/* Clear TX control register */
ret = qcom_pmic_typec_pdphy_clear_tx_control_reg(pmic_typec_pdphy);
if (ret)
goto done;
/* Write message header sizeof(u16) to USB_PDPHY_TX_BUFFER_HDR_REG */
ret = regmap_bulk_write(pmic_typec_pdphy->regmap,
pmic_typec_pdphy->base + USB_PDPHY_TX_BUFFER_HDR_REG,
&msg->header, hdr_len);
if (ret)
goto done;
/* Write payload to USB_PDPHY_TX_BUFFER_DATA_REG for txbuf_len */
if (txbuf_len) {
ret = regmap_bulk_write(pmic_typec_pdphy->regmap,
pmic_typec_pdphy->base + USB_PDPHY_TX_BUFFER_DATA_REG,
&msg->payload, txbuf_len);
if (ret)
goto done;
}
/* Write total length ((header + data) - 1) to USB_PDPHY_TX_SIZE_REG */
ret = regmap_write(pmic_typec_pdphy->regmap,
pmic_typec_pdphy->base + USB_PDPHY_TX_SIZE_REG,
txsize_len);
if (ret)
goto done;
/* Clear TX control register */
ret = qcom_pmic_typec_pdphy_clear_tx_control_reg(pmic_typec_pdphy);
if (ret)
goto done;
/* Initiate transmit with retry count as indicated by PD revision */
val = TX_CONTROL_FRAME_TYPE(type) | TX_CONTROL_SEND_MSG;
if (pd_header_rev(msg->header) == PD_REV30)
val |= TX_CONTROL_RETRY_COUNT(2);
else
val |= TX_CONTROL_RETRY_COUNT(3);
ret = regmap_write(pmic_typec_pdphy->regmap,
pmic_typec_pdphy->base + USB_PDPHY_TX_CONTROL_REG, val);
done:
spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);
if (ret) {
dev_err(dev, "pd_transmit_payload: hdr %*ph data %*ph ret %d\n",
hdr_len, &msg->header, txbuf_len, &msg->payload, ret);
}
return ret;
}
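
/*
 * TCPM transmit hook: messages go through the payload path, signals
 * (msg == NULL) through the signal path.
 */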
int qcom_pmic_typec_pdphy_pd_transmit(struct pmic_typec_pdphy *pmic_typec_pdphy,
enum tcpm_transmit_type type,
const struct pd_message *msg,
unsigned int negotiated_rev)
{
struct device *dev = pmic_typec_pdphy->dev;
int ret;
if (msg) {
ret = qcom_pmic_typec_pdphy_pd_transmit_payload(pmic_typec_pdphy,
type, msg,
negotiated_rev);
} else {
ret = qcom_pmic_typec_pdphy_pd_transmit_signal(pmic_typec_pdphy,
type,
negotiated_rev);
}
if (ret)
dev_dbg(dev, "pd_transmit: type %x result %d\n", type, ret);
return ret;
}
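
/*
 * Read one received message out of the RX buffer, return buffer ownership
 * to the hardware and hand the message to TCPM.
 */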
static void qcom_pmic_typec_pdphy_pd_receive(struct pmic_typec_pdphy *pmic_typec_pdphy)
{
struct device *dev = pmic_typec_pdphy->dev;
struct pd_message msg;
unsigned int size, rx_status;
unsigned long flags;
int ret;
spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);
ret = regmap_read(pmic_typec_pdphy->regmap,
pmic_typec_pdphy->base + USB_PDPHY_RX_SIZE_REG, &size);
if (ret)
goto done;
/* Hardware requires +1 of the real read value to be passed */
if (size < 1 || size > sizeof(msg.payload) + 1) {
dev_dbg(dev, "pd_receive: invalid size %d\n", size);
goto done;
}
size += 1;
ret = regmap_read(pmic_typec_pdphy->regmap,
pmic_typec_pdphy->base + USB_PDPHY_RX_STATUS_REG,
&rx_status);
if (ret)
goto done;
ret = regmap_bulk_read(pmic_typec_pdphy->regmap,
pmic_typec_pdphy->base + USB_PDPHY_RX_BUFFER_REG,
(u8 *)&msg, size);
if (ret)
goto done;
/* Return ownership of RX buffer to hardware */
ret = regmap_write(pmic_typec_pdphy->regmap,
pmic_typec_pdphy->base + USB_PDPHY_RX_ACKNOWLEDGE_REG, 0);
done:
spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);
if (!ret) {
dev_vdbg(dev, "pd_receive: handing %d bytes to tcpm\n", size);
tcpm_pd_receive(pmic_typec_pdphy->tcpm_port, &msg);
}
}
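
/* Threaded interrupt handler: dispatch on the driver-internal virq identifier. */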
static irqreturn_t qcom_pmic_typec_pdphy_isr(int irq, void *dev_id)
{
struct pmic_typec_pdphy_irq_data *irq_data = dev_id;
struct pmic_typec_pdphy *pmic_typec_pdphy = irq_data->pmic_typec_pdphy;
struct device *dev = pmic_typec_pdphy->dev;
switch (irq_data->virq) {
case PMIC_PDPHY_SIG_TX_IRQ:
dev_err(dev, "isr: tx_sig\n");
break;
case PMIC_PDPHY_SIG_RX_IRQ:
schedule_work(&pmic_typec_pdphy->reset_work);
break;
case PMIC_PDPHY_MSG_TX_IRQ:
tcpm_pd_transmit_complete(pmic_typec_pdphy->tcpm_port,
TCPC_TX_SUCCESS);
break;
case PMIC_PDPHY_MSG_RX_IRQ:
qcom_pmic_typec_pdphy_pd_receive(pmic_typec_pdphy);
break;
case PMIC_PDPHY_MSG_TX_FAIL_IRQ:
tcpm_pd_transmit_complete(pmic_typec_pdphy->tcpm_port,
TCPC_TX_FAILED);
break;
case PMIC_PDPHY_MSG_TX_DISCARD_IRQ:
tcpm_pd_transmit_complete(pmic_typec_pdphy->tcpm_port,
TCPC_TX_DISCARDED);
break;
}
return IRQ_HANDLED;
}
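
/* Enable or disable PD message reception via the RX acknowledge register. */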
int qcom_pmic_typec_pdphy_set_pd_rx(struct pmic_typec_pdphy *pmic_typec_pdphy, bool on)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);
ret = regmap_write(pmic_typec_pdphy->regmap,
pmic_typec_pdphy->base + USB_PDPHY_RX_ACKNOWLEDGE_REG, !on);
spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);
dev_dbg(pmic_typec_pdphy->dev, "set_pd_rx: %s\n", on ? "on" : "off");
return ret;
}
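
/* Program the port data role (host/device) and power role (source/sink) bits. */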
int qcom_pmic_typec_pdphy_set_roles(struct pmic_typec_pdphy *pmic_typec_pdphy,
bool data_role_host, bool power_role_src)
{
struct device *dev = pmic_typec_pdphy->dev;
unsigned long flags;
int ret;
spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);
ret = regmap_update_bits(pmic_typec_pdphy->regmap,
pmic_typec_pdphy->base + USB_PDPHY_MSG_CONFIG_REG,
MSG_CONFIG_PORT_DATA_ROLE |
MSG_CONFIG_PORT_POWER_ROLE,
data_role_host << 3 | power_role_src << 2);
spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);
dev_dbg(dev, "pdphy_set_roles: data_role_host=%d power_role_src=%d\n",
data_role_host, power_role_src);
return ret;
}
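
/*
 * Bring the PHY up: default the message config to PD 2.0, toggle the
 * enable control register and release the reset.
 */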
static int qcom_pmic_typec_pdphy_enable(struct pmic_typec_pdphy *pmic_typec_pdphy)
{
struct device *dev = pmic_typec_pdphy->dev;
int ret;
/* PD 2.0, DR=TYPEC_DEVICE, PR=TYPEC_SINK */
ret = regmap_update_bits(pmic_typec_pdphy->regmap,
pmic_typec_pdphy->base + USB_PDPHY_MSG_CONFIG_REG,
MSG_CONFIG_SPEC_REV_MASK, PD_REV20);
if (ret)
goto done;
ret = regmap_write(pmic_typec_pdphy->regmap,
pmic_typec_pdphy->base + USB_PDPHY_EN_CONTROL_REG, 0);
if (ret)
goto done;
ret = regmap_write(pmic_typec_pdphy->regmap,
pmic_typec_pdphy->base + USB_PDPHY_EN_CONTROL_REG,
CONTROL_ENABLE);
if (ret)
goto done;
qcom_pmic_typec_pdphy_reset_off(pmic_typec_pdphy);
done:
if (ret) {
regulator_disable(pmic_typec_pdphy->vdd_pdphy);
dev_err(dev, "pdphy_enable fail %d\n", ret);
}
return ret;
}
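
/* Put the PHY back into reset and clear the enable control register. */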
static int qcom_pmic_typec_pdphy_disable(struct pmic_typec_pdphy *pmic_typec_pdphy)
{
int ret;
qcom_pmic_typec_pdphy_reset_on(pmic_typec_pdphy);
ret = regmap_write(pmic_typec_pdphy->regmap,
pmic_typec_pdphy->base + USB_PDPHY_EN_CONTROL_REG, 0);
return ret;
}
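
/* Full disable/enable cycle with a short settling delay in between. */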
static int pmic_typec_pdphy_reset(struct pmic_typec_pdphy *pmic_typec_pdphy)
{
int ret;
ret = qcom_pmic_typec_pdphy_disable(pmic_typec_pdphy);
if (ret)
goto done;
usleep_range(400, 500);
ret = qcom_pmic_typec_pdphy_enable(pmic_typec_pdphy);
done:
return ret;
}
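
/*
 * Power the vdd-pdphy supply, reset and enable the PHY, then unmask its
 * interrupts. The tcpm_port passed in is where receive and transmit-complete
 * events are reported.
 */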
int qcom_pmic_typec_pdphy_start(struct pmic_typec_pdphy *pmic_typec_pdphy,
struct tcpm_port *tcpm_port)
{
int i;
int ret;
ret = regulator_enable(pmic_typec_pdphy->vdd_pdphy);
if (ret)
return ret;
pmic_typec_pdphy->tcpm_port = tcpm_port;
ret = pmic_typec_pdphy_reset(pmic_typec_pdphy);
if (ret)
return ret;
for (i = 0; i < pmic_typec_pdphy->nr_irqs; i++)
enable_irq(pmic_typec_pdphy->irq_data[i].irq);
return 0;
}
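
/* Mask the PHY interrupts, assert reset and power down the vdd-pdphy supply. */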
void qcom_pmic_typec_pdphy_stop(struct pmic_typec_pdphy *pmic_typec_pdphy)
{
int i;
for (i = 0; i < pmic_typec_pdphy->nr_irqs; i++)
disable_irq(pmic_typec_pdphy->irq_data[i].irq);
qcom_pmic_typec_pdphy_reset_on(pmic_typec_pdphy);
regulator_disable(pmic_typec_pdphy->vdd_pdphy);
}
struct pmic_typec_pdphy *qcom_pmic_typec_pdphy_alloc(struct device *dev)
{
return devm_kzalloc(dev, sizeof(struct pmic_typec_pdphy), GFP_KERNEL);
}
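
/*
 * Probe-time setup: record resources, request the PD PHY interrupts (left
 * disabled via IRQF_NO_AUTOEN until qcom_pmic_typec_pdphy_start()) and
 * initialise the reset work.
 */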
int qcom_pmic_typec_pdphy_probe(struct platform_device *pdev,
struct pmic_typec_pdphy *pmic_typec_pdphy,
struct pmic_typec_pdphy_resources *res,
struct regmap *regmap,
u32 base)
{
struct device *dev = &pdev->dev;
struct pmic_typec_pdphy_irq_data *irq_data;
int i, ret, irq;
if (!res->nr_irqs || res->nr_irqs > PMIC_PDPHY_MAX_IRQS)
return -EINVAL;
	irq_data = devm_kcalloc(dev, res->nr_irqs, sizeof(*irq_data),
				GFP_KERNEL);
if (!irq_data)
return -ENOMEM;
pmic_typec_pdphy->vdd_pdphy = devm_regulator_get(dev, "vdd-pdphy");
if (IS_ERR(pmic_typec_pdphy->vdd_pdphy))
return PTR_ERR(pmic_typec_pdphy->vdd_pdphy);
pmic_typec_pdphy->dev = dev;
pmic_typec_pdphy->base = base;
pmic_typec_pdphy->regmap = regmap;
pmic_typec_pdphy->nr_irqs = res->nr_irqs;
pmic_typec_pdphy->irq_data = irq_data;
spin_lock_init(&pmic_typec_pdphy->lock);
INIT_WORK(&pmic_typec_pdphy->reset_work, qcom_pmic_typec_pdphy_sig_reset_work);
for (i = 0; i < res->nr_irqs; i++, irq_data++) {
irq = platform_get_irq_byname(pdev, res->irq_params[i].irq_name);
if (irq < 0)
return irq;
irq_data->pmic_typec_pdphy = pmic_typec_pdphy;
irq_data->irq = irq;
irq_data->virq = res->irq_params[i].virq;
ret = devm_request_threaded_irq(dev, irq, NULL,
qcom_pmic_typec_pdphy_isr,
IRQF_ONESHOT | IRQF_NO_AUTOEN,
res->irq_params[i].irq_name,
irq_data);
if (ret)
return ret;
}
return 0;
}