blob: abe1b1f4362f14a196f76c2408303630832e1a3e [file] [log] [blame]
Tomas Winkler9fff0422019-03-12 00:10:41 +02001// SPDX-License-Identifier: GPL-2.0
Oren Weil3ce72722011-05-15 13:43:43 +03002/*
Tomas Winkler1e55b602019-03-12 00:10:44 +02003 * Copyright (c) 2003-2018, Intel Corporation. All rights reserved.
Oren Weil3ce72722011-05-15 13:43:43 +03004 * Intel Management Engine Interface (Intel MEI) Linux driver
Oren Weil3ce72722011-05-15 13:43:43 +03005 */
6
7#include <linux/pci.h>
Tomas Winkler06ecd642013-02-06 14:06:42 +02008
9#include <linux/kthread.h>
10#include <linux/interrupt.h>
Alexander Usyskin77537ad2016-06-16 17:58:52 +030011#include <linux/pm_runtime.h>
Alexander Usyskin7026a5f2018-07-31 09:35:37 +030012#include <linux/sizes.h>
Tomas Winkler47a73802012-12-25 19:06:03 +020013
14#include "mei_dev.h"
Tomas Winkler06ecd642013-02-06 14:06:42 +020015#include "hbm.h"
16
Tomas Winkler6e4cd272014-03-11 14:49:23 +020017#include "hw-me.h"
18#include "hw-me-regs.h"
Tomas Winkler06ecd642013-02-06 14:06:42 +020019
Tomas Winklera0a927d2015-02-10 10:39:33 +020020#include "mei-trace.h"
21
Tomas Winkler3a65dd42012-12-25 19:06:06 +020022/**
Tomas Winklerb68301e2013-03-27 16:58:29 +020023 * mei_me_reg_read - Reads 32bit data from the mei device
Tomas Winkler3a65dd42012-12-25 19:06:06 +020024 *
Alexander Usyskina8605ea2014-09-29 16:31:49 +030025 * @hw: the me hardware structure
Tomas Winkler3a65dd42012-12-25 19:06:06 +020026 * @offset: offset from which to read the data
27 *
Alexander Usyskina8605ea2014-09-29 16:31:49 +030028 * Return: register value (u32)
Tomas Winkler3a65dd42012-12-25 19:06:06 +020029 */
Tomas Winklerb68301e2013-03-27 16:58:29 +020030static inline u32 mei_me_reg_read(const struct mei_me_hw *hw,
Tomas Winkler3a65dd42012-12-25 19:06:06 +020031 unsigned long offset)
32{
Tomas Winkler52c34562013-02-06 14:06:40 +020033 return ioread32(hw->mem_addr + offset);
Tomas Winkler3a65dd42012-12-25 19:06:06 +020034}
Oren Weil3ce72722011-05-15 13:43:43 +030035
36
37/**
Tomas Winklerb68301e2013-03-27 16:58:29 +020038 * mei_me_reg_write - Writes 32bit data to the mei device
Tomas Winkler3a65dd42012-12-25 19:06:06 +020039 *
Alexander Usyskina8605ea2014-09-29 16:31:49 +030040 * @hw: the me hardware structure
Tomas Winkler3a65dd42012-12-25 19:06:06 +020041 * @offset: offset from which to write the data
42 * @value: register value to write (u32)
43 */
Tomas Winklerb68301e2013-03-27 16:58:29 +020044static inline void mei_me_reg_write(const struct mei_me_hw *hw,
Tomas Winkler3a65dd42012-12-25 19:06:06 +020045 unsigned long offset, u32 value)
46{
Tomas Winkler52c34562013-02-06 14:06:40 +020047 iowrite32(value, hw->mem_addr + offset);
Tomas Winkler3a65dd42012-12-25 19:06:06 +020048}
49
50/**
Tomas Winklerb68301e2013-03-27 16:58:29 +020051 * mei_me_mecbrw_read - Reads 32bit data from ME circular buffer
Tomas Winklerd0252842013-01-08 23:07:24 +020052 * read window register
Tomas Winkler3a65dd42012-12-25 19:06:06 +020053 *
54 * @dev: the device structure
55 *
Alexander Usyskina8605ea2014-09-29 16:31:49 +030056 * Return: ME_CB_RW register value (u32)
Tomas Winkler3a65dd42012-12-25 19:06:06 +020057 */
Tomas Winkler381a58c2015-02-10 10:39:32 +020058static inline u32 mei_me_mecbrw_read(const struct mei_device *dev)
Tomas Winkler3a65dd42012-12-25 19:06:06 +020059{
Tomas Winklerb68301e2013-03-27 16:58:29 +020060 return mei_me_reg_read(to_me_hw(dev), ME_CB_RW);
Tomas Winkler3a65dd42012-12-25 19:06:06 +020061}
Tomas Winkler381a58c2015-02-10 10:39:32 +020062
63/**
64 * mei_me_hcbww_write - write 32bit data to the host circular buffer
65 *
66 * @dev: the device structure
67 * @data: 32bit data to be written to the host circular buffer
68 */
69static inline void mei_me_hcbww_write(struct mei_device *dev, u32 data)
70{
71 mei_me_reg_write(to_me_hw(dev), H_CB_WW, data);
72}
73
Tomas Winkler3a65dd42012-12-25 19:06:06 +020074/**
Tomas Winklerb68301e2013-03-27 16:58:29 +020075 * mei_me_mecsr_read - Reads 32bit data from the ME CSR
Tomas Winkler3a65dd42012-12-25 19:06:06 +020076 *
Tomas Winkler381a58c2015-02-10 10:39:32 +020077 * @dev: the device structure
Tomas Winkler3a65dd42012-12-25 19:06:06 +020078 *
Alexander Usyskina8605ea2014-09-29 16:31:49 +030079 * Return: ME_CSR_HA register value (u32)
Tomas Winkler3a65dd42012-12-25 19:06:06 +020080 */
Tomas Winkler381a58c2015-02-10 10:39:32 +020081static inline u32 mei_me_mecsr_read(const struct mei_device *dev)
Tomas Winkler3a65dd42012-12-25 19:06:06 +020082{
Tomas Winklera0a927d2015-02-10 10:39:33 +020083 u32 reg;
84
85 reg = mei_me_reg_read(to_me_hw(dev), ME_CSR_HA);
86 trace_mei_reg_read(dev->dev, "ME_CSR_HA", ME_CSR_HA, reg);
87
88 return reg;
Tomas Winkler3a65dd42012-12-25 19:06:06 +020089}
90
91/**
Tomas Winklerd0252842013-01-08 23:07:24 +020092 * mei_hcsr_read - Reads 32bit data from the host CSR
93 *
Tomas Winkler381a58c2015-02-10 10:39:32 +020094 * @dev: the device structure
Tomas Winklerd0252842013-01-08 23:07:24 +020095 *
Alexander Usyskina8605ea2014-09-29 16:31:49 +030096 * Return: H_CSR register value (u32)
Tomas Winklerd0252842013-01-08 23:07:24 +020097 */
Tomas Winkler381a58c2015-02-10 10:39:32 +020098static inline u32 mei_hcsr_read(const struct mei_device *dev)
Tomas Winklerd0252842013-01-08 23:07:24 +020099{
Tomas Winklera0a927d2015-02-10 10:39:33 +0200100 u32 reg;
101
102 reg = mei_me_reg_read(to_me_hw(dev), H_CSR);
103 trace_mei_reg_read(dev->dev, "H_CSR", H_CSR, reg);
104
105 return reg;
Tomas Winkler381a58c2015-02-10 10:39:32 +0200106}
107
108/**
109 * mei_hcsr_write - writes H_CSR register to the mei device
110 *
111 * @dev: the device structure
112 * @reg: new register value
113 */
114static inline void mei_hcsr_write(struct mei_device *dev, u32 reg)
115{
Tomas Winklera0a927d2015-02-10 10:39:33 +0200116 trace_mei_reg_write(dev->dev, "H_CSR", H_CSR, reg);
Tomas Winkler381a58c2015-02-10 10:39:32 +0200117 mei_me_reg_write(to_me_hw(dev), H_CSR, reg);
Tomas Winklerd0252842013-01-08 23:07:24 +0200118}
119
120/**
121 * mei_hcsr_set - writes H_CSR register to the mei device,
Oren Weil3ce72722011-05-15 13:43:43 +0300122 * and ignores the H_IS bit for it is write-one-to-zero.
123 *
Tomas Winkler381a58c2015-02-10 10:39:32 +0200124 * @dev: the device structure
125 * @reg: new register value
Oren Weil3ce72722011-05-15 13:43:43 +0300126 */
Tomas Winkler381a58c2015-02-10 10:39:32 +0200127static inline void mei_hcsr_set(struct mei_device *dev, u32 reg)
Oren Weil3ce72722011-05-15 13:43:43 +0300128{
Alexander Usyskin1fa55b42015-08-02 22:20:52 +0300129 reg &= ~H_CSR_IS_MASK;
Tomas Winkler381a58c2015-02-10 10:39:32 +0200130 mei_hcsr_write(dev, reg);
Oren Weil3ce72722011-05-15 13:43:43 +0300131}
132
Tomas Winkler1bd30b62014-09-29 16:31:43 +0300133/**
Alexander Usyskin9c7daa62017-02-02 11:26:53 +0200134 * mei_hcsr_set_hig - set host interrupt (set H_IG)
135 *
136 * @dev: the device structure
137 */
138static inline void mei_hcsr_set_hig(struct mei_device *dev)
139{
140 u32 hcsr;
141
142 hcsr = mei_hcsr_read(dev) | H_IG;
143 mei_hcsr_set(dev, hcsr);
144}
145
146/**
Alexander Usyskin859ef2f2015-08-02 22:20:54 +0300147 * mei_me_d0i3c_read - Reads 32bit data from the D0I3C register
148 *
149 * @dev: the device structure
150 *
151 * Return: H_D0I3C register value (u32)
152 */
153static inline u32 mei_me_d0i3c_read(const struct mei_device *dev)
154{
155 u32 reg;
156
157 reg = mei_me_reg_read(to_me_hw(dev), H_D0I3C);
Alexander Usyskincf094eb2015-09-18 00:11:52 +0300158 trace_mei_reg_read(dev->dev, "H_D0I3C", H_D0I3C, reg);
Alexander Usyskin859ef2f2015-08-02 22:20:54 +0300159
160 return reg;
161}
162
163/**
164 * mei_me_d0i3c_write - writes H_D0I3C register to device
165 *
166 * @dev: the device structure
167 * @reg: new register value
168 */
169static inline void mei_me_d0i3c_write(struct mei_device *dev, u32 reg)
170{
Alexander Usyskincf094eb2015-09-18 00:11:52 +0300171 trace_mei_reg_write(dev->dev, "H_D0I3C", H_D0I3C, reg);
Alexander Usyskin859ef2f2015-08-02 22:20:54 +0300172 mei_me_reg_write(to_me_hw(dev), H_D0I3C, reg);
173}
174
/**
 * mei_me_fw_status - read fw status registers from pci config space
 *
 * @dev: mei device
 * @fw_status: caller-provided storage for the fw status register values
 *
 * Return: 0 on success, -EINVAL for NULL @fw_status,
 *         or the pci_read_config_dword() error code
 */
static int mei_me_fw_status(struct mei_device *dev,
			    struct mei_fw_status *fw_status)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	struct mei_me_hw *hw = to_me_hw(dev);
	/* per-platform table of fw status register offsets in pci config space */
	const struct mei_fw_status *fw_src = &hw->cfg->fw_status;
	int ret;
	int i;

	if (!fw_status)
		return -EINVAL;

	/*
	 * NOTE(review): count is copied unclamped while the loop below is
	 * bounded by MEI_FW_STATUS_MAX; presumably all configs declare
	 * count <= MEI_FW_STATUS_MAX — confirm against hw->cfg tables.
	 */
	fw_status->count = fw_src->count;
	for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
		ret = pci_read_config_dword(pdev, fw_src->status[i],
					    &fw_status->status[i]);
		trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HSF_X",
				       fw_src->status[i],
				       fw_status->status[i]);
		/* stop on the first config-space read failure */
		if (ret)
			return ret;
	}

	return 0;
}
Tomas Winklere7e0c232013-01-08 23:07:31 +0200208
209/**
Masanari Iida393b1482013-04-05 01:05:05 +0900210 * mei_me_hw_config - configure hw dependent settings
Tomas Winklere7e0c232013-01-08 23:07:31 +0200211 *
212 * @dev: mei device
213 */
Tomas Winkler827eef52013-02-06 14:06:41 +0200214static void mei_me_hw_config(struct mei_device *dev)
Tomas Winklere7e0c232013-01-08 23:07:31 +0200215{
Alexander Usyskinbb9f4d22015-08-02 22:20:51 +0300216 struct pci_dev *pdev = to_pci_dev(dev->dev);
Tomas Winklerba9cdd02014-03-18 22:52:00 +0200217 struct mei_me_hw *hw = to_me_hw(dev);
Alexander Usyskinbb9f4d22015-08-02 22:20:51 +0300218 u32 hcsr, reg;
219
Tomas Winklere7e0c232013-01-08 23:07:31 +0200220 /* Doesn't change in runtime */
Alexander Usyskinbb9f4d22015-08-02 22:20:51 +0300221 hcsr = mei_hcsr_read(dev);
Tomas Winkler8c8d9642018-07-23 13:21:23 +0300222 hw->hbuf_depth = (hcsr & H_CBD) >> 24;
Tomas Winklerba9cdd02014-03-18 22:52:00 +0200223
Alexander Usyskinbb9f4d22015-08-02 22:20:51 +0300224 reg = 0;
225 pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
Tomas Winklera96c5482016-02-07 22:46:51 +0200226 trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
Alexander Usyskinbb9f4d22015-08-02 22:20:51 +0300227 hw->d0i3_supported =
228 ((reg & PCI_CFG_HFS_1_D0I3_MSK) == PCI_CFG_HFS_1_D0I3_MSK);
Alexander Usyskinb9a1fc92015-08-02 22:20:56 +0300229
230 hw->pg_state = MEI_PG_OFF;
231 if (hw->d0i3_supported) {
232 reg = mei_me_d0i3c_read(dev);
233 if (reg & H_D0I3C_I3)
234 hw->pg_state = MEI_PG_ON;
235 }
Tomas Winklere7e0c232013-01-08 23:07:31 +0200236}
Tomas Winkler964a2332014-03-18 22:51:59 +0200237
238/**
239 * mei_me_pg_state - translate internal pg state
240 * to the mei power gating state
241 *
Alexander Usyskince231392014-09-29 16:31:50 +0300242 * @dev: mei device
243 *
244 * Return: MEI_PG_OFF if aliveness is on and MEI_PG_ON otherwise
Tomas Winkler964a2332014-03-18 22:51:59 +0200245 */
246static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev)
247{
Tomas Winklerba9cdd02014-03-18 22:52:00 +0200248 struct mei_me_hw *hw = to_me_hw(dev);
Tomas Winkler92db1552014-09-29 16:31:37 +0300249
Tomas Winklerba9cdd02014-03-18 22:52:00 +0200250 return hw->pg_state;
Tomas Winkler964a2332014-03-18 22:51:59 +0200251}
252
Alexander Usyskina2eb0fc2016-12-04 15:22:59 +0200253static inline u32 me_intr_src(u32 hcsr)
254{
255 return hcsr & H_CSR_IS_MASK;
256}
257
258/**
259 * me_intr_disable - disables mei device interrupts
260 * using supplied hcsr register value.
261 *
262 * @dev: the device structure
263 * @hcsr: supplied hcsr register value
264 */
265static inline void me_intr_disable(struct mei_device *dev, u32 hcsr)
266{
267 hcsr &= ~H_CSR_IE_MASK;
268 mei_hcsr_set(dev, hcsr);
269}
270
271/**
272 * mei_me_intr_clear - clear and stop interrupts
273 *
274 * @dev: the device structure
275 * @hcsr: supplied hcsr register value
276 */
277static inline void me_intr_clear(struct mei_device *dev, u32 hcsr)
278{
279 if (me_intr_src(hcsr))
280 mei_hcsr_write(dev, hcsr);
281}
282
Oren Weil3ce72722011-05-15 13:43:43 +0300283/**
Alexander Usyskince231392014-09-29 16:31:50 +0300284 * mei_me_intr_clear - clear and stop interrupts
Tomas Winkler3a65dd42012-12-25 19:06:06 +0200285 *
286 * @dev: the device structure
287 */
Tomas Winkler827eef52013-02-06 14:06:41 +0200288static void mei_me_intr_clear(struct mei_device *dev)
Tomas Winkler3a65dd42012-12-25 19:06:06 +0200289{
Tomas Winkler381a58c2015-02-10 10:39:32 +0200290 u32 hcsr = mei_hcsr_read(dev);
Tomas Winkler92db1552014-09-29 16:31:37 +0300291
Alexander Usyskina2eb0fc2016-12-04 15:22:59 +0200292 me_intr_clear(dev, hcsr);
Tomas Winkler3a65dd42012-12-25 19:06:06 +0200293}
Tomas Winkler3a65dd42012-12-25 19:06:06 +0200294/**
Tomas Winkler827eef52013-02-06 14:06:41 +0200295 * mei_me_intr_enable - enables mei device interrupts
Oren Weil3ce72722011-05-15 13:43:43 +0300296 *
297 * @dev: the device structure
298 */
Tomas Winkler827eef52013-02-06 14:06:41 +0200299static void mei_me_intr_enable(struct mei_device *dev)
Oren Weil3ce72722011-05-15 13:43:43 +0300300{
Tomas Winkler381a58c2015-02-10 10:39:32 +0200301 u32 hcsr = mei_hcsr_read(dev);
Tomas Winkler92db1552014-09-29 16:31:37 +0300302
Alexander Usyskin1fa55b42015-08-02 22:20:52 +0300303 hcsr |= H_CSR_IE_MASK;
Tomas Winkler381a58c2015-02-10 10:39:32 +0200304 mei_hcsr_set(dev, hcsr);
Oren Weil3ce72722011-05-15 13:43:43 +0300305}
306
307/**
Alexander Usyskince231392014-09-29 16:31:50 +0300308 * mei_me_intr_disable - disables mei device interrupts
Oren Weil3ce72722011-05-15 13:43:43 +0300309 *
310 * @dev: the device structure
311 */
Tomas Winkler827eef52013-02-06 14:06:41 +0200312static void mei_me_intr_disable(struct mei_device *dev)
Oren Weil3ce72722011-05-15 13:43:43 +0300313{
Tomas Winkler381a58c2015-02-10 10:39:32 +0200314 u32 hcsr = mei_hcsr_read(dev);
Tomas Winkler92db1552014-09-29 16:31:37 +0300315
Alexander Usyskina2eb0fc2016-12-04 15:22:59 +0200316 me_intr_disable(dev, hcsr);
Oren Weil3ce72722011-05-15 13:43:43 +0300317}
318
Tomas Winkleradfba322013-01-08 23:07:27 +0200319/**
Tomas Winkler4a8efd42016-12-04 15:22:58 +0200320 * mei_me_synchronize_irq - wait for pending IRQ handlers
321 *
322 * @dev: the device structure
323 */
324static void mei_me_synchronize_irq(struct mei_device *dev)
325{
326 struct pci_dev *pdev = to_pci_dev(dev->dev);
327
328 synchronize_irq(pdev->irq);
329}
330
331/**
Tomas Winkler68f8ea12013-03-10 13:56:07 +0200332 * mei_me_hw_reset_release - release device from the reset
333 *
334 * @dev: the device structure
335 */
336static void mei_me_hw_reset_release(struct mei_device *dev)
337{
Tomas Winkler381a58c2015-02-10 10:39:32 +0200338 u32 hcsr = mei_hcsr_read(dev);
Tomas Winkler68f8ea12013-03-10 13:56:07 +0200339
340 hcsr |= H_IG;
341 hcsr &= ~H_RST;
Tomas Winkler381a58c2015-02-10 10:39:32 +0200342 mei_hcsr_set(dev, hcsr);
Tomas Winkler68f8ea12013-03-10 13:56:07 +0200343}
Tomas Winkleradfba322013-01-08 23:07:27 +0200344
Tomas Winkler115ba282013-01-08 23:07:29 +0200345/**
Tomas Winkler827eef52013-02-06 14:06:41 +0200346 * mei_me_host_set_ready - enable device
Tomas Winkler115ba282013-01-08 23:07:29 +0200347 *
Alexander Usyskince231392014-09-29 16:31:50 +0300348 * @dev: mei device
Tomas Winkler115ba282013-01-08 23:07:29 +0200349 */
Tomas Winkler827eef52013-02-06 14:06:41 +0200350static void mei_me_host_set_ready(struct mei_device *dev)
Tomas Winkler115ba282013-01-08 23:07:29 +0200351{
Tomas Winkler381a58c2015-02-10 10:39:32 +0200352 u32 hcsr = mei_hcsr_read(dev);
Tomas Winkler92db1552014-09-29 16:31:37 +0300353
Alexander Usyskin1fa55b42015-08-02 22:20:52 +0300354 hcsr |= H_CSR_IE_MASK | H_IG | H_RDY;
Tomas Winkler381a58c2015-02-10 10:39:32 +0200355 mei_hcsr_set(dev, hcsr);
Tomas Winkler115ba282013-01-08 23:07:29 +0200356}
Alexander Usyskince231392014-09-29 16:31:50 +0300357
Tomas Winkler115ba282013-01-08 23:07:29 +0200358/**
Tomas Winkler827eef52013-02-06 14:06:41 +0200359 * mei_me_host_is_ready - check whether the host has turned ready
Tomas Winkler115ba282013-01-08 23:07:29 +0200360 *
Alexander Usyskina8605ea2014-09-29 16:31:49 +0300361 * @dev: mei device
362 * Return: bool
Tomas Winkler115ba282013-01-08 23:07:29 +0200363 */
Tomas Winkler827eef52013-02-06 14:06:41 +0200364static bool mei_me_host_is_ready(struct mei_device *dev)
Tomas Winkler115ba282013-01-08 23:07:29 +0200365{
Tomas Winkler381a58c2015-02-10 10:39:32 +0200366 u32 hcsr = mei_hcsr_read(dev);
Tomas Winkler92db1552014-09-29 16:31:37 +0300367
Tomas Winkler18caeb72014-11-12 23:42:14 +0200368 return (hcsr & H_RDY) == H_RDY;
Tomas Winkler115ba282013-01-08 23:07:29 +0200369}
370
371/**
Tomas Winkler827eef52013-02-06 14:06:41 +0200372 * mei_me_hw_is_ready - check whether the me(hw) has turned ready
Tomas Winkler115ba282013-01-08 23:07:29 +0200373 *
Alexander Usyskina8605ea2014-09-29 16:31:49 +0300374 * @dev: mei device
375 * Return: bool
Tomas Winkler115ba282013-01-08 23:07:29 +0200376 */
Tomas Winkler827eef52013-02-06 14:06:41 +0200377static bool mei_me_hw_is_ready(struct mei_device *dev)
Tomas Winkler115ba282013-01-08 23:07:29 +0200378{
Tomas Winkler381a58c2015-02-10 10:39:32 +0200379 u32 mecsr = mei_me_mecsr_read(dev);
Tomas Winkler92db1552014-09-29 16:31:37 +0300380
Tomas Winkler18caeb72014-11-12 23:42:14 +0200381 return (mecsr & ME_RDY_HRA) == ME_RDY_HRA;
Tomas Winkler115ba282013-01-08 23:07:29 +0200382}
Tomas Winkler3a65dd42012-12-25 19:06:06 +0200383
Alexander Usyskince231392014-09-29 16:31:50 +0300384/**
Alexander Usyskin47f60a02017-02-02 11:26:54 +0200385 * mei_me_hw_is_resetting - check whether the me(hw) is in reset
386 *
387 * @dev: mei device
388 * Return: bool
389 */
390static bool mei_me_hw_is_resetting(struct mei_device *dev)
391{
392 u32 mecsr = mei_me_mecsr_read(dev);
393
394 return (mecsr & ME_RST_HRA) == ME_RST_HRA;
395}
396
397/**
Alexander Usyskince231392014-09-29 16:31:50 +0300398 * mei_me_hw_ready_wait - wait until the me(hw) has turned ready
399 * or timeout is reached
400 *
401 * @dev: mei device
402 * Return: 0 on success, error otherwise
403 */
Tomas Winkleraafae7e2013-03-11 18:27:03 +0200404static int mei_me_hw_ready_wait(struct mei_device *dev)
405{
Tomas Winkleraafae7e2013-03-11 18:27:03 +0200406 mutex_unlock(&dev->device_lock);
Alexander Usyskin2c2b93e2014-08-12 20:16:03 +0300407 wait_event_timeout(dev->wait_hw_ready,
Tomas Winklerdab9bf42013-07-17 15:13:17 +0300408 dev->recvd_hw_ready,
Tomas Winkler7d93e582014-01-14 23:10:10 +0200409 mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT));
Tomas Winkleraafae7e2013-03-11 18:27:03 +0200410 mutex_lock(&dev->device_lock);
Alexander Usyskin2c2b93e2014-08-12 20:16:03 +0300411 if (!dev->recvd_hw_ready) {
Tomas Winkler2bf94cab2014-09-29 16:31:42 +0300412 dev_err(dev->dev, "wait hw ready failed\n");
Alexander Usyskin2c2b93e2014-08-12 20:16:03 +0300413 return -ETIME;
Tomas Winkleraafae7e2013-03-11 18:27:03 +0200414 }
415
Alexander Usyskin663b7ee2015-01-25 23:45:28 +0200416 mei_me_hw_reset_release(dev);
Tomas Winkleraafae7e2013-03-11 18:27:03 +0200417 dev->recvd_hw_ready = false;
418 return 0;
419}
420
Alexander Usyskince231392014-09-29 16:31:50 +0300421/**
422 * mei_me_hw_start - hw start routine
423 *
424 * @dev: mei device
425 * Return: 0 on success, error otherwise
426 */
Tomas Winkleraafae7e2013-03-11 18:27:03 +0200427static int mei_me_hw_start(struct mei_device *dev)
428{
429 int ret = mei_me_hw_ready_wait(dev);
Tomas Winkler92db1552014-09-29 16:31:37 +0300430
Tomas Winkleraafae7e2013-03-11 18:27:03 +0200431 if (ret)
432 return ret;
Tomas Winkler2bf94cab2014-09-29 16:31:42 +0300433 dev_dbg(dev->dev, "hw is ready\n");
Tomas Winkleraafae7e2013-03-11 18:27:03 +0200434
435 mei_me_host_set_ready(dev);
436 return ret;
437}
438
439
Tomas Winkler3a65dd42012-12-25 19:06:06 +0200440/**
Tomas Winkler726917f2012-06-25 23:46:28 +0300441 * mei_hbuf_filled_slots - gets number of device filled buffer slots
Oren Weil3ce72722011-05-15 13:43:43 +0300442 *
Sedat Dilek7353f852013-01-17 19:54:15 +0100443 * @dev: the device structure
Oren Weil3ce72722011-05-15 13:43:43 +0300444 *
Alexander Usyskina8605ea2014-09-29 16:31:49 +0300445 * Return: number of filled slots
Oren Weil3ce72722011-05-15 13:43:43 +0300446 */
Tomas Winkler726917f2012-06-25 23:46:28 +0300447static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
Oren Weil3ce72722011-05-15 13:43:43 +0300448{
Tomas Winkler18caeb72014-11-12 23:42:14 +0200449 u32 hcsr;
Oren Weil3ce72722011-05-15 13:43:43 +0300450 char read_ptr, write_ptr;
451
Tomas Winkler381a58c2015-02-10 10:39:32 +0200452 hcsr = mei_hcsr_read(dev);
Tomas Winkler726917f2012-06-25 23:46:28 +0300453
Tomas Winkler18caeb72014-11-12 23:42:14 +0200454 read_ptr = (char) ((hcsr & H_CBRP) >> 8);
455 write_ptr = (char) ((hcsr & H_CBWP) >> 16);
Oren Weil3ce72722011-05-15 13:43:43 +0300456
457 return (unsigned char) (write_ptr - read_ptr);
458}
459
460/**
Masanari Iida393b1482013-04-05 01:05:05 +0900461 * mei_me_hbuf_is_empty - checks if host buffer is empty.
Oren Weil3ce72722011-05-15 13:43:43 +0300462 *
463 * @dev: the device structure
464 *
Alexander Usyskina8605ea2014-09-29 16:31:49 +0300465 * Return: true if empty, false - otherwise.
Oren Weil3ce72722011-05-15 13:43:43 +0300466 */
Tomas Winkler827eef52013-02-06 14:06:41 +0200467static bool mei_me_hbuf_is_empty(struct mei_device *dev)
Oren Weil3ce72722011-05-15 13:43:43 +0300468{
Tomas Winkler726917f2012-06-25 23:46:28 +0300469 return mei_hbuf_filled_slots(dev) == 0;
Oren Weil3ce72722011-05-15 13:43:43 +0300470}
471
472/**
Tomas Winkler827eef52013-02-06 14:06:41 +0200473 * mei_me_hbuf_empty_slots - counts write empty slots.
Oren Weil3ce72722011-05-15 13:43:43 +0300474 *
475 * @dev: the device structure
476 *
Alexander Usyskina8605ea2014-09-29 16:31:49 +0300477 * Return: -EOVERFLOW if overflow, otherwise empty slots count
Oren Weil3ce72722011-05-15 13:43:43 +0300478 */
Tomas Winkler827eef52013-02-06 14:06:41 +0200479static int mei_me_hbuf_empty_slots(struct mei_device *dev)
Oren Weil3ce72722011-05-15 13:43:43 +0300480{
Tomas Winkler8c8d9642018-07-23 13:21:23 +0300481 struct mei_me_hw *hw = to_me_hw(dev);
Tomas Winkler24aadc82012-06-25 23:46:27 +0300482 unsigned char filled_slots, empty_slots;
Oren Weil3ce72722011-05-15 13:43:43 +0300483
Tomas Winkler726917f2012-06-25 23:46:28 +0300484 filled_slots = mei_hbuf_filled_slots(dev);
Tomas Winkler8c8d9642018-07-23 13:21:23 +0300485 empty_slots = hw->hbuf_depth - filled_slots;
Oren Weil3ce72722011-05-15 13:43:43 +0300486
487 /* check for overflow */
Tomas Winkler8c8d9642018-07-23 13:21:23 +0300488 if (filled_slots > hw->hbuf_depth)
Oren Weil3ce72722011-05-15 13:43:43 +0300489 return -EOVERFLOW;
490
491 return empty_slots;
492}
493
Alexander Usyskince231392014-09-29 16:31:50 +0300494/**
Tomas Winkler8c8d9642018-07-23 13:21:23 +0300495 * mei_me_hbuf_depth - returns depth of the hw buffer.
Alexander Usyskince231392014-09-29 16:31:50 +0300496 *
497 * @dev: the device structure
498 *
Tomas Winkler8c8d9642018-07-23 13:21:23 +0300499 * Return: size of hw buffer in slots
Alexander Usyskince231392014-09-29 16:31:50 +0300500 */
Tomas Winkler8c8d9642018-07-23 13:21:23 +0300501static u32 mei_me_hbuf_depth(const struct mei_device *dev)
Tomas Winkler827eef52013-02-06 14:06:41 +0200502{
Tomas Winkler8c8d9642018-07-23 13:21:23 +0300503 struct mei_me_hw *hw = to_me_hw(dev);
504
505 return hw->hbuf_depth;
Tomas Winkler827eef52013-02-06 14:06:41 +0200506}
507
Oren Weil3ce72722011-05-15 13:43:43 +0300508/**
Tomas Winkler4b9960d2016-11-11 03:00:08 +0200509 * mei_me_hbuf_write - writes a message to host hw buffer.
Oren Weil3ce72722011-05-15 13:43:43 +0300510 *
511 * @dev: the device structure
Tomas Winkler98e70862018-07-31 09:35:33 +0300512 * @hdr: header of message
513 * @hdr_len: header length in bytes: must be multiplication of a slot (4bytes)
514 * @data: payload
515 * @data_len: payload length in bytes
Oren Weil3ce72722011-05-15 13:43:43 +0300516 *
Tomas Winkler98e70862018-07-31 09:35:33 +0300517 * Return: 0 if success, < 0 - otherwise.
Oren Weil3ce72722011-05-15 13:43:43 +0300518 */
Tomas Winkler4b9960d2016-11-11 03:00:08 +0200519static int mei_me_hbuf_write(struct mei_device *dev,
Tomas Winkler98e70862018-07-31 09:35:33 +0300520 const void *hdr, size_t hdr_len,
521 const void *data, size_t data_len)
Oren Weil3ce72722011-05-15 13:43:43 +0300522{
Tomas Winklerc8c8d082013-03-11 18:27:02 +0200523 unsigned long rem;
Tomas Winkler44c98df2018-07-12 17:10:09 +0300524 unsigned long i;
Tomas Winkler98e70862018-07-31 09:35:33 +0300525 const u32 *reg_buf;
Tomas Winklerc8c8d082013-03-11 18:27:02 +0200526 u32 dw_cnt;
Tomas Winkler169d1332012-06-19 09:13:35 +0300527 int empty_slots;
Oren Weil3ce72722011-05-15 13:43:43 +0300528
Tomas Winkler98e70862018-07-31 09:35:33 +0300529 if (WARN_ON(!hdr || !data || hdr_len & 0x3))
530 return -EINVAL;
531
532 dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM((struct mei_msg_hdr *)hdr));
Oren Weil3ce72722011-05-15 13:43:43 +0300533
Tomas Winkler726917f2012-06-25 23:46:28 +0300534 empty_slots = mei_hbuf_empty_slots(dev);
Tomas Winkler2bf94cab2014-09-29 16:31:42 +0300535 dev_dbg(dev->dev, "empty slots = %hu.\n", empty_slots);
Oren Weil3ce72722011-05-15 13:43:43 +0300536
Tomas Winklerde877432018-07-12 17:10:08 +0300537 if (empty_slots < 0)
538 return -EOVERFLOW;
539
Tomas Winkler98e70862018-07-31 09:35:33 +0300540 dw_cnt = mei_data2slots(hdr_len + data_len);
Tomas Winklerde877432018-07-12 17:10:08 +0300541 if (dw_cnt > (u32)empty_slots)
Tomas Winkler9d098192014-02-19 17:35:48 +0200542 return -EMSGSIZE;
Oren Weil3ce72722011-05-15 13:43:43 +0300543
Tomas Winkler98e70862018-07-31 09:35:33 +0300544 reg_buf = hdr;
545 for (i = 0; i < hdr_len / MEI_SLOT_SIZE; i++)
Tomas Winkler381a58c2015-02-10 10:39:32 +0200546 mei_me_hcbww_write(dev, reg_buf[i]);
Tomas Winkler169d1332012-06-19 09:13:35 +0300547
Tomas Winkler98e70862018-07-31 09:35:33 +0300548 reg_buf = data;
549 for (i = 0; i < data_len / MEI_SLOT_SIZE; i++)
550 mei_me_hcbww_write(dev, reg_buf[i]);
551
552 rem = data_len & 0x3;
Tomas Winkler169d1332012-06-19 09:13:35 +0300553 if (rem > 0) {
554 u32 reg = 0;
Tomas Winkler92db1552014-09-29 16:31:37 +0300555
Tomas Winkler98e70862018-07-31 09:35:33 +0300556 memcpy(&reg, (const u8 *)data + data_len - rem, rem);
Tomas Winkler381a58c2015-02-10 10:39:32 +0200557 mei_me_hcbww_write(dev, reg);
Oren Weil3ce72722011-05-15 13:43:43 +0300558 }
559
Alexander Usyskin9c7daa62017-02-02 11:26:53 +0200560 mei_hcsr_set_hig(dev);
Tomas Winkler827eef52013-02-06 14:06:41 +0200561 if (!mei_me_hw_is_ready(dev))
Tomas Winkler1ccb7b62012-03-14 14:39:42 +0200562 return -EIO;
Oren Weil3ce72722011-05-15 13:43:43 +0300563
Tomas Winkler1ccb7b62012-03-14 14:39:42 +0200564 return 0;
Oren Weil3ce72722011-05-15 13:43:43 +0300565}
566
567/**
Tomas Winkler827eef52013-02-06 14:06:41 +0200568 * mei_me_count_full_read_slots - counts read full slots.
Oren Weil3ce72722011-05-15 13:43:43 +0300569 *
570 * @dev: the device structure
571 *
Alexander Usyskina8605ea2014-09-29 16:31:49 +0300572 * Return: -EOVERFLOW if overflow, otherwise filled slots count
Oren Weil3ce72722011-05-15 13:43:43 +0300573 */
Tomas Winkler827eef52013-02-06 14:06:41 +0200574static int mei_me_count_full_read_slots(struct mei_device *dev)
Oren Weil3ce72722011-05-15 13:43:43 +0300575{
Tomas Winkler18caeb72014-11-12 23:42:14 +0200576 u32 me_csr;
Oren Weil3ce72722011-05-15 13:43:43 +0300577 char read_ptr, write_ptr;
578 unsigned char buffer_depth, filled_slots;
579
Tomas Winkler381a58c2015-02-10 10:39:32 +0200580 me_csr = mei_me_mecsr_read(dev);
Tomas Winkler18caeb72014-11-12 23:42:14 +0200581 buffer_depth = (unsigned char)((me_csr & ME_CBD_HRA) >> 24);
582 read_ptr = (char) ((me_csr & ME_CBRP_HRA) >> 8);
583 write_ptr = (char) ((me_csr & ME_CBWP_HRA) >> 16);
Oren Weil3ce72722011-05-15 13:43:43 +0300584 filled_slots = (unsigned char) (write_ptr - read_ptr);
585
586 /* check for overflow */
587 if (filled_slots > buffer_depth)
588 return -EOVERFLOW;
589
Tomas Winkler2bf94cab2014-09-29 16:31:42 +0300590 dev_dbg(dev->dev, "filled_slots =%08x\n", filled_slots);
Oren Weil3ce72722011-05-15 13:43:43 +0300591 return (int)filled_slots;
592}
593
594/**
Tomas Winkler827eef52013-02-06 14:06:41 +0200595 * mei_me_read_slots - reads a message from mei device.
Oren Weil3ce72722011-05-15 13:43:43 +0300596 *
597 * @dev: the device structure
598 * @buffer: message buffer will be written
599 * @buffer_length: message size will be read
Alexander Usyskince231392014-09-29 16:31:50 +0300600 *
601 * Return: always 0
Oren Weil3ce72722011-05-15 13:43:43 +0300602 */
Tomas Winkler827eef52013-02-06 14:06:41 +0200603static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
Tomas Winkler9fc5f0f2018-07-23 13:21:22 +0300604 unsigned long buffer_length)
Oren Weil3ce72722011-05-15 13:43:43 +0300605{
Tomas Winkleredf1eed2012-02-09 19:25:54 +0200606 u32 *reg_buf = (u32 *)buffer;
Oren Weil3ce72722011-05-15 13:43:43 +0300607
Tomas Winkler9fc5f0f2018-07-23 13:21:22 +0300608 for (; buffer_length >= MEI_SLOT_SIZE; buffer_length -= MEI_SLOT_SIZE)
Tomas Winkler827eef52013-02-06 14:06:41 +0200609 *reg_buf++ = mei_me_mecbrw_read(dev);
Oren Weil3ce72722011-05-15 13:43:43 +0300610
611 if (buffer_length > 0) {
Tomas Winkler827eef52013-02-06 14:06:41 +0200612 u32 reg = mei_me_mecbrw_read(dev);
Tomas Winkler92db1552014-09-29 16:31:37 +0300613
Tomas Winkleredf1eed2012-02-09 19:25:54 +0200614 memcpy(reg_buf, &reg, buffer_length);
Oren Weil3ce72722011-05-15 13:43:43 +0300615 }
616
Alexander Usyskin9c7daa62017-02-02 11:26:53 +0200617 mei_hcsr_set_hig(dev);
Tomas Winkler827eef52013-02-06 14:06:41 +0200618 return 0;
Oren Weil3ce72722011-05-15 13:43:43 +0300619}
620
Tomas Winkler06ecd642013-02-06 14:06:42 +0200621/**
Alexander Usyskin2d1995f2015-02-10 10:39:34 +0200622 * mei_me_pg_set - write pg enter register
Tomas Winklerb16c3572014-03-18 22:51:57 +0200623 *
624 * @dev: the device structure
625 */
Alexander Usyskin2d1995f2015-02-10 10:39:34 +0200626static void mei_me_pg_set(struct mei_device *dev)
Tomas Winklerb16c3572014-03-18 22:51:57 +0200627{
628 struct mei_me_hw *hw = to_me_hw(dev);
Tomas Winklera0a927d2015-02-10 10:39:33 +0200629 u32 reg;
630
631 reg = mei_me_reg_read(hw, H_HPG_CSR);
632 trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
Tomas Winkler92db1552014-09-29 16:31:37 +0300633
Tomas Winklerb16c3572014-03-18 22:51:57 +0200634 reg |= H_HPG_CSR_PGI;
Tomas Winklera0a927d2015-02-10 10:39:33 +0200635
636 trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
Tomas Winklerb16c3572014-03-18 22:51:57 +0200637 mei_me_reg_write(hw, H_HPG_CSR, reg);
638}
639
/**
 * mei_me_pg_unset - write pg exit register
 *
 * @dev: the device structure
 *
 * Read-modify-write of H_HPG_CSR: requests power gating exit (PGIHEXR).
 * Warns if PGI was not set, i.e. the device was not in power gating.
 */
static void mei_me_pg_unset(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 reg;

	reg = mei_me_reg_read(hw, H_HPG_CSR);
	trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);

	WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n");

	reg |= H_HPG_CSR_PGIHEXR;

	trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
	mei_me_reg_write(hw, H_HPG_CSR, reg);
}
660
/**
 * mei_me_pg_legacy_enter_sync - perform legacy pg entry procedure
 *
 * @dev: the device structure
 *
 * Sends the PG isolation entry request to the fw, waits for its
 * response and then enters power gating via the H_HPG_CSR register.
 * Must be called with dev->device_lock held; the lock is dropped
 * while waiting for the fw response.
 *
 * Return: 0 on success an error code otherwise
 */
static int mei_me_pg_legacy_enter_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
	int ret;

	dev->pg_event = MEI_PG_EVENT_WAIT;

	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
	if (ret)
		return ret;

	/* drop the lock while sleeping; the irq path posts the pg event */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event == MEI_PG_EVENT_RECEIVED) {
		/* fw acknowledged - perform the actual hw pg entry */
		mei_me_pg_set(dev);
		ret = 0;
	} else {
		ret = -ETIME;
	}

	/* pg_state is recorded as ON even on timeout - matches caller's
	 * expectation that a reset follows a failed entry */
	dev->pg_event = MEI_PG_EVENT_IDLE;
	hw->pg_state = MEI_PG_ON;

	return ret;
}
697
698/**
Alexander Usyskin859ef2f2015-08-02 22:20:54 +0300699 * mei_me_pg_legacy_exit_sync - perform legacy pg exit procedure
Tomas Winklerba9cdd02014-03-18 22:52:00 +0200700 *
701 * @dev: the device structure
702 *
 * Return: 0 on success, an error code otherwise
Tomas Winklerba9cdd02014-03-18 22:52:00 +0200704 */
static int mei_me_pg_legacy_exit_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
	int ret;

	/* the exit notification may have already arrived via interrupt */
	if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
		goto reply;

	dev->pg_event = MEI_PG_EVENT_WAIT;

	/* clear the PGI bit in hardware to trigger the exit flow */
	mei_me_pg_unset(dev);

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

reply:
	if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
		ret = -ETIME;
		goto out;
	}

	/* send the exit response; completion is signaled by interrupt */
	dev->pg_event = MEI_PG_EVENT_INTR_WAIT;
	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD);
	if (ret)
		/* NOTE(review): returns with pg_event left at INTR_WAIT
		 * and pg_state unchanged — confirm intended
		 */
		return ret;

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED)
		ret = 0;
	else
		ret = -ETIME;

out:
	dev->pg_event = MEI_PG_EVENT_IDLE;
	hw->pg_state = MEI_PG_OFF;

	return ret;
}
750
751/**
Alexander Usyskin3dc196e2015-06-13 08:51:17 +0300752 * mei_me_pg_in_transition - is device now in pg transition
753 *
754 * @dev: the device structure
755 *
756 * Return: true if in pg transition, false otherwise
757 */
758static bool mei_me_pg_in_transition(struct mei_device *dev)
759{
760 return dev->pg_event >= MEI_PG_EVENT_WAIT &&
761 dev->pg_event <= MEI_PG_EVENT_INTR_WAIT;
762}
763
764/**
Tomas Winkleree7e5af2014-03-18 22:51:58 +0200765 * mei_me_pg_is_enabled - detect if PG is supported by HW
766 *
767 * @dev: the device structure
768 *
 * Return: true if pg is supported, false otherwise
Tomas Winkleree7e5af2014-03-18 22:51:58 +0200770 */
771static bool mei_me_pg_is_enabled(struct mei_device *dev)
772{
Alexander Usyskin859ef2f2015-08-02 22:20:54 +0300773 struct mei_me_hw *hw = to_me_hw(dev);
Tomas Winkler381a58c2015-02-10 10:39:32 +0200774 u32 reg = mei_me_mecsr_read(dev);
Tomas Winkleree7e5af2014-03-18 22:51:58 +0200775
Alexander Usyskin859ef2f2015-08-02 22:20:54 +0300776 if (hw->d0i3_supported)
777 return true;
778
Tomas Winkleree7e5af2014-03-18 22:51:58 +0200779 if ((reg & ME_PGIC_HRA) == 0)
780 goto notsupported;
781
Tomas Winklerbae1cc72014-08-21 14:29:21 +0300782 if (!dev->hbm_f_pg_supported)
Tomas Winkleree7e5af2014-03-18 22:51:58 +0200783 goto notsupported;
784
785 return true;
786
787notsupported:
Alexander Usyskin859ef2f2015-08-02 22:20:54 +0300788 dev_dbg(dev->dev, "pg: not supported: d0i3 = %d HGP = %d hbm version %d.%d ?= %d.%d\n",
789 hw->d0i3_supported,
Tomas Winkleree7e5af2014-03-18 22:51:58 +0200790 !!(reg & ME_PGIC_HRA),
791 dev->version.major_version,
792 dev->version.minor_version,
793 HBM_MAJOR_VERSION_PGI,
794 HBM_MINOR_VERSION_PGI);
795
796 return false;
797}
798
799/**
Alexander Usyskin859ef2f2015-08-02 22:20:54 +0300800 * mei_me_d0i3_set - write d0i3 register bit on mei device.
801 *
802 * @dev: the device structure
803 * @intr: ask for interrupt
804 *
805 * Return: D0I3C register value
806 */
807static u32 mei_me_d0i3_set(struct mei_device *dev, bool intr)
808{
809 u32 reg = mei_me_d0i3c_read(dev);
810
811 reg |= H_D0I3C_I3;
812 if (intr)
813 reg |= H_D0I3C_IR;
814 else
815 reg &= ~H_D0I3C_IR;
816 mei_me_d0i3c_write(dev, reg);
817 /* read it to ensure HW consistency */
818 reg = mei_me_d0i3c_read(dev);
819 return reg;
820}
821
822/**
823 * mei_me_d0i3_unset - clean d0i3 register bit on mei device.
824 *
825 * @dev: the device structure
826 *
827 * Return: D0I3C register value
828 */
829static u32 mei_me_d0i3_unset(struct mei_device *dev)
830{
831 u32 reg = mei_me_d0i3c_read(dev);
832
833 reg &= ~H_D0I3C_I3;
834 reg |= H_D0I3C_IR;
835 mei_me_d0i3c_write(dev, reg);
836 /* read it to ensure HW consistency */
837 reg = mei_me_d0i3c_read(dev);
838 return reg;
839}
840
841/**
842 * mei_me_d0i3_enter_sync - perform d0i3 entry procedure
843 *
844 * @dev: the device structure
845 *
 * Return: 0 on success, an error code otherwise
847 */
static int mei_me_d0i3_enter_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long d0i3_timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
	unsigned long pgi_timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
	int ret;
	u32 reg;

	reg = mei_me_d0i3c_read(dev);
	if (reg & H_D0I3C_I3) {
		/* we are in d0i3, nothing to do */
		dev_dbg(dev->dev, "d0i3 set not needed\n");
		ret = 0;
		goto on;
	}

	/* PGI entry procedure: ask FW for permission first */
	dev->pg_event = MEI_PG_EVENT_WAIT;

	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
	if (ret)
		/* FIXME: should we reset here? */
		goto out;

	/* drop the lock while sleeping; the irq path updates pg_event */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_RECEIVED, pgi_timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
		ret = -ETIME;
		goto out;
	}
	/* end PGI entry procedure */

	dev->pg_event = MEI_PG_EVENT_INTR_WAIT;

	/* request d0i3 entry with completion interrupt */
	reg = mei_me_d0i3_set(dev, true);
	if (!(reg & H_D0I3C_CIP)) {
		/* no command in progress: transition completed immediately */
		dev_dbg(dev->dev, "d0i3 enter wait not needed\n");
		ret = 0;
		goto on;
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, d0i3_timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
		/* interrupt may have been missed; trust the register state */
		reg = mei_me_d0i3c_read(dev);
		if (!(reg & H_D0I3C_I3)) {
			ret = -ETIME;
			goto out;
		}
	}

	ret = 0;
on:
	hw->pg_state = MEI_PG_ON;
out:
	dev->pg_event = MEI_PG_EVENT_IDLE;
	dev_dbg(dev->dev, "d0i3 enter ret = %d\n", ret);
	return ret;
}
913
914/**
915 * mei_me_d0i3_enter - perform d0i3 entry procedure
916 * no hbm PG handshake
917 * no waiting for confirmation; runs with interrupts
918 * disabled
919 *
920 * @dev: the device structure
921 *
 * Return: 0 on success, an error code otherwise
923 */
924static int mei_me_d0i3_enter(struct mei_device *dev)
925{
926 struct mei_me_hw *hw = to_me_hw(dev);
927 u32 reg;
928
929 reg = mei_me_d0i3c_read(dev);
930 if (reg & H_D0I3C_I3) {
931 /* we are in d0i3, nothing to do */
932 dev_dbg(dev->dev, "already d0i3 : set not needed\n");
933 goto on;
934 }
935
936 mei_me_d0i3_set(dev, false);
937on:
938 hw->pg_state = MEI_PG_ON;
939 dev->pg_event = MEI_PG_EVENT_IDLE;
940 dev_dbg(dev->dev, "d0i3 enter\n");
941 return 0;
942}
943
944/**
945 * mei_me_d0i3_exit_sync - perform d0i3 exit procedure
946 *
947 * @dev: the device structure
948 *
 * Return: 0 on success, an error code otherwise
950 */
static int mei_me_d0i3_exit_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
	int ret;
	u32 reg;

	dev->pg_event = MEI_PG_EVENT_INTR_WAIT;

	reg = mei_me_d0i3c_read(dev);
	if (!(reg & H_D0I3C_I3)) {
		/* we are not in d0i3, nothing to do */
		dev_dbg(dev->dev, "d0i3 exit not needed\n");
		ret = 0;
		goto off;
	}

	/* request d0i3 exit; completion is signaled by interrupt */
	reg = mei_me_d0i3_unset(dev);
	if (!(reg & H_D0I3C_CIP)) {
		/* no command in progress: transition completed immediately */
		dev_dbg(dev->dev, "d0i3 exit wait not needed\n");
		ret = 0;
		goto off;
	}

	/* drop the lock while sleeping; the irq path updates pg_event */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
		/* interrupt may have been missed; trust the register state */
		reg = mei_me_d0i3c_read(dev);
		if (reg & H_D0I3C_I3) {
			ret = -ETIME;
			goto out;
		}
	}

	ret = 0;
off:
	hw->pg_state = MEI_PG_OFF;
out:
	dev->pg_event = MEI_PG_EVENT_IDLE;

	dev_dbg(dev->dev, "d0i3 exit ret = %d\n", ret);
	return ret;
}
997
998/**
999 * mei_me_pg_legacy_intr - perform legacy pg processing
1000 * in interrupt thread handler
1001 *
1002 * @dev: the device structure
1003 */
1004static void mei_me_pg_legacy_intr(struct mei_device *dev)
1005{
1006 struct mei_me_hw *hw = to_me_hw(dev);
1007
1008 if (dev->pg_event != MEI_PG_EVENT_INTR_WAIT)
1009 return;
1010
1011 dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
1012 hw->pg_state = MEI_PG_OFF;
1013 if (waitqueue_active(&dev->wait_pg))
1014 wake_up(&dev->wait_pg);
1015}
1016
1017/**
1018 * mei_me_d0i3_intr - perform d0i3 processing in interrupt thread handler
1019 *
1020 * @dev: the device structure
Alexander Usyskina2eb0fc2016-12-04 15:22:59 +02001021 * @intr_source: interrupt source
Alexander Usyskin859ef2f2015-08-02 22:20:54 +03001022 */
static void mei_me_d0i3_intr(struct mei_device *dev, u32 intr_source)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	/* completion interrupt for a pending d0i3 enter/exit request */
	if (dev->pg_event == MEI_PG_EVENT_INTR_WAIT &&
	    (intr_source & H_D0I3C_IS)) {
		dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
		if (hw->pg_state == MEI_PG_ON) {
			/* d0i3 exit completed */
			hw->pg_state = MEI_PG_OFF;
			if (dev->hbm_state != MEI_HBM_IDLE) {
				/*
				 * force H_RDY because it could be
				 * wiped off during PG
				 */
				dev_dbg(dev->dev, "d0i3 set host ready\n");
				mei_me_host_set_ready(dev);
			}
		} else {
			/* d0i3 entry completed */
			hw->pg_state = MEI_PG_ON;
		}

		wake_up(&dev->wait_pg);
	}

	if (hw->pg_state == MEI_PG_ON && (intr_source & H_IS)) {
		/*
		 * HW sent some data and we are in D0i3, so
		 * we got here because of HW initiated exit from D0i3.
		 * Start runtime pm resume sequence to exit low power state.
		 */
		dev_dbg(dev->dev, "d0i3 want resume\n");
		mei_hbm_pg_resume(dev);
	}
}
1057
1058/**
Alexander Usyskin3dc196e2015-06-13 08:51:17 +03001059 * mei_me_pg_intr - perform pg processing in interrupt thread handler
1060 *
1061 * @dev: the device structure
Alexander Usyskina2eb0fc2016-12-04 15:22:59 +02001062 * @intr_source: interrupt source
Alexander Usyskin3dc196e2015-06-13 08:51:17 +03001063 */
Alexander Usyskina2eb0fc2016-12-04 15:22:59 +02001064static void mei_me_pg_intr(struct mei_device *dev, u32 intr_source)
Alexander Usyskin3dc196e2015-06-13 08:51:17 +03001065{
1066 struct mei_me_hw *hw = to_me_hw(dev);
1067
Alexander Usyskin859ef2f2015-08-02 22:20:54 +03001068 if (hw->d0i3_supported)
Alexander Usyskina2eb0fc2016-12-04 15:22:59 +02001069 mei_me_d0i3_intr(dev, intr_source);
Alexander Usyskin859ef2f2015-08-02 22:20:54 +03001070 else
1071 mei_me_pg_legacy_intr(dev);
1072}
Alexander Usyskin3dc196e2015-06-13 08:51:17 +03001073
Alexander Usyskin859ef2f2015-08-02 22:20:54 +03001074/**
1075 * mei_me_pg_enter_sync - perform runtime pm entry procedure
1076 *
1077 * @dev: the device structure
1078 *
 * Return: 0 on success, an error code otherwise
1080 */
1081int mei_me_pg_enter_sync(struct mei_device *dev)
1082{
1083 struct mei_me_hw *hw = to_me_hw(dev);
1084
1085 if (hw->d0i3_supported)
1086 return mei_me_d0i3_enter_sync(dev);
1087 else
1088 return mei_me_pg_legacy_enter_sync(dev);
1089}
1090
1091/**
1092 * mei_me_pg_exit_sync - perform runtime pm exit procedure
1093 *
1094 * @dev: the device structure
1095 *
 * Return: 0 on success, an error code otherwise
1097 */
1098int mei_me_pg_exit_sync(struct mei_device *dev)
1099{
1100 struct mei_me_hw *hw = to_me_hw(dev);
1101
1102 if (hw->d0i3_supported)
1103 return mei_me_d0i3_exit_sync(dev);
1104 else
1105 return mei_me_pg_legacy_exit_sync(dev);
Alexander Usyskin3dc196e2015-06-13 08:51:17 +03001106}
1107
1108/**
Alexander Usyskinebad6b92015-08-02 22:20:55 +03001109 * mei_me_hw_reset - resets fw via mei csr register.
1110 *
1111 * @dev: the device structure
1112 * @intr_enable: if interrupt should be enabled after reset.
1113 *
 * Return: 0 on success, an error code otherwise
Alexander Usyskinebad6b92015-08-02 22:20:55 +03001115 */
static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	int ret;
	u32 hcsr;

	if (intr_enable) {
		mei_me_intr_enable(dev);
		/* coming up for use: leave d0i3 before touching H_CSR */
		if (hw->d0i3_supported) {
			ret = mei_me_d0i3_exit_sync(dev);
			if (ret)
				return ret;
		}
	}

	pm_runtime_set_active(dev->dev);

	hcsr = mei_hcsr_read(dev);
	/* H_RST may be found lit before reset is started,
	 * for example if preceding reset flow hasn't completed.
	 * In that case asserting H_RST will be ignored, therefore
	 * we need to clean H_RST bit to start a successful reset sequence.
	 */
	if ((hcsr & H_RST) == H_RST) {
		dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
		hcsr &= ~H_RST;
		mei_hcsr_set(dev, hcsr);
		hcsr = mei_hcsr_read(dev);
	}

	/* assert reset, interrupt generate, and clear interrupt status */
	hcsr |= H_RST | H_IG | H_CSR_IS_MASK;

	if (!intr_enable)
		hcsr &= ~H_CSR_IE_MASK;

	dev->recvd_hw_ready = false;
	mei_hcsr_write(dev, hcsr);

	/*
	 * Host reads the H_CSR once to ensure that the
	 * posted write to H_CSR completes.
	 */
	hcsr = mei_hcsr_read(dev);

	if ((hcsr & H_RST) == 0)
		dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr);

	if ((hcsr & H_RDY) == H_RDY)
		dev_warn(dev->dev, "H_RDY is not cleared 0x%08X", hcsr);

	if (!intr_enable) {
		mei_me_hw_reset_release(dev);
		/* powering down: re-enter d0i3, no handshake with interrupts off */
		if (hw->d0i3_supported) {
			ret = mei_me_d0i3_enter(dev);
			if (ret)
				return ret;
		}
	}
	return 0;
}
1176
1177/**
Tomas Winkler06ecd642013-02-06 14:06:42 +02001178 * mei_me_irq_quick_handler - The ISR of the MEI device
1179 *
1180 * @irq: The irq number
1181 * @dev_id: pointer to the device structure
1182 *
Alexander Usyskina8605ea2014-09-29 16:31:49 +03001183 * Return: irqreturn_t
Tomas Winkler06ecd642013-02-06 14:06:42 +02001184 */
Tomas Winkler06ecd642013-02-06 14:06:42 +02001185irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
1186{
Alexander Usyskin1fa55b42015-08-02 22:20:52 +03001187 struct mei_device *dev = (struct mei_device *)dev_id;
Alexander Usyskin1fa55b42015-08-02 22:20:52 +03001188 u32 hcsr;
Tomas Winkler06ecd642013-02-06 14:06:42 +02001189
Alexander Usyskin1fa55b42015-08-02 22:20:52 +03001190 hcsr = mei_hcsr_read(dev);
Alexander Usyskina2eb0fc2016-12-04 15:22:59 +02001191 if (!me_intr_src(hcsr))
Tomas Winkler06ecd642013-02-06 14:06:42 +02001192 return IRQ_NONE;
1193
Alexander Usyskina2eb0fc2016-12-04 15:22:59 +02001194 dev_dbg(dev->dev, "interrupt source 0x%08X\n", me_intr_src(hcsr));
Alexander Usyskin1fa55b42015-08-02 22:20:52 +03001195
Alexander Usyskina2eb0fc2016-12-04 15:22:59 +02001196 /* disable interrupts on device */
1197 me_intr_disable(dev, hcsr);
Tomas Winkler06ecd642013-02-06 14:06:42 +02001198 return IRQ_WAKE_THREAD;
1199}
1200
1201/**
1202 * mei_me_irq_thread_handler - function called after ISR to handle the interrupt
1203 * processing.
1204 *
1205 * @irq: The irq number
1206 * @dev_id: pointer to the device structure
1207 *
Alexander Usyskina8605ea2014-09-29 16:31:49 +03001208 * Return: irqreturn_t
Tomas Winkler06ecd642013-02-06 14:06:42 +02001209 *
1210 */
irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
{
	struct mei_device *dev = (struct mei_device *) dev_id;
	struct list_head cmpl_list;
	s32 slots;
	u32 hcsr;
	int rets = 0;

	dev_dbg(dev->dev, "function called after ISR to handle the interrupt processing.\n");
	/* initialize our complete list */
	mutex_lock(&dev->device_lock);

	/* acknowledge the interrupt sources latched by the quick handler */
	hcsr = mei_hcsr_read(dev);
	me_intr_clear(dev, hcsr);

	INIT_LIST_HEAD(&cmpl_list);

	/* check if ME wants a reset */
	if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
		dev_warn(dev->dev, "FW not ready: resetting.\n");
		schedule_work(&dev->reset_work);
		goto end;
	}

	if (mei_me_hw_is_resetting(dev))
		mei_hcsr_set_hig(dev);

	mei_me_pg_intr(dev, me_intr_src(hcsr));

	/* check if we need to start the dev */
	if (!mei_host_is_ready(dev)) {
		if (mei_hw_is_ready(dev)) {
			dev_dbg(dev->dev, "we need to start the dev.\n");
			dev->recvd_hw_ready = true;
			wake_up(&dev->wait_hw_ready);
		} else {
			dev_dbg(dev->dev, "Spurious Interrupt\n");
		}
		goto end;
	}
	/* check slots available for reading */
	slots = mei_count_full_read_slots(dev);
	while (slots > 0) {
		dev_dbg(dev->dev, "slots to read = %08x\n", slots);
		rets = mei_irq_read_handler(dev, &cmpl_list, &slots);
		/* There is a race between ME write and interrupt delivery:
		 * Not all data is always available immediately after the
		 * interrupt, so try to read again on the next interrupt.
		 */
		if (rets == -ENODATA)
			break;

		/* a read error during reset/power-down is expected; only
		 * schedule a reset for errors in a normal state
		 */
		if (rets &&
		    (dev->dev_state != MEI_DEV_RESETTING &&
		     dev->dev_state != MEI_DEV_POWER_DOWN)) {
			dev_err(dev->dev, "mei_irq_read_handler ret = %d.\n",
				rets);
			schedule_work(&dev->reset_work);
			goto end;
		}
	}

	dev->hbuf_is_ready = mei_hbuf_is_ready(dev);

	/*
	 * During PG handshake only allowed write is the replay to the
	 * PG exit message, so block calling write function
	 * if the pg event is in PG handshake
	 */
	if (dev->pg_event != MEI_PG_EVENT_WAIT &&
	    dev->pg_event != MEI_PG_EVENT_RECEIVED) {
		rets = mei_irq_write_handler(dev, &cmpl_list);
		dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
	}

	mei_irq_compl_handler(dev, &cmpl_list);

end:
	dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);
	/* re-arm the interrupts that the quick handler disabled */
	mei_me_intr_enable(dev);
	mutex_unlock(&dev->device_lock);
	return IRQ_HANDLED;
}
Alexander Usyskin04dd3662014-03-31 17:59:23 +03001294
/* hw operations vector for the "me" hardware, see struct mei_hw_ops */
static const struct mei_hw_ops mei_me_hw_ops = {

	.fw_status = mei_me_fw_status,
	.pg_state = mei_me_pg_state,

	.host_is_ready = mei_me_host_is_ready,

	.hw_is_ready = mei_me_hw_is_ready,
	.hw_reset = mei_me_hw_reset,
	.hw_config = mei_me_hw_config,
	.hw_start = mei_me_hw_start,

	.pg_in_transition = mei_me_pg_in_transition,
	.pg_is_enabled = mei_me_pg_is_enabled,

	.intr_clear = mei_me_intr_clear,
	.intr_enable = mei_me_intr_enable,
	.intr_disable = mei_me_intr_disable,
	.synchronize_irq = mei_me_synchronize_irq,

	.hbuf_free_slots = mei_me_hbuf_empty_slots,
	.hbuf_is_ready = mei_me_hbuf_is_empty,
	.hbuf_depth = mei_me_hbuf_depth,

	.write = mei_me_hbuf_write,

	.rdbuf_full_slots = mei_me_count_full_read_slots,
	.read_hdr = mei_me_mecbrw_read,
	.read = mei_me_read_slots
};
1325
Tomas Winklerc9199512014-05-13 01:30:54 +03001326static bool mei_me_fw_type_nm(struct pci_dev *pdev)
1327{
1328 u32 reg;
Tomas Winkler92db1552014-09-29 16:31:37 +03001329
Tomas Winklerc9199512014-05-13 01:30:54 +03001330 pci_read_config_dword(pdev, PCI_CFG_HFS_2, &reg);
Tomas Winklera96c5482016-02-07 22:46:51 +02001331 trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_2", PCI_CFG_HFS_2, reg);
Tomas Winklerc9199512014-05-13 01:30:54 +03001332 /* make sure that bit 9 (NM) is up and bit 10 (DM) is down */
1333 return (reg & 0x600) == 0x200;
1334}
1335
/* fw type quirk hook: see mei_me_fw_type_nm */
#define MEI_CFG_FW_NM \
	.quirk_probe = mei_me_fw_type_nm
1338
1339static bool mei_me_fw_type_sps(struct pci_dev *pdev)
1340{
1341 u32 reg;
Tomas Winkler8c57cac2016-07-20 10:24:02 +03001342 unsigned int devfn;
1343
1344 /*
1345 * Read ME FW Status register to check for SPS Firmware
1346 * The SPS FW is only signaled in pci function 0
1347 */
1348 devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
1349 pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_1, &reg);
Tomas Winklera96c5482016-02-07 22:46:51 +02001350 trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
Tomas Winklerc9199512014-05-13 01:30:54 +03001351 /* if bits [19:16] = 15, running SPS Firmware */
1352 return (reg & 0xf0000) == 0xf0000;
1353}
1354
/* fw type quirk hook: see mei_me_fw_type_sps */
#define MEI_CFG_FW_SPS \
	.quirk_probe = mei_me_fw_type_sps
1357
1358
/* ICH legacy: no fw status registers exposed in PCI config space */
#define MEI_CFG_ICH_HFS \
	.fw_status.count = 0

/* ICH10: a single fw status register */
#define MEI_CFG_ICH10_HFS \
	.fw_status.count = 1, \
	.fw_status.status[0] = PCI_CFG_HFS_1

/* PCH: two fw status registers */
#define MEI_CFG_PCH_HFS \
	.fw_status.count = 2, \
	.fw_status.status[0] = PCI_CFG_HFS_1, \
	.fw_status.status[1] = PCI_CFG_HFS_2

/* PCH8 and newer: six fw status registers */
#define MEI_CFG_PCH8_HFS \
	.fw_status.count = 6, \
	.fw_status.status[0] = PCI_CFG_HFS_1, \
	.fw_status.status[1] = PCI_CFG_HFS_2, \
	.fw_status.status[2] = PCI_CFG_HFS_3, \
	.fw_status.status[3] = PCI_CFG_HFS_4, \
	.fw_status.status[4] = PCI_CFG_HFS_5, \
	.fw_status.status[5] = PCI_CFG_HFS_6

/* DMA ring sizes: 128K for host and device data, one page for control */
#define MEI_CFG_DMA_128 \
	.dma_size[DMA_DSCR_HOST] = SZ_128K, \
	.dma_size[DMA_DSCR_DEVICE] = SZ_128K, \
	.dma_size[DMA_DSCR_CTRL] = PAGE_SIZE
1384
/* ICH Legacy devices */
static const struct mei_cfg mei_me_ich_cfg = {
	MEI_CFG_ICH_HFS,
};

/* ICH devices */
static const struct mei_cfg mei_me_ich10_cfg = {
	MEI_CFG_ICH10_HFS,
};

/* PCH devices */
static const struct mei_cfg mei_me_pch_cfg = {
	MEI_CFG_PCH_HFS,
};

/* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */
static const struct mei_cfg mei_me_pch_cpt_pbg_cfg = {
	MEI_CFG_PCH_HFS,
	MEI_CFG_FW_NM,
};

/* PCH8 Lynx Point and newer devices */
static const struct mei_cfg mei_me_pch8_cfg = {
	MEI_CFG_PCH8_HFS,
};

/* PCH8 Lynx Point with quirk for SPS Firmware exclusion */
static const struct mei_cfg mei_me_pch8_sps_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_SPS,
};

/* Cannon Lake and newer devices */
static const struct mei_cfg mei_me_pch12_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_DMA_128,
};

/*
 * mei_cfg_list - A list of platform specific configurations.
 * Note: has to be synchronized with enum mei_cfg_idx.
 */
static const struct mei_cfg *const mei_cfg_list[] = {
	[MEI_ME_UNDEF_CFG] = NULL,
	[MEI_ME_ICH_CFG] = &mei_me_ich_cfg,
	[MEI_ME_ICH10_CFG] = &mei_me_ich10_cfg,
	[MEI_ME_PCH_CFG] = &mei_me_pch_cfg,
	[MEI_ME_PCH_CPT_PBG_CFG] = &mei_me_pch_cpt_pbg_cfg,
	[MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg,
	[MEI_ME_PCH8_SPS_CFG] = &mei_me_pch8_sps_cfg,
	[MEI_ME_PCH12_CFG] = &mei_me_pch12_cfg,
};
1437
1438const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx)
1439{
1440 BUILD_BUG_ON(ARRAY_SIZE(mei_cfg_list) != MEI_ME_NUM_CFG);
1441
1442 if (idx >= MEI_ME_NUM_CFG)
1443 return NULL;
1444
1445 return mei_cfg_list[idx];
1446};
1447
/**
 * mei_me_dev_init - allocates and initializes the mei device structure
 *
 * @pdev: The pci device structure
 * @cfg: per device generation config
 *
 * Return: The mei_device pointer on success, NULL on failure.
 */
struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
				   const struct mei_cfg *cfg)
{
	struct mei_device *dev;
	struct mei_me_hw *hw;
	int i;

	/* single device-managed allocation: mei_device with the
	 * me hw private structure trailing it
	 */
	dev = devm_kzalloc(&pdev->dev, sizeof(struct mei_device) +
			   sizeof(struct mei_me_hw), GFP_KERNEL);
	if (!dev)
		return NULL;

	hw = to_me_hw(dev);

	/* copy per-generation DMA ring sizes from the platform config */
	for (i = 0; i < DMA_DSCR_NUM; i++)
		dev->dr_dscr[i].size = cfg->dma_size[i];

	mei_device_init(dev, &pdev->dev, &mei_me_hw_ops);
	hw->cfg = cfg;

	return dev;
}
Tomas Winkler06ecd642013-02-06 14:06:42 +02001478