From 7ae6478b304bc004c3139b422665b0e23b57f05c Mon Sep 17 00:00:00 2001
From: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
Date: Wed, 13 Oct 2021 14:19:55 +0100
Subject: [PATCH] nvmem: core: rework nvmem cell instance creation

In the existing design, we do not create an instance per nvmem cell
consumer; instead, consumers directly reference cells from the nvmem
cell list attached to the provider.

However, this design has limitations when consumers want to assign a
name or connection id to the nvmem cell instance, e.g. via
"nvmem-cell-names" or the id passed to nvmem_cell_get(id).

Having a name associated with the nvmem cell consumer instance helps
provider drivers perform any required post processing of nvmem cell
data before it is seen by the consumers. This is fairly common, as some
vendors store nvmem cells such as mac-address in vendor-specific data
layouts that are not directly usable by consumer drivers (a
provider-side illustration follows after the diff below).

With this patch, an nvmem cell is created dynamically during
nvmem_cell_get and destroyed in nvmem_cell_put, allowing consumers to
associate a name with the nvmem cell consumer instance.

With this patch, a new struct nvmem_cell_entry replaces struct
nvmem_cell for storing nvmem cell information within the core.
This patch does not change the nvmem consumer interface based on
nvmem_cell.

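For illustration, a minimal consumer-side sketch (hypothetical driver
code, not part of this patch) showing how the connection id reaches the
new per-consumer instance:

	/*
	 * "mac-address" is the connection id, matched against
	 * "nvmem-cell-names" in DT; after this patch it is also kept in
	 * the per-consumer struct nvmem_cell, so the provider can later
	 * post-process the data based on it.
	 */
	struct nvmem_cell *cell;
	size_t len;
	u8 *mac;

	cell = nvmem_cell_get(dev, "mac-address");
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	mac = nvmem_cell_read(cell, &len);
	nvmem_cell_put(cell);	/* frees the per-consumer instance */
	if (IS_ERR(mac))
		return PTR_ERR(mac);
	/* ... use mac, then kfree(mac) ... */
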
Tested-by: Joakim Zhang <qiangqing.zhang@nxp.com>
Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
Link: https://lore.kernel.org/r/20211013131957.30271-2-srinivas.kandagatla@linaro.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 drivers/nvmem/core.c | 165 +++++++++++++++++++++++++++----------------
 1 file changed, 105 insertions(+), 60 deletions(-)

--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -45,8 +45,7 @@ struct nvmem_device {
 #define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)
 
 #define FLAG_COMPAT BIT(0)
-
-struct nvmem_cell {
+struct nvmem_cell_entry {
 	const char *name;
 	int offset;
 	int bytes;
@@ -57,6 +56,11 @@ struct nvmem_cell {
 	struct list_head node;
 };
 
+struct nvmem_cell {
+	struct nvmem_cell_entry *entry;
+	const char *id;
+};
+
 static DEFINE_MUTEX(nvmem_mutex);
 static DEFINE_IDA(nvmem_ida);
 
@@ -424,7 +428,7 @@ static struct bus_type nvmem_bus_type =
 	.name = "nvmem",
 };
 
-static void nvmem_cell_drop(struct nvmem_cell *cell)
+static void nvmem_cell_entry_drop(struct nvmem_cell_entry *cell)
 {
 	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
 	mutex_lock(&nvmem_mutex);
@@ -437,13 +441,13 @@ static void nvmem_cell_drop(struct nvmem
 
 static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
 {
-	struct nvmem_cell *cell, *p;
+	struct nvmem_cell_entry *cell, *p;
 
 	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
-		nvmem_cell_drop(cell);
+		nvmem_cell_entry_drop(cell);
 }
 
-static void nvmem_cell_add(struct nvmem_cell *cell)
+static void nvmem_cell_entry_add(struct nvmem_cell_entry *cell)
 {
 	mutex_lock(&nvmem_mutex);
 	list_add_tail(&cell->node, &cell->nvmem->cells);
@@ -451,9 +455,9 @@ static void nvmem_cell_add(struct nvmem_
 	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
 }
 
-static int nvmem_cell_info_to_nvmem_cell_nodup(struct nvmem_device *nvmem,
-					const struct nvmem_cell_info *info,
-					struct nvmem_cell *cell)
+static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem,
+					const struct nvmem_cell_info *info,
+					struct nvmem_cell_entry *cell)
 {
 	cell->nvmem = nvmem;
 	cell->offset = info->offset;
@@ -477,13 +481,13 @@ static int nvmem_cell_info_to_nvmem_cell
 	return 0;
 }
 
-static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
-				const struct nvmem_cell_info *info,
-				struct nvmem_cell *cell)
+static int nvmem_cell_info_to_nvmem_cell_entry(struct nvmem_device *nvmem,
+				const struct nvmem_cell_info *info,
+				struct nvmem_cell_entry *cell)
 {
 	int err;
 
-	err = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, cell);
+	err = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, cell);
 	if (err)
 		return err;
 
@@ -507,7 +511,7 @@ static int nvmem_add_cells(struct nvmem_
 			  const struct nvmem_cell_info *info,
 			  int ncells)
 {
-	struct nvmem_cell **cells;
+	struct nvmem_cell_entry **cells;
 	int i, rval;
 
 	cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL);
@@ -521,13 +525,13 @@ static int nvmem_add_cells(struct nvmem_
 			goto err;
 		}
 
-		rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
+		rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, &info[i], cells[i]);
 		if (rval) {
 			kfree(cells[i]);
 			goto err;
 		}
 
-		nvmem_cell_add(cells[i]);
+		nvmem_cell_entry_add(cells[i]);
 	}
 
 	/* remove tmp array */
@@ -536,7 +540,7 @@ static int nvmem_add_cells(struct nvmem_
 	return 0;
 err:
 	while (i--)
-		nvmem_cell_drop(cells[i]);
+		nvmem_cell_entry_drop(cells[i]);
 
 	kfree(cells);
 
@@ -573,7 +577,7 @@ static int nvmem_add_cells_from_table(st
 {
 	const struct nvmem_cell_info *info;
 	struct nvmem_cell_table *table;
-	struct nvmem_cell *cell;
+	struct nvmem_cell_entry *cell;
 	int rval = 0, i;
 
 	mutex_lock(&nvmem_cell_mutex);
@@ -588,15 +592,13 @@ static int nvmem_add_cells_from_table(st
 				goto out;
 			}
 
-			rval = nvmem_cell_info_to_nvmem_cell(nvmem,
-							info,
-							cell);
+			rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
 			if (rval) {
 				kfree(cell);
 				goto out;
 			}
 
-			nvmem_cell_add(cell);
+			nvmem_cell_entry_add(cell);
 		}
 	}
 }
@@ -606,10 +608,10 @@ out:
 	return rval;
 }
 
-static struct nvmem_cell *
-nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id)
+static struct nvmem_cell_entry *
+nvmem_find_cell_entry_by_name(struct nvmem_device *nvmem, const char *cell_id)
 {
-	struct nvmem_cell *iter, *cell = NULL;
+	struct nvmem_cell_entry *iter, *cell = NULL;
 
 	mutex_lock(&nvmem_mutex);
 	list_for_each_entry(iter, &nvmem->cells, node) {
@@ -680,7 +682,7 @@ static int nvmem_add_cells_from_of(struc
 {
 	struct device_node *parent, *child;
 	struct device *dev = &nvmem->dev;
-	struct nvmem_cell *cell;
+	struct nvmem_cell_entry *cell;
 	const __be32 *addr;
 	int len;
 
@@ -729,7 +731,7 @@ static int nvmem_add_cells_from_of(struc
 		}
 
 		cell->np = of_node_get(child);
-		nvmem_cell_add(cell);
+		nvmem_cell_entry_add(cell);
 	}
 
 	return 0;
@@ -1139,9 +1141,33 @@ struct nvmem_device *devm_nvmem_device_g
 }
 EXPORT_SYMBOL_GPL(devm_nvmem_device_get);
 
+static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry, const char *id)
+{
+	struct nvmem_cell *cell;
+	const char *name = NULL;
+
+	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
+	if (!cell)
+		return ERR_PTR(-ENOMEM);
+
+	if (id) {
+		name = kstrdup_const(id, GFP_KERNEL);
+		if (!name) {
+			kfree(cell);
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+
+	cell->id = name;
+	cell->entry = entry;
+
+	return cell;
+}
+
 static struct nvmem_cell *
 nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
 {
+	struct nvmem_cell_entry *cell_entry;
 	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
 	struct nvmem_cell_lookup *lookup;
 	struct nvmem_device *nvmem;
@@ -1166,11 +1192,15 @@ nvmem_cell_get_from_lookup(struct device
 			break;
 		}
 
-		cell = nvmem_find_cell_by_name(nvmem,
-					       lookup->cell_name);
-		if (!cell) {
+		cell_entry = nvmem_find_cell_entry_by_name(nvmem,
+							   lookup->cell_name);
+		if (!cell_entry) {
 			__nvmem_device_put(nvmem);
 			cell = ERR_PTR(-ENOENT);
+		} else {
+			cell = nvmem_create_cell(cell_entry, con_id);
+			if (IS_ERR(cell))
+				__nvmem_device_put(nvmem);
 		}
 		break;
 	}
@@ -1181,10 +1211,10 @@ nvmem_cell_get_from_lookup(struct device
 }
 
 #if IS_ENABLED(CONFIG_OF)
-static struct nvmem_cell *
-nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np)
+static struct nvmem_cell_entry *
+nvmem_find_cell_entry_by_node(struct nvmem_device *nvmem, struct device_node *np)
 {
-	struct nvmem_cell *iter, *cell = NULL;
+	struct nvmem_cell_entry *iter, *cell = NULL;
 
 	mutex_lock(&nvmem_mutex);
 	list_for_each_entry(iter, &nvmem->cells, node) {
@@ -1214,6 +1244,7 @@ struct nvmem_cell *of_nvmem_cell_get(str
 {
 	struct device_node *cell_np, *nvmem_np;
 	struct nvmem_device *nvmem;
+	struct nvmem_cell_entry *cell_entry;
 	struct nvmem_cell *cell;
 	int index = 0;
 
@@ -1234,12 +1265,16 @@ struct nvmem_cell *of_nvmem_cell_get(str
 	if (IS_ERR(nvmem))
 		return ERR_CAST(nvmem);
 
-	cell = nvmem_find_cell_by_node(nvmem, cell_np);
-	if (!cell) {
+	cell_entry = nvmem_find_cell_entry_by_node(nvmem, cell_np);
+	if (!cell_entry) {
 		__nvmem_device_put(nvmem);
 		return ERR_PTR(-ENOENT);
 	}
 
+	cell = nvmem_create_cell(cell_entry, id);
+	if (IS_ERR(cell))
+		__nvmem_device_put(nvmem);
+
 	return cell;
 }
 EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
@@ -1345,13 +1380,17 @@ EXPORT_SYMBOL(devm_nvmem_cell_put);
  */
 void nvmem_cell_put(struct nvmem_cell *cell)
 {
-	struct nvmem_device *nvmem = cell->nvmem;
+	struct nvmem_device *nvmem = cell->entry->nvmem;
+
+	if (cell->id)
+		kfree_const(cell->id);
 
+	kfree(cell);
 	__nvmem_device_put(nvmem);
 }
 EXPORT_SYMBOL_GPL(nvmem_cell_put);
 
-static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
+static void nvmem_shift_read_buffer_in_place(struct nvmem_cell_entry *cell, void *buf)
 {
 	u8 *p, *b;
 	int i, extra, bit_offset = cell->bit_offset;
@@ -1385,8 +1424,8 @@ static void nvmem_shift_read_buffer_in_p
 }
 
 static int __nvmem_cell_read(struct nvmem_device *nvmem,
-			     struct nvmem_cell *cell,
-			     void *buf, size_t *len)
+			     struct nvmem_cell_entry *cell,
+			     void *buf, size_t *len, const char *id)
 {
 	int rc;
 
@@ -1417,18 +1456,18 @@ static int __nvmem_cell_read(struct nvme
  */
 void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
 {
-	struct nvmem_device *nvmem = cell->nvmem;
+	struct nvmem_device *nvmem = cell->entry->nvmem;
 	u8 *buf;
 	int rc;
 
 	if (!nvmem)
 		return ERR_PTR(-EINVAL);
 
-	buf = kzalloc(cell->bytes, GFP_KERNEL);
+	buf = kzalloc(cell->entry->bytes, GFP_KERNEL);
 	if (!buf)
 		return ERR_PTR(-ENOMEM);
 
-	rc = __nvmem_cell_read(nvmem, cell, buf, len);
+	rc = __nvmem_cell_read(nvmem, cell->entry, buf, len, cell->id);
 	if (rc) {
 		kfree(buf);
 		return ERR_PTR(rc);
@@ -1438,7 +1477,7 @@ void *nvmem_cell_read(struct nvmem_cell
 }
 EXPORT_SYMBOL_GPL(nvmem_cell_read);
 
-static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
+static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell_entry *cell,
 					     u8 *_buf, int len)
 {
 	struct nvmem_device *nvmem = cell->nvmem;
@@ -1491,16 +1530,7 @@ err:
 	return ERR_PTR(rc);
 }
 
-/**
- * nvmem_cell_write() - Write to a given nvmem cell
- *
- * @cell: nvmem cell to be written.
- * @buf: Buffer to be written.
- * @len: length of buffer to be written to nvmem cell.
- *
- * Return: length of bytes written or negative on failure.
- */
-int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
+static int __nvmem_cell_entry_write(struct nvmem_cell_entry *cell, void *buf, size_t len)
 {
 	struct nvmem_device *nvmem = cell->nvmem;
 	int rc;
@@ -1526,6 +1556,21 @@ int nvmem_cell_write(struct nvmem_cell *
 
 	return len;
 }
+
+/**
+ * nvmem_cell_write() - Write to a given nvmem cell
+ *
+ * @cell: nvmem cell to be written.
+ * @buf: Buffer to be written.
+ * @len: length of buffer to be written to nvmem cell.
+ *
+ * Return: length of bytes written or negative on failure.
+ */
+int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
+{
+	return __nvmem_cell_entry_write(cell->entry, buf, len);
+}
+
 EXPORT_SYMBOL_GPL(nvmem_cell_write);
 
 static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
@@ -1628,7 +1673,7 @@ static const void *nvmem_cell_read_varia
 	if (IS_ERR(cell))
 		return cell;
 
-	nbits = cell->nbits;
+	nbits = cell->entry->nbits;
 	buf = nvmem_cell_read(cell, len);
 	nvmem_cell_put(cell);
 	if (IS_ERR(buf))
@@ -1724,18 +1769,18 @@ EXPORT_SYMBOL_GPL(nvmem_cell_read_variab
 ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
 			       struct nvmem_cell_info *info, void *buf)
 {
-	struct nvmem_cell cell;
+	struct nvmem_cell_entry cell;
 	int rc;
 	ssize_t len;
 
 	if (!nvmem)
 		return -EINVAL;
 
-	rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell);
+	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
 	if (rc)
 		return rc;
 
-	rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
+	rc = __nvmem_cell_read(nvmem, &cell, buf, &len, NULL);
 	if (rc)
 		return rc;
 
@@ -1755,17 +1800,17 @@ EXPORT_SYMBOL_GPL(nvmem_device_cell_read
 int nvmem_device_cell_write(struct nvmem_device *nvmem,
 			    struct nvmem_cell_info *info, void *buf)
 {
-	struct nvmem_cell cell;
+	struct nvmem_cell_entry cell;
 	int rc;
 
 	if (!nvmem)
 		return -EINVAL;
 
-	rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell);
+	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
 	if (rc)
 		return rc;
 
-	return nvmem_cell_write(&cell, buf, cell.bytes);
+	return __nvmem_cell_entry_write(&cell, buf, cell.bytes);
 }
 EXPORT_SYMBOL_GPL(nvmem_device_cell_write);
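
As a provider-side illustration (not part of this patch): with the
consumer id now plumbed through __nvmem_cell_read(), a provider driver
could post-process vendor-specific cell layouts keyed on that id. A
minimal sketch, assuming a post-processing hook of roughly this shape;
the exact callback signature is an assumption here, not something this
patch defines:

	/* hypothetical provider callback keyed on the consumer id */
	static int foo_cell_post_process(void *priv, const char *id, int offset,
					 void *buf, size_t bytes)
	{
		u8 *p = buf, tmp;
		size_t i;

		/* example: vendor stores "mac-address" byte-reversed */
		if (!id || strcmp(id, "mac-address") || bytes != 6)
			return 0;

		for (i = 0; i < bytes / 2; i++) {
			tmp = p[i];
			p[i] = p[bytes - 1 - i];
			p[bytes - 1 - i] = tmp;
		}

		return 0;
	}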
456