|
/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2022-09-24     GuEe-GUI     the first version
 */

#include <rtdevice.h>

#define DBG_TAG "rtdm.power"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

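/* A registered power-off callback, kept in a per-mode, per-priority list */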
struct power_off_track
{
    rt_slist_t list;

    struct rt_device *dev;
    rt_err_t (*callback)(struct rt_device *);
};

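/* Machine-level hooks, expected to be filled in by the platform/SoC driver */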
void (*rt_dm_machine_shutdown)(void) = RT_NULL;
void (*rt_dm_machine_reset)(void) = RT_NULL;

static RT_DEFINE_SPINLOCK(_power_off_lock);
static rt_slist_t _power_off_handler_nodes[RT_DM_POWER_OFF_MODE_NR][RT_DM_POWER_OFF_PRIO_NR] =
{
    [0 ... RT_DM_POWER_OFF_MODE_NR - 1] =
    {
        [0 ... RT_DM_POWER_OFF_PRIO_NR - 1] =
        {
            RT_NULL,
        }
    }
};

static rt_used char * const _mode_name[] =
{
    [RT_DM_POWER_OFF_MODE_SHUTDOWN] = "SHUTDOWN",
    [RT_DM_POWER_OFF_MODE_RESET] = "RESET",
};

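/*
 * Register a callback that is invoked when the system shuts down
 * (mode = RT_DM_POWER_OFF_MODE_SHUTDOWN) or resets (mode = RT_DM_POWER_OFF_MODE_RESET).
 * Callbacks are stored per priority level and invoked in ascending
 * priority-index order when the event happens.
 *
 * Illustrative usage (pmic_dev and pmic_power_off are hypothetical names):
 *
 *     rt_dm_power_off_handler(&pmic_dev, RT_DM_POWER_OFF_MODE_SHUTDOWN,
 *             RT_DM_POWER_OFF_PRIO_HIGH, pmic_power_off);
 */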
rt_err_t rt_dm_power_off_handler(struct rt_device *dev, int mode, int priority,
        rt_err_t (*callback)(struct rt_device *dev))
{
    struct power_off_track *track;

    RT_ASSERT(mode < RT_DM_POWER_OFF_MODE_NR);
    RT_ASSERT(priority < RT_DM_POWER_OFF_PRIO_NR);

    track = rt_malloc(sizeof(*track));

    if (!track)
    {
        return -RT_ENOMEM;
    }

    rt_slist_init(&track->list);
    track->dev = dev;
    track->callback = callback;

    rt_hw_spin_lock(&_power_off_lock.lock);

    rt_slist_insert(&_power_off_handler_nodes[mode][priority], &track->list);

    rt_hw_spin_unlock(&_power_off_lock.lock);

    return RT_EOK;
}

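/* Run every callback registered for the given mode, one priority level at a time */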
static void dm_power_off_handler(int mode)
{
    struct power_off_track *track;

    rt_hw_spin_lock(&_power_off_lock.lock);

    for (int i = 0; i < RT_DM_POWER_OFF_PRIO_NR; ++i)
    {
        rt_slist_t *nodes = &_power_off_handler_nodes[mode][i];

        rt_slist_for_each_entry(track, nodes, list)
        {
            rt_err_t err;
            struct rt_device *dev = track->dev;

            if ((err = track->callback(dev)))
            {
                LOG_E("%s: %s failed, error = %s", dev ? rt_dm_dev_get_name(dev) : RT_NULL,
                        _mode_name[mode], rt_strerror(err));
            }
        }
    }

    rt_hw_spin_unlock(&_power_off_lock.lock);
}

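/* A registered reboot-mode callback, notified with the reboot command string */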
struct reboot_mode_track
{
    rt_slist_t list;

    struct rt_device *dev;
    rt_err_t (*callback)(struct rt_device *, char *cmd);
};

static char *_reboot_mode_cmd = "normal";
static RT_DEFINE_SPINLOCK(_reboot_mode_lock);
static rt_slist_t _reboot_mode_handler_nodes = { RT_NULL };

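/*
 * Register a callback that receives the reboot command string ("normal" by
 * default, or whatever was passed to rt_hw_cpu_reset_mode()) just before
 * the machine is reset.
 */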
rt_err_t rt_dm_reboot_mode_register(struct rt_device *dev,
        rt_err_t (*callback)(struct rt_device *, char *cmd))
{
    struct reboot_mode_track *track;

    track = rt_malloc(sizeof(*track));

    if (!track)
    {
        return -RT_ENOMEM;
    }

    rt_slist_init(&track->list);
    track->dev = dev;
    track->callback = callback;

    rt_hw_spin_lock(&_reboot_mode_lock.lock);

    rt_slist_insert(&_reboot_mode_handler_nodes, &track->list);

    rt_hw_spin_unlock(&_reboot_mode_lock.lock);

    return RT_EOK;
}

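/* Remove a previously registered reboot-mode callback by its device */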
rt_err_t rt_dm_reboot_mode_unregister(struct rt_device *dev)
{
    struct reboot_mode_track *track, *target_track = RT_NULL;

    rt_hw_spin_lock(&_reboot_mode_lock.lock);

    rt_slist_for_each_entry(track, &_reboot_mode_handler_nodes, list)
    {
        if (track->dev == dev)
        {
            target_track = track;
            rt_slist_remove(&_reboot_mode_handler_nodes, &track->list);
            break;
        }
    }

    rt_hw_spin_unlock(&_reboot_mode_lock.lock);

    if (target_track)
    {
        rt_free(target_track);
    }

    return target_track ? RT_EOK : -RT_EEMPTY;
}

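/* Power-off handler that forwards the current reboot command to every registered callback */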
static rt_err_t dm_reboot_notify(struct rt_device *request_dev)
{
    struct reboot_mode_track *track;

    rt_hw_spin_lock(&_reboot_mode_lock.lock);

    rt_slist_for_each_entry(track, &_reboot_mode_handler_nodes, list)
    {
        rt_err_t err;
        struct rt_device *dev = track->dev;

        if ((err = track->callback(dev, _reboot_mode_cmd)))
        {
            LOG_E("%s: reboot mode apply failed, error = %s",
                    dev ? rt_dm_dev_get_name(dev) : RT_NULL, rt_strerror(err));
        }
    }

    rt_hw_spin_unlock(&_reboot_mode_lock.lock);

    return RT_EOK;
}

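/* Hook the reboot-mode notifier into the RESET power-off chain at core init */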
static int reboot_mode_init(void)
{
    return rt_dm_power_off_handler(RT_NULL, RT_DM_POWER_OFF_MODE_RESET,
            RT_DM_POWER_OFF_PRIO_HIGH, &dm_reboot_notify);
}
INIT_CORE_EXPORT(reboot_mode_init);

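/* Reset the machine with a specific reboot command; a NULL cmd keeps the current one */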
void rt_hw_cpu_reset_mode(char *cmd)
{
    static RT_DEFINE_SPINLOCK(pe_lock);

    rt_hw_spin_lock(&pe_lock.lock);

    _reboot_mode_cmd = cmd ? : _reboot_mode_cmd;

    rt_hw_cpu_reset();

    /* Unreachable */
    rt_hw_spin_unlock(&pe_lock.lock);
}

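/*
 * Shutdown/reset requested from interrupt context cannot be handled in place,
 * so the request is handed off to this dedicated "pwr" thread.
 */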
static struct rt_thread power_task;
static void power_task_async(void (*fn)(void));

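/* RT_TRUE when running in interrupt context (and not already on the power task) */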
rt_inline rt_bool_t power_need_async(void)
{
    struct rt_thread *tid = rt_thread_self();

    return tid && tid != &power_task && rt_interrupt_get_nest();
}

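/* Power off the machine: run the SHUTDOWN callbacks, then the machine-level hook */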
void rt_hw_cpu_shutdown(void)
{
    register rt_ubase_t level;

    if (power_need_async())
    {
        power_task_async(&rt_hw_cpu_shutdown);

        return;
    }

    dm_power_off_handler(RT_DM_POWER_OFF_MODE_SHUTDOWN);

    LOG_I("Shutdown");

    /* Machine shutdown */
    if (rt_dm_machine_shutdown)
    {
        rt_dm_machine_shutdown();
    }

    level = rt_hw_interrupt_disable();
    while (level)
    {
        RT_ASSERT(0);
    }
}
MSH_CMD_EXPORT_ALIAS(rt_hw_cpu_shutdown, shutdown, shutdown machine);

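/* Reset the machine: run the RESET callbacks (including the reboot-mode notifier), then the machine-level hook */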
void rt_hw_cpu_reset(void)
{
    register rt_ubase_t level;

    if (power_need_async())
    {
        power_task_async(&rt_hw_cpu_reset);

        return;
    }

    dm_power_off_handler(RT_DM_POWER_OFF_MODE_RESET);

    LOG_I("Reset");

    /* Machine reset */
    if (rt_dm_machine_reset)
    {
        rt_dm_machine_reset();
    }

    level = rt_hw_interrupt_disable();
    while (level)
    {
        RT_ASSERT(0);
    }
}

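/* msh command: "reset [mode]" resets the machine, optionally with a reboot command string */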
static int reset(int argc, char **argv)
{
    if (argc > 1)
    {
        rt_hw_cpu_reset_mode(argv[1]);
    }
    else
    {
        rt_hw_cpu_reset();
    }

    return 0;
}
MSH_CMD_EXPORT(reset, reset machine);

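/*
 * Entry of the power task. The deferred operation is read from the TCB's
 * parameter field because power_task_async() sets it after rt_thread_init()
 * has already built the initial stack frame.
 */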
static void power_task_entry(void *param)
{
    void (*fn)(void) = (void (*)(void))rt_thread_self()->parameter;

    fn();
}

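/* Hand the pending shutdown/reset operation to the power task and start it */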
static void power_task_async(void (*fn)(void))
{
    power_task.parameter = (void *)fn;

    rt_thread_startup(&power_task);
}

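/* Prepare the power task at core init; it is only started when an async shutdown/reset is requested */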
static int power_init(void)
{
    static rt_uint8_t power_task_stack[DM_THREAD_STACK_SIZE];

    return rt_thread_init(&power_task, "pwr", power_task_entry, RT_NULL,
            power_task_stack, sizeof(power_task_stack),
            RT_THREAD_PRIORITY_MAX / 2, 32);
}
INIT_CORE_EXPORT(power_init);