|
8 | 8 | #include "xe_printk.h" |
9 | 9 | #include "xe_sriov_packet.h" |
10 | 10 | #include "xe_sriov_packet_types.h" |
| 11 | +#include "xe_sriov_pf_helpers.h" |
| 12 | +#include "xe_sriov_pf_migration.h" |
| 13 | +#include "xe_sriov_printk.h" |
| 14 | + |
| 15 | +static struct mutex *pf_migration_mutex(struct xe_device *xe, unsigned int vfid) |
| 16 | +{ |
| 17 | + xe_assert(xe, IS_SRIOV_PF(xe)); |
| 18 | + xe_assert(xe, vfid <= xe_sriov_pf_get_totalvfs(xe)); |
| 19 | + |
| 20 | + return &xe->sriov.pf.vfs[vfid].migration.lock; |
| 21 | +} |
| 22 | + |
| 23 | +static struct xe_sriov_packet **pf_pick_pending(struct xe_device *xe, unsigned int vfid) |
| 24 | +{ |
| 25 | + xe_assert(xe, IS_SRIOV_PF(xe)); |
| 26 | + xe_assert(xe, vfid <= xe_sriov_pf_get_totalvfs(xe)); |
| 27 | + lockdep_assert_held(pf_migration_mutex(xe, vfid)); |
| 28 | + |
| 29 | + return &xe->sriov.pf.vfs[vfid].migration.pending; |
| 30 | +} |
| 31 | + |
| 32 | +static struct xe_sriov_packet ** |
| 33 | +pf_pick_descriptor(struct xe_device *xe, unsigned int vfid) |
| 34 | +{ |
| 35 | + xe_assert(xe, IS_SRIOV_PF(xe)); |
| 36 | + xe_assert(xe, vfid <= xe_sriov_pf_get_totalvfs(xe)); |
| 37 | + lockdep_assert_held(pf_migration_mutex(xe, vfid)); |
| 38 | + |
| 39 | + return &xe->sriov.pf.vfs[vfid].migration.descriptor; |
| 40 | +} |
| 41 | + |
| 42 | +static struct xe_sriov_packet **pf_pick_trailer(struct xe_device *xe, unsigned int vfid) |
| 43 | +{ |
| 44 | + xe_assert(xe, IS_SRIOV_PF(xe)); |
| 45 | + xe_assert(xe, vfid <= xe_sriov_pf_get_totalvfs(xe)); |
| 46 | + lockdep_assert_held(pf_migration_mutex(xe, vfid)); |
| 47 | + |
| 48 | + return &xe->sriov.pf.vfs[vfid].migration.trailer; |
| 49 | +} |
| 50 | + |
| 51 | +static struct xe_sriov_packet **pf_pick_read_packet(struct xe_device *xe, |
| 52 | + unsigned int vfid) |
| 53 | +{ |
| 54 | + struct xe_sriov_packet **data; |
| 55 | + |
| 56 | + data = pf_pick_descriptor(xe, vfid); |
| 57 | + if (*data) |
| 58 | + return data; |
| 59 | + |
| 60 | + data = pf_pick_pending(xe, vfid); |
| 61 | + if (!*data) |
| 62 | + *data = xe_sriov_pf_migration_save_consume(xe, vfid); |
| 63 | + if (*data) |
| 64 | + return data; |
| 65 | + |
| 66 | + data = pf_pick_trailer(xe, vfid); |
| 67 | + if (*data) |
| 68 | + return data; |
| 69 | + |
| 70 | + return NULL; |
| 71 | +} |
11 | 72 |
|
12 | 73 | static bool pkt_needs_bo(struct xe_sriov_packet *data) |
13 | 74 | { |
@@ -135,3 +196,235 @@ int xe_sriov_packet_init_from_hdr(struct xe_sriov_packet *data) |
135 | 196 |
|
136 | 197 | return pkt_init(data); |
137 | 198 | } |
| 199 | + |
| 200 | +static ssize_t pkt_hdr_read(struct xe_sriov_packet *data, |
| 201 | + char __user *buf, size_t len) |
| 202 | +{ |
| 203 | + loff_t offset = sizeof(data->hdr) - data->hdr_remaining; |
| 204 | + |
| 205 | + if (!data->hdr_remaining) |
| 206 | + return -EINVAL; |
| 207 | + |
| 208 | + if (len > data->hdr_remaining) |
| 209 | + len = data->hdr_remaining; |
| 210 | + |
| 211 | + if (copy_to_user(buf, (void *)&data->hdr + offset, len)) |
| 212 | + return -EFAULT; |
| 213 | + |
| 214 | + data->hdr_remaining -= len; |
| 215 | + |
| 216 | + return len; |
| 217 | +} |
| 218 | + |
| 219 | +static ssize_t pkt_data_read(struct xe_sriov_packet *data, |
| 220 | + char __user *buf, size_t len) |
| 221 | +{ |
| 222 | + if (len > data->remaining) |
| 223 | + len = data->remaining; |
| 224 | + |
| 225 | + if (copy_to_user(buf, data->vaddr + (data->hdr.size - data->remaining), len)) |
| 226 | + return -EFAULT; |
| 227 | + |
| 228 | + data->remaining -= len; |
| 229 | + |
| 230 | + return len; |
| 231 | +} |
| 232 | + |
| 233 | +static ssize_t pkt_read_single(struct xe_sriov_packet **data, |
| 234 | + unsigned int vfid, char __user *buf, size_t len) |
| 235 | +{ |
| 236 | + ssize_t copied = 0; |
| 237 | + |
| 238 | + if ((*data)->hdr_remaining) |
| 239 | + copied = pkt_hdr_read(*data, buf, len); |
| 240 | + else |
| 241 | + copied = pkt_data_read(*data, buf, len); |
| 242 | + |
| 243 | + if ((*data)->remaining == 0 && (*data)->hdr_remaining == 0) { |
| 244 | + xe_sriov_packet_free(*data); |
| 245 | + *data = NULL; |
| 246 | + } |
| 247 | + |
| 248 | + return copied; |
| 249 | +} |
| 250 | + |
| 251 | +/** |
| 252 | + * xe_sriov_packet_read_single() - Read migration data from a single packet. |
| 253 | + * @xe: the &xe_device |
| 254 | + * @vfid: the VF identifier |
| 255 | + * @buf: start address of userspace buffer |
| 256 | + * @len: requested read size from userspace |
| 257 | + * |
| 258 | + * Return: number of bytes that has been successfully read, |
| 259 | + * 0 if no more migration data is available, |
| 260 | + * -errno on failure. |
| 261 | + */ |
| 262 | +ssize_t xe_sriov_packet_read_single(struct xe_device *xe, unsigned int vfid, |
| 263 | + char __user *buf, size_t len) |
| 264 | +{ |
| 265 | + struct xe_sriov_packet **data = pf_pick_read_packet(xe, vfid); |
| 266 | + |
| 267 | + if (!data) |
| 268 | + return -ENODATA; |
| 269 | + if (IS_ERR(*data)) |
| 270 | + return PTR_ERR(*data); |
| 271 | + |
| 272 | + return pkt_read_single(data, vfid, buf, len); |
| 273 | +} |
| 274 | + |
| 275 | +static ssize_t pkt_hdr_write(struct xe_sriov_packet *data, |
| 276 | + const char __user *buf, size_t len) |
| 277 | +{ |
| 278 | + loff_t offset = sizeof(data->hdr) - data->hdr_remaining; |
| 279 | + int ret; |
| 280 | + |
| 281 | + if (len > data->hdr_remaining) |
| 282 | + len = data->hdr_remaining; |
| 283 | + |
| 284 | + if (copy_from_user((void *)&data->hdr + offset, buf, len)) |
| 285 | + return -EFAULT; |
| 286 | + |
| 287 | + data->hdr_remaining -= len; |
| 288 | + |
| 289 | + if (!data->hdr_remaining) { |
| 290 | + ret = xe_sriov_packet_init_from_hdr(data); |
| 291 | + if (ret) |
| 292 | + return ret; |
| 293 | + } |
| 294 | + |
| 295 | + return len; |
| 296 | +} |
| 297 | + |
| 298 | +static ssize_t pkt_data_write(struct xe_sriov_packet *data, |
| 299 | + const char __user *buf, size_t len) |
| 300 | +{ |
| 301 | + if (len > data->remaining) |
| 302 | + len = data->remaining; |
| 303 | + |
| 304 | + if (copy_from_user(data->vaddr + (data->hdr.size - data->remaining), buf, len)) |
| 305 | + return -EFAULT; |
| 306 | + |
| 307 | + data->remaining -= len; |
| 308 | + |
| 309 | + return len; |
| 310 | +} |
| 311 | + |
| 312 | +/** |
| 313 | + * xe_sriov_packet_write_single() - Write migration data to a single packet. |
| 314 | + * @xe: the &xe_device |
| 315 | + * @vfid: the VF identifier |
| 316 | + * @buf: start address of userspace buffer |
| 317 | + * @len: requested write size from userspace |
| 318 | + * |
| 319 | + * Return: number of bytes that has been successfully written, |
| 320 | + * -errno on failure. |
| 321 | + */ |
| 322 | +ssize_t xe_sriov_packet_write_single(struct xe_device *xe, unsigned int vfid, |
| 323 | + const char __user *buf, size_t len) |
| 324 | +{ |
| 325 | + struct xe_sriov_packet **data = pf_pick_pending(xe, vfid); |
| 326 | + int ret; |
| 327 | + ssize_t copied; |
| 328 | + |
| 329 | + if (IS_ERR_OR_NULL(*data)) { |
| 330 | + *data = xe_sriov_packet_alloc(xe); |
| 331 | + if (!*data) |
| 332 | + return -ENOMEM; |
| 333 | + } |
| 334 | + |
| 335 | + if ((*data)->hdr_remaining) |
| 336 | + copied = pkt_hdr_write(*data, buf, len); |
| 337 | + else |
| 338 | + copied = pkt_data_write(*data, buf, len); |
| 339 | + |
| 340 | + if ((*data)->hdr_remaining == 0 && (*data)->remaining == 0) { |
| 341 | + ret = xe_sriov_pf_migration_restore_produce(xe, vfid, *data); |
| 342 | + if (ret) { |
| 343 | + xe_sriov_packet_free(*data); |
| 344 | + return ret; |
| 345 | + } |
| 346 | + |
| 347 | + *data = NULL; |
| 348 | + } |
| 349 | + |
| 350 | + return copied; |
| 351 | +} |
| 352 | + |
| 353 | +#define MIGRATION_DESCRIPTOR_DWORDS 0 |
| 354 | +static size_t pf_descriptor_init(struct xe_device *xe, unsigned int vfid) |
| 355 | +{ |
| 356 | + struct xe_sriov_packet **desc = pf_pick_descriptor(xe, vfid); |
| 357 | + struct xe_sriov_packet *data; |
| 358 | + int ret; |
| 359 | + |
| 360 | + data = xe_sriov_packet_alloc(xe); |
| 361 | + if (!data) |
| 362 | + return -ENOMEM; |
| 363 | + |
| 364 | + ret = xe_sriov_packet_init(data, 0, 0, XE_SRIOV_PACKET_TYPE_DESCRIPTOR, |
| 365 | + 0, MIGRATION_DESCRIPTOR_DWORDS * sizeof(u32)); |
| 366 | + if (ret) { |
| 367 | + xe_sriov_packet_free(data); |
| 368 | + return ret; |
| 369 | + } |
| 370 | + |
| 371 | + *desc = data; |
| 372 | + |
| 373 | + return 0; |
| 374 | +} |
| 375 | + |
| 376 | +static void pf_pending_init(struct xe_device *xe, unsigned int vfid) |
| 377 | +{ |
| 378 | + struct xe_sriov_packet **data = pf_pick_pending(xe, vfid); |
| 379 | + |
| 380 | + *data = NULL; |
| 381 | +} |
| 382 | + |
| 383 | +#define MIGRATION_TRAILER_SIZE 0 |
| 384 | +static int pf_trailer_init(struct xe_device *xe, unsigned int vfid) |
| 385 | +{ |
| 386 | + struct xe_sriov_packet **trailer = pf_pick_trailer(xe, vfid); |
| 387 | + struct xe_sriov_packet *data; |
| 388 | + int ret; |
| 389 | + |
| 390 | + data = xe_sriov_packet_alloc(xe); |
| 391 | + if (!data) |
| 392 | + return -ENOMEM; |
| 393 | + |
| 394 | + ret = xe_sriov_packet_init(data, 0, 0, XE_SRIOV_PACKET_TYPE_TRAILER, |
| 395 | + 0, MIGRATION_TRAILER_SIZE); |
| 396 | + if (ret) { |
| 397 | + xe_sriov_packet_free(data); |
| 398 | + return ret; |
| 399 | + } |
| 400 | + |
| 401 | + *trailer = data; |
| 402 | + |
| 403 | + return 0; |
| 404 | +} |
| 405 | + |
/**
 * xe_sriov_packet_save_init() - Initialize the pending save migration packets.
 * @xe: the &xe_device
 * @vfid: the VF identifier
 *
 * Prepares the per-VF packet slots for a save stream: allocates the
 * descriptor and trailer packets and clears the pending slot, all under
 * the per-VF migration mutex.
 *
 * Return: 0 on success, -errno on failure.
 */
int xe_sriov_packet_save_init(struct xe_device *xe, unsigned int vfid)
{
	int ret;

	/* Interruptible lock: a pending signal aborts the setup with -EINTR. */
	scoped_cond_guard(mutex_intr, return -EINTR, pf_migration_mutex(xe, vfid)) {
		ret = pf_descriptor_init(xe, vfid);
		if (ret)
			return ret;

		/*
		 * NOTE(review): if trailer init fails here, the descriptor
		 * allocated above is left in its slot — presumably released
		 * by the migration teardown path; confirm.
		 */
		ret = pf_trailer_init(xe, vfid);
		if (ret)
			return ret;

		pf_pending_init(xe, vfid);
	}

	return 0;
}
0 commit comments