crate_io.c
/* -*- C -*- */
/*
 * Copyright (c) 2020 Seagate Technology LLC and/or its Affiliates
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * For any questions about this software or licensing,
 * please email opensource@seagate.com or cortx-questions@seagate.com.
 *
 */

#include <stdlib.h>
#include <stdio.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/time.h>
#include <assert.h>
#include <stdarg.h>
#include <unistd.h>

#include "lib/finject.h"
#include "lib/trace.h"
#include "motr/client.h"
#include "motr/client_internal.h"
#include "motr/idx.h"

#include "motr/m0crate/logger.h"
#include "motr/m0crate/workload.h"

extern struct crate_conf *conf;

void integrity(struct m0_uint128 object_id, unsigned char **md5toverify,
               int block_count, int idx_op);
void list_index_return(struct workload *w);

struct m0_op_context {
        m0_time_t               coc_op_launch;
        m0_time_t               coc_op_finish;
        int                     coc_index;
        int                     coc_obj_index;
        struct m0_workload_io  *coc_cwi;
        enum m0_operations      coc_op_code;
        struct m0_task_io      *coc_task;
        struct m0_bufvec       *coc_buf_vec;
        struct m0_bufvec       *coc_attr;
        struct m0_indexvec     *coc_index_vec;
};

typedef int (*cr_operation_t)(struct m0_workload_io *cwi,
                              struct m0_task_io *cti,
                              struct m0_op_context *op_ctx,
                              struct m0_obj *obj,
                              int free_slot,
                              int obj_idx,
                              int op_index);

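/*
 * Returns a pseudo-random offset in [0, end). Two rand() calls are
 * combined into one wide value so that the result is not capped at
 * RAND_MAX, which is typically only 2^31 - 1.
 */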
static size_t cr_rand___range_l(size_t end)
{
        size_t val_h;
        size_t val_l;
        size_t res;

        val_l = rand();
        val_h = rand();
        res = (val_h << 32) | val_l;
        return res % end;
}

void cr_time_acc(m0_time_t *t1, m0_time_t t2)
{
        *t1 = m0_time_add(*t1, t2);
}

void cr_op_stable(struct m0_op *op)
{
        struct m0_op_context *op_context;
        struct m0_task_io    *cti;
        m0_time_t             op_time;

        M0_PRE(op != NULL);
        op_context = op->op_datum;
        if (op_context != NULL) {
                cti = op_context->coc_task;

                op_context->coc_op_finish = m0_time_now();
                cti->cti_op_status[op_context->coc_index] = CR_OP_COMPLETE;
                cti->cti_nr_ops_done++;
                op_time = m0_time_sub(op_context->coc_op_finish,
                                      op_context->coc_op_launch);
                cr_time_acc(&cti->cti_op_acc_time, op_time);
                m0_semaphore_up(&cti->cti_max_ops_sem);
                op_context->coc_buf_vec = NULL;
        }
}

void cr_op_failed(struct m0_op *op)
{
        int                   op_idx;
        struct m0_op_context *op_context;

        M0_PRE(op != NULL);
        op_context = op->op_datum;

        cr_log(CLL_DEBUG, "Operation failed: %d", op->op_sm.sm_rc);
        if (op_context != NULL) {
                op_idx = op_context->coc_index;
                op_context->coc_op_finish = m0_time_now();
                op_context->coc_task->cti_op_status[op_idx] = CR_OP_COMPLETE;
                m0_semaphore_up(&op_context->coc_task->cti_max_ops_sem);
        }
}

static void cti_cleanup_op(struct m0_task_io *cti, int i)
{
        struct m0_op         *op = cti->cti_ops[i];
        struct m0_op_context *op_ctx = op->op_datum;

        /*
         * Only capture op_rc when it is non-zero; otherwise a 0 op_rc
         * from a re-used operation could overwrite an error previously
         * captured in cti->cti_op_rcs[i].
         */
        if (op->op_rc != 0)
                cti->cti_op_rcs[i] = op->op_rc;
        m0_op_fini(op);
        m0_op_free(op);
        cti->cti_ops[i] = NULL;
        if (op_ctx->coc_op_code == CR_WRITE ||
            op_ctx->coc_op_code == CR_READ) {
                m0_bufvec_free(op_ctx->coc_buf_vec);
                m0_bufvec_free(op_ctx->coc_attr);
                m0_indexvec_free(op_ctx->coc_index_vec);
                m0_free(op_ctx->coc_buf_vec);
                m0_free(op_ctx->coc_attr);
                m0_free(op_ctx->coc_index_vec);
        }
        m0_free(op_ctx);
        cti->cti_op_status[i] = CR_OP_NEW;
}

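/*
 * Finds a free slot in the task's operation table. Op slots cycle
 * through CR_OP_NEW -> CR_OP_EXECUTING -> CR_OP_COMPLETE; a completed
 * slot is cleaned up (op finalised and freed) before being handed out
 * again.
 */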
int cr_free_op_idx(struct m0_task_io *cti, uint32_t nr_ops)
{
        int i;

        for (i = 0; i < nr_ops; i++) {
                if (cti->cti_op_status[i] == CR_OP_NEW ||
                    cti->cti_op_status[i] == CR_OP_COMPLETE)
                        break;
        }

        M0_ASSERT(i < nr_ops);

        if (cti->cti_op_status[i] == CR_OP_COMPLETE)
                cti_cleanup_op(cti, i);

        return i;
}

static void cr_cti_cleanup(struct m0_task_io *cti, int nr_ops)
{
        int i;

        for (i = 0; i < nr_ops; i++)
                if (cti->cti_op_status[i] == CR_OP_COMPLETE)
                        cti_cleanup_op(cti, i);
}

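/*
 * Prepares the index and buffer vectors for a single IO operation:
 * cwi_bcount_per_op extents of cwi_bs bytes each. For random IO, a
 * bitmap of already-used block indices guarantees that no two extents
 * of the same operation overlap; for sequential IO, the extents simply
 * follow the previous operation's range.
 */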
int cr_io_vector_prep(struct m0_workload_io *cwi,
                      struct m0_task_io *cti,
                      struct m0_op_context *op_ctx,
                      int obj_idx,
                      int op_index)
{
        int                 i;
        int                 rc;
        uint64_t            bitmap_index;
        uint64_t            nr_segments;
        uint64_t            start_offset;
        uint64_t            op_start_offset = 0;
        size_t              rand_offset;
        uint64_t            io_size = cwi->cwi_io_size;
        uint64_t            offset;
        struct m0_bufvec   *buf_vec = NULL;
        struct m0_bufvec   *attr = NULL;
        struct m0_indexvec *index_vec = NULL;
        struct m0_bitmap    segment_indices;

        index_vec = m0_alloc(sizeof *index_vec);
        attr = m0_alloc(sizeof *attr);
        if (index_vec == NULL || attr == NULL)
                goto enomem;

        rc = m0_indexvec_alloc(index_vec, cwi->cwi_bcount_per_op) ?:
             m0_bufvec_alloc(attr, cwi->cwi_bcount_per_op, 1);
        if (rc != 0)
                goto enomem;
        /*
         * When io_size is not a multiple of cwi_bs then bitmap_index can
         * become equal to io_size/cwi->cwi_bs, hence '+1' to the size
         * of bitmap.
         */
        nr_segments = io_size / cwi->cwi_bs + 1;
        rc = m0_bitmap_init(&segment_indices, nr_segments);
        if (rc != 0)
                goto enomem;

        if (!cwi->cwi_random_io)
                op_start_offset = op_index * cwi->cwi_bs *
                                  cwi->cwi_bcount_per_op;

        if (op_ctx->coc_op_code == CR_READ)
                buf_vec = &cti->cti_rd_bufvec[obj_idx];
        else
                buf_vec = &cti->cti_bufvec[obj_idx];

        M0_ASSERT(buf_vec != NULL);
        M0_ASSERT(nr_segments > cwi->cwi_bcount_per_op);
        for (i = 0; i < cwi->cwi_bcount_per_op; i++) {
                if (cwi->cwi_random_io) {
                        do {
                                /* Generate the random offset. */
                                rand_offset = cr_rand___range_l(io_size);
                                /*
                                 * m0_round_down() prevents partially
                                 * overlapping indexvec segments.
                                 */
                                offset = m0_round_down(rand_offset,
                                                       cwi->cwi_bs);
                                bitmap_index = offset / cwi->cwi_bs;
                        } while (m0_bitmap_get(&segment_indices, bitmap_index));

                        m0_bitmap_set(&segment_indices, bitmap_index, true);
                } else
                        offset = op_start_offset + cwi->cwi_bs * i;

                /* If writing on a shared object, start from the allotted range. */
                if (cwi->cwi_share_object) {
                        start_offset = cti->cti_task_idx * io_size;
                        index_vec->iv_index[i] = start_offset + offset;
                } else
                        index_vec->iv_index[i] = offset;

                index_vec->iv_vec.v_count[i] = cwi->cwi_bs;
        }

        m0_bitmap_fini(&segment_indices);
        op_ctx->coc_buf_vec = buf_vec;
        op_ctx->coc_index_vec = index_vec;
        op_ctx->coc_attr = NULL;

        return 0;
enomem:
        if (index_vec != NULL)
                m0_indexvec_free(index_vec);
        m0_bufvec_free(buf_vec);
        m0_free(attr);
        m0_free(index_vec);
        m0_free(buf_vec);
        return -ENOMEM;
}

static struct m0_fid *check_fid(struct m0_fid *id)
{
        if (m0_fid_is_set(id) && m0_fid_is_valid(id))
                return id;
        else
                return NULL;
}

int cr_namei_create(struct m0_workload_io *cwi,
                    struct m0_task_io *cti,
                    struct m0_op_context *op_ctx,
                    struct m0_obj *obj,
                    int free_slot,
                    int obj_idx,
                    int op_index)
{
        return m0_entity_create(check_fid(&cwi->cwi_pool_id),
                                &obj->ob_entity, &cti->cti_ops[free_slot]);
}

int cr_namei_open(struct m0_workload_io *cwi,
                  struct m0_task_io *cti,
                  struct m0_op_context *op_ctx,
                  struct m0_obj *obj,
                  int free_slot,
                  int obj_idx,
                  int op_index)
{
        const struct m0_uint128 *id = &cti->cti_ids[op_index];

        M0_PRE(obj != NULL);
        M0_SET0(obj);
        m0_obj_init(obj, crate_uber_realm(), id, cwi->cwi_layout_id);
        return m0_entity_open(&obj->ob_entity, &cti->cti_ops[free_slot]);
}

int cr_namei_delete(struct m0_workload_io *cwi,
                    struct m0_task_io *cti,
                    struct m0_op_context *op_ctx,
                    struct m0_obj *obj,
                    int free_slot,
                    int obj_idx,
                    int op_index)
{
        return m0_entity_delete(&obj->ob_entity,
                                &cti->cti_ops[free_slot]);
}

int cr_io_write(struct m0_workload_io *cwi,
                struct m0_task_io *cti,
                struct m0_op_context *op_ctx,
                struct m0_obj *obj,
                int free_slot,
                int obj_idx,
                int op_index)
{
        int rc;

        op_ctx->coc_op_code = CR_WRITE;
        rc = cr_io_vector_prep(cwi, cti, op_ctx, obj_idx, op_index);
        if (rc != 0)
                return rc;
        rc = m0_obj_op(obj, M0_OC_WRITE,
                       op_ctx->coc_index_vec, op_ctx->coc_buf_vec,
                       op_ctx->coc_attr, 0, 0, &cti->cti_ops[free_slot]);
        if (rc != 0)
                M0_ERR(rc);
        return rc;
}

int cr_io_read(struct m0_workload_io *cwi,
               struct m0_task_io *cti,
               struct m0_op_context *op_ctx,
               struct m0_obj *obj,
               int free_slot,
               int obj_idx,
               int op_index)
{
        int rc;

        op_ctx->coc_op_code = CR_READ;
        rc = cr_io_vector_prep(cwi, cti, op_ctx, obj_idx, op_index);
        if (rc != 0)
                return rc;
        rc = m0_obj_op(obj, M0_OC_READ,
                       op_ctx->coc_index_vec, op_ctx->coc_buf_vec,
                       op_ctx->coc_attr, 0, 0, &cti->cti_ops[free_slot]);
        if (rc != 0)
                M0_ERR(rc);
        return rc;
}

cr_operation_t opcode_operation_map[] = {
        [CR_CREATE] = cr_namei_create,
        [CR_OPEN]   = cr_namei_open,
        [CR_WRITE]  = cr_io_write,
        [CR_READ]   = cr_io_read,
        [CR_DELETE] = cr_namei_delete,
};

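/*
 * Launches cti_nr_ops asynchronous operations against a single object.
 * The cti_max_ops_sem semaphore throttles the number of operations in
 * flight; the completion callbacks (cr_op_stable/cr_op_failed) release
 * slots back to the pool.
 */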
int cr_execute_ops(struct m0_workload_io *cwi, struct m0_task_io *cti,
                   struct m0_obj *obj, struct m0_op_ops *cbs,
                   enum m0_operations op_code, int obj_idx)
{
        int                   rc = 0;
        int                   i;
        int                   idx;
        struct m0_op_context *op_ctx;
        cr_operation_t        spec_op;

        for (i = 0; i < cti->cti_nr_ops; i++) {
                m0_semaphore_down(&cti->cti_max_ops_sem);
                /* We can launch at least one more operation. */
                idx = cr_free_op_idx(cti, cwi->cwi_max_nr_ops);
                op_ctx = m0_alloc(sizeof *op_ctx);
                M0_ASSERT(op_ctx != NULL);

                op_ctx->coc_index = idx;
                op_ctx->coc_obj_index = obj_idx;
                op_ctx->coc_task = cti;
                op_ctx->coc_cwi = cwi;
                op_ctx->coc_op_code = op_code;

                spec_op = opcode_operation_map[op_code];
                rc = spec_op(cwi, cti, op_ctx, obj, idx, obj_idx, i);
                if (rc != 0)
                        break;

                M0_ASSERT(cti->cti_ops[idx] != NULL);
                cti->cti_ops[idx]->op_datum = op_ctx;
                m0_op_setup(cti->cti_ops[idx], cbs, 0);
                cti->cti_op_status[idx] = CR_OP_EXECUTING;
                op_ctx->coc_op_launch = m0_time_now();
                m0_op_launch(&cti->cti_ops[idx], 1);
        }
        return rc;
}

void cr_cti_report(struct m0_task_io *cti, enum m0_operations op_code)
{
        struct m0_workload_io *cwi = cti->cti_cwi;

        m0_mutex_lock(&cwi->cwi_g.cg_mutex);
        cr_time_acc(&cwi->cwi_g.cg_cwi_acc_time[op_code], cti->cti_op_acc_time);
        cwi->cwi_ops_done[op_code] += cti->cti_nr_ops_done;
        m0_mutex_unlock(&cwi->cwi_g.cg_mutex);

        cti->cti_op_acc_time = 0;
        cti->cti_nr_ops_done = 0;
}

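/*
 * Runs one metadata (namei) phase -- create, open or delete -- over all
 * of the task's objects, then drains the in-flight operations and folds
 * the per-op return codes into a single rc.
 */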
int cr_op_namei(struct m0_workload_io *cwi, struct m0_task_io *cti,
                enum m0_operations op_code)
{
        int                   i;
        int                   idx;
        m0_time_t             stime;
        m0_time_t             etime;
        struct m0_op_context *op_ctx;
        struct m0_op_ops     *cbs;
        cr_operation_t        spec_op;
        int                   rc = 0;

        cbs = m0_alloc(sizeof *cbs);
        M0_ASSERT(cbs != NULL);
        cbs->oop_executed = NULL;
        cbs->oop_stable = cr_op_stable;
        cbs->oop_failed = cr_op_failed;

        cr_log(CLL_TRACE, TIME_F" t%02d: %s objects...\n",
               TIME_P(m0_time_now()), cti->cti_task_idx,
               op_code == CR_CREATE ? "Creating" :
               op_code == CR_OPEN ? "Opening" : "Deleting");

        stime = m0_time_now();

        for (i = 0; i < cwi->cwi_nr_objs; i++) {
                m0_semaphore_down(&cti->cti_max_ops_sem);
                /* We can launch at least one more operation. */
                idx = cr_free_op_idx(cti, cwi->cwi_max_nr_ops);
                op_ctx = m0_alloc(sizeof *op_ctx);
                M0_ASSERT(op_ctx != NULL);

                op_ctx->coc_index = idx;
                op_ctx->coc_task = cti;
                op_ctx->coc_op_code = op_code;
                spec_op = opcode_operation_map[op_code];
                spec_op(cwi, cti, op_ctx, &cti->cti_objs[i], idx, 0, i);

                cti->cti_ops[idx]->op_datum = op_ctx;
                m0_op_setup(cti->cti_ops[idx], cbs, 0);
                cti->cti_op_status[idx] = CR_OP_EXECUTING;
                op_ctx->coc_op_launch = m0_time_now();
                m0_op_launch(&cti->cti_ops[idx], 1);
        }
        /* Task is done. Wait for all operations to complete. */
        for (i = 0; i < cwi->cwi_max_nr_ops; i++)
                m0_semaphore_down(&cti->cti_max_ops_sem);

        etime = m0_time_sub(m0_time_now(), stime);
        m0_mutex_lock(&cwi->cwi_g.cg_mutex);
        if (etime > cwi->cwi_time[op_code])
                cwi->cwi_time[op_code] = etime;
        m0_mutex_unlock(&cwi->cwi_g.cg_mutex);

        cr_cti_report(cti, op_code);
        cr_cti_cleanup(cti, cwi->cwi_max_nr_ops);

        m0_free(cbs);

        /*
         * Check op_rc for all the client operations. If any client
         * operation failed, set rc to its cti->cti_op_rcs[i].
         */
        for (i = 0; i < cwi->cwi_max_nr_ops; i++) {
                if (cti->cti_op_rcs[i] != 0) {
                        rc = cti->cti_op_rcs[i];
                        break;
                }
        }

        /*
         * Create creates an object and closes it. Open *should* be
         * called before any further operation. The open call initialises
         * the client object, which remains open until all IO operations
         * are complete.
         */
        if (op_code == CR_CREATE && cti->cti_objs != NULL)
                for (i = 0; i < cti->cti_cwi->cwi_nr_objs; i++)
                        m0_obj_fini(&cti->cti_objs[i]);

        cr_log(CLL_TRACE, TIME_F" t%02d: %s done.\n",
               TIME_P(m0_time_now()), cti->cti_task_idx,
               op_code == CR_CREATE ? "Creation" :
               op_code == CR_OPEN ? "Open" : "Deletion");

        return rc;
}

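/*
 * Runs one IO phase (write or read) over all of the task's objects and
 * records the elapsed wall-clock time in cwi_time[op_code], which the
 * final report in run() divides by the op count.
 */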
int cr_op_io(struct m0_workload_io *cwi, struct m0_task_io *cti,
             enum m0_operations op_code)
{
        int               rc = 0;
        int               i;
        m0_time_t         stime;
        m0_time_t         etime;
        struct m0_op_ops *cbs;

        M0_ALLOC_PTR(cbs);
        if (cbs == NULL)
                return -ENOMEM;

        cbs->oop_executed = NULL;
        cbs->oop_stable = cr_op_stable;
        cbs->oop_failed = cr_op_failed;
        cr_log(CLL_TRACE, TIME_F" t%02d: %s objects...\n",
               TIME_P(m0_time_now()), cti->cti_task_idx,
               op_code == CR_WRITE ? "Writing" : "Reading");

        stime = m0_time_now();

        for (i = 0; i < cwi->cwi_nr_objs; i++) {
                rc = cr_execute_ops(cwi, cti, &cti->cti_objs[i], cbs, op_code,
                                    i);
                if (rc != 0)
                        break;
        }
        /* Wait for all operations to complete. */
        for (i = 0; i < cwi->cwi_max_nr_ops; i++)
                m0_semaphore_down(&cti->cti_max_ops_sem);

        etime = m0_time_sub(m0_time_now(), stime);
        m0_mutex_lock(&cwi->cwi_g.cg_mutex);
        if (etime > cwi->cwi_time[op_code])
                cwi->cwi_time[op_code] = etime;
        m0_mutex_unlock(&cwi->cwi_g.cg_mutex);

        cr_cti_report(cti, op_code);
        cr_cti_cleanup(cti, cwi->cwi_max_nr_ops);

        m0_free(cbs);
        cr_log(CLL_TRACE, TIME_F" t%02d: %s done.\n",
               TIME_P(m0_time_now()), cti->cti_task_idx,
               op_code == CR_WRITE ? "Write" : "Read");

        return rc;
}

int cr_task_share_execute(struct m0_task_io *cti)
{
        int                    rc;
        struct m0_workload_io *cwi;

        cwi = cti->cti_cwi;

        /* Only object WRITE is supported for a shared object. */
        M0_ASSERT(cwi->cwi_opcode == CR_WRITE);
        m0_mutex_lock(&cwi->cwi_g.cg_mutex);
        if (!cwi->cwi_g.cg_created) {
                rc = cr_op_namei(cti->cti_cwi, cti, CR_CREATE);
                if (rc != 0) {
                        m0_mutex_unlock(&cwi->cwi_g.cg_mutex);
                        return rc;
                }
                cwi->cwi_g.cg_created = true;
        }
        m0_mutex_unlock(&cwi->cwi_g.cg_mutex);

        cr_op_io(cti->cti_cwi, cti, cwi->cwi_opcode);

        m0_mutex_lock(&cwi->cwi_g.cg_mutex);
        cwi->cwi_g.cg_nr_tasks--;
        if (cwi->cwi_g.cg_nr_tasks == 0) {
                cr_op_namei(cti->cti_cwi, cti, CR_DELETE);
                cwi->cwi_g.cg_created = false;
        }
        m0_mutex_unlock(&cwi->cwi_g.cg_mutex);

        return 0;
}

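/*
 * Executes the configured workload for one task. Every opcode expands
 * to a create/open/IO/delete sequence, e.g. CR_READ first writes the
 * object so that there is data to read back, while CR_POPULATE and
 * CR_READ_ONLY leave the objects in place for a later run.
 */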
int cr_task_execute(struct m0_task_io *cti)
{
        struct m0_workload_io *cwi = cti->cti_cwi;
        int                    rc = 0;

        switch (cwi->cwi_opcode) {
        case CR_CREATE:
                cr_op_namei(cwi, cti, CR_CREATE);
                rc = cr_op_namei(cwi, cti, CR_OPEN);
                if (rc == 0)
                        cr_op_namei(cwi, cti, CR_DELETE);
                break;
        case CR_OPEN:
                cr_op_namei(cwi, cti, CR_CREATE);
                rc = cr_op_namei(cwi, cti, CR_OPEN);
                if (rc == 0)
                        cr_op_namei(cwi, cti, CR_DELETE);
                break;
        case CR_WRITE:
                cr_op_namei(cwi, cti, CR_CREATE);
                rc = cr_op_namei(cwi, cti, CR_OPEN);
                if (rc == 0) {
                        cr_op_io(cwi, cti, CR_WRITE);
                        cr_op_namei(cwi, cti, CR_DELETE);
                }
                break;
        case CR_READ:
                cr_op_namei(cwi, cti, CR_CREATE);
                rc = cr_op_namei(cwi, cti, CR_OPEN);
                if (rc == 0) {
                        cr_op_io(cwi, cti, CR_WRITE);
                        cr_op_io(cwi, cti, CR_READ);
                        cr_op_namei(cwi, cti, CR_DELETE);
                }
                break;
        case CR_DELETE:
                cr_op_namei(cwi, cti, CR_CREATE);
                rc = cr_op_namei(cwi, cti, CR_OPEN);
                if (rc == 0)
                        cr_op_namei(cwi, cti, CR_DELETE);
                break;
        case CR_POPULATE:
                cr_op_namei(cwi, cti, CR_CREATE);
                rc = cr_op_namei(cwi, cti, CR_OPEN);
                if (rc == 0)
                        rc = cr_op_io(cwi, cti, CR_WRITE);
                break;
        case CR_CLEANUP:
                rc = cr_op_namei(cwi, cti, CR_OPEN);
                if (rc == 0)
                        cr_op_namei(cwi, cti, CR_DELETE);
                break;
        case CR_READ_ONLY:
                rc = cr_op_namei(cwi, cti, CR_OPEN);
                if (rc == 0)
                        cr_op_io(cwi, cti, CR_READ);
                break;
        }
        return rc;
}

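/*
 * Workload threads are plain pthreads; a thread without Motr TLS must
 * be adopted with m0_thread_adopt() before it may issue client calls,
 * and is shunned again in cr_release_motr_thread() when the task is
 * done.
 */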
static int cr_adopt_motr_thread(struct m0_task_io *cti)
{
        int               rc = 0;
        struct m0_thread *mthread;

        cti->cti_mthread = NULL;
        if (m0_thread_tls() == NULL) {
                mthread = m0_alloc(sizeof(struct m0_thread));
                if (mthread == NULL)
                        return -ENOMEM;

                memset(mthread, 0, sizeof(struct m0_thread));
                rc = m0_thread_adopt(mthread, m0_instance->m0c_motr);
                cti->cti_mthread = mthread;
        }
        return rc;
}

static int cr_release_motr_thread(struct m0_task_io *cti)
{
        if (cti->cti_mthread) {
                m0_thread_shun();
                m0_free(cti->cti_mthread);
        }
        return 0;
}

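/*
 * Fills 'buffer' with the first 'size' bytes of 'filename'; the file
 * supplies the payload pattern for the write buffers.
 */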
int cr_buffer_read(char *buffer, const char *filename, uint64_t size)
{
        FILE  *fp;
        size_t bytes;

        fp = fopen(filename, "r");
        if (fp == NULL) {
                cr_log(CLL_ERROR, "Unable to open file: %s\n", filename);
                return -errno;
        }

        bytes = fread(buffer, 1, size, fp);
        if (bytes < size) {
                fclose(fp);
                return -EINVAL;
        }
        fclose(fp);
        return 0;
}

static uint64_t nz_rand(void)
{
        uint64_t r;

        do {
                r = ((uint64_t)rand() << 32) | rand();
        } while (r == 0);

        return r;
}

void cr_get_oids(struct m0_uint128 *ids, uint32_t nr_objs)
{
        int i;

        for (i = 0; i < nr_objs; i++) {
                ids[i].u_lo = nz_rand();
                ids[i].u_hi = nz_rand();
                /* Highest 8 bits are left for Motr. */
                ids[i].u_hi = ids[i].u_hi & ~(0xFFUL << 56);
                cr_log(CLL_TRACE, "oid %016" PRIx64 ":%016" PRIx64 "\n",
                       ids[i].u_hi, ids[i].u_lo);
        }
}

void cr_task_bufs_free(struct m0_task_io *cti, int idx)
{
        m0_bufvec_free_aligned(&cti->cti_bufvec[idx],
                               M0_DEFAULT_BUF_SHIFT);
        m0_bufvec_free_aligned(&cti->cti_rd_bufvec[idx],
                               M0_DEFAULT_BUF_SHIFT);
}

void cr_task_io_cleanup(struct m0_task_io **cti_p)
{
        int                    i;
        struct m0_task_io     *cti = *cti_p;
        struct m0_workload_io *cwi = cti->cti_cwi;

        if (cti->cti_objs != NULL)
                for (i = 0; i < cti->cti_cwi->cwi_nr_objs; i++) {
                        m0_obj_fini(&cti->cti_objs[i]);
                        if (cwi->cwi_opcode != CR_CLEANUP)
                                cr_task_bufs_free(cti, i);
                }
        m0_free(cti->cti_objs);
        m0_free(cti->cti_ops);
        m0_free(cti->cti_bufvec);
        m0_free(cti->cti_rd_bufvec);
        m0_free(cti->cti_op_status);
        m0_free(cti->cti_op_rcs);
        m0_free0(cti_p);
}

int cr_task_prep_bufs(struct m0_workload_io *cwi,
                      struct m0_task_io *cti)
{
        int i;
        int k;
        int rc;

        M0_ALLOC_ARR(cti->cti_bufvec, cwi->cwi_nr_objs);
        M0_ALLOC_ARR(cti->cti_rd_bufvec, cwi->cwi_nr_objs);

        for (i = 0; i < cwi->cwi_nr_objs; i++) {
                rc = m0_bufvec_alloc_aligned(&cti->cti_bufvec[i],
                                             cwi->cwi_bcount_per_op,
                                             cwi->cwi_bs,
                                             M0_DEFAULT_BUF_SHIFT) ?:
                     m0_bufvec_alloc_aligned(&cti->cti_rd_bufvec[i],
                                             cwi->cwi_bcount_per_op,
                                             cwi->cwi_bs,
                                             M0_DEFAULT_BUF_SHIFT);
                if (rc != 0)
                        return rc;
                for (k = 0; k < cwi->cwi_bcount_per_op; k++) {
                        rc = cr_buffer_read(cti->cti_bufvec[i].ov_buf[k],
                                            cwi->cwi_filename, cwi->cwi_bs);
                        if (rc != 0)
                                return rc;
                }
        }
        return 0;
}

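/*
 * Allocates and initialises one task: IO buffers, object ids (shared,
 * sequential from cwi_start_obj_id, or random), client objects and the
 * per-slot op bookkeeping arrays. On failure, everything allocated so
 * far is torn down through cr_task_io_cleanup().
 */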
int cr_task_prep_one(struct m0_workload_io *cwi,
                     struct m0_task_io **cti_out)
{
        int                rc;
        int                i;
        struct m0_task_io *cti;

        if (M0_ALLOC_PTR(*cti_out) == NULL)
                return -ENOMEM;
        cti = *cti_out;

        cti->cti_cwi = cwi;
        cti->cti_progress = 0;

        if (cwi->cwi_opcode != CR_CLEANUP) {
                cti->cti_nr_ops = (cwi->cwi_io_size /
                                   (cwi->cwi_bs * cwi->cwi_bcount_per_op)) ?: 1;
                rc = cr_task_prep_bufs(cwi, cti);
                if (rc != 0)
                        goto error_rc;
        }

        M0_ALLOC_ARR(cti->cti_ids, cwi->cwi_nr_objs);
        if (cti->cti_ids == NULL)
                goto enomem;

        if (cwi->cwi_share_object) {
                cti->cti_ids[0] = cwi->cwi_g.cg_oid;
        } else if (M0_IN(cwi->cwi_opcode, (CR_POPULATE, CR_CLEANUP,
                                           CR_READ_ONLY))) {
                for (i = 0; i < cwi->cwi_nr_objs; i++) {
                        cwi->cwi_start_obj_id.u_lo++;
                        cti->cti_ids[i] = cwi->cwi_start_obj_id;
                }
        } else {
                cti->cti_start_offset = 0;
                cr_get_oids(cti->cti_ids, cwi->cwi_nr_objs);
        }

        M0_ALLOC_ARR(cti->cti_objs, cwi->cwi_nr_objs);
        if (cti->cti_objs == NULL)
                goto enomem;

        for (i = 0; i < cwi->cwi_nr_objs; i++)
                m0_obj_init(&cti->cti_objs[i],
                            crate_uber_realm(),
                            &cti->cti_ids[i], cwi->cwi_layout_id);

        M0_ALLOC_ARR(cti->cti_ops, cwi->cwi_max_nr_ops);
        if (cti->cti_ops == NULL)
                goto enomem;

        M0_ALLOC_ARR(cti->cti_op_status, cwi->cwi_max_nr_ops);
        if (cti->cti_op_status == NULL)
                goto enomem;

        M0_ALLOC_ARR(cti->cti_op_rcs, cwi->cwi_max_nr_ops);
        if (cti->cti_op_rcs == NULL)
                goto enomem;

        return 0;
enomem:
        rc = -ENOMEM;
error_rc:
        cr_task_io_cleanup(cti_out);
        return rc;
}

int cr_tasks_prepare(struct workload *w, struct workload_task *tasks)
{
        int                     i;
        int                     rc;
        uint32_t                nr_tasks;
        struct m0_workload_io  *cwi = w->u.cw_io;
        struct m0_task_io     **cti;

        nr_tasks = w->cw_nr_thread;
        if (cwi->cwi_opcode == CR_CLEANUP)
                cwi->cwi_share_object = false;

        if (cwi->cwi_share_object) {
                /* Generate only one id. */
                cwi->cwi_nr_objs = 1;
                cr_get_oids(&cwi->cwi_g.cg_oid, 1);
                cwi->cwi_g.cg_nr_tasks = nr_tasks;
                cwi->cwi_g.cg_created = false;
        }

        for (i = 0; i < nr_tasks; i++) {
                cti = (struct m0_task_io **)&tasks[i].u.m0_task;
                rc = cr_task_prep_one(cwi, cti);
                if (rc != 0) {
                        cti = NULL;
                        return rc;
                }
                M0_ASSERT(*cti != NULL);

                (*cti)->cti_task_idx = i;
        }
        return 0;
}

int cr_tasks_release(struct workload *w, struct workload_task *tasks)
{
        int                i;
        uint32_t           nr_tasks;
        struct m0_task_io *cti;

        nr_tasks = w->cw_nr_thread;

        for (i = 0; i < nr_tasks; i++) {
                cti = tasks[i].u.m0_task;
                if (cti != NULL)
                        cr_task_io_cleanup(&cti);
        }

        return 0;
}

bool cr_time_not_expired(struct workload *w)
{
        struct m0_workload_io *cwi;
        m0_time_t              time_now;

        cwi = w->u.cw_io;
        time_now = m0_time_now();

        if (cwi->cwi_execution_time == M0_TIME_NEVER)
                return true;
        return m0_time_sub(time_now, cwi->cwi_start_time) <
               cwi->cwi_execution_time ? true : false;
}

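/*
 * Bandwidth in bytes/s: with 'time' in nanoseconds,
 * bytes * M0_TIME_ONE_MSEC / (time / 1000) == bytes * 10^9 / time.
 */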
static uint64_t bw(uint64_t bytes, m0_time_t time)
{
        return bytes * M0_TIME_ONE_MSEC / (time / 1000);
}

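/*
 * Workload entry point: runs up to cwi_rounds rounds (or until the
 * configured execution time expires), each round preparing the tasks,
 * running them to completion and releasing them, then prints the
 * aggregated timing, size and bandwidth report.
 */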
void run(struct workload *w, struct workload_task *tasks)
{
        int                    i;
        uint64_t               written;
        uint64_t               read;
        int                    rc;
        struct m0_workload_io *cwi = w->u.cw_io;
        struct m0_uint128      start_obj_id;

        start_obj_id = cwi->cwi_start_obj_id;
        m0_mutex_init(&cwi->cwi_g.cg_mutex);
        cwi->cwi_start_time = m0_time_now();
        if (M0_IN(cwi->cwi_opcode, (CR_POPULATE, CR_CLEANUP)) &&
            !entity_id_is_valid(&cwi->cwi_start_obj_id))
                cwi->cwi_start_obj_id = M0_ID_APP;
        for (i = 0; i < cwi->cwi_rounds && cr_time_not_expired(w); i++) {
                cr_log(CLL_INFO, "cwi->cwi_rounds : %d, iteration : %d\n",
                       cwi->cwi_rounds, i);
                rc = cr_tasks_prepare(w, tasks);
                if (rc != 0) {
                        cr_tasks_release(w, tasks);
                        m0_mutex_fini(&cwi->cwi_g.cg_mutex);
                        cr_log(CLL_ERROR, "Task preparation failed.\n");
                        return;
                }
                workload_start(w, tasks);
                workload_join(w, tasks);
                cr_tasks_release(w, tasks);

                /*
                 * When cwi->cwi_rounds > 1, re-set the starting object id
                 * to the original one, so that the read-only operation can
                 * start reading the populated data from that object index.
                 */
                if (cwi->cwi_opcode == CR_READ_ONLY)
                        cwi->cwi_start_obj_id = start_obj_id;
        }

        m0_mutex_fini(&cwi->cwi_g.cg_mutex);
        cwi->cwi_finish_time = m0_time_now();

        cr_log(CLL_INFO, "I/O workload is finished.\n");
        cr_log(CLL_INFO, "Total: time="TIME_F" objs=%d ops=%" PRIu64 "\n",
               TIME_P(m0_time_sub(cwi->cwi_finish_time, cwi->cwi_start_time)),
               cwi->cwi_nr_objs * w->cw_nr_thread,
               cwi->cwi_ops_done[CR_WRITE] + cwi->cwi_ops_done[CR_READ]);
        if (cwi->cwi_ops_done[CR_CREATE] != 0)
                cr_log(CLL_INFO, "C: "TIME_F" ("TIME_F" per op)\n",
                       TIME_P(cwi->cwi_time[CR_CREATE]),
                       TIME_P(cwi->cwi_time[CR_CREATE] /
                              cwi->cwi_ops_done[CR_CREATE]));
        if (cwi->cwi_ops_done[CR_OPEN] != 0)
                cr_log(CLL_INFO, "O: "TIME_F" ("TIME_F" per op)\n",
                       TIME_P(cwi->cwi_time[CR_OPEN]),
                       TIME_P(cwi->cwi_time[CR_OPEN] /
                              cwi->cwi_ops_done[CR_OPEN]));
        if (cwi->cwi_ops_done[CR_DELETE] != 0)
                cr_log(CLL_INFO, "D: "TIME_F" ("TIME_F" per op)\n",
                       TIME_P(cwi->cwi_time[CR_DELETE]),
                       TIME_P(cwi->cwi_time[CR_DELETE] /
                              cwi->cwi_ops_done[CR_DELETE]));
        if (cwi->cwi_ops_done[CR_WRITE] == 0)
                return;
        written = cwi->cwi_bs * cwi->cwi_bcount_per_op *
                  cwi->cwi_ops_done[CR_WRITE];
        cr_log(CLL_INFO, "W: "TIME_F" ("TIME_F" per op), "
               "%" PRIu64 " KiB, %" PRIu64 " KiB/s\n",
               TIME_P(cwi->cwi_time[CR_WRITE]),
               TIME_P(cwi->cwi_time[CR_WRITE] /
                      cwi->cwi_ops_done[CR_WRITE]), written / 1024,
               bw(written, cwi->cwi_time[CR_WRITE]) / 1024);
        if (cwi->cwi_ops_done[CR_READ] == 0)
                return;
        read = cwi->cwi_bs * cwi->cwi_bcount_per_op *
               cwi->cwi_ops_done[CR_READ];
        cr_log(CLL_INFO, "R: "TIME_F" ("TIME_F" per op), "
               "%" PRIu64 " KiB, %" PRIu64 " KiB/s\n",
               TIME_P(cwi->cwi_time[CR_READ]),
               TIME_P(cwi->cwi_time[CR_READ] /
                      cwi->cwi_ops_done[CR_READ]), read / 1024,
               bw(read, cwi->cwi_time[CR_READ]) / 1024);
}

void m0_op_run(struct workload *w, struct workload_task *task,
               const struct workload_op *op)
{
        struct m0_task_io *cti = task->u.m0_task;
        int                rc;

        if (cti == NULL)
                return;

        rc = cr_adopt_motr_thread(cti);
        if (rc < 0)
                cr_log(CLL_ERROR, "Motr adoption failed with rc=%d", rc);

        if (cti->cti_cwi->cwi_share_object)
                cr_task_share_execute(cti);
        else {
                rc = cr_task_execute(cti);
                if (rc < 0)
                        cr_log(CLL_ERROR, "task execution failed with rc=%d",
                               rc);
        }
        cr_release_motr_thread(cti);
}

/*
 *  Local variables:
 *  c-indentation-style: "K&R"
 *  c-basic-offset: 8
 *  tab-width: 8
 *  fill-column: 80
 *  scroll-step: 1
 *  End:
 */