Motr  M0
io_req_fop.c
Go to the documentation of this file.
1 /* -*- C -*- */
2 /*
3  * Copyright (c) 2020 Seagate Technology LLC and/or its Affiliates
4  *
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  *
17  * For any questions about this software or licensing,
18  * please email opensource@seagate.com or cortx-questions@seagate.com.
19  *
20  */
21 
22 #include "motr/client.h"
23 #include "motr/client_internal.h"
24 #include "motr/addb.h"
25 #include "motr/pg.h"
26 #include "motr/io.h"
27 #include "motr/sync.h"
28 
29 #include "lib/memory.h" /* m0_alloc, m0_free */
30 #include "lib/errno.h" /* ENOMEM */
31 #include "lib/atomic.h" /* m0_atomic_{inc,dec,get} */
32 #include "lib/cksum_utils.h"
33 #include "rpc/rpc_machine_internal.h" /* m0_rpc_machine_lock */
34 #include "fop/fom_generic.h" /* m0_rpc_item_generic_reply_rc */
35 #include "cob/cob.h" /* M0_COB_IO M0_COB_PVER M0_COB_NLINK */
36 #include "rpc/addb2.h"
37 #include "rpc/item.h"
38 #include "rpc/rpc_internal.h"
39 
40 #define M0_TRACE_SUBSYSTEM M0_TRACE_SUBSYS_CLIENT
41 #include "lib/trace.h" /* M0_LOG */
42 
43 /*
44  * No initialisation for iofop_bobtype as it isn't const,
45  * iofop_bobtype is initialised as a list type.
46  */
/* Branded-object type for ioreq_fop: lets bob_of() recover an ioreq_fop
 * from an embedded member with magic-number validation. */
48 M0_BOB_DEFINE(M0_INTERNAL, &iofop_bobtype, ioreq_fop);
49 
/*
 * Typed list of ioreq_fop, linked through irf_link and guarded by
 * irf_magic.
 * NOTE(review): this listing elides line 56, which carries the closing
 * magic arguments (M0_IOFOP_MAGIC, M0_TIOREQ_MAGIC per the symbol
 * cross-reference) — confirm against the full source before editing.
 */
54 M0_TL_DESCR_DEFINE(iofops, "List of IO fops", M0_INTERNAL,
55  struct ioreq_fop, irf_link, irf_magic,
57 M0_TL_DEFINE(iofops, M0_INTERNAL, struct ioreq_fop);
58 
62 M0_INTERNAL bool ioreq_fop_invariant(const struct ioreq_fop *fop)
63 {
64  return M0_RC(fop != NULL &&
65  _0C(ioreq_fop_bob_check(fop)) &&
66  _0C(fop->irf_tioreq != NULL) &&
67  _0C(fop->irf_ast.sa_cb != NULL) &&
68  _0C(fop->irf_ast.sa_mach != NULL));
69 }
70 
/*
 * Decides whether the IO request state machine may complete: all IO fop
 * replies (and bulk transfers) must be accounted for (nxr_iofop_nr == 0)
 * and, when writing/truncating in oostore mode, all cob-creation fop
 * replies as well (nxr_ccfop_nr == 0).
 * NOTE(review): this listing elides lines 75, 78 and 83 (including the
 * assignment of 'instance' and part of the return condition) — consult
 * the full source before modifying.
 */
71 static bool should_ioreq_sm_complete(struct m0_op_io *ioo)
72 {
73  struct m0_client *instance;
74 
76  /* Ensure that replies for iofops and bulk data have been received. */
77  return m0_atomic64_get(&ioo->ioo_nwxfer.nxr_iofop_nr) == 0 &&
79  /*
80  * In case of writing in oostore mode, ensure that all
81  * cob creation fops (if any) have received reply.
82  */
84  (M0_IN(ioreq_sm_state(ioo), (IRS_WRITING, IRS_TRUNCATE)))) ?
85  m0_atomic64_get(&ioo->ioo_nwxfer.nxr_ccfop_nr) == 0 : true);
86 }
87 
87 
88 M0_INTERNAL struct m0_file *m0_client_fop_to_file(struct m0_fop *fop)
89 {
90  struct m0_op_io *ioo;
91  struct nw_xfer_request *xfer;
92  struct m0_io_fop *iofop;
93  struct ioreq_fop *irfop;
94 
95  iofop = M0_AMB(iofop, fop, if_fop);
96  irfop = bob_of(iofop, struct ioreq_fop, irf_iofop, &iofop_bobtype);
97  xfer = irfop->irf_tioreq->ti_nwxfer;
98 
99  ioo = bob_of(xfer, struct m0_op_io, ioo_nwxfer, &ioo_bobtype);
100 
101  return &ioo->ioo_flock;
102 }
103 
/*
 * Copies per-unit data-integrity (DI) checksums received in a read reply
 * into the application-supplied attribute buffer of the IO operation.
 *
 * rep_ivec describes the extents covered by the reply; ti holds the
 * target's cob-offset and global-offset index vectors, which are walked
 * in lock-step (one unit at a time) to locate, for every reply unit, the
 * destination checksum slot in ioo->ioo_attr.
 *
 * No-op when DI is disabled for the object or the reply carries no
 * checksum bytes (buf->b_nob == 0).
 */
134 static void application_attribute_copy(struct m0_indexvec *rep_ivec,
135  struct target_ioreq *ti,
136  struct m0_op_io *ioo,
137  struct m0_buf *buf)
138 {
139  uint32_t unit_size;
140  uint32_t off;
141  uint32_t cs_sz;
142  m0_bindex_t rep_index;
143  m0_bindex_t ti_cob_index;
144  m0_bindex_t ti_goff_index;
145  struct m0_ivec_cursor rep_cursor;
146  struct m0_ivec_cursor ti_cob_cursor;
147  struct m0_ivec_cursor ti_goff_cursor;
148  struct m0_indexvec *ti_ivec = &ti->ti_ivec;
149  struct m0_indexvec *ti_goff_ivec = &ti->ti_goff_ivec;
150  void *dst;
151  void *src;
152 
153  if (!m0__obj_is_di_enabled(ioo)) {
154  return;
155  }
156  src = buf->b_addr;
157 
158  if (!buf->b_nob) {
159  /* Return as no checksum is present */
160  return;
161  }
162 
/* NOTE(review): line 163 is elided in this listing; it presumably
 * initialises unit_size (e.g. from the object's layout id) — as shown,
 * unit_size would be read uninitialised. Confirm against full source. */
164  cs_sz = ioo->ioo_attr.ov_vec.v_count[0];
165 
166  m0_ivec_cursor_init(&rep_cursor, rep_ivec);
167  m0_ivec_cursor_init(&ti_cob_cursor, ti_ivec);
168  m0_ivec_cursor_init(&ti_goff_cursor, ti_goff_ivec);
169 
170  rep_index = m0_ivec_cursor_index(&rep_cursor);
171  ti_cob_index = m0_ivec_cursor_index(&ti_cob_cursor);
172  ti_goff_index = m0_ivec_cursor_index(&ti_goff_cursor);
173 
174  /* Move rep_cursor on unit boundary */
175  off = rep_index % unit_size;
176  if (off) {
177  if (!m0_ivec_cursor_move(&rep_cursor, unit_size - off))
178  rep_index = m0_ivec_cursor_index(&rep_cursor);
179  else
180  return;
181  }
/* Align the target cob and global-offset cursors to a unit boundary
 * as well, so all three cursors advance in whole units below. */
182  off = ti_cob_index % unit_size;
183  if (off != 0) {
184  if (!m0_ivec_cursor_move(&ti_cob_cursor, unit_size - off)) {
185  ti_cob_index = m0_ivec_cursor_index(&ti_cob_cursor);
186  }
187  }
188  off = ti_goff_index % unit_size;
189  if (off != 0) {
190  if (!m0_ivec_cursor_move(&ti_goff_cursor, unit_size - off)) {
191  ti_goff_index = m0_ivec_cursor_index(&ti_goff_cursor);
192  }
193  }
194  M0_ASSERT(ti_cob_index <= rep_index);
195 
/* Walk the reply one unit at a time, advancing the target cursors
 * until they catch up with the reply offset, then copy that unit's
 * checksum into the application attribute buffer. */
208  do {
209  rep_index = m0_ivec_cursor_index(&rep_cursor);
210  while (ti_cob_index != rep_index) {
211  if (m0_ivec_cursor_move(&ti_cob_cursor, unit_size) ||
212  m0_ivec_cursor_move(&ti_goff_cursor, unit_size)) {
213  M0_ASSERT(0);
214  }
215  ti_cob_index = m0_ivec_cursor_index(&ti_cob_cursor);
216  ti_goff_index = m0_ivec_cursor_index(&ti_goff_cursor);
217  }
218 
219  /* GOB offset should be in span of application provided GOB extent */
220  M0_ASSERT(ti_goff_index <=
221  (ioo->ioo_ext.iv_index[ioo->ioo_ext.iv_vec.v_nr-1] +
222  ioo->ioo_ext.iv_vec.v_count[ioo->ioo_ext.iv_vec.v_nr-1]));
223 
/* NOTE(review): line 224 is elided; it presumably assigns dst via a
 * checksum-address helper over ioo_attr — confirm in full source. */
225  ti_goff_index,
226  &ioo->ioo_ext,
227  unit_size, cs_sz);
228  M0_ASSERT(dst != NULL);
229  memcpy(dst, src, cs_sz);
230  src = (char *)src + cs_sz;
231 
232  /* Source is m0_buf and we have to copy all the checksum one at a time */
233  M0_ASSERT(src <= (buf->b_addr + buf->b_nob));
234 
235  } while (!m0_ivec_cursor_move(&rep_cursor, unit_size));
236 }
237 
237 
/*
 * AST callback, run on the IO operation's sm group, that processes the
 * reply (or failure) of a single IO fop.
 *
 * Responsibilities visible in this listing:
 *  - recover ioreq_fop/target_ioreq/m0_op_io from the AST via bob_of();
 *  - collect the rc from the rpc item, its generic reply and the IO
 *    reply, stashing it in irfop->irf_reply_rc;
 *  - for successful READ replies with DI enabled, copy returned
 *    checksums to the application via application_attribute_copy();
 *  - record SNS-repair state and byte counters, propagate rc into
 *    target_ioreq and nw_xfer_request;
 *  - drop the fop/reply references and, if this was the last pending
 *    fop (should_ioreq_sm_complete()), advance the op state machine and
 *    post the "executed" AST.
 *
 * NOTE(review): numerous lines are elided in this listing (278, 282-283,
 * 301-302, 313-314, 333-334, 341, 361, 363, 365, 380, 384, 388-389, 392,
 * 400-401 by the line-number gaps) — among them the 'instance'
 * assignment, state lists, read-bulk accounting and log statements. Do
 * not edit without the full source.
 */
246 static void io_bottom_half(struct m0_sm_group *grp, struct m0_sm_ast *ast)
247 {
248  int rc;
249  uint64_t actual_bytes = 0;
250  struct m0_client *instance;
251  struct m0_op *op;
252  struct m0_op_io *ioo;
253  struct nw_xfer_request *xfer;
254  struct m0_io_fop *iofop;
255  struct ioreq_fop *irfop;
256  struct target_ioreq *tioreq;
257  struct m0_fop *reply_fop = NULL;
258  struct m0_rpc_item *req_item;
259  struct m0_rpc_item *reply_item;
260  struct m0_rpc_bulk *rbulk;
261  struct m0_fop_cob_rw_reply *rw_reply;
262  struct m0_indexvec rep_attr_ivec;
263  struct m0_fop_generic_reply *gen_rep;
264  struct m0_fop_cob_rw *rwfop;
265 
266  M0_ENTRY("sm_group %p sm_ast %p", grp, ast);
267 
268  M0_PRE(grp != NULL);
269  M0_PRE(ast != NULL);
270 
271  irfop = bob_of(ast, struct ioreq_fop, irf_ast, &iofop_bobtype);
272  iofop = &irfop->irf_iofop;
273  tioreq = irfop->irf_tioreq;
274  xfer = tioreq->ti_nwxfer;
275 
276  ioo = bob_of(xfer, struct m0_op_io, ioo_nwxfer, &ioo_bobtype);
277  op = &ioo->ioo_oo.oo_oc.oc_op;
279  M0_PRE(instance != NULL);
280  M0_PRE(M0_IN(irfop->irf_pattr, (PA_DATA, PA_PARITY)));
281  M0_PRE(M0_IN(ioreq_sm_state(ioo),
284  IRS_FAILED)));
285 
286  /* Check errors in rpc items of an IO reqest and its reply. */
287  rbulk = &iofop->if_rbulk;
288  req_item = &iofop->if_fop.f_item;
289  rwfop = io_rw_get(&iofop->if_fop);
290  reply_item = req_item->ri_reply;
291  rc = req_item->ri_error;
292  if (reply_item != NULL) {
293  reply_fop = m0_rpc_item_to_fop(reply_item);
294  rc = rc?: m0_rpc_item_generic_reply_rc(reply_item);
295  }
296  if (rc < 0 || reply_item == NULL) {
297  M0_ASSERT(ergo(reply_item == NULL, rc != 0));
298  M0_LOG(M0_ERROR, "[%p] rpc item %p rc=%d", ioo, req_item, rc);
299  goto ref_dec;
300  }
303 
304  /* Check errors in an IO request's reply. */
305  gen_rep = m0_fop_data(m0_rpc_item_to_fop(reply_item));
306  rw_reply = io_rw_rep_get(reply_fop);
307 
308  /*
309  * Copy attributes to client if reply received from read operation
310  * Skipping attribute_copy() if cksum validation is not allowed.
311  */
312  if (m0_is_read_rep(reply_fop) && op->op_code == M0_OC_READ &&
315  rwfop->crw_ivec.ci_nr, 0,
316  &rep_attr_ivec);
317 
318  application_attribute_copy(&rep_attr_ivec, tioreq, ioo,
319  &rw_reply->rwr_di_data_cksum);
320 
321  m0_indexvec_free(&rep_attr_ivec);
322  }
323  ioo->ioo_sns_state = rw_reply->rwr_repair_done;
324  M0_LOG(M0_DEBUG, "[%p] item %p[%u], reply received = %d, "
325  "sns state = %d", ioo, req_item,
326  req_item->ri_type->rit_opcode, rc, ioo->ioo_sns_state);
327  actual_bytes = rw_reply->rwr_count;
328  rc = gen_rep->gr_rc;
329  rc = rc ?: rw_reply->rwr_rc;
330  irfop->irf_reply_rc = rc;
331 
332  /* Update pending transaction number */
335  &ioo->ioo_obj->ob_entity, op, &rw_reply->rwr_mod_rep.fmr_remid);
336 
337 ref_dec:
338  /* For whatever reason, io didn't complete successfully.
339  * Reduce expected read bulk count */
340  if (rc < 0 && m0_is_read_fop(&iofop->if_fop))
342  m0_rpc_bulk_buf_length(rbulk));
343 
344  /* Propogate the error up as many stashed-rc layers as we can */
345  if (tioreq->ti_rc == 0)
346  tioreq->ti_rc = rc;
347 
348 #define LOGMSG "ioo=%p off=%llu from=%s rc=%d ti_rc=%d @"FID_F, ioo,\
349  (unsigned long long)tioreq->ti_goff,\
350  m0_rpc_conn_addr(tioreq->ti_session->s_conn),\
351  rc, tioreq->ti_rc, FID_P(&tioreq->ti_fid)
352  /*
353  * Note: this is not necessary mean that this is 'real' error in the
354  * case of CROW is used (object is only created when it is first
355  * write)
356  */
357  if (xfer->nxr_rc == 0 && rc != 0) {
358  xfer->nxr_rc = rc;
359 
360  if (rc == -ENOENT) /* normal for CROW */
362  else
364  } else {
366  }
367 #undef LOGMSG
368 
369  if (irfop->irf_pattr == PA_DATA)
370  tioreq->ti_databytes += rbulk->rb_bytes;
371  else
372  tioreq->ti_parbytes += rbulk->rb_bytes;
373 
374  M0_LOG(M0_INFO, "ioo=%p fop=%p expected=%llu returned=%llu rc=%d",
375  ioo, &iofop->if_fop, (unsigned long long)rbulk->rb_bytes,
376  (unsigned long long)actual_bytes, rc);
377 
378  /* Drops reference on reply fop. */
379  m0_fop_put0_lock(&iofop->if_fop);
381  m0_atomic64_dec(&instance->m0c_pending_io_nr);
382 
383  m0_mutex_lock(&xfer->nxr_lock);
385  if (should_ioreq_sm_complete(ioo)) {
386  m0_sm_state_set(&ioo->ioo_sm,
387  (M0_IN(ioreq_sm_state(ioo),
390 
391  /* post an ast to run iosm_handle_executed */
393  m0_sm_ast_post(ioo->ioo_oo.oo_sm_grp, &ioo->ioo_ast);
394  }
395  m0_mutex_unlock(&xfer->nxr_lock);
396 
397  M0_LOG(M0_DEBUG, "[%p] irfop=%p bulk=%p "FID_F
398  " Pending fops = %"PRIu64" bulk=%"PRIu64,
399  ioo, irfop, rbulk, FID_P(&tioreq->ti_fid),
402 
403  M0_LEAVE();
404 }
405 
/*
 * RPC item callback invoked when a reply to an IO fop arrives (or the
 * item fails). It does no reply processing itself: it pins the request
 * fop (and, per the comment, the reply fop) and posts irf_ast so that
 * io_bottom_half() runs under the operation's sm group.
 *
 * NOTE(review): lines 424, 428 and 440-441 are elided in this listing
 * (presumably 'fop = m0_rpc_item_to_fop(item);', the bob_of() tail, and
 * the m0_fop_get() on the reply fop) — confirm against full source.
 */
413 static void io_rpc_item_cb(struct m0_rpc_item *item)
414 {
415  struct m0_fop *fop;
416  struct m0_fop *rep_fop;
417  struct m0_io_fop *iofop;
418  struct ioreq_fop *reqfop;
419  struct m0_op_io *ioo;
420 
421  M0_PRE(item != NULL);
422  M0_ENTRY("rpc_item %p", item);
423 
425  iofop = M0_AMB(iofop, fop, if_fop);
426  reqfop = bob_of(iofop, struct ioreq_fop, irf_iofop, &iofop_bobtype);
427  ioo = bob_of(reqfop->irf_tioreq->ti_nwxfer, struct m0_op_io,
429  /*
430  * NOTE: RPC errors are handled in io_bottom_half(), which is called
431  * by reqfop->irf_ast.
432  */
433 
434  /*
435  * Acquires a reference on IO reply fop since its contents
436  * are needed for policy decisions in io_bottom_half().
437  * io_bottom_half() takes care of releasing the reference.
438  */
439  if (item->ri_reply != NULL) {
442  }
443  M0_LOG(M0_INFO, "ioreq_fop %p, target_ioreq %p io_request %p",
444  reqfop, reqfop->irf_tioreq, ioo);
445 
446  m0_fop_get(&reqfop->irf_iofop.if_fop);
447  m0_sm_ast_post(ioo->ioo_sm.sm_grp, &reqfop->irf_ast);
448 
449  M0_LEAVE();
450 }
451 
/*
 * Bottom half for cob create/truncate fop replies (presumably
 * cc_bottom_half — the signature line 452 is elided from this listing).
 * Run as an AST on the op's sm group with the target_ioreq stashed in
 * ast->sa_datum; it extracts the rc from the rpc item and reply
 * (treating -EEXIST on create as success, since a previous IO may have
 * created the cob via CROW), updates the pending transaction id,
 * propagates errors into ti/xfer/ioo, drops the fop and reply
 * references, and completes the op state machine when this was the last
 * outstanding fop.
 *
 * NOTE(review): lines 494-495, 512, 514, 517, 519-520 are elided
 * (remid update call, reply-fop put, state transitions) — consult the
 * full source before editing.
 */
453  struct m0_sm_ast *ast)
454 {
455  struct nw_xfer_request *xfer;
456  struct target_ioreq *ti;
457  struct cc_req_fop *cc_fop;
458  struct m0_op *op;
459  struct m0_op_io *ioo;
460  struct m0_fop_cob_op_reply *reply;
461  struct m0_fop *reply_fop = NULL;
462  struct m0_rpc_item *req_item;
463  struct m0_rpc_item *reply_item;
464  struct m0_be_tx_remid *remid = NULL;
465  int rc;
466 
467  ti = (struct target_ioreq *)ast->sa_datum;
468  ioo = bob_of(ti->ti_nwxfer, struct m0_op_io, ioo_nwxfer,
469  &ioo_bobtype);
470  op = &ioo->ioo_oo.oo_oc.oc_op;
471  xfer = ti->ti_nwxfer;
472  cc_fop = &ti->ti_cc_fop;
473  req_item = &cc_fop->crf_fop.f_item;
474  reply_item = req_item->ri_reply;
475  rc = req_item->ri_error;
476  if (reply_item != NULL) {
477  reply_fop = m0_rpc_item_to_fop(reply_item);
478  rc = rc ?: m0_rpc_item_generic_reply_rc(reply_item);
479  }
480  if (rc < 0 || reply_item == NULL) {
481  M0_ASSERT(ergo(reply_item == NULL, rc != 0));
482  goto ref_dec;
483  }
484  reply = m0_fop_data(m0_rpc_item_to_fop(reply_item));
485  /*
486  * Ignoring the case when an attempt is made to create a cob on target
487  * where previous IO had created it.
488  */
489  rc = rc ? M0_IN(reply->cor_rc, (0, -EEXIST)) ? 0 : reply->cor_rc : 0;
490 
491  remid = &reply->cor_common.cor_mod_rep.fmr_remid;
492 
493  /* Update pending transaction number */
496  &ioo->ioo_obj->ob_entity, op, remid);
497  /*
498  * @todo: in case confd is updated, a check is necessary similar to
499  * that present in m0t1fs. See
500  * m0t1fs/linux_kernel/file.c::io_bottom_half().
501  */
502 
503 ref_dec:
504  if (ti->ti_rc == 0 && rc != 0)
505  ti->ti_rc = rc;
506  if (xfer->nxr_rc == 0 && rc != 0)
507  xfer->nxr_rc = rc;
508  if (ioo->ioo_rc == 0 && rc != 0)
509  ioo->ioo_rc = rc;
510  m0_fop_put0_lock(&cc_fop->crf_fop);
511  if (reply_fop != NULL)
513  m0_mutex_lock(&xfer->nxr_lock);
515  if (should_ioreq_sm_complete(ioo)) {
516  if (ioreq_sm_state(ioo) == IRS_TRUNCATE)
518  else
521  m0_sm_ast_post(ioo->ioo_oo.oo_sm_grp, &ioo->ioo_ast);
522  }
523  m0_mutex_unlock(&xfer->nxr_lock);
524 }
525 
/*
 * RPC item callback for cob create/truncate fops (the signature line
 * 526 is elided from this listing; presumably it takes the
 * struct m0_rpc_item *item referenced below). Recovers the target_ioreq
 * through the embedded cc_req_fop, stashes it in the AST datum, pins the
 * fop (and, per the comment, its reply), and posts the AST that runs the
 * cc bottom half on the op's sm group.
 *
 * NOTE(review): lines 534, 539 and 544-545 are elided (fop recovery from
 * item, the AST callback assignment, and the reply-fop get) — confirm
 * against full source.
 */
527 {
528  struct m0_op_io *ioo;
529  struct cc_req_fop *cc_fop;
530  struct target_ioreq *ti;
531  struct m0_fop *fop;
532  struct m0_fop *rep_fop;
533 
535  cc_fop = M0_AMB(cc_fop, fop, crf_fop);
536  ti = M0_AMB(ti, cc_fop, ti_cc_fop);
537  ioo = bob_of(ti->ti_nwxfer, struct m0_op_io,
538  ioo_nwxfer, &ioo_bobtype);
540  cc_fop->crf_ast.sa_datum = (void *)ti;
541  /* Reference on fop and its reply are released in cc_bottom_half. */
542  m0_fop_get(fop);
543  if (item->ri_reply != NULL) {
546  }
547 
548  m0_sm_ast_post(ioo->ioo_oo.oo_sm_grp, &cc_fop->crf_ast);
549 }
550 
551 /*
552  * io_rpc_item_cb can not be directly invoked from io fops code since it
553  * leads to build dependency of ioservice code over kernel code (kernel client).
554  * Hence, a new m0_rpc_item_ops structure is used for fops dispatched
555  * by client io requests in all cases.
556  */
557 static const struct m0_rpc_item_ops item_ops = {
/* NOTE(review): member line 558 elided; presumably
 * .rio_replied = io_rpc_item_cb. */
559 };
560 
/* Item ops for cob create/truncate fops dispatched by client IO. */
561 static const struct m0_rpc_item_ops cc_item_ops = {
/* NOTE(review): member line 562 elided; presumably
 * .rio_replied = the cc rpc item callback above. */
563 };
564 
/*
 * AST that marks the IO op's state machine READ/WRITE-complete after a
 * passive bulk receive finishes, then posts the "executed" handler AST.
 * NOTE(review): lines 566 (signature tail), 574-576 and 578 are elided
 * in this listing (target-state list and the iro_iosm_handle_executed
 * post) — consult the full source before editing.
 */
565 static void
567 {
568  struct m0_op_io *ioo;
569 
570  ioo = bob_of(ast, struct m0_op_io, ioo_done_ast, &ioo_bobtype);
571  m0_sm_state_set(&ioo->ioo_sm,
572  (M0_IN(ioreq_sm_state(ioo),
573  (IRS_READING,
577 
579  &ioo->ioo_ast);
580 }
/*
 * Network buffer callback for passive bulk receive (READ data landing in
 * client buffers). Recovers the owning ioreq_fop/m0_op_io from the
 * buffer's rpc bulk, runs the default bulk callback, and — unless the
 * event or item reports an error — accounts the completed bulk and, if
 * this was the last outstanding transfer, posts the done-AST to advance
 * the op state machine.
 *
 * NOTE(review): lines 613-614 (m0_rpc_bulk_default_cb call area), 618,
 * 624, 629-630, 641-645 and 648 are elided in this listing (bulk-count
 * decrement, state setting, ast post) — consult the full source.
 */
587 static void client_passive_recv(const struct m0_net_buffer_event *evt)
588 {
589  struct m0_rpc_bulk *rbulk;
590  struct m0_rpc_bulk_buf *buf;
591  struct m0_net_buffer *nb;
592  struct m0_io_fop *iofop;
593  struct ioreq_fop *reqfop;
594  struct m0_op_io *ioo;
595  uint32_t req_sm_state;
596 
597  M0_ENTRY();
598 
599  M0_PRE(evt != NULL);
600  M0_PRE(evt->nbe_buffer != NULL);
601 
602  nb = evt->nbe_buffer;
603  buf = (struct m0_rpc_bulk_buf *)nb->nb_app_private;
604  rbulk = buf->bb_rbulk;
605  M0_LOG(M0_DEBUG, "PASSIVE recv, e=%p status=%d, len=%"PRIu64" rbulk=%p",
606  evt, evt->nbe_status, evt->nbe_length, rbulk);
607 
608  iofop = M0_AMB(iofop, rbulk, if_rbulk);
609  reqfop = bob_of(iofop, struct ioreq_fop, irf_iofop, &iofop_bobtype);
610  ioo = bob_of(reqfop->irf_tioreq->ti_nwxfer, struct m0_op_io,
611  ioo_nwxfer, &ioo_bobtype);
612 
613  M0_ASSERT(m0_is_read_fop(&iofop->if_fop));
615  "irfop=%p "FID_F" Pending fops = %"PRIu64"bulk = %"PRIu64,
616  reqfop, FID_P(&reqfop->irf_tioreq->ti_fid),
619 
620  /*
621  * buf will be released in this callback. But rbulk is still valid
622  * after that.
623  */
625  if ((evt->nbe_status != 0) ||
626  (iofop->if_fop.f_item.ri_error != 0))
627  return;
628 
629  /* Set io request's state*/
631 
632  req_sm_state = ioreq_sm_state(ioo);
633  if (req_sm_state != IRS_READ_COMPLETE &&
634  req_sm_state != IRS_WRITE_COMPLETE) {
635  /*
636  * It is possible that io_bottom_half() has already
637  * reduced the nxr_rdbulk_nr to 0 by this time, due to FOP
638  * receiving some error.
639  */
640 
643  if (should_ioreq_sm_complete(ioo)) {
646  }
647  }
649 
650  M0_LEAVE();
651 }
652 
/*
 * Net buffer callbacks used by client bulk transfers.
 * NOTE(review): the opening declaration line 654 and the callback member
 * lines 656-659 are elided in this listing; per the cross-reference the
 * symbol is 'const struct m0_net_buffer_callbacks client__buf_bulk_cb'
 * and it presumably wires client_passive_recv for receive events —
 * confirm against the full source.
 */
655  .nbc_cb = {
660  }
661 };
662 
/*
 * Stores bulk buffer descriptors for an IO fop and posts it to the given
 * rpc session asynchronously. An m0_rpc_post() failure is deliberately
 * swallowed (returns 0) so that subsequent fop submissions proceed and
 * the request can later enter degraded mode; only bulk-store errors are
 * propagated.
 *
 * NOTE(review): lines 681 and 683 (m0_rpc_bulk_store() call head/tail),
 * 688-690 (item session/timeout setup) and 694 (addb2 head) are elided
 * in this listing — consult the full source before editing.
 */
666 M0_INTERNAL int ioreq_fop_async_submit(struct m0_io_fop *iofop,
667  struct m0_rpc_session *session)
668 {
669  int rc;
670  struct m0_fop_cob_rw *rwfop;
671  struct m0_rpc_item *item;
672 
673  M0_ENTRY("m0_io_fop %p m0_rpc_session %p", iofop, session);
674 
675  M0_PRE(iofop != NULL);
676  M0_PRE(session != NULL);
677 
678  rwfop = io_rw_get(&iofop->if_fop);
679  M0_ASSERT(rwfop != NULL);
680 
682  rwfop->crw_desc.id_descs,
684  if (rc != 0)
685  goto out;
686 
687  item = &iofop->if_fop.f_item;
691  rc = m0_rpc_post(item);
692  M0_LOG(M0_INFO, "IO fops submitted to rpc, rc = %d", rc);
693 
695  m0_sm_id_get(&item->ri_sm));
696  /*
697  * Ignoring error from m0_rpc_post() so that the subsequent fop
698  * submission goes on. This is to ensure that the ioreq gets into dgmode
699  * subsequently without exiting from the healthy mode IO itself.
700  */
701  return M0_RC(0);
702 
703 out:
704  /*
705  * In case error is encountered either by m0_rpc_bulk_store() or
706  * queued net buffers, if any, will be deleted at io_req_fop_release.
707  */
708  return M0_RC(rc);
709 }
710 
711 /* Finds out pargrp_iomap from array of such structures in m0_op_ioo. */
712 static void ioreq_pgiomap_find(struct m0_op_io *ioo,
713  uint64_t grpid,
714  uint64_t *cursor,
715  struct pargrp_iomap **out)
716 {
717  uint64_t i;
718 
719  M0_PRE(ioo != NULL);
720  M0_PRE(out != NULL);
721  M0_PRE(cursor != NULL);
722  M0_PRE(*cursor < ioo->ioo_iomap_nr);
723  M0_ENTRY("group_id = %3"PRIu64", cursor = %3"PRIu64, grpid, *cursor);
724 
725  for (i = *cursor; i < ioo->ioo_iomap_nr; ++i)
726  if (ioo->ioo_iomaps[i]->pi_grpid == grpid) {
727  *out = ioo->ioo_iomaps[i];
728  *cursor = i;
729  break;
730  }
731 
732  M0_POST(i < ioo->ioo_iomap_nr);
733  M0_LEAVE();
734 }
735 
/*
 * Degraded-mode read handler for a failed IO fop: walks the fop's rpc
 * bulk buffers, groups their zero-vector indices by parity group, finds
 * the corresponding pargrp_iomap and hands each contiguous run of
 * indices to pi_dgmode_process() for recovery.
 *
 * Returns 0 on success or the first pi_dgmode_process() error.
 *
 * NOTE(review): lines 747 (presumably the 'index' pointer declaration),
 * 763 (seg_nr initialisation from the zero-vector) and 772 (part of a
 * log/assert statement) are elided in this listing — consult the full
 * source before editing.
 */
739 M0_INTERNAL int ioreq_fop_dgmode_read(struct ioreq_fop *irfop)
740 {
741  int rc;
742  uint32_t cnt;
743  uint32_t seg;
744  uint32_t seg_nr;
745  uint64_t grpid;
746  uint64_t pgcur = 0;
748  struct m0_op_io *ioo;
749  struct m0_rpc_bulk *rbulk;
750  struct pargrp_iomap *map = NULL;
751  struct m0_rpc_bulk_buf *rbuf;
752 
753  M0_PRE(irfop != NULL);
754  M0_ENTRY("target fid = "FID_F, FID_P(&irfop->irf_tioreq->ti_fid));
755 
756  ioo = bob_of(irfop->irf_tioreq->ti_nwxfer, struct m0_op_io,
757  ioo_nwxfer, &ioo_bobtype);
758  rbulk = &irfop->irf_iofop.if_rbulk;
759 
760  m0_tl_for (rpcbulk, &rbulk->rb_buflist, rbuf) {
761 
762  index = rbuf->bb_zerovec.z_index;
764 
765  for (seg = 0; seg < seg_nr; ) {
766 
/* Count how many consecutive segments fall into the same parity
 * group; indices are asserted to be strictly increasing. */
767  grpid = pargrp_id_find(index[seg], ioo, irfop);
768  for (cnt = 1, ++seg; seg < seg_nr; ++seg) {
769 
770  M0_ASSERT(ergo(seg > 0, index[seg] >
771  index[seg - 1]));
773  (void *)index[seg]));
774 
775  if (grpid ==
776  pargrp_id_find(index[seg], ioo, irfop))
777  ++cnt;
778  else
779  break;
780  }
781 
782  ioreq_pgiomap_find(ioo, grpid, &pgcur, &map);
783  M0_ASSERT(map != NULL);
784  rc = map->pi_ops->pi_dgmode_process(map,
785  irfop->irf_tioreq, &index[seg - cnt],
786  cnt);
787  if (rc != 0)
788  return M0_ERR(rc);
789  }
790  } m0_tl_endfor;
791  return M0_RC(0);
792 }
793 
794 static void ioreq_cc_fop_release(struct m0_ref *ref)
795 {
796  struct m0_fop *fop = M0_AMB(fop, ref, f_ref);
797 
798  M0_ENTRY("fop: %p %s", fop, m0_fop_name(fop));
799  m0_fop_fini(fop);
800  /* no need to free the memory, because it is embedded into ti */
801  M0_LEAVE();
802 }
803 
/*
 * Initialises the cob create/truncate fop embedded in a target_ioreq and
 * fills its common cob fields (gob/cob fids, pool version, cob index).
 * For create, sets CROW flag and body attributes (pver, nlink, layout
 * id); for truncate, builds the truncation index vector and size.
 * Returns 0 (including the early-out when a truncate has an empty
 * vector) or an initialisation error.
 *
 * NOTE(review): lines 815 (fop-type selection tail), 821-822
 * (m0_fop_init/rc), 828-833, 855-856 (wire-ivec build), 865 and 869 are
 * elided in this listing — consult the full source before editing.
 */
804 M0_INTERNAL int ioreq_cc_fop_init(struct target_ioreq *ti)
805 {
806  struct m0_fop *fop;
807  struct m0_fop_cob_common *common;
808  struct m0_op_io *ioo;
809  struct m0_obj_attr *io_attr;
810  int rc;
811  struct m0_fop_type *fopt;
812  struct m0_rpc_item *item;
813 
814  fopt = ti->ti_req_type == TI_COB_TRUNCATE ?
816  if (ti->ti_req_type == TI_COB_TRUNCATE &&
817  ti->ti_trunc_ivec.iv_vec.v_nr == 0)
818  return 0;
819  fop = &ti->ti_cc_fop.crf_fop;
820  M0_LOG(M0_DEBUG, "fop=%p", fop);
823  if (rc != 0) {
824  m0_fop_fini(fop);
825  goto out;
826  }
827  ti->ti_cc_fop_inited = true;
829 
834  ioo = bob_of(ti->ti_nwxfer, struct m0_op_io, ioo_nwxfer,
835  &ioo_bobtype);
836  common = m0_cobfop_common_get(fop);
837  common->c_gobfid = ioo->ioo_oo.oo_fid;
838  common->c_cobfid = ti->ti_fid;
839  common->c_pver = ioo->ioo_pver;
840  common->c_cob_type = M0_COB_IO;
841  common->c_cob_idx = m0_fid_cob_device_id(&ti->ti_fid);
842  if (ti->ti_req_type == TI_COB_CREATE) {
843  common->c_flags |= M0_IO_FLAG_CROW;
844  common->c_body.b_pver = ioo->ioo_pver;
845  common->c_body.b_nlink = 1;
846  common->c_body.b_valid |= M0_COB_PVER;
847  common->c_body.b_valid |= M0_COB_NLINK;
848  common->c_body.b_valid |= M0_COB_LID;
849  io_attr = m0_io_attr(ioo);
850  common->c_body.b_lid = io_attr->oa_layout_id;
851  } else if (ti->ti_req_type == TI_COB_TRUNCATE) {
852  struct m0_fop_cob_truncate *trunc = m0_fop_data(fop);
853  uint32_t diff;
854 
857  ti->ti_trunc_ivec.iv_vec.v_nr, 0, &trunc->ct_io_ivec);
858  if (rc != 0)
859  goto out;
860 
861  trunc->ct_size = m0_io_count(&trunc->ct_io_ivec);
862  M0_LOG(M0_DEBUG, "trunc count%"PRIu64" diff:%d\n",
863  trunc->ct_size, diff);
864  }
866 
867  item = &fop->f_item;
868  M0_LOG(M0_DEBUG, "item="ITEM_FMT" osr_xid=%"PRIu64,
870 out:
871  return M0_RC(rc);
872 }
873 
/*
 * Reference-count release callback for an IO request fop.
 *
 * If the rpc bulk still holds net buffers, deletes unqueued ones, issues
 * deletion for queued ones and waits (dropping the rpc machine lock to
 * avoid deadlock with the transfer-machine worker) until their callbacks
 * have run; then finalizes the ioreq_fop, its bob and the io fop, and
 * frees the ioreq_fop allocation.
 *
 * NOTE(review): lines 914 (clink init), 928/930/932 (read-bulk
 * accounting and iofop-count decrement for the no-reply path), 946
 * (m0_chan_wait), 949-950 and 955 are elided in this listing — consult
 * the full source before editing.
 */
880 static void ioreq_fop_release(struct m0_ref *ref)
881 {
882  struct m0_fop *fop;
883  struct m0_io_fop *iofop;
884  struct ioreq_fop *reqfop;
885  struct m0_fop_cob_rw *rwfop;
886  struct m0_rpc_bulk *rbulk;
887  struct nw_xfer_request *xfer;
888  struct m0_rpc_machine *rmach;
889  struct m0_rpc_item *item;
890 
891  M0_ENTRY("ref %p", ref);
892  M0_PRE(ref != NULL);
893 
894  fop = M0_AMB(fop, ref, f_ref);
895  rmach = m0_fop_rpc_machine(fop);
896  iofop = M0_AMB(iofop, fop, if_fop);
897  reqfop = bob_of(iofop, struct ioreq_fop, irf_iofop, &iofop_bobtype);
898  rbulk = &iofop->if_rbulk;
899  xfer = reqfop->irf_tioreq->ti_nwxfer;
900  item = &fop->f_item;
901 
902  /*
903  * Release the net buffers if rpc bulk object is still dirty.
904  * And wait on channel till all net buffers are deleted from
905  * transfer machine.
906  */
907  m0_mutex_lock(&xfer->nxr_lock);
908  m0_mutex_lock(&rbulk->rb_mutex);
909  if (!m0_tlist_is_empty(&rpcbulk_tl, &rbulk->rb_buflist)) {
910  struct m0_clink clink;
911  size_t buf_nr;
912  size_t non_queued_buf_nr;
913 
915  m0_clink_add(&rbulk->rb_chan, &clink);
916  buf_nr = rpcbulk_tlist_length(&rbulk->rb_buflist);
917  non_queued_buf_nr = m0_rpc_bulk_store_del_unqueued(rbulk);
918  m0_mutex_unlock(&rbulk->rb_mutex);
919 
920  m0_rpc_bulk_store_del(rbulk);
921  M0_LOG(M0_DEBUG, "fop %p, %p[%u], bulk %p, buf_nr %llu, "
922  "non_queued_buf_nr %llu", &iofop->if_fop, item,
923  item->ri_type->rit_opcode, rbulk,
924  (unsigned long long)buf_nr,
925  (unsigned long long)non_queued_buf_nr);
926 
927  if (m0_is_read_fop(&iofop->if_fop))
929  non_queued_buf_nr);
931  /* rio_replied() is not invoked for this item. */
933  m0_mutex_unlock(&xfer->nxr_lock);
934 
935  /*
936  * If there were some queued net bufs which had to be deleted,
937  * then it is required to wait for their callbacks.
938  */
939  if (buf_nr > non_queued_buf_nr) {
940  /*
941  * rpc_machine_lock may be needed from nlx_tm_ev_worker
942  * thread, which is going to wake us up. So we should
943  * release it to avoid deadlock.
944  */
945  m0_rpc_machine_unlock(rmach);
947  m0_rpc_machine_lock(rmach);
948  }
951  } else {
952  m0_mutex_unlock(&rbulk->rb_mutex);
953  m0_mutex_unlock(&xfer->nxr_lock);
954  }
956 
957  rwfop = io_rw_get(&iofop->if_fop);
958  M0_ASSERT(rwfop != NULL);
959  ioreq_fop_fini(reqfop);
960  /* see ioreq_fop_fini(). */
961  ioreq_fop_bob_fini(reqfop);
962  m0_io_fop_fini(iofop);
963  m0_free(reqfop);
964 
965  M0_LEAVE();
966 }
967 
/*
 * Initialises an ioreq_fop for the given target request and page
 * attribute: sets up bob/list links, wires io_bottom_half as the reply
 * AST on the op's state machine, initialises the underlying io fop with
 * a fop type chosen by the request state, clears the CROW flag for READ
 * ops (so ioservice returns -ENOENT for non-existing objects), and
 * installs the client item_ops.
 *
 * Returns 0 on success or m0_io_fop_init()'s error.
 *
 * NOTE(review): lines 989-990 (allowed-state list), 1001-1002 (fop-type
 * selection tail), 1004 (m0_io_fop_init tail, presumably passing
 * ioreq_fop_release) and 1023 (postcondition) are elided in this
 * listing — consult the full source before editing.
 */
971 M0_INTERNAL int ioreq_fop_init(struct ioreq_fop *fop,
972  struct target_ioreq *ti,
973  enum page_attr pattr)
974 {
975  int rc;
976  struct m0_fop_type *fop_type;
977  struct m0_op_io *ioo;
978  struct m0_fop_cob_rw *rwfop;
979 
980  M0_ENTRY("ioreq_fop %p, target_ioreq %p", fop, ti);
981 
982  M0_PRE(fop != NULL);
983  M0_PRE(ti != NULL);
984  M0_PRE(M0_IN(pattr, (PA_DATA, PA_PARITY)));
985 
986  ioo = bob_of(ti->ti_nwxfer, struct m0_op_io, ioo_nwxfer,
987  &ioo_bobtype);
988  M0_ASSERT(M0_IN(ioreq_sm_state(ioo),
991 
992  ioreq_fop_bob_init(fop);
993  iofops_tlink_init(fop);
994  fop->irf_pattr = pattr;
995  fop->irf_tioreq = ti;
996  fop->irf_reply_rc = 0;
997  fop->irf_ast.sa_cb = io_bottom_half;
998  fop->irf_ast.sa_mach = &ioo->ioo_sm;
999 
1000  fop_type = M0_IN(ioreq_sm_state(ioo),
1003  rc = m0_io_fop_init(&fop->irf_iofop, &ioo->ioo_oo.oo_fid,
1005  if (rc == 0) {
1006  /*
1007  * Currently m0_io_fop_init sets CROW flag for a READ op.
1008  * Diable the flag to force ioservice to return -ENOENT for
1009  * non-existing objects. (Temporary solution)
1010  */
1011  rwfop = io_rw_get(&fop->irf_iofop.if_fop);
1012  if (ioo->ioo_oo.oo_oc.oc_op.op_code == M0_OC_READ) {
1013  rwfop->crw_flags &= ~M0_IO_FLAG_CROW;
1014  }
1015 
1016  /*
1017  * Changes ri_ops of rpc item so as to execute client's own
1018  * callback on receiving a reply.
1019  */
1020  fop->irf_iofop.if_fop.f_item.ri_ops = &item_ops;
1021  }
1022 
1024  return M0_RC(rc);
1025 }
1026 
/*
 * Finalises an ioreq_fop's list linkage and clears its AST/target
 * pointers. Deliberately does NOT finalize the bob or the io fop: the
 * io fop is finalized by the rpc layer via rio_free()/m0_io_item_free(),
 * and the bob magic is kept so bob_of() lookups remain checkable until
 * ioreq_fop_release() runs.
 *
 * NOTE(review): line 1034 is elided in this listing (presumably the
 * ioreq_fop_invariant() precondition) — confirm against full source.
 */
1030 M0_INTERNAL void ioreq_fop_fini(struct ioreq_fop *fop)
1031 {
1032  M0_ENTRY("ioreq_fop %p", fop);
1033 
1035 
1036  /*
1037  * IO fop is finalized (m0_io_fop_fini()) through rpc sessions code
1038  * using m0_rpc_item::m0_rpc_item_ops::rio_free().
1039  * see m0_io_item_free().
1040  */
1041 
1042  iofops_tlink_fini(fop);
1043 
1044  /*
1045  * ioreq_bob_fini() is not done here so that struct ioreq_fop
1046  * can be retrieved from struct m0_rpc_item using bob_of() and
1047  * magic numbers can be checked.
1048  */
1049 
1050  fop->irf_tioreq = NULL;
1051  fop->irf_ast.sa_cb = NULL;
1052  fop->irf_ast.sa_mach = NULL;
1053 
1054  M0_LEAVE();
1055 }
1056 
1057 #undef M0_TRACE_SUBSYSTEM
1058 
1059 /*
1060  * Local variables:
1061  * c-indentation-style: "K&R"
1062 
1063  * c-basic-offset: 8
1064  * tab-width: 8
1065  * fill-column: 80
1066  * scroll-step: 1
1067  * End:
1068  */
1069 /*
1070  * vim: tabstop=8 shiftwidth=8 noexpandtab textwidth=80 nowrap
1071  */
struct m0_file ioo_flock
static void m0_atomic64_inc(struct m0_atomic64 *a)
uint32_t b_nlink
Definition: md_fops.h:76
M0_INTERNAL void m0_ivec_cursor_init(struct m0_ivec_cursor *cur, const struct m0_indexvec *ivec)
Definition: vec.c:707
M0_INTERNAL int m0_rpc_post(struct m0_rpc_item *item)
Definition: rpc.c:63
uint32_t rit_opcode
Definition: item.h:474
M0_INTERNAL void m0_chan_wait(struct m0_clink *link)
Definition: chan.c:336
uint64_t c_flags
Definition: io_fops.h:475
m0_time_t ri_resend_interval
Definition: item.h:144
uint64_t rwr_count
Definition: io_fops.h:322
static void io_bottom_half(struct m0_sm_group *grp, struct m0_sm_ast *ast)
Definition: io_req_fop.c:246
#define M0_PRE(cond)
static void application_attribute_copy(struct m0_indexvec *rep_ivec, struct target_ioreq *ti, struct m0_op_io *ioo, struct m0_buf *buf)
Definition: io_req_fop.c:134
M0_INTERNAL void m0_mutex_unlock(struct m0_mutex *mutex)
Definition: mutex.c:66
M0_INTERNAL struct m0_fop_cob_common * m0_cobfop_common_get(struct m0_fop *fop)
Definition: io_fops.c:990
M0_INTERNAL int ioreq_fop_dgmode_read(struct ioreq_fop *irfop)
Definition: io_req_fop.c:739
struct m0_fop crf_fop
static uint32_t seg_nr
Definition: net.c:119
static int(* diff[M0_PARITY_CAL_ALGO_NR])(struct m0_parity_math *math, struct m0_buf *old, struct m0_buf *new, struct m0_buf *parity, uint32_t index)
Definition: parity_math.c:290
uint32_t b_valid
Definition: md_fops.h:71
int32_t gr_rc
Definition: wire.h:63
#define NULL
Definition: misc.h:38
M0_INTERNAL void m0_clink_init(struct m0_clink *link, m0_chan_cb_t cb)
Definition: chan.c:201
static struct m0_bufvec dst
Definition: xform.c:61
map
Definition: processor.c:112
struct m0_atomic64 nxr_rdbulk_nr
M0_INTERNAL bool m0__obj_is_di_enabled(struct m0_op_io *ioo)
Definition: io.c:660
M0_INTERNAL int m0_rpc_bulk_store(struct m0_rpc_bulk *rbulk, const struct m0_rpc_conn *conn, struct m0_net_buf_desc_data *to_desc, const struct m0_net_buffer_callbacks *bulk_cb)
Definition: bulk.c:520
M0_INTERNAL void m0_clink_del_lock(struct m0_clink *link)
Definition: chan.c:293
m0_bindex_t * z_index
Definition: vec.h:516
#define ergo(a, b)
Definition: misc.h:293
uint32_t rwr_repair_done
Definition: io_fops.h:331
#define LOGMSG
void(* sa_cb)(struct m0_sm_group *grp, struct m0_sm_ast *)
Definition: sm.h:506
bool m0_rpc_item_is_generic_reply_fop(const struct m0_rpc_item *item)
Definition: fom_generic.c:75
M0_TL_DESCR_DEFINE(iofops, "List of IO fops", M0_INTERNAL, struct ioreq_fop, irf_link, irf_magic, M0_IOFOP_MAGIC, M0_TIOREQ_MAGIC)
M0_INTERNAL bool m0__is_oostore(struct m0_client *instance)
Definition: client.c:255
static struct m0_sm_group * grp
Definition: bytecount.c:38
M0_INTERNAL void m0_fop_init(struct m0_fop *fop, struct m0_fop_type *fopt, void *data, void(*fop_release)(struct m0_ref *))
Definition: fop.c:78
#define M0_LOG(level,...)
Definition: trace.h:167
M0_LEAVE()
const struct m0_op_io_ops * ioo_ops
struct m0_sm_ast crf_ast
struct m0_io_fop irf_iofop
Definition: pg.h:866
M0_INTERNAL void m0_sm_ast_post(struct m0_sm_group *grp, struct m0_sm_ast *ast)
Definition: sm.c:135
static void m0_atomic64_sub(struct m0_atomic64 *a, int64_t num)
struct m0_sm_group * oo_sm_grp
struct m0_vec ov_vec
Definition: vec.h:147
struct m0_chan rb_chan
Definition: bulk.h:258
static const struct m0_rpc_item_ops cc_item_ops
Definition: io_req_fop.c:561
struct m0_rpc_bulk if_rbulk
Definition: io_fops.h:175
struct m0_sm ri_sm
Definition: item.h:181
static void m0_sm_io_done_ast(struct m0_sm_group *grp, struct m0_sm_ast *ast)
Definition: io_req_fop.c:566
enum target_ioreq_type ti_req_type
struct m0_op oc_op
int32_t ri_error
Definition: item.h:161
struct m0_net_buf_desc_data * id_descs
Definition: io_fops.h:311
void * m0_fop_data(const struct m0_fop *fop)
Definition: fop.c:219
uint32_t c_cob_type
Definition: io_fops.h:472
M0_INTERNAL void m0_indexvec_free(struct m0_indexvec *ivec)
Definition: vec.c:553
struct m0_indexvec ti_trunc_ivec
Definition: pg.h:799
uint64_t m0_bindex_t
Definition: types.h:80
struct m0_fid c_cobfid
Definition: io_fops.h:463
Definition: sm.h:504
static int void * buf
Definition: dir.c:1019
static struct m0_rpc_session session
Definition: formation2.c:38
M0_INTERNAL void m0_mutex_lock(struct m0_mutex *mutex)
Definition: mutex.c:49
uint32_t ci_nr
Definition: vec.h:635
M0_ADDB2_ADD(M0_AVI_FS_CREATE, new_fid.f_container, new_fid.f_key, mode, rc)
m0_bcount_t nbe_length
Definition: net.h:1226
struct m0_net_buffer * nbe_buffer
Definition: net.h:1194
M0_INTERNAL bool m0_is_read_rep(const struct m0_fop *fop)
Definition: io_fops.c:933
static struct m0_rpc_item * item
Definition: item.c:56
M0_INTERNAL uint64_t m0__obj_lid(struct m0_obj *obj)
Definition: obj.c:126
Definition: sock.c:887
#define ITEM_ARG(item)
Definition: item.h:618
M0_INTERNAL bool m0_tlist_is_empty(const struct m0_tl_descr *d, const struct m0_tl *list)
Definition: tlist.c:96
struct m0_sm ioo_sm
#define m0_tl_endfor
Definition: tlist.h:700
struct m0_vec iv_vec
Definition: vec.h:139
return M0_RC(rc)
op
Definition: libdemo.c:64
unsigned int op_code
Definition: client.h:656
static uint32_t unit_size
Definition: layout.c:53
#define M0_ENTRY(...)
Definition: trace.h:170
Definition: buf.h:37
static struct m0_sm_ast ast[NR]
Definition: locality.c:44
uint64_t osr_xid
Definition: onwire.h:105
void(* iro_iosm_handle_executed)(struct m0_sm_group *grp, struct m0_sm_ast *ast)
Definition: pg.h:622
m0_bindex_t * iv_index
Definition: vec.h:141
int32_t m0_rpc_item_generic_reply_rc(const struct m0_rpc_item *reply)
Definition: fom_generic.c:81
int m0_obj_layout_id_to_unit_size(uint64_t layout_id)
Definition: obj.c:851
void m0_fop_put0_lock(struct m0_fop *fop)
Definition: fop.c:212
int i
Definition: dir.c:1033
const struct m0_bob_type ioo_bobtype
Definition: io_req.c:153
#define PRIu64
Definition: types.h:58
static void ioreq_fop_release(struct m0_ref *ref)
Definition: io_req_fop.c:880
Definition: client.h:647
int32_t nbe_status
Definition: net.h:1218
M0_INTERNAL bool ioreq_fop_invariant(const struct ioreq_fop *fop)
Definition: io_req_fop.c:62
struct nw_xfer_request ioo_nwxfer
struct m0_rpc_machine * m0_fop_rpc_machine(const struct m0_fop *fop)
Definition: fop.c:359
uint64_t ti_parbytes
return M0_ERR(-EOPNOTSUPP)
M0_INTERNAL const char * m0_fop_name(const struct m0_fop *fop)
Definition: fop.c:54
struct m0_op_obj ioo_oo
void * sa_datum
Definition: sm.h:508
M0_INTERNAL void m0_rpc_machine_unlock(struct m0_rpc_machine *machine)
Definition: rpc_machine.c:558
M0_INTERNAL struct m0_fop_cob_rw_reply * io_rw_rep_get(struct m0_fop *fop)
Definition: io_fops.c:1056
struct m0_fop if_fop
Definition: io_fops.h:172
M0_INTERNAL void m0_rpc_bulk_default_cb(const struct m0_net_buffer_event *evt)
Definition: bulk.c:140
Definition: trace.h:482
M0_INTERNAL struct m0_client * m0__op_instance(const struct m0_op *op)
Definition: client.c:236
Definition: cnt.h:36
const struct m0_net_buffer_callbacks client__buf_bulk_cb
Definition: io_req_fop.c:654
enum sns_repair_state ioo_sns_state
struct m0_indexvec ioo_ext
#define M0_AMB(obj, ptr, field)
Definition: misc.h:320
Definition: refs.h:34
int irf_reply_rc
Definition: pg.h:863
struct m0_io_descs crw_desc
Definition: io_fops.h:398
#define M0_ASSERT(cond)
struct m0_fid ioo_pver
struct m0_rpc_item_header2 ri_header
Definition: item.h:193
void m0_sm_state_set(struct m0_sm *mach, int state)
Definition: sm.c:478
struct m0_rpc_machine * m0_fop_session_machine(const struct m0_rpc_session *s)
Definition: fop.c:452
uint32_t c_cob_idx
Definition: io_fops.h:469
M0_INTERNAL bool m0_is_io_fop_rep(const struct m0_fop *fop)
Definition: io_fops.c:945
static struct m0_fop reply_fop
Definition: fsync.c:64
#define bob_of(ptr, type, field, bt)
Definition: bob.h:140
static void m0_atomic64_dec(struct m0_atomic64 *a)
struct m0_atomic64 nxr_ccfop_nr
struct m0_sm_ast ioo_done_ast
M0_INTERNAL int m0_indexvec_wire2mem(struct m0_io_indexvec *wire_ivec, int max_frags_nr, uint32_t bshift, struct m0_indexvec *mem_ivec)
Definition: vec.c:1058
#define ITEM_FMT
Definition: item.h:617
uint64_t pi_grpid
void * nb_app_private
Definition: net.h:1477
struct m0_obj * ioo_obj
M0_INTERNAL struct m0_obj_attr * m0_io_attr(struct m0_op_io *ioo)
Definition: utils.c:302
uint64_t b_lid
Definition: md_fops.h:85
struct m0_fop * m0_fop_get(struct m0_fop *fop)
Definition: fop.c:161
const struct m0_rpc_item_type * ri_type
Definition: item.h:200
struct m0_fid c_gobfid
Definition: io_fops.h:458
struct m0_rpc_item * ri_reply
Definition: item.h:163
struct m0_fop_mod_rep rwr_mod_rep
Definition: io_fops.h:337
struct m0_sm_group * sm_grp
Definition: sm.h:321
M0_INTERNAL uint32_t m0_fid_cob_device_id(const struct m0_fid *cob_fid)
Definition: fid_convert.c:81
struct m0_buf rwr_di_data_cksum
Definition: io_fops.h:340
struct m0_fid b_pver
Definition: md_fops.h:88
uint64_t ri_nr_sent_max
Definition: item.h:146
#define M0_POST(cond)
struct m0_0vec bb_zerovec
Definition: bulk.h:179
struct m0_fid oo_fid
struct m0_sm_ast ioo_ast
uint32_t v_nr
Definition: vec.h:51
struct m0_sm_ast irf_ast
Definition: pg.h:872
m0_net_buffer_cb_proc_t nbc_cb[M0_NET_QT_NR]
Definition: net.h:1272
M0_INTERNAL int ioreq_fop_async_submit(struct m0_io_fop *iofop, struct m0_rpc_session *session)
Definition: io_req_fop.c:666
M0_INTERNAL int m0_fop_data_alloc(struct m0_fop *fop)
Definition: fop.c:70
m0_bcount_t * v_count
Definition: vec.h:53
M0_INTERNAL void m0_fop_fini(struct m0_fop *fop)
Definition: fop.c:135
struct m0_rpc_session * ti_session
static struct m0_clink clink[RDWR_REQUEST_MAX]
M0_INTERNAL bool m0_ivec_cursor_move(struct m0_ivec_cursor *cur, m0_bcount_t count)
Definition: vec.c:718
struct m0_op_common oo_oc
static void ioreq_cc_bottom_half(struct m0_sm_group *grp, struct m0_sm_ast *ast)
Definition: io_req_fop.c:452
struct m0_io_indexvec ct_io_ivec
Definition: io_fops.h:507
#define FID_P(f)
Definition: fid.h:77
M0_INTERNAL bool addr_is_network_aligned(void *addr)
Definition: utils.c:29
uint64_t rb_id
Definition: bulk.h:267
M0_INTERNAL struct m0_op * m0__ioo_to_op(struct m0_op_io *ioo)
Definition: client.c:249
struct m0_bob_type iofop_bobtype
Definition: io_req_fop.c:47
static void ioreq_cc_rpc_item_cb(struct m0_rpc_item *item)
Definition: io_req_fop.c:526
struct m0_bufvec z_bvec
Definition: vec.h:514
static int64_t m0_atomic64_get(const struct m0_atomic64 *a)
void(* rio_replied)(struct m0_rpc_item *item)
Definition: item.h:300
int32_t ioo_rc
M0_INTERNAL uint32_t m0_indexvec_pack(struct m0_indexvec *iv)
Definition: vec.c:521
struct m0_fop_type m0_fop_cob_readv_fopt
Definition: io_fops.c:71
M0_INTERNAL size_t m0_rpc_bulk_buf_length(struct m0_rpc_bulk *rbulk)
Definition: bulk.c:550
uint64_t ti_databytes
M0_INTERNAL size_t m0_rpc_bulk_store_del_unqueued(struct m0_rpc_bulk *rbulk)
Definition: bulk.c:190
M0_INTERNAL int ioreq_cc_fop_init(struct target_ioreq *ti)
Definition: io_req_fop.c:804
struct target_ioreq * irf_tioreq
Definition: pg.h:881
struct m0_ref f_ref
Definition: fop.h:81
M0_INTERNAL int m0_indexvec_mem2wire(struct m0_indexvec *mem_ivec, int max_frags_nr, uint32_t bshift, struct m0_io_indexvec *wire_ivec)
Definition: vec.c:1087
M0_INTERNAL void m0_rpc_machine_lock(struct m0_rpc_machine *machine)
Definition: rpc_machine.c:551
struct m0_fid ti_fid
static void ioreq_pgiomap_find(struct m0_op_io *ioo, uint64_t grpid, uint64_t *cursor, struct pargrp_iomap **out)
Definition: io_req_fop.c:712
struct cc_req_fop ti_cc_fop
M0_INTERNAL void m0_clink_add(struct m0_chan *chan, struct m0_clink *link)
Definition: chan.c:228
const struct m0_rpc_item_ops * ri_ops
Definition: item.h:149
M0_INTERNAL bool m0__obj_is_cksum_validation_allowed(struct m0_op_io *ioo)
Definition: io.c:665
struct m0_mutex nxr_lock
struct m0_rpc_session * ri_session
Definition: item.h:147
M0_INTERNAL m0_bcount_t m0_io_count(const struct m0_io_indexvec *io_info)
Definition: vec.c:999
struct m0_fop_type m0_fop_cob_create_fopt
Definition: io_fops.c:75
struct m0_entity ob_entity
Definition: client.h:795
page_attr
M0_INTERNAL int ioreq_fop_init(struct ioreq_fop *fop, struct target_ioreq *ti, enum page_attr pattr)
Definition: io_req_fop.c:971
#define _0C(exp)
Definition: assert.h:311
M0_INTERNAL m0_bindex_t m0_ivec_cursor_index(const struct m0_ivec_cursor *cur)
Definition: vec.c:733
M0_INTERNAL void m0_clink_fini(struct m0_clink *link)
Definition: chan.c:208
m0_bcount_t rb_bytes
Definition: bulk.h:260
M0_INTERNAL bool m0_rpc_bulk_is_empty(struct m0_rpc_bulk *rbulk)
Definition: bulk.c:539
struct m0_atomic64 nxr_iofop_nr
static struct m0_fop * fop
Definition: item.c:57
M0_TL_DEFINE(iofops, M0_INTERNAL, struct ioreq_fop)
static struct m0 instance
Definition: main.c:78
struct m0_fop * m0_rpc_item_to_fop(const struct m0_rpc_item *item)
Definition: fop.c:345
static struct m0_be_seg * seg
Definition: btree.c:40
uint64_t ioo_iomap_nr
static uint32_t ioreq_sm_state(const struct io_request *req)
Definition: file.c:975
struct m0_fid c_pver
Definition: io_fops.h:466
struct m0_tl rb_buflist
Definition: bulk.h:256
M0_INTERNAL void m0_io_fop_fini(struct m0_io_fop *iofop)
Definition: io_fops.c:897
M0_INTERNAL struct m0_file * m0_client_fop_to_file(struct m0_fop *fop)
Definition: io_req_fop.c:88
M0_INTERNAL int m0_io_fop_init(struct m0_io_fop *iofop, const struct m0_fid *gfid, struct m0_fop_type *ftype, void(*fop_release)(struct m0_ref *))
Definition: io_fops.c:865
struct nw_xfer_request * ti_nwxfer
uint64_t ct_size
Definition: io_fops.h:505
#define out(...)
Definition: gen.c:41
M0_INTERNAL void m0_rpc_bulk_store_del(struct m0_rpc_bulk *rbulk)
Definition: bulk.c:215
Definition: file.h:81
uint64_t oa_layout_id
Definition: client.h:758
M0_INTERNAL bool m0_is_read_fop(const struct m0_fop *fop)
Definition: io_fops.c:916
static uint64_t pargrp_id_find(m0_bindex_t index, const struct io_request *req, const struct io_req_fop *ir_fop)
Definition: file.c:638
M0_INTERNAL struct m0_fop_cob_rw * io_rw_get(struct m0_fop *fop)
Definition: io_fops.c:1037
Definition: pg.h:859
M0_INTERNAL void ioreq_fop_fini(struct ioreq_fop *fop)
Definition: io_req_fop.c:1030
struct m0_fop_type m0_fop_cob_truncate_fopt
Definition: io_fops.c:77
struct m0_indexvec ti_goff_ivec
Definition: pg.h:820
struct m0_rpc_machine * ri_rmachine
Definition: item.h:160
static struct m0_dtm_oper_descr reply
Definition: transmit.c:94
struct m0_fop_type m0_fop_cob_writev_fopt
Definition: io_fops.c:72
M0_INTERNAL uint64_t m0_sm_id_get(const struct m0_sm *sm)
Definition: sm.c:1021
void sync_record_update(struct m0_reqh_service_ctx *service, struct m0_entity *ent, struct m0_op *op, struct m0_be_tx_remid *btr)
Definition: sync.c:788
#define m0_tl_for(name, head, obj)
Definition: tlist.h:695
enum page_attr irf_pattr
Definition: pg.h:869
void m0_free(void *data)
Definition: memory.c:146
static const struct m0_rpc_item_ops item_ops
Definition: io_req_fop.c:557
struct m0_rpc_item f_item
Definition: fop.h:84
struct m0_fop_cob c_body
Definition: io_fops.h:454
uint32_t sm_state
Definition: sm.h:307
struct m0_bufvec ioo_attr
struct m0_io_indexvec crw_ivec
Definition: io_fops.h:409
static void io_rpc_item_cb(struct m0_rpc_item *item)
Definition: io_req_fop.c:413
struct m0_pdclust_src_addr src
Definition: fd.c:108
int32_t rc
Definition: trigger_fop.h:47
struct m0_indexvec ti_ivec
Definition: pg.h:793
struct m0_rpc_conn * s_conn
Definition: session.h:312
struct m0_be_tx_remid fmr_remid
Definition: wire.h:80
fop_type
Definition: stats_ut_svc.c:51
Definition: fop.h:80
struct m0_mutex rb_mutex
Definition: bulk.h:251
struct pargrp_iomap ** ioo_iomaps
uint64_t crw_flags
Definition: io_fops.h:411
#define FID_F
Definition: fid.h:75
static bool should_ioreq_sm_complete(struct m0_op_io *ioo)
Definition: io_req_fop.c:71
struct m0_fop * rep_fop
Definition: dir.c:334
M0_INTERNAL void * m0_extent_vec_get_checksum_addr(void *cksum_buf_vec, m0_bindex_t off, void *ivec, m0_bindex_t unit_sz, m0_bcount_t cs_sz)
Definition: cksum_utils.c:107
static void ioreq_cc_fop_release(struct m0_ref *ref)
Definition: io_req_fop.c:794
static void client_passive_recv(const struct m0_net_buffer_event *evt)
Definition: io_req_fop.c:587
M0_INTERNAL struct m0_reqh_service_ctx * m0_reqh_service_ctx_from_session(struct m0_rpc_session *session)
M0_BOB_DEFINE(M0_INTERNAL, &iofop_bobtype, ioreq_fop)