io_nw_xfer.c
1 /* -*- C -*- */
2 /*
3  * Copyright (c) 2020 Seagate Technology LLC and/or its Affiliates
4  *
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  *
17  * For any questions about this software or licensing,
18  * please email opensource@seagate.com or cortx-questions@seagate.com.
19  *
20  */
21 
22 
23 #include "motr/client.h"
24 #include "motr/client_internal.h"
25 #include "motr/addb.h"
26 #include "motr/pg.h"
27 #include "motr/io.h"
28 
29 #include "lib/memory.h" /* m0_alloc, m0_free */
30 #include "lib/errno.h" /* ENOMEM */
31 #include "lib/finject.h" /* M0_FI_ */
32 #include "lib/cksum_utils.h"
33 #include "fid/fid.h" /* m0_fid */
34 #include "rpc/rpclib.h" /* m0_rpc_ */
35 #include "lib/ext.h" /* struct m0_ext */
36 #include "lib/misc.h" /* m0_extent_vec_get_checksum_addr */
37 #include "fop/fom_generic.h" /* m0_rpc_item_generic_reply_rc */
38 #include "sns/parity_repair.h" /* m0_sns_repair_spare_map*/
39 #include "fd/fd.h" /* m0_fd_fwd_map m0_fd_bwd_map */
40 #include "motr/addb.h"
41 #include "rpc/item.h"
42 #include "rpc/rpc_internal.h"
43 
44 #define M0_TRACE_SUBSYSTEM M0_TRACE_SUBSYS_CLIENT
45 #include "lib/trace.h" /* M0_LOG */
46 
50 
54 
56 const struct m0_bob_type nwxfer_bobtype = {
57  .bt_name = "nw_xfer_request_bobtype",
58  .bt_magix_offset = offsetof(struct nw_xfer_request, nxr_magic),
59  .bt_magix = M0_NWREQ_MAGIC,
60  .bt_check = NULL,
61 };
62 
63 const struct m0_bob_type tioreq_bobtype = {
64  .bt_name = "target_ioreq",
65  .bt_magix_offset = offsetof(struct target_ioreq, ti_magic),
66  .bt_magix = M0_TIOREQ_MAGIC,
67  .bt_check = NULL,
68 };
69 
70 static void to_op_io_map(const struct m0_op *op,
71  struct m0_op_io *ioo)
72 {
73  uint64_t oid = m0_sm_id_get(&op->op_sm);
74  uint64_t ioid = m0_sm_id_get(&ioo->ioo_sm);
75 
76  if (ioo->ioo_addb2_mapped++ == 0)
77  M0_ADDB2_ADD(M0_AVI_OP_TO_IOO, oid, ioid);
78 }
79 
80 static void m0_op_io_to_rpc_map(const struct m0_op_io *ioo,
81  const struct m0_rpc_item *item)
82 {
83  uint64_t rid = m0_sm_id_get(&item->ri_sm);
84  uint64_t ioid = m0_sm_id_get(&ioo->ioo_sm);
85  M0_ADDB2_ADD(M0_AVI_IOO_TO_RPC, ioid, rid);
86 }
87 
98 static uint32_t io_di_size(struct m0_op_io *ioo)
99 {
100  uint32_t rc = 0;
101  const struct m0_fid *fid;
102  const struct m0_di_ops *di_ops;
103  struct m0_file *file;
104 
105  M0_PRE(ioo != NULL);
106 
107  #ifndef ENABLE_DATA_INTEGRITY
108  return M0_RC(rc);
109  #endif
110  /* Get di details (workaround!) by setting the dom to NULL. */
111  file = &ioo->ioo_flock;
112  fid = &ioo->ioo_oo.oo_fid;
113  m0_file_init(file, fid, NULL, M0_DI_DEFAULT_TYPE);
114  di_ops = file->fi_di_ops;
115 
116  if (di_ops->do_out_shift(file) == 0)
117  return M0_RC(0);
118 
119  rc = di_ops->do_out_shift(file) * M0_DI_ELEMENT_SIZE;
120 
121  return M0_RC(rc);
122 }
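/*
 * [Editorial note, not part of the original file: io_di_size() feeds the
 * fop-size budget -- the fop-preparation loop below reserves room for every
 * bulk segment's descriptor plus its data-integrity payload via
 * "delta += io_seg_size() + io_di_size(ioo)".]
 */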
123 
124 static void parity_page_pos_get(struct pargrp_iomap *map,
125  m0_bindex_t index,
126  uint32_t *row,
127  uint32_t *col)
128 {
129  uint64_t pg_id;
130  struct m0_pdclust_layout *play;
131 
132  M0_PRE(map != NULL);
133  M0_PRE(row != NULL);
134  M0_PRE(col != NULL);
135 
136  play = pdlayout_get(map->pi_ioo);
137 
138  pg_id = page_id(index, map->pi_ioo->ioo_obj);
139  *row = pg_id % rows_nr(play, map->pi_ioo->ioo_obj);
140  *col = pg_id / rows_nr(play, map->pi_ioo->ioo_obj);
141 }
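/*
 * [Editorial illustration, not part of the original file: a worked example
 * of the mapping above. With 4KiB pages and rows_nr(play, obj) == 4, a
 * byte offset of 40960 into the group gives pg_id = 40960 / 4096 = 10,
 * hence row = 10 % 4 = 2 and col = 10 / 4 = 2.]
 */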
142 
151 static int dgmode_rwvec_alloc_init(struct target_ioreq *ti)
152 {
153  int rc;
154  uint64_t cnt;
155  struct dgmode_rwvec *dg;
156  struct m0_pdclust_layout *play;
157  struct m0_op_io *ioo;
158 
159  M0_ENTRY();
160  M0_PRE(ti != NULL);
161  M0_PRE(ti->ti_dgvec == NULL);
162 
163  ioo = bob_of(ti->ti_nwxfer, struct m0_op_io, ioo_nwxfer,
164  &ioo_bobtype);
165 
166  M0_ALLOC_PTR(dg);
167  if (dg == NULL) {
168  rc = -ENOMEM;
169  goto failed;
170  }
171 
172  play = pdlayout_get(ioo);
173  dg->dr_tioreq = ti;
174 
175  cnt = page_nr(ioo->ioo_iomap_nr
176  * layout_unit_size(play)
177  * (layout_n(play) + layout_k(play)),
178  ioo->ioo_obj);
179  rc = m0_indexvec_alloc(&dg->dr_ivec, cnt);
180  if (rc != 0)
181  goto failed;
182 
183  M0_ALLOC_ARR(dg->dr_bufvec.ov_buf, cnt);
184  if (dg->dr_bufvec.ov_buf == NULL) {
185  rc = -ENOMEM;
186  goto failed;
187  }
188 
189  M0_ALLOC_ARR(dg->dr_bufvec.ov_vec.v_count, cnt);
190  if (dg->dr_bufvec.ov_vec.v_count == NULL) {
191  rc = -ENOMEM;
192  goto failed;
193  }
194 
195  M0_ALLOC_ARR(dg->dr_auxbufvec.ov_buf, cnt);
196  if (dg->dr_auxbufvec.ov_buf == NULL) {
197  rc = -ENOMEM;
198  goto failed;
199  }
200 
201  M0_ALLOC_ARR(dg->dr_auxbufvec.ov_vec.v_count, cnt);
202  if (dg->dr_auxbufvec.ov_vec.v_count == NULL) {
203  rc = -ENOMEM;
204  goto failed;
205  }
206 
207  M0_ALLOC_ARR(dg->dr_pageattrs, cnt);
208  if (dg->dr_pageattrs == NULL) {
209  rc = -ENOMEM;
210  goto failed;
211  }
212 
213  /*
214  * This value is incremented every time a new segment is added
215  * to this index vector.
216  */
217  dg->dr_ivec.iv_vec.v_nr = 0;
218 
219  ti->ti_dgvec = dg;
220  return M0_RC(0);
221 failed:
222  ti->ti_dgvec = NULL;
223  /* m0_free(NULL) is a no-op, so members that were never allocated
224  * can be freed unconditionally once dg itself is known to exist. */
225  if (dg != NULL) {
226   m0_free(dg->dr_bufvec.ov_buf);
227   m0_free(dg->dr_bufvec.ov_vec.v_count);
228   m0_free(dg->dr_auxbufvec.ov_buf);
229   m0_free(dg->dr_auxbufvec.ov_vec.v_count);
230   m0_free(dg);
231  }
232  return M0_ERR(rc);
233 }
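/*
 * [Editorial illustration, not part of the original file: the dgvec
 * capacity computed above covers every data and parity unit of every
 * parity group spanned by the request. For example, with
 * ioo_iomap_nr == 2 groups, a 4+2 layout, 1MiB units and 4KiB pages:
 * cnt = page_nr(2 * 1MiB * (4 + 2)) = 3072 pages.]
 */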
234 
244 static void dgmode_rwvec_dealloc_fini(struct dgmode_rwvec *dg)
245 {
246  M0_ENTRY();
247 
248  M0_PRE(dg != NULL);
249 
250  dg->dr_tioreq = NULL;
251  /*
252  * Will need to go through array of parity groups to find out
253  * exact number of segments allocated for the index vector.
254  * Instead, a fixed number of segments is enough to avoid
255  * triggering the assert from m0_indexvec_free().
256  * The memory allocator knows the size of memory area held by
257  * dg->dr_ivec.iv_index and dg->dr_ivec.iv_vec.v_count.
258  */
259  if (dg->dr_ivec.iv_vec.v_nr == 0)
260  ++dg->dr_ivec.iv_vec.v_nr;
261 
262  m0_indexvec_free(&dg->dr_ivec);
263  m0_free(dg->dr_bufvec.ov_buf);
264  m0_free(dg->dr_bufvec.ov_vec.v_count);
265  m0_free(dg->dr_auxbufvec.ov_buf);
266  m0_free(dg->dr_auxbufvec.ov_vec.v_count);
267  m0_free(dg->dr_pageattrs);
268  m0_free(dg);
269 }
270 
279 static uint64_t tioreqs_hash_func(const struct m0_htable *htable, const void *k)
280 {
281  const uint64_t *key;
282  M0_PRE(htable != NULL);
283  M0_PRE(htable->h_bucket_nr > 0);
284  M0_PRE(k != NULL);
285 
286  key = (uint64_t *)k;
287 
288  return *key % htable->h_bucket_nr;
289 }
290 
299 static bool tioreq_key_eq(const void *key1, const void *key2)
300 {
301  const uint64_t *k1 = (uint64_t *)key1;
302  const uint64_t *k2 = (uint64_t *)key2;
303 
304  M0_PRE(k1 != NULL);
305  M0_PRE(k2 != NULL);
306 
307  return *k1 == *k2;
308 }
309 
310 M0_HT_DESCR_DEFINE(tioreqht, "Hash of target_ioreq objects", M0_INTERNAL,
311  struct target_ioreq, ti_link, ti_magic,
313  ti_fid.f_container, tioreqs_hash_func, tioreq_key_eq);
314 
315 M0_HT_DEFINE(tioreqht, M0_INTERNAL, struct target_ioreq, uint64_t);
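/*
 * [Editorial sketch, not part of the original file: the table hashes only
 * on fid.f_container (see tioreqs_hash_func() above), so lookups pass the
 * container component of the target fid as the key, e.g.:]
 */
#if 0
static struct target_ioreq *
example_lookup(struct nw_xfer_request *xfer, struct m0_fid *fid)
{
        /* mirrors target_ioreq_locate() below */
        return tioreqht_htable_lookup(&xfer->nxr_tioreqs_hash,
                                      &fid->f_container);
}
#endif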
316 
324 static bool target_ioreq_invariant(const struct target_ioreq *ti)
325 {
326  return M0_RC(ti != NULL &&
327  _0C(target_ioreq_bob_check(ti)) &&
328  _0C(ti->ti_session != NULL) &&
329  _0C(ti->ti_nwxfer != NULL) &&
330  _0C(ti->ti_bufvec.ov_buf != NULL) &&
331  _0C(ti->ti_auxbufvec.ov_buf != NULL) &&
332  _0C(m0_fid_is_valid(&ti->ti_fid)) &&
333  m0_tl_forall(iofops, iofop, &ti->ti_iofops,
334  ioreq_fop_invariant(iofop)));
335 }
336 
340 M0_INTERNAL bool nw_xfer_request_invariant(const struct nw_xfer_request *xfer)
341 {
342  return xfer != NULL &&
343  _0C(nw_xfer_request_bob_check(xfer)) &&
344  _0C(xfer->nxr_state < NXS_STATE_NR) &&
345 
346  _0C(ergo(xfer->nxr_state == NXS_INITIALIZED,
347  xfer->nxr_rc == 0 && xfer->nxr_bytes == 0 &&
348  m0_atomic64_get(&xfer->nxr_iofop_nr) == 0)) &&
349 
350  _0C(ergo(xfer->nxr_state == NXS_INFLIGHT,
351  !tioreqht_htable_is_empty(&xfer->nxr_tioreqs_hash))) &&
352 
353  _0C(ergo(xfer->nxr_state == NXS_COMPLETE,
354  m0_atomic64_get(&xfer->nxr_iofop_nr) == 0 &&
355  m0_atomic64_get(&xfer->nxr_rdbulk_nr) == 0)) &&
356 
357  m0_htable_forall(tioreqht, tioreq, &xfer->nxr_tioreqs_hash,
358  target_ioreq_invariant(tioreq));
359 }
360 
364 static void target_ioreq_fini(struct target_ioreq *ti)
365 {
366  struct m0_op_io *ioo;
367  unsigned int opcode;
368 
369  M0_ENTRY("target_ioreq %p", ti);
370 
372  M0_PRE(iofops_tlist_is_empty(&ti->ti_iofops));
373 
374  ioo = bob_of(ti->ti_nwxfer, struct m0_op_io,
375  ioo_nwxfer, &ioo_bobtype);
376  opcode = ioo->ioo_oo.oo_oc.oc_op.op_code;
377  target_ioreq_bob_fini(ti);
378  tioreqht_tlink_fini(ti);
379  iofops_tlist_fini(&ti->ti_iofops);
380  ti->ti_ops = NULL;
381  ti->ti_session = NULL;
382  ti->ti_nwxfer = NULL;
383 
384  /* Resets the number of segments in vector. */
385  if (ti->ti_ivec.iv_vec.v_nr == 0)
386  ti->ti_ivec.iv_vec.v_nr = ti->ti_bufvec.ov_vec.v_nr;
387 
388  m0_indexvec_free(&ti->ti_ivec);
389  if (opcode == M0_OC_FREE)
390   m0_indexvec_free(&ti->ti_trunc_ivec);
391  m0_free0(&ti->ti_bufvec.ov_buf);
392  m0_free0(&ti->ti_bufvec.ov_vec.v_count);
393  m0_free0(&ti->ti_auxbufvec.ov_buf);
394  m0_free0(&ti->ti_auxbufvec.ov_vec.v_count);
395 
396  /* For the write path, ti_attrbuf (an m0_buf) will be freed by the
397  * RPC layer, so there is no need to free it explicitly via
398  * m0_buf_free(&ti->ti_attrbuf).
399  * TODO: Further validate this by checking if the memory is actually freed.
400  */
400 
401  m0_free0(&ti->ti_pageattrs);
402 
403  if (ti->ti_dgvec != NULL)
404  dgmode_rwvec_dealloc_fini(ti->ti_dgvec);
405  if (ti->ti_cc_fop_inited) {
406  struct m0_rpc_item *item = &ti->ti_cc_fop.crf_fop.f_item;
407  M0_LOG(M0_DEBUG, "item="ITEM_FMT" osr_xid=%"PRIu64,
408  ITEM_ARG(item), item->ri_header.osr_xid);
409  ti->ti_cc_fop_inited = false;
410  m0_fop_put_lock(&ti->ti_cc_fop.crf_fop);
411  }
412 
413  if ( opcode == M0_OC_WRITE ) {
414  m0_buf_free( &ti->ti_attrbuf );
415  m0_free( (void *)ti->ti_cksum_seg_b_nob );
416  } else if ( opcode == M0_OC_READ )
417  m0_indexvec_free( &ti->ti_goff_ivec );
418 
419  m0_free(ti);
420  M0_LEAVE();
421 }
422 
423 static void target_ioreq_cancel(struct target_ioreq *ti)
424 {
425  struct ioreq_fop *irfop;
426 
427  m0_tl_for (iofops, &ti->ti_iofops, irfop) {
428  m0_rpc_item_cancel(&irfop->irf_iofop.if_fop.f_item);
429  } m0_tl_endfor;
430 }
431 
435 static struct target_ioreq *target_ioreq_locate(struct nw_xfer_request *xfer,
436  struct m0_fid *fid)
437 {
438  struct target_ioreq *ti;
439 
440  M0_ENTRY("nw_xfer_request %p, fid %p", xfer, fid);
441 
442  M0_PRE(xfer != NULL);
443  M0_PRE(fid != NULL);
444 
445  ti = tioreqht_htable_lookup(&xfer->nxr_tioreqs_hash, &fid->f_container);
446  /* WARN: Searches only with the container but compares the whole fid. */
447  M0_ASSERT(ergo(ti != NULL, m0_fid_cmp(fid, &ti->ti_fid) == 0));
448 
449  M0_LEAVE();
450  return ti;
451 }
452 
453 /*
454  * For partially spanned parity groups, only the data units that fall in
455  * the truncate range are truncated. For a fully spanned parity group,
456  * both data and parity units are truncated.
457  */
458 static bool should_unit_be_truncated(bool partial,
459  enum m0_pdclust_unit_type unit_type,
460  enum page_attr flags)
461 {
462  return (!partial || unit_type == M0_PUT_DATA) &&
463  (flags & PA_WRITE);
464 }
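/*
 * [Editorial illustration, not part of the original file: expected
 * behaviour of the predicate above for write-bound pages.]
 */
#if 0
static void example_truncate_rules(void)
{
        M0_ASSERT( should_unit_be_truncated(true,  M0_PUT_DATA,   PA_WRITE));
        M0_ASSERT(!should_unit_be_truncated(true,  M0_PUT_PARITY, PA_WRITE));
        M0_ASSERT( should_unit_be_truncated(false, M0_PUT_PARITY, PA_WRITE));
        M0_ASSERT(!should_unit_be_truncated(true,  M0_PUT_DATA,   PA_READ));
}
#endif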
465 
479 static void target_ioreq_seg_add(struct target_ioreq *ti,
480  const struct m0_pdclust_src_addr *src,
481  const struct m0_pdclust_tgt_addr *tgt,
482  m0_bindex_t gob_offset,
483  m0_bcount_t count,
484  struct pargrp_iomap *map)
485 {
486  uint32_t seg;
487  uint32_t tseg;
488  m0_bindex_t toff;
489  m0_bindex_t goff;
490  m0_bindex_t pgstart;
491  m0_bindex_t pgend;
492  m0_bindex_t unit_sz;
493  struct data_buf *buf;
494  struct m0_op_io *ioo;
495  struct m0_pdclust_layout *play;
496  uint64_t frame;
497  uint64_t unit;
498  struct m0_indexvec *ivec;
499  struct m0_indexvec *trunc_ivec = NULL;
500  struct m0_indexvec *goff_ivec = NULL;
501  struct m0_bufvec *bvec;
502  struct m0_bufvec *auxbvec;
503  enum m0_pdclust_unit_type unit_type;
504  enum page_attr *pattr;
505  uint64_t cnt;
506  unsigned int opcode;
507  m0_bcount_t grp_size;
508  uint64_t page_size;
509  struct m0_ext goff_span_ext;
510  bool is_goff_in_range;
511  void *dst_attr = NULL;
512  uint32_t b_nob;
513 
514  M0_PRE(tgt != NULL);
515  frame = tgt->ta_frame;
516  M0_PRE(src != NULL);
517  unit = src->sa_unit;
518  M0_ENTRY("tio req %p, gob_offset %" PRIu64 ", count %"PRIu64
519  " frame %" PRIu64 " unit %"PRIu64,
520  ti, gob_offset, count, frame, unit);
521 
522  M0_PRE(ti != NULL);
523  M0_PRE(map != NULL);
525 
526  ti->ti_goff = gob_offset;
527 
528  ioo = bob_of(ti->ti_nwxfer, struct m0_op_io,
529  ioo_nwxfer, &ioo_bobtype);
530  opcode = ioo->ioo_oo.oo_oc.oc_op.op_code;
531  play = pdlayout_get(ioo);
532 
533  page_size = m0__page_size(ioo);
534  grp_size = data_size(play) * map->pi_grpid;
535  unit_type = m0_pdclust_unit_classify(play, unit);
536  M0_ASSERT(M0_IN(unit_type, (M0_PUT_DATA, M0_PUT_PARITY)));
537 
538  unit_sz = layout_unit_size(play);
539  toff = target_offset(frame, play, gob_offset);
540  pgstart = toff;
541  goff = unit_type == M0_PUT_DATA ? gob_offset : 0;
542 
544  "[gpos %" PRIu64 ", count %" PRIu64 "] [%" PRIu64 ", %" PRIu64 "]"
545  "->[%" PRIu64 ",%" PRIu64 "] %c", gob_offset, count, src->sa_group,
547  unit_type == M0_PUT_DATA ? 'D' : 'P');
548 
549  /* Use ti_dgvec as long as the request is in dgmode read/write. */
550  if (ioreq_sm_state(ioo) == IRS_DEGRADED_READING ||
551  ioreq_sm_state(ioo) == IRS_DEGRADED_WRITING) {
553  M0_ASSERT(ti->ti_dgvec != NULL);
554  ivec = &ti->ti_dgvec->dr_ivec;
555  bvec = &ti->ti_dgvec->dr_bufvec;
556  auxbvec = &ti->ti_dgvec->dr_auxbufvec;
557  pattr = ti->ti_dgvec->dr_pageattrs;
558  cnt = page_nr(ioo->ioo_iomap_nr * layout_unit_size(play) *
559  (layout_n(play) + layout_k(play)), ioo->ioo_obj);
560  M0_LOG(M0_DEBUG, "map_nr=%" PRIu64 " req state=%u cnt=%"PRIu64,
561  ioo->ioo_iomap_nr, ioreq_sm_state(ioo), cnt);
562  } else {
563  ivec = &ti->ti_ivec;
564  trunc_ivec = &ti->ti_trunc_ivec;
565  bvec = &ti->ti_bufvec;
566  auxbvec = &ti->ti_auxbufvec;
567  dst_attr = ti->ti_attrbuf.b_addr;
568  goff_ivec = &ti->ti_goff_ivec;
569  pattr = ti->ti_pageattrs;
570  cnt = page_nr(ioo->ioo_iomap_nr * layout_unit_size(play) *
571  layout_n(play), ioo->ioo_obj);
572  M0_LOG(M0_DEBUG, "map_nr=%" PRIu64 " req state=%u cnt=%"PRIu64,
573  ioo->ioo_iomap_nr, ioreq_sm_state(ioo), cnt);
574  }
575 
576  while (pgstart < toff + count) {
577  pgend = min64u(pgstart + page_size,
578  toff + count);
579  seg = SEG_NR(ivec);
580 
581  /* Save COB offsets in ti_ivec */
582  INDEX(ivec, seg) = pgstart;
583  COUNT(ivec, seg) = pgend - pgstart;
584 
585  if (unit_type == M0_PUT_DATA) {
586  uint32_t row = map->pi_max_row;
587  uint32_t col = map->pi_max_col;
588 
589  page_pos_get(map, goff, grp_size, &row, &col);
590  M0_ASSERT(row <= map->pi_max_row);
591  M0_ASSERT(col <= map->pi_max_col);
592  buf = map->pi_databufs[row][col];
593 
594  pattr[seg] |= PA_DATA;
595  M0_LOG(M0_DEBUG, "Data seg %u added", seg);
596  } else {
597  buf = map->pi_paritybufs[page_id(goff, ioo->ioo_obj)]
598  [unit - layout_n(play)];
599  pattr[seg] |= PA_PARITY;
600  M0_LOG(M0_DEBUG, "Parity seg %u added", seg);
601  }
602  buf->db_tioreq = ti;
603  if (buf->db_flags & PA_WRITE)
604   ti->ti_databytes += pgend - pgstart;
605 
606  if (opcode == M0_OC_FREE &&
607  should_unit_be_truncated(map->pi_trunc_partial,
608  unit_type, buf->db_flags)) {
609  tseg = SEG_NR(trunc_ivec);
610  INDEX(trunc_ivec, tseg) = pgstart;
611  COUNT(trunc_ivec, tseg) = pgend - pgstart;
612  ++trunc_ivec->iv_vec.v_nr;
613  M0_LOG(M0_DEBUG, "Seg id %d [%" PRIu64 ", %" PRIu64 "]"
614  "added to target ioreq with "FID_F,
615  tseg, INDEX(trunc_ivec, tseg),
616  COUNT(trunc_ivec, tseg),
617  FID_P(&ti->ti_fid));
618  }
619 
620  if (opcode == M0_OC_FREE && !map->pi_trunc_partial)
621  pattr[seg] |= PA_TRUNC;
622 
623  M0_ASSERT(addr_is_network_aligned(buf->db_buf.b_addr));
624  bvec->ov_buf[seg] = buf->db_buf.b_addr;
625  bvec->ov_vec.v_count[seg] = COUNT(ivec, seg);
626  if (map->pi_rtype == PIR_READOLD &&
627  unit_type == M0_PUT_DATA) {
628  M0_ASSERT(buf->db_auxbuf.b_addr != NULL);
629  auxbvec->ov_buf[seg] = buf->db_auxbuf.b_addr;
630  auxbvec->ov_vec.v_count[seg] = page_size;
631  }
632  pattr[seg] |= buf->db_flags;
633  M0_LOG(M0_DEBUG, "pageaddr=%p, auxpage=%p,"
634  " index=%6" PRIu64 ", size=%4"PRIu64
635  " grpid=%3" PRIu64 " flags=%4x for "FID_F,
636  bvec->ov_buf[seg], auxbvec->ov_buf[seg],
637  INDEX(ivec, seg), COUNT(ivec, seg),
638  map->pi_grpid, pattr[seg],
639  FID_P(&ti->ti_fid));
640  M0_LOG(M0_DEBUG, "Seg id %d [%" PRIu64 ", %"PRIu64
641  "] added to target_ioreq with "FID_F
642  " with flags 0x%x: ", seg,
643  INDEX(ivec, seg), COUNT(ivec, seg),
644  FID_P(&ti->ti_fid), pattr[seg]);
646  goff_span_ext.e_start = ioo->ioo_ext.iv_index[0];
647  goff_span_ext.e_end = ioo->ioo_ext.iv_index[ioo->ioo_ext.iv_vec.v_nr - 1]
648  + ioo->ioo_ext.iv_vec.v_count[ioo->ioo_ext.iv_vec.v_nr - 1];
649  /* If ioo_attr struct is not allocated then skip checksum computation */
650  is_goff_in_range = m0_ext_is_in(&goff_span_ext, goff) &&
651  m0__obj_is_di_enabled(ioo);
652  if (dst_attr != NULL && unit_type == M0_PUT_DATA &&
653  opcode == M0_OC_WRITE && is_goff_in_range) {
654  void *src_attr;
655  m0_bcount_t cs_sz;
656 
657  cs_sz = ioo->ioo_attr.ov_vec.v_count[0];
658  /* This we can do as page_size <= unit_sz */
659  b_nob = m0_extent_get_checksum_nob( goff,
660  COUNT(ivec, seg),
661  unit_sz, cs_sz );
662  if (b_nob) {
663  /* This function gets the checksum address from the application-
664  * provided buffer. The checksum corresponds to the gob offset and
665  * ioo_ext, and this function locates the exact address for it.
666  * Note: ioo_ext is the span of offsets for which ioo_attr is
667  * provided, and goff should lie within that span.
668  */
669  src_attr = m0_extent_vec_get_checksum_addr( &ioo->ioo_attr, goff,
670  &ioo->ioo_ext, unit_sz, cs_sz);
671  M0_ASSERT(b_nob == cs_sz);
672  memcpy((char *)dst_attr + ti->ti_cksum_copied, src_attr, b_nob);
673 
674  /* Track the checksum bytes copied: ti_attrbuf is over-allocated
675  * for the target, and when sending the FOP this counter is used
676  * to send the actual checksum size.
677  */
678  ti->ti_cksum_copied += b_nob;
679 
680  /* Make sure we are not exceeding the allocated buffer size */
681  M0_ASSERT(ti->ti_cksum_copied <= ti->ti_attrbuf.b_nob);
682  }
683 
685  } else if (goff_ivec != NULL && unit_type == M0_PUT_DATA &&
686  opcode == M0_OC_READ && is_goff_in_range) {
695  INDEX(goff_ivec, seg) = goff;
696  COUNT(goff_ivec, seg) = COUNT(ivec, seg);
697  goff_ivec->iv_vec.v_nr++;
698  }
699 
700  goff += COUNT(ivec, seg);
701  ++ivec->iv_vec.v_nr;
702  pgstart = pgend;
703  }
704  M0_LEAVE();
705 }
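/*
 * [Editorial note, not part of the original file: the mapping performed
 * above is gob offset -> (frame, unit) -> COB offset. pgstart iterates
 * from target_offset(frame, play, gob_offset) in page_size steps, and
 * each page lands in ti_ivec/ti_bufvec (or the dgmode vectors) together
 * with its page attributes, so one target_ioreq accumulates the segments
 * of all parity groups that hit the same target object.]
 */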
706 
710 M0_INTERNAL struct m0_fid target_fid(struct m0_op_io *ioo,
711  struct m0_pdclust_tgt_addr *tgt)
712 {
713  struct m0_fid fid;
714 
715  m0_poolmach_gob2cob(ioo_to_poolmach(ioo),
716  &ioo->ioo_oo.oo_fid, tgt->ta_obj,
717  &fid);
718  return fid;
719 }
720 
730 static inline struct m0_rpc_session *
731 target_session(struct m0_op_io *ioo, struct m0_fid tfid)
732 {
733  struct m0_op *op;
734  struct m0_pool_version *pv;
735  struct m0_client *instance;
736 
737  M0_PRE(ioo != NULL);
738  op = &ioo->ioo_oo.oo_oc.oc_op;
739  instance = m0__op_instance(op);
740  pv = m0_pool_version_find(&instance->m0c_pools_common, &ioo->ioo_pver);
741  M0_ASSERT(pv != NULL);
742 
743  return m0_obj_container_id_to_session(
744  pv, m0_fid_cob_device_id(&tfid));
745 }
746 
760 static int bulk_buffer_add(struct ioreq_fop *irfop,
761  struct m0_net_domain *dom,
762  struct m0_rpc_bulk_buf **rbuf,
763  uint32_t *delta,
764  uint32_t maxsize)
765 {
766  int rc;
767  int seg_nr;
768  struct m0_op_io *ioo;
769  struct m0_indexvec *ivec;
770 
771  M0_PRE(irfop != NULL);
772  M0_PRE(dom != NULL);
773  M0_PRE(rbuf != NULL);
774  M0_PRE(delta != NULL);
775  M0_PRE(maxsize > 0);
776  M0_ENTRY("ioreq_fop %p net_domain %p delta_size %d",
777  irfop, dom, *delta);
778 
779  ioo = bob_of(irfop->irf_tioreq->ti_nwxfer, struct m0_op_io,
780  ioo_nwxfer, &ioo_bobtype);
781  ivec = M0_IN(ioreq_sm_state(ioo), (IRS_READING, IRS_WRITING)) ?
782  &irfop->irf_tioreq->ti_ivec :
783  &irfop->irf_tioreq->ti_dgvec->dr_ivec;
784  seg_nr = min32(m0_net_domain_get_max_buffer_segments(dom),
785  SEG_NR(ivec));
786  *delta += io_desc_size(dom);
787 
788  if (m0_io_fop_size_get(&irfop->irf_iofop.if_fop) + *delta < maxsize) {
789  rc = m0_rpc_bulk_buf_add(&irfop->irf_iofop.if_rbulk, seg_nr,
790  0, dom, NULL, rbuf);
791  if (rc != 0) {
792  *delta -= io_desc_size(dom);
793  return M0_ERR(rc);
794  }
795  } else {
796  rc = -ENOSPC;
797  *delta -= io_desc_size(dom);
798  }
799 
800  M0_POST(ergo(rc == 0, *rbuf != NULL));
801  return M0_RC(rc);
802 }
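/*
 * [Editorial sketch, not part of the original file: typical use by the
 * fop-preparation loop below. The caller grows `delta' optimistically
 * and treats -ENOSPC as "the current io fop is full":]
 */
#if 0
delta = 0;
rc = bulk_buffer_add(irfop, ndom, &rbuf, &delta, maxsize);
if (rc == -ENOSPC) {
        /* finalise the current io fop and continue with a fresh one */
}
#endif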
803 
810 static void irfop_fini(struct ioreq_fop *irfop)
811 {
812  M0_ENTRY("ioreq_fop %p", irfop);
813 
814  M0_PRE(irfop != NULL);
815 
816  m0_rpc_bulk_buflist_empty(&irfop->irf_iofop.if_rbulk);
817  ioreq_fop_fini(irfop);
818  m0_free(irfop);
819 
820  M0_LEAVE();
821 }
822 
827 static void *buf_aux_chk_get(struct m0_bufvec *aux, enum page_attr p_attr,
828  uint32_t seg_idx, bool rd_in_wr)
829 {
830  return (p_attr == PA_DATA && rd_in_wr && aux != NULL &&
831  aux->ov_buf[seg_idx] != NULL) ? aux->ov_buf[seg_idx] : NULL;
832 }
833 
843 static int target_ioreq_iofops_prepare(struct target_ioreq *ti,
844  enum page_attr filter)
845 {
846  int rc = 0;
847  uint32_t seg = 0;
848  /* Number of segments in one m0_rpc_bulk_buf structure. */
849  uint32_t bbsegs;
850  uint32_t maxsize;
851  uint32_t delta;
852  uint32_t fop_cksm_nob;
853  uint32_t dispatched_cksm_nob = 0;
854  enum page_attr rw;
855  enum page_attr *pattr;
856  struct m0_bufvec *bvec;
857  struct m0_bufvec *auxbvec;
858  struct m0_op_io *ioo;
859  struct m0_obj_attr *io_attr;
860  struct m0_indexvec *ivec;
861  struct ioreq_fop *irfop;
862  struct m0_net_domain *ndom;
863  struct m0_rpc_bulk_buf *rbuf;
864  struct m0_io_fop *iofop;
865  struct m0_fop_cob_rw *rw_fop;
866  struct nw_xfer_request *xfer;
867  /* Is it in the READ phase of WRITE request. */
868  bool read_in_write = false;
869  void *buf;
870  void *bufnext;
871  m0_bcount_t max_seg_size;
872  m0_bcount_t xfer_len;
873  m0_bindex_t offset;
874  uint32_t segnext;
875  uint32_t ndom_max_segs;
876  struct m0_client *instance;
877 
878  M0_ENTRY("prepare io fops for target ioreq %p filter 0x%x, tfid "FID_F,
879  ti, filter, FID_P(&ti->ti_fid));
880 
882  M0_PRE(M0_IN(filter, (PA_DATA, PA_PARITY)));
883 
884  rc = m0_rpc_session_validate(ti->ti_session);
885  if (rc != 0 && (!M0_IN(rc, (-ECANCELED, -EINVAL))))
886  return M0_ERR(rc);
887 
888  xfer = ti->ti_nwxfer;
889  ioo = bob_of(xfer, struct m0_op_io, ioo_nwxfer, &ioo_bobtype);
890  M0_ASSERT(M0_IN(ioreq_sm_state(ioo),
891  (IRS_READING, IRS_DEGRADED_READING,
892  IRS_WRITING, IRS_DEGRADED_WRITING)));
893 
894  if (M0_IN(ioo->ioo_oo.oo_oc.oc_op.op_code, (M0_OC_WRITE,
895  M0_OC_FREE)) &&
896  ioreq_sm_state(ioo) == IRS_READING)
897  read_in_write = true;
898 
899  if (M0_IN(ioreq_sm_state(ioo), (IRS_READING, IRS_WRITING))) {
900  ivec = &ti->ti_ivec;
901  bvec = &ti->ti_bufvec;
902  auxbvec = &ti->ti_auxbufvec;
903  pattr = ti->ti_pageattrs;
904  } else {
905  if (ti->ti_dgvec == NULL) {
906  return M0_RC(0);
907  }
908  ivec = &ti->ti_dgvec->dr_ivec;
909  bvec = &ti->ti_dgvec->dr_bufvec;
910  auxbvec = &ti->ti_dgvec->dr_auxbufvec;
911  pattr = ti->ti_dgvec->dr_pageattrs;
912  }
913 
913 
914  rw = ioreq_sm_state(ioo) == IRS_DEGRADED_WRITING ?
915  PA_DGMODE_WRITE :
916  ioreq_sm_state(ioo) == IRS_WRITING ? PA_WRITE :
917  ioreq_sm_state(ioo) == IRS_DEGRADED_READING ? PA_DGMODE_READ :
918  PA_READ;
919  maxsize = m0_rpc_session_get_max_item_payload_size(ti->ti_session);
920  ndom = ti->ti_session->s_conn->c_rpc_machine->rm_tm.ntm_dom;
921  max_seg_size = m0_net_domain_get_max_buffer_segment_size(ndom);
922 
923  ndom_max_segs = m0_net_domain_get_max_buffer_segments(ndom);
924 
925  while (seg < SEG_NR(ivec)) {
926  delta = 0;
927  bbsegs = 0;
928 
929  M0_LOG(M0_DEBUG, "pageattr = %u, filter = %u, rw = %u",
930  pattr[seg], filter, rw);
931 
932  if (!(pattr[seg] & filter) || !(pattr[seg] & rw) ||
933  (pattr[seg] & PA_TRUNC)) {
934  ++seg;
935  continue;
936  }
937 
938  M0_ALLOC_PTR(irfop);
939  if (irfop == NULL) {
940  rc = M0_ERR(-ENOMEM);
941  goto err;
942  }
943  rc = ioreq_fop_init(irfop, ti, filter);
944  if (rc != 0) {
945  m0_free(irfop);
946  goto err;
947  }
948  fop_cksm_nob = 0;
949 
950  iofop = &irfop->irf_iofop;
951  rw_fop = io_rw_get(&iofop->if_fop);
952 
953  rc = bulk_buffer_add(irfop, ndom, &rbuf, &delta, maxsize);
954  if (rc != 0) {
955  ioreq_fop_fini(irfop);
956  m0_free(irfop);
957  goto err;
958  }
959  delta += io_seg_size();
960 
961 
962  /*
963  * Adds io segments and io descriptor only if it fits within
964  * permitted size.
965  */
966  /* TODO: can this loop become a function call?
967  * -- too many levels of indentation */
968  while (seg < SEG_NR(ivec) &&
969  m0_io_fop_size_get(&iofop->if_fop) + delta < maxsize &&
970  bbsegs < ndom_max_segs) {
971 
972  /*
973  * Adds a page to rpc bulk buffer only if it passes
974  * through the filter.
975  */
976  if (pattr[seg] & rw && pattr[seg] & filter &&
977  !(pattr[seg] & PA_TRUNC)) {
978  delta += io_seg_size() + io_di_size(ioo);
979 
980  buf = buf_aux_chk_get(auxbvec, filter, seg,
981  read_in_write);
982 
983  if (buf == NULL) {
984  buf = bvec->ov_buf[seg];
985  /* Add the size for checksum generated for every segment, skip parity */
986  if ((filter == PA_DATA) && m0__obj_is_di_enabled(ioo) &&
987  (ioo->ioo_oo.oo_oc.oc_op.op_code == M0_OC_WRITE)) {
988  delta += ti->ti_cksum_seg_b_nob[seg];
989  fop_cksm_nob += ti->ti_cksum_seg_b_nob[seg];
990  }
991  }
992 
993  xfer_len = COUNT(ivec, seg);
994  offset = INDEX(ivec, seg);
995 
996  /*
997  * Accommodate multiple pages in a single
998  * net buffer segment, if they are consecutive
999  * pages.
1000  */
1001  segnext = seg + 1;
1002  while (segnext < SEG_NR(ivec) &&
1003  xfer_len < max_seg_size) {
1004  bufnext = buf_aux_chk_get(auxbvec,
1005  filter,
1006  segnext,
1007  read_in_write);
1008  if (bufnext == NULL)
1009  bufnext = bvec->ov_buf[segnext];
1010 
1011  if (buf + xfer_len == bufnext) {
1012  xfer_len += COUNT(ivec, ++seg);
1013  segnext = seg + 1;
1014  } else
1015  break;
1016  }
1017 
1018  rc = m0_rpc_bulk_buf_databuf_add(rbuf, buf,
1019  xfer_len,
1020  offset, ndom);
1021 
1022  if (rc == -EMSGSIZE) {
1023  /*
1024  * Fix the number of segments in
1025  * current m0_rpc_bulk_buf structure.
1026  */
1027  rbuf->bb_nbuf->nb_buffer.ov_vec.v_nr =
1028  bbsegs;
1029  rbuf->bb_zerovec.z_bvec.ov_vec.v_nr =
1030  bbsegs;
1031  bbsegs = 0;
1032 
1033  delta -= io_seg_size() + io_di_size(ioo);
1034 
1035  if ((filter == PA_DATA) && m0__obj_is_di_enabled(ioo) &&
1036  (ioo->ioo_oo.oo_oc.oc_op.op_code == M0_OC_WRITE)) {
1037  delta -= ti->ti_cksum_seg_b_nob[seg];
1038  fop_cksm_nob -= ti->ti_cksum_seg_b_nob[seg];
1039  }
1040 
1041  /*
1042  * Buffer must be 4k aligned to be
1043  * used by network hw
1044  */
1046  rc = bulk_buffer_add(irfop, ndom,
1047  &rbuf, &delta, maxsize);
1048  if (rc == -ENOSPC)
1049  break;
1050  else if (rc != 0)
1051  goto fini_fop;
1052 
1053  /*
1054  * Since current bulk buffer is full,
1055  * new bulk buffer is added and
1056  * existing segment is attempted to
1057  * be added to new bulk buffer.
1058  */
1059  continue;
1060  } else if (rc == 0)
1061  ++bbsegs;
1062  }
1063 
1064  ++seg;
1065  }
1066 
1067  if (m0_io_fop_byte_count(iofop) == 0) {
1068  irfop_fini(irfop);
1069  continue;
1070  }
1071 
1072  rbuf->bb_nbuf->nb_buffer.ov_vec.v_nr = bbsegs;
1073  rbuf->bb_zerovec.z_bvec.ov_vec.v_nr = bbsegs;
1074 
1075  rw_fop->crw_fid = ti->ti_fid;
1076  rw_fop->crw_pver = ioo->ioo_pver;
1077  rw_fop->crw_index = ti->ti_obj;
1078 
1079  /*
1080  * Use NOHOLE by default (i.e. return error for missing
1081  * units instead of zeros), unless we are in read-verify
1082  * mode or in the degraded-read mode. Otherwise, in case of
1083  * partially spanned parity groups (last groups, usually),
1084  * we will get a lot of bogus errors when all data units
1085  * of the group are read.
1086  *
1087  * Note: parity units are always present in the groups, even
1088  * in the partially spanned ones. So we always use NOHOLE
1089  * for them. Otherwise, the user may get corrupted data.
1090  */
1091  instance = m0__op_instance(&ioo->ioo_oo.oo_oc.oc_op);
1092  if (ioreq_sm_state(ioo) == IRS_READING && !read_in_write &&
1093  (filter == PA_PARITY ||
1094  (!instance->m0c_config->mc_is_read_verify &&
1095  !(ioo->ioo_flags & M0_OOF_HOLE))))
1096  rw_fop->crw_flags |= M0_IO_FLAG_NOHOLE;
1097 
1098  if (ioreq_sm_state(ioo) == IRS_DEGRADED_READING &&
1099  !read_in_write && filter == PA_PARITY)
1100  rw_fop->crw_flags |= M0_IO_FLAG_NOHOLE;
1101 
1102  /* Assign the checksum buffer for the target */
1103  if (filter == PA_DATA && m0__obj_is_di_enabled(ioo)) {
1104  if (m0_is_write_fop(&iofop->if_fop)) {
1105  M0_ASSERT(fop_cksm_nob != 0);
1106  /* RPC layer to free crw_di_data_cksum */
1107  if ( m0_buf_alloc(&rw_fop->crw_di_data_cksum, fop_cksm_nob) != 0 )
1108  goto fini_fop;
1109 
1110  memcpy( rw_fop->crw_di_data_cksum.b_addr,
1111  ti->ti_attrbuf.b_addr + dispatched_cksm_nob,
1112  fop_cksm_nob );
1113  dispatched_cksm_nob += fop_cksm_nob;
1114  M0_ASSERT(dispatched_cksm_nob <= ti->ti_cksum_copied);
1115  }
1116  else {
1117  rw_fop->crw_di_data_cksum.b_addr = NULL;
1118  rw_fop->crw_di_data_cksum.b_nob = 0;
1119  }
1120 
1121  rw_fop->crw_cksum_size = (read_in_write ||
1122  !m0__obj_is_di_enabled(ioo)) ?
1123  0 : ioo->ioo_attr.ov_vec.v_count[0];
1124  }
1125  else {
1126  rw_fop->crw_di_data_cksum.b_addr = NULL;
1127  rw_fop->crw_di_data_cksum.b_nob = 0;
1128  rw_fop->crw_cksum_size = 0;
1129  }
1130 
1131 
1132  if (ioo->ioo_flags & M0_OOF_SYNC)
1133  rw_fop->crw_flags |= M0_IO_FLAG_SYNC;
1134  io_attr = m0_io_attr(ioo);
1135  rw_fop->crw_lid = io_attr->oa_layout_id;
1136 
1137  /*
1138  * XXX(Sining): This is a bit tricky: m0_io_fop_prepare in
1139  * ioservice/io_fops.c calls io_fop_di_prepare which has only
1140  * file system in mind and uses super block and file related
1141  * information to do something (it returns 0 directly for user
1142  * space). This is not the case for Client kernel mode!!
1143  *
1144  * Simply return 0 just like it does for user space at this
1145  * moment.
1146  */
1147  rc = m0_io_fop_prepare(&iofop->if_fop);
1148  if (rc != 0)
1149  goto fini_fop;
1150 
1151  if (m0_is_read_fop(&iofop->if_fop))
1152  m0_atomic64_add(&xfer->nxr_rdbulk_nr,
1153  m0_rpc_bulk_buf_length(
1154  &iofop->if_rbulk));
1155 
1156  m0_atomic64_inc(&xfer->nxr_iofop_nr);
1157  iofops_tlist_add(&ti->ti_iofops, irfop);
1158 
1159  M0_LOG(M0_DEBUG,
1160  "fop=%p bulk=%p (%s) @"FID_F" io fops = %"PRIu64
1161  " read bulks = %" PRIu64 ", list_len=%d",
1162  &iofop->if_fop, &iofop->if_rbulk,
1163  m0_is_read_fop(&iofop->if_fop) ? "r" : "w",
1164  FID_P(&ti->ti_fid),
1165  m0_atomic64_get(&xfer->nxr_iofop_nr),
1166  m0_atomic64_get(&xfer->nxr_rdbulk_nr),
1167  (int)iofops_tlist_length(&ti->ti_iofops));
1168  }
1169 
1170  return M0_RC(0);
1171 
1172 fini_fop:
1173  irfop_fini(irfop);
1174 err:
1175  m0_tl_teardown(iofops, &ti->ti_iofops, irfop) {
1176  irfop_fini(irfop);
1177  }
1178 
1179  return M0_ERR(rc);
1180 }
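/*
 * [Editorial summary, not part of the original file:
 * target_ioreq_iofops_prepare() in brief -- for each segment matching
 * (filter, rw): allocate an ioreq_fop, add a bulk buffer, coalesce
 * consecutive pages into single net-buffer segments, and on -EMSGSIZE
 * close the current bulk buffer and retry the segment in a new one;
 * then attach DI checksums for writes, set NOHOLE/SYNC flags, call
 * m0_io_fop_prepare() and account nxr_iofop_nr/nxr_rdbulk_nr.]
 */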
1181 static int target_cob_fop_prepare(struct target_ioreq *ti);
1182 static const struct target_ioreq_ops tioreq_ops = {
1183  .tio_seg_add = target_ioreq_seg_add,
1184  .tio_iofops_prepare = target_ioreq_iofops_prepare,
1185  .tio_cc_fops_prepare = target_cob_fop_prepare,
1186 };
1187 
1188 static int target_cob_fop_prepare(struct target_ioreq *ti)
1189 {
1190  int rc;
1191  M0_ENTRY("ti = %p type = %d", ti, ti->ti_req_type);
1193 
1194  rc = ioreq_cc_fop_init(ti);
1195  return M0_RC(rc);
1196 }
1197 
1210 static int target_ioreq_init(struct target_ioreq *ti,
1211  struct nw_xfer_request *xfer,
1212  const struct m0_fid *cobfid,
1213  uint64_t ta_obj,
1214  struct m0_rpc_session *session,
1215  uint64_t size)
1216 {
1217  int rc;
1218  struct m0_op_io *ioo;
1219  struct m0_op *op;
1220  struct m0_client *instance;
1221  uint32_t nr;
1222 
1223  M0_PRE(cobfid != NULL);
1224  M0_ENTRY("target_ioreq %p, nw_xfer_request %p, "FID_F,
1225  ti, xfer, FID_P(cobfid));
1226 
1227  M0_PRE(ti != NULL);
1228  M0_PRE(xfer != NULL);
1229  M0_PRE(session != NULL);
1230  M0_PRE(size > 0);
1231 
1232  ti->ti_rc = 0;
1233  ti->ti_ops = &tioreq_ops;
1234  ti->ti_fid = *cobfid;
1235  ti->ti_nwxfer = xfer;
1236  ti->ti_dgvec = NULL;
1237  ti->ti_req_type = TI_NONE;
1238  M0_SET0(&ti->ti_cc_fop);
1239  ti->ti_cc_fop_inited = false;
1240 
1241  /*
1242  * Target object is usually in ONLINE state unless explicitly
1243  * told otherwise.
1244  */
1245  ti->ti_state = M0_PNDS_ONLINE;
1246  ti->ti_session = session;
1247  ti->ti_parbytes = 0;
1248  ti->ti_databytes = 0;
1249 
1250  ioo = bob_of(xfer, struct m0_op_io, ioo_nwxfer,
1251  &ioo_bobtype);
1252  op = &ioo->ioo_oo.oo_oc.oc_op;
1253  instance = m0__op_instance(op);
1254  M0_PRE(instance != NULL);
1255 
1256  ti->ti_obj = ta_obj;
1257 
1258  iofops_tlist_init(&ti->ti_iofops);
1259  tioreqht_tlink_init(ti);
1260  target_ioreq_bob_init(ti);
1261 
1262  nr = page_nr(size, ioo->ioo_obj);
1263  rc = m0_indexvec_alloc(&ti->ti_ivec, nr);
1264  if (rc != 0)
1265  goto out;
1266 
1267  if (op->op_code == M0_OC_READ) {
1268  rc = m0_indexvec_alloc(&ti->ti_goff_ivec, nr);
1269  ti->ti_goff_ivec.iv_vec.v_nr = 0;
1270  if (rc != 0)
1271  goto fail;
1272  }
1273 
1274  if (op->op_code == M0_OC_FREE) {
1275  rc = m0_indexvec_alloc(&ti->ti_trunc_ivec, nr);
1276  if (rc != 0)
1277  goto fail;
1278  }
1279 
1280  ti->ti_bufvec.ov_vec.v_nr = nr;
1281  M0_ALLOC_ARR(ti->ti_bufvec.ov_vec.v_count, nr);
1282  if (ti->ti_bufvec.ov_vec.v_count == NULL)
1283  goto fail;
1284 
1285  M0_ALLOC_ARR(ti->ti_bufvec.ov_buf, nr);
1286  if (ti->ti_bufvec.ov_buf == NULL)
1287  goto fail;
1288 
1289  /* Memory allocation for checksum computation */
1290  if (op->op_code == M0_OC_WRITE && m0__obj_is_di_enabled(ioo)) {
1291  uint32_t b_nob;
1292 
1293  ti->ti_attrbuf.b_addr = NULL;
1294  b_nob = (size * ioo->ioo_attr.ov_vec.v_count[0]) /
1295  layout_unit_size(pdlayout_get(ioo));
1296  rc = m0_buf_alloc(&ti->ti_attrbuf, b_nob);
1297  if (rc != 0)
1298  goto fail;
1299  ti->ti_cksum_copied = 0;
1300  M0_ALLOC_ARR(ti->ti_cksum_seg_b_nob, nr);
1301  }
1302  else {
1303  ti->ti_attrbuf.b_addr = NULL;
1304  ti->ti_attrbuf.b_nob = 0;
1305  ti->ti_cksum_copied = 0;
1306  ti->ti_cksum_seg_b_nob = NULL;
1307  }
1308  /*
1309  * For the READOLD method, an extra bufvec is needed to remember
1310  * the addresses of auxiliary buffers so those auxiliary
1311  * buffers can be used in rpc bulk transfer to avoid polluting
1312  * the real data buffers, which are the application's memory for IO,
1313  * in case the zero-copy method is in use.
1314  */
1315  ti->ti_auxbufvec.ov_vec.v_nr = nr;
1316  M0_ALLOC_ARR(ti->ti_auxbufvec.ov_vec.v_count, nr);
1317  if (ti->ti_auxbufvec.ov_vec.v_count == NULL)
1318  goto fail;
1319 
1320  M0_ALLOC_ARR(ti->ti_auxbufvec.ov_buf, nr);
1321  if (ti->ti_auxbufvec.ov_buf == NULL)
1322  goto fail;
1323 
1324  if (M0_FI_ENABLED("no-mem-err"))
1325  goto fail;
1326  M0_ALLOC_ARR(ti->ti_pageattrs, nr);
1327  if (ti->ti_pageattrs == NULL)
1328  goto fail;
1329 
1330  /*
1331  * This value is incremented when new segments are added to the
1332  * index vector in target_ioreq_seg_add().
1333  */
1334  ti->ti_ivec.iv_vec.v_nr = 0;
1335  ti->ti_trunc_ivec.iv_vec.v_nr = 0;
1336 
1338  return M0_RC(0);
1339 fail:
1340  m0_indexvec_free(&ti->ti_ivec);
1341  if (op->op_code == M0_OC_READ)
1342  m0_indexvec_free(&ti->ti_goff_ivec);
1343  if (op->op_code == M0_OC_FREE)
1344  m0_indexvec_free(&ti->ti_trunc_ivec);
1345  m0_free(ti->ti_bufvec.ov_vec.v_count);
1346  m0_free(ti->ti_bufvec.ov_buf);
1347  m0_free(ti->ti_auxbufvec.ov_vec.v_count);
1348  m0_free(ti->ti_auxbufvec.ov_buf);
1349 
1350 out:
1351  return M0_ERR(-ENOMEM);
1352 }
1353 
1366 static int nw_xfer_tioreq_get(struct nw_xfer_request *xfer,
1367  struct m0_fid *fid,
1368  uint64_t ta_obj,
1369  struct m0_rpc_session *session,
1370  uint64_t size,
1371  struct target_ioreq **out)
1372 {
1373  int rc = 0;
1374  struct target_ioreq *ti;
1375  struct m0_op_io *ioo;
1376  struct m0_op *op;
1377  struct m0_client *instance;
1378 
1379  M0_PRE(fid != NULL);
1380  M0_ENTRY("nw_xfer_request %p, "FID_F, xfer, FID_P(fid));
1381 
1382  M0_PRE(session != NULL);
1383  M0_PRE(out != NULL);
1385 
1386  ioo = bob_of(xfer, struct m0_op_io, ioo_nwxfer, &ioo_bobtype);
1387  op = &ioo->ioo_oo.oo_oc.oc_op;
1388  instance = m0__op_instance(op);
1389  M0_PRE(instance != NULL);
1390 
1391  ti = target_ioreq_locate(xfer, fid);
1392  if (ti == NULL) {
1393  M0_ALLOC_PTR(ti);
1394  if (ti == NULL)
1395  return M0_ERR(-ENOMEM);
1396 
1397  rc = target_ioreq_init(ti, xfer, fid, ta_obj, session, size);
1398  if (rc == 0) {
1399  tioreqht_htable_add(&xfer->nxr_tioreqs_hash, ti);
1400  M0_LOG(M0_INFO, "New target_ioreq %p added for "FID_F,
1401  ti, FID_P(fid));
1402  } else {
1403  m0_free(ti);
1404  return M0_ERR_INFO(rc, "target_ioreq_init() failed");
1405  }
1406  }
1407 
1408  if (ti->ti_dgvec == NULL && M0_IN(ioreq_sm_state(ioo),
1409  (IRS_DEGRADED_READING, IRS_DEGRADED_WRITING)))
1410  rc = dgmode_rwvec_alloc_init(ti);
1411 
1412  *out = ti;
1413 
1414  return M0_RC(rc);
1415 }
1416 
1421 static void databufs_set_dgw_mode(struct pargrp_iomap *iomap,
1422  struct m0_pdclust_layout *play,
1423  struct m0_ext *ext)
1424 {
1425  uint32_t row_start;
1426  uint32_t row_end;
1427  uint32_t row;
1428  uint32_t col;
1429  m0_bcount_t grp_off;
1430  struct data_buf *dbuf;
1431 
1432  grp_off = data_size(play) * iomap->pi_grpid;
1433  page_pos_get(iomap, ext->e_start, grp_off, &row_start, &col);
1434  page_pos_get(iomap, ext->e_end - 1, grp_off, &row_end, &col);
1435 
1436  for (row = row_start; row <= row_end; ++row) {
1437  dbuf = iomap->pi_databufs[row][col];
1438  if (dbuf->db_flags & PA_WRITE)
1439  dbuf->db_flags |= PA_DGMODE_WRITE;
1440  }
1441 }
1442 
1446 static void paritybufs_set_dgw_mode(struct pargrp_iomap *iomap,
1447  struct m0_op_io *ioo,
1448  uint64_t unit)
1449 {
1450  uint32_t row;
1451  uint32_t col;
1452  struct data_buf *dbuf;
1453  struct m0_pdclust_layout *play = pdlayout_get(ioo);
1454  uint64_t unit_size = layout_unit_size(play);
1455 
1456  parity_page_pos_get(iomap, unit * unit_size, &row, &col);
1457  for (; row < rows_nr(play, ioo->ioo_obj); ++row) {
1458  dbuf = iomap->pi_paritybufs[row][col];
1459  if (m0_pdclust_is_replicated(play) &&
1460  iomap->pi_databufs[row][0] == NULL)
1461  continue;
1462  if (dbuf->db_flags & PA_WRITE)
1463  dbuf->db_flags |= PA_DGMODE_WRITE;
1464  }
1465 }
1466 
1475 static int nw_xfer_io_distribute(struct nw_xfer_request *xfer)
1476 {
1477  bool do_cobs = true;
1478  int rc = 0;
1479  unsigned int op_code;
1480  uint64_t i;
1481  uint64_t unit;
1482  uint64_t unit_size;
1483  uint64_t count;
1484  uint64_t pgstart;
1485  struct m0_op *op;
1486  /* Extent representing a data unit. */
1487  struct m0_ext u_ext;
1488  /* Extent representing resultant extent. */
1489  struct m0_ext r_ext;
1490  /* Extent representing a segment from index vector. */
1491  struct m0_ext v_ext;
1492  struct m0_op_io *ioo;
1493  struct target_ioreq *ti;
1494  struct m0_ivec_cursor cursor;
1495  struct m0_pdclust_layout *play;
1496  enum m0_pdclust_unit_type unit_type;
1497  struct m0_pdclust_src_addr src;
1498  struct m0_pdclust_tgt_addr tgt;
1499  struct m0_bitmap units_spanned;
1500  struct pargrp_iomap *iomap;
1501  struct m0_client *instance;
1502 
1503  M0_ENTRY("nw_xfer_request %p", xfer);
1504 
1506 
1507  ioo = bob_of(xfer, struct m0_op_io, ioo_nwxfer, &ioo_bobtype);
1508  op = &ioo->ioo_oo.oo_oc.oc_op;
1509  op_code = op->op_code;
1510  play = pdlayout_get(ioo);
1511  unit_size = layout_unit_size(play);
1512  instance = m0__op_instance(op);
1513 
1514  /*
1515  * In non-oostore mode, all cobs are created on object creation.
1516  * In oostore mode, CROW is enabled and cobs are created automatically
1517  * at the server side on the 1st write request. But, because of SNS,
1518  * we need to create cobs for the spare units, and to make sure all cobs
1519  * are created for all units in the parity group touched by the update
1520  * request. See more below.
1521  */
1522  if (!m0__is_oostore(instance) || op_code == M0_OC_READ)
1523  do_cobs = false;
1524  /*
1525  * In replicated layout (N == 1), all units in the parity group are
1526  * always spanned. And there are no spare units, so...
1527  */
1528  if (ioo->ioo_pbuf_type == M0_PBUF_IND)
1529  do_cobs = false;
1530 
1531  if (do_cobs) {
1532  rc = m0_bitmap_init(&units_spanned, m0_pdclust_size(play));
1533  if (rc != 0)
1534  return M0_ERR(rc);
1535  }
1536 
1537  for (i = 0; i < ioo->ioo_iomap_nr; ++i) {
1538  count = 0;
1539  iomap = ioo->ioo_iomaps[i];
1540  pgstart = data_size(play) * iomap->pi_grpid;
1541  src.sa_group = iomap->pi_grpid;
1542 
1543  M0_LOG(M0_DEBUG, "xfer=%p map=%p [grpid=%" PRIu64 " state=%u]",
1544  xfer, iomap, iomap->pi_grpid, iomap->pi_state);
1545 
1546  if (do_cobs)
1547  m0_bitmap_reset(&units_spanned);
1548 
1549  /* traverse parity group ivec by units */
1550  m0_ivec_cursor_init(&cursor, &iomap->pi_ivec);
1551  while (!m0_ivec_cursor_move(&cursor, count)) {
1552  unit = (m0_ivec_cursor_index(&cursor) - pgstart) /
1553  unit_size;
1554 
1555  u_ext.e_start = pgstart + unit * unit_size;
1556  u_ext.e_end = u_ext.e_start + unit_size;
1557 
1558  v_ext.e_start = m0_ivec_cursor_index(&cursor);
1559  v_ext.e_end = v_ext.e_start +
1560  m0_ivec_cursor_step(&cursor);
1561 
1562  m0_ext_intersection(&u_ext, &v_ext, &r_ext);
1563  M0_ASSERT(m0_ext_is_valid(&r_ext));
1564  count = m0_ext_length(&r_ext);
1565 
1566  unit_type = m0_pdclust_unit_classify(play, unit);
1567  M0_ASSERT(unit_type == M0_PUT_DATA);
1568 
1569  if (ioreq_sm_state(ioo) == IRS_DEGRADED_WRITING)
1570  databufs_set_dgw_mode(iomap, play, &r_ext);
1571 
1572  src.sa_unit = unit;
1573  rc = xfer->nxr_ops->nxo_tioreq_map(xfer, &src, &tgt,
1574  &ti);
1575  if (rc != 0)
1576  goto err;
1577 
1578  ti->ti_ops->tio_seg_add(ti, &src, &tgt, r_ext.e_start,
1579  m0_ext_length(&r_ext), iomap);
1580  if (op_code == M0_OC_WRITE && do_cobs &&
1581  ti->ti_req_type == TI_READ_WRITE)
1582  m0_bitmap_set(&units_spanned, unit, true);
1583 
1584  }
1585 
1586  M0_ASSERT(ergo(M0_IN(op_code, (M0_OC_READ, M0_OC_WRITE)),
1587  m0_vec_count(&ioo->ioo_ext.iv_vec) ==
1588  m0_vec_count(&ioo->ioo_data.ov_vec)));
1589 
1590  /* process parity units */
1591  if (M0_IN(ioo->ioo_pbuf_type, (M0_PBUF_DIR,
1592  M0_PBUF_IND)) ||
1593  (ioreq_sm_state(ioo) == IRS_DEGRADED_READING &&
1594  iomap->pi_state == PI_DEGRADED)) {
1595 
1596  for (unit = 0; unit < layout_k(play); ++unit) {
1597  src.sa_unit = layout_n(play) + unit;
1598  M0_ASSERT(m0_pdclust_unit_classify(play,
1599  src.sa_unit) == M0_PUT_PARITY);
1600 
1601  rc = xfer->nxr_ops->nxo_tioreq_map(xfer, &src,
1602  &tgt, &ti);
1603  if (rc != 0)
1604  goto err;
1605 
1606  if (ioreq_sm_state(ioo) == IRS_DEGRADED_WRITING)
1607  paritybufs_set_dgw_mode(iomap, ioo,
1608  unit);
1609 
1610  if (op_code == M0_OC_WRITE && do_cobs)
1611  m0_bitmap_set(&units_spanned,
1612  src.sa_unit, true);
1613 
1614  ti->ti_ops->tio_seg_add(ti, &src, &tgt, pgstart,
1615  layout_unit_size(play),
1616  iomap);
1617  }
1618 
1619  if (!do_cobs)
1620  continue; /* to next iomap */
1621 
1622  /*
1623  * Create cobs for all units not spanned by the
1624  * IO request (data or spare units).
1625  *
1626  * If some data unit is not present in the group (hole
1627  * or not complete last group), we still need to create
1628  * cob for it. Otherwise, during SNS-repair the receiver
1629  * will wait forever for this unit without knowing that
1630  * its size is actually zero.
1631  */
1632  for (unit = 0; unit < m0_pdclust_size(play); ++unit) {
1633  if (m0_bitmap_get(&units_spanned, unit))
1634  continue;
1635 
1636  src.sa_unit = unit;
1637  rc = xfer->nxr_ops->nxo_tioreq_map(xfer, &src,
1638  &tgt, &ti);
1639  if (rc != 0)
1640  M0_LOG(M0_ERROR, "[%p] map=%p "
1641  "nxo_tioreq_map() failed: rc=%d",
1642  ioo, iomap, rc);
1643  /*
1644  * Skip the case when some other parity group
1645  * has spanned the particular target already.
1646  */
1647  if (ti->ti_req_type != TI_NONE)
1648  continue;
1649 
1650  ti->ti_req_type = TI_COB_CREATE;
1651  }
1652  }
1653  }
1654 
1655  if (do_cobs)
1656  m0_bitmap_fini(&units_spanned);
1657 
1658  M0_ASSERT(ergo(M0_IN(op_code, (M0_OC_READ, M0_OC_WRITE)),
1659  m0_vec_count(&ioo->ioo_ext.iv_vec) ==
1660  m0_vec_count(&ioo->ioo_data.ov_vec)));
1661 
1662  return M0_RC(0);
1663 err:
1664  m0_htable_for(tioreqht, ti, &xfer->nxr_tioreqs_hash) {
1665  tioreqht_htable_del(&xfer->nxr_tioreqs_hash, ti);
1666  target_ioreq_fini(ti);
1667  m0_free0(&ti);
1668  } m0_htable_endfor;
1669 
1670  return M0_ERR(rc);
1671 }
1672 
1682 static void nw_xfer_req_complete(struct nw_xfer_request *xfer, bool rmw)
1683 {
1684  struct m0_client *instance;
1685  struct m0_op_io *ioo;
1686  struct target_ioreq *ti;
1687  struct ioreq_fop *irfop;
1688  struct m0_fop *fop;
1689  struct m0_rpc_item *item;
1690 
1691  M0_ENTRY("nw_xfer_request %p, rmw %s", xfer,
1692  rmw ? (char *)"true" : (char *)"false");
1693 
1694  M0_PRE(xfer != NULL);
1695  xfer->nxr_state = NXS_COMPLETE;
1696  ioo = bob_of(xfer, struct m0_op_io, ioo_nwxfer, &ioo_bobtype);
1697  instance = m0__op_instance(&ioo->ioo_oo.oo_oc.oc_op);
1698 
1700  /*
1701  * Ignore the following invariant check, as there exist cases in which
1702  * io fops are created successfully for some target services but fail
1703  * for others in nxo_dispatch (for example, when the session/connection
1704  * to a service is invalid), resulting in a 'dirty' op in which
1705  * nr_iofops != 0 and nxr_state == NXS_COMPLETE.
1706  *
1707  * M0_PRE_EX(m0_op_io_invariant(ioo));
1708  */
1709 
1710  m0_htable_for(tioreqht, ti, &xfer->nxr_tioreqs_hash) {
1711 
1712  /* Maintains only the first error encountered. */
1713  if (xfer->nxr_rc == 0)
1714  xfer->nxr_rc = ti->ti_rc;
1715 
1716  xfer->nxr_bytes += ti->ti_databytes;
1717  ti->ti_databytes = 0;
1718 
1719  if (m0__is_oostore(instance) &&
1720  ti->ti_req_type == TI_COB_CREATE &&
1721  ioreq_sm_state(ioo) == IRS_WRITE_COMPLETE) {
1722  ti->ti_req_type = TI_NONE;
1723  continue;
1724  }
1725 
1726  if (m0__is_oostore(instance) &&
1727  ti->ti_req_type == TI_COB_TRUNCATE &&
1728  ioreq_sm_state(ioo) == IRS_TRUNCATE_COMPLETE) {
1729  ti->ti_req_type = TI_NONE;
1730  }
1731 
1732  m0_tl_teardown(iofops, &ti->ti_iofops, irfop) {
1733  fop = &irfop->irf_iofop.if_fop;
1734  item = &fop->f_item;
1735  M0_LOG(M0_DEBUG, "[%p] fop %p, ref %llu, "
1736  "item %p[%u], ri_error %d, ri_state %d",
1737  ioo, fop,
1738  (unsigned long long)m0_ref_read(&fop->f_ref),
1739  item, item->ri_type->rit_opcode, item->ri_error,
1740  item->ri_sm.sm_state);
1741 
1742  M0_ASSERT(ergo(item->ri_sm.sm_state !=
1743  M0_RPC_ITEM_UNINITIALISED,
1744  item->ri_rmachine != NULL));
1745  if (item->ri_rmachine == NULL) {
1746  M0_ASSERT(ti->ti_session != NULL);
1749  }
1750 
1751  M0_LOG(M0_DEBUG,
1752  "[%p] item %p, target fid "FID_F"fop %p, "
1753  "ref %llu", ioo, item, FID_P(&ti->ti_fid), fop,
1754  (unsigned long long)m0_ref_read(&fop->f_ref));
1755  m0_fop_put0_lock(fop);
1756  }
1757 
1758  } m0_htable_endfor;
1759 
1762  M0_LOG(M0_INFO, "Number of bytes %s = %"PRIu64,
1763  ioreq_sm_state(ioo) == IRS_READ_COMPLETE ? "read" : "written",
1764  xfer->nxr_bytes);
1765 
1766  /*
1767  * This function is invoked from 4 states - IRS_READ_COMPLETE,
1768  * IRS_WRITE_COMPLETE, IRS_DEGRADED_READING, IRS_DEGRADED_WRITING.
1769  * And the state change is applicable only for healthy state IO,
1770  * meaning for states IRS_READ_COMPLETE and IRS_WRITE_COMPLETE.
1771  */
1772  if (M0_IN(ioreq_sm_state(ioo),
1773  (IRS_READ_COMPLETE,
1774  IRS_WRITE_COMPLETE))) {
1775  if (!rmw)
1776  ioreq_sm_state_set_locked(ioo, IRS_REQ_COMPLETE);
1777  else if (ioreq_sm_state(ioo) == IRS_READ_COMPLETE)
1778  xfer->nxr_bytes = 0;
1779  }
1780 
1781  /*
1782  * nxo_dispatch may fail if connections to services have not been
1783  * established yet. In this case, ioo_rc contains error code and
1784  * xfer->nxr_rc == 0, don't overwrite ioo_rc.
1785  *
1786  * TODO: merge this with op->op_sm.sm_rc ?
1787  */
1788  if (xfer->nxr_rc != 0)
1789  ioo->ioo_rc = xfer->nxr_rc;
1790 
1791  M0_LEAVE();
1792 }
1793 
1802 static int nw_xfer_req_dispatch(struct nw_xfer_request *xfer)
1803 {
1804  int rc = 0;
1805  int post_error = 0;
1806  int ri_error;
1807  uint64_t nr_dispatched = 0;
1808  struct ioreq_fop *irfop;
1809  struct m0_op_io *ioo;
1810  struct m0_op *op;
1811  struct target_ioreq *ti;
1812  struct m0_client *instance;
1813 
1814  M0_ENTRY();
1815 
1816  M0_PRE(xfer != NULL);
1817  ioo = bob_of(xfer, struct m0_op_io, ioo_nwxfer, &ioo_bobtype);
1818  op = &ioo->ioo_oo.oo_oc.oc_op;
1819  instance = m0__op_instance(op);
1820  M0_PRE(instance != NULL);
1821 
1822  to_op_io_map(op, ioo);
1823 
1824  /* FOPs' preparation */
1825  m0_htable_for(tioreqht, ti, &xfer->nxr_tioreqs_hash) {
1826  if (ti->ti_state != M0_PNDS_ONLINE) {
1827  M0_LOG(M0_INFO, "Skipped iofops prepare for "FID_F,
1828  FID_P(&ti->ti_fid));
1829  continue;
1830  }
1831  ti->ti_start_time = m0_time_now();
1832  if (ti->ti_req_type == TI_COB_CREATE &&
1833  ioreq_sm_state(ioo) == IRS_WRITING) {
1834  rc = ti->ti_ops->tio_cc_fops_prepare(ti);
1835  if (rc != 0)
1836  return M0_ERR_INFO(rc, "[%p] cob create fop"
1837  "failed", ioo);
1838  continue;
1839  }
1840 
1841  if (ioreq_sm_state(ioo) == IRS_TRUNCATE) {
1842  if (ti->ti_req_type == TI_READ_WRITE) {
1843  ti->ti_req_type = TI_COB_TRUNCATE;
1844  rc = ti->ti_ops->tio_cc_fops_prepare(ti);
1845  if (rc != 0)
1846  return M0_ERR(rc);
1847  }
1848  continue;
1849  }
1850  rc = ti->ti_ops->tio_iofops_prepare(ti, PA_DATA) ?:
1851  ti->ti_ops->tio_iofops_prepare(ti, PA_PARITY);
1852  if (rc != 0)
1853  return M0_ERR(rc);
1854  } m0_htable_endfor;
1855 
1856  /* Submit io FOPs */
1857  m0_htable_for(tioreqht, ti, &xfer->nxr_tioreqs_hash) {
1858  struct m0_rpc_item *item = &ti->ti_cc_fop.crf_fop.f_item;
1859 
1860  /* Skips the target device if it is not online. */
1861  if (ti->ti_state != M0_PNDS_ONLINE) {
1862  M0_LOG(M0_INFO, "Skipped device "FID_F,
1863  FID_P(&ti->ti_fid));
1864  continue;
1865  }
1866  if (ti->ti_req_type == TI_COB_CREATE &&
1867  ioreq_sm_state(ioo) == IRS_WRITING) {
1868  /*
1869  * An error returned by rpc post has been ignored.
1870  * It will be handled in the respective bottom half.
1871  */
1872  M0_LOG(M0_DEBUG, "item="ITEM_FMT" osr_xid=%"PRIu64,
1874  rc = m0_rpc_post(item);
1875  M0_CNT_INC(nr_dispatched);
1876  m0_op_io_to_rpc_map(ioo, item);
1877  continue;
1878  }
1879  if (op->op_code == M0_OC_FREE &&
1880  ioreq_sm_state(ioo) == IRS_TRUNCATE &&
1881  ti->ti_req_type == TI_COB_TRUNCATE) {
1882  if (ti->ti_trunc_ivec.iv_vec.v_nr > 0) {
1883  /*
1884  * An error returned by rpc post has been
1885  * ignored. It will be handled in the
1886  * io_bottom_half().
1887  */
1888  M0_LOG(M0_DEBUG, "item="ITEM_FMT
1889  " osr_xid=%"PRIu64,
1890  ITEM_ARG(item),
1891  item->ri_header.osr_xid);
1892  rc = m0_rpc_post(item);
1893  M0_CNT_INC(nr_dispatched);
1894  m0_op_io_to_rpc_map(ioo, item);
1895  }
1896  continue;
1897  }
1898  m0_tl_for (iofops, &ti->ti_iofops, irfop) {
1899  rc = ioreq_fop_async_submit(&irfop->irf_iofop,
1900  ti->ti_session);
1901  ri_error = irfop->irf_iofop.if_fop.f_item.ri_error;
1902  M0_LOG(M0_DEBUG, "[%p] Submitted fop for device "
1903  FID_F"@%p, item %p, fop_nr=%llu, rc=%d, "
1904  "ri_error=%d", ioo, FID_P(&ti->ti_fid), irfop,
1905  &irfop->irf_iofop.if_fop.f_item,
1906  (unsigned long long)
1907  m0_atomic64_get(&xfer->nxr_iofop_nr),
1908  rc, ri_error);
1909 
1910  /* XXX: noisy */
1911  m0_op_io_to_rpc_map(ioo,
1912  &irfop->irf_iofop.if_fop.f_item);
1913 
1914  if (rc != 0)
1915  goto out;
1916  m0_atomic64_inc(&instance->m0c_pending_io_nr);
1917  if (ri_error == 0)
1918  M0_CNT_INC(nr_dispatched);
1919  else if (post_error == 0)
1920  post_error = ri_error;
1921  } m0_tl_endfor;
1922  } m0_htable_endfor;
1923 
1924 out:
1925  if (rc == 0 && nr_dispatched == 0 && post_error == 0) {
1926  /* No fop has been dispatched.
1927  *
1928  * This might happen in dgmode reading:
1929  * In 'parity verify' mode, a whole parity group, including
1930  * data and parity units are all read from ioservices.
1931  * If some units failed to read, no need to read extra unit.
1932  * The units needed for recovery are ready.
1933  */
1935  M0_ASSERT(op->op_code == M0_OC_READ &&
1936  instance->m0c_config->mc_is_read_verify);
1938  } else if (rc == 0)
1939  xfer->nxr_state = NXS_INFLIGHT;
1940 
1941  M0_LOG(M0_DEBUG, "[%p] nxr_iofop_nr %llu, nxr_rdbulk_nr %llu, "
1942  "nr_dispatched %llu", ioo,
1943  (unsigned long long)m0_atomic64_get(&xfer->nxr_iofop_nr),
1944  (unsigned long long)m0_atomic64_get(&xfer->nxr_rdbulk_nr),
1945  (unsigned long long)nr_dispatched);
1946 
1947  return M0_RC(rc);
1948 }
1949 
2009 static bool should_spare_be_mapped(struct m0_op_io *ioo,
2010  enum m0_pool_nd_state dev_state)
2011 {
2012  return (M0_IN(ioreq_sm_state(ioo),
2013  (IRS_READING, IRS_DEGRADED_READING)) &&
2014  dev_state == M0_PNDS_SNS_REPAIRED)
2015  ||
2016  (ioreq_sm_state(ioo) == IRS_DEGRADED_WRITING &&
2017  (dev_state == M0_PNDS_SNS_REPAIRED ||
2018  (dev_state == M0_PNDS_SNS_REPAIRING &&
2019  ioo->ioo_sns_state == SRS_REPAIR_DONE)));
2020 
2021 }
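/*
 * [Editorial note, not part of the original file: in short, a unit is
 * mapped to its spare when reading from a device that has already been
 * repaired, or when writing in degraded mode and the unit's repair is
 * effectively complete (device repaired, or repair in progress with this
 * object already done, i.e. SRS_REPAIR_DONE).]
 */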
2022 
2034 static int nw_xfer_tioreq_map(struct nw_xfer_request *xfer,
2035  const struct m0_pdclust_src_addr *src,
2036  struct m0_pdclust_tgt_addr *tgt,
2037  struct target_ioreq **tio)
2038 {
2039  int rc;
2040  struct m0_fid tfid;
2041  const struct m0_fid *gfid;
2042  struct m0_op_io *ioo;
2043  struct m0_rpc_session *session;
2044  struct m0_pdclust_layout *play;
2045  struct m0_pdclust_instance *play_instance;
2046  enum m0_pool_nd_state dev_state;
2047  enum m0_pool_nd_state dev_state_prev;
2048  uint32_t spare_slot;
2049  uint32_t spare_slot_prev;
2050  struct m0_pdclust_src_addr spare;
2051  struct m0_poolmach *pm;
2052 
2053  M0_ENTRY("nw_xfer_request=%p", xfer);
2054 
2055  M0_PRE(xfer != NULL);
2056  M0_PRE(src != NULL);
2057  M0_PRE(tgt != NULL);
2058  M0_PRE(tio != NULL);
2059 
2060  ioo = bob_of(xfer, struct m0_op_io, ioo_nwxfer, &ioo_bobtype);
2061 
2062  play = pdlayout_get(ioo);
2063  M0_PRE(play != NULL);
2064  play_instance = pdlayout_instance(layout_instance(ioo));
2065  M0_PRE(play_instance != NULL);
2066 
2067  spare = *src;
2068  m0_fd_fwd_map(play_instance, src, tgt);
2069  tfid = target_fid(ioo, tgt);
2070  M0_LOG(M0_DEBUG, "src_id[%" PRIu64 ":%" PRIu64 "] -> "
2071  "dest_id[%" PRIu64 ":%" PRIu64 "] @ tfid="FID_F,
2073  FID_P(&tfid));
2074 
2075  pm = ioo_to_poolmach(ioo);
2076  M0_ASSERT(pm != NULL);
2077  rc = m0_poolmach_device_state(pm, tgt->ta_obj, &dev_state);
2078  if (rc != 0)
2079  return M0_RC(rc);
2080 
2081  if (M0_FI_ENABLED("poolmach_client_repaired_device1")) {
2082  if (tfid.f_container == 1)
2083  dev_state = M0_PNDS_SNS_REPAIRED;
2084  }
2085 
2086  M0_LOG(M0_INFO, "[%p] tfid="FID_F" dev_state=%d\n",
2087  ioo, FID_P(&tfid), dev_state);
2088 
2089  if (should_spare_be_mapped(ioo, dev_state)) {
2090  gfid = &ioo->ioo_oo.oo_fid;
2091  rc = m0_sns_repair_spare_map(pm, gfid, play, play_instance,
2092  src->sa_group, src->sa_unit,
2093  &spare_slot, &spare_slot_prev);
2094  if (rc != 0)
2095  return M0_RC(rc);
2096 
2097  /* Check if there is an effective-failure. */
2098  if (spare_slot_prev != src->sa_unit) {
2099  spare.sa_unit = spare_slot_prev;
2100  m0_fd_fwd_map(play_instance, &spare, tgt);
2101  tfid = target_fid(ioo, tgt);
2102  rc = m0_poolmach_device_state(pm, tgt->ta_obj,
2103  &dev_state_prev);
2104  if (rc != 0)
2105  return M0_RC(rc);
2106  } else
2107  dev_state_prev = M0_PNDS_SNS_REPAIRED;
2108 
2109  if (dev_state_prev == M0_PNDS_SNS_REPAIRED) {
2110  spare.sa_unit = spare_slot;
2111  m0_fd_fwd_map(play_instance, &spare, tgt);
2112  tfid = target_fid(ioo, tgt);
2113  }
2114  dev_state = dev_state_prev;
2115  }
2116 
2117  session = target_session(ioo, tfid);
2118 
2119  rc = nw_xfer_tioreq_get(xfer, &tfid, tgt->ta_obj, session,
2120  layout_unit_size(play) * ioo->ioo_iomap_nr, tio);
2121 
2122  if (M0_IN(ioreq_sm_state(ioo), (IRS_DEGRADED_READING,
2123  IRS_DEGRADED_WRITING)) &&
2124  dev_state != M0_PNDS_SNS_REPAIRED)
2125  (*tio)->ti_state = dev_state;
2126 
2127  return M0_RC(rc);
2128 }
2129 
2130 static const struct nw_xfer_ops xfer_ops = {
2131  .nxo_distribute = nw_xfer_io_distribute,
2132  .nxo_complete = nw_xfer_req_complete,
2133  .nxo_dispatch = nw_xfer_req_dispatch,
2134  .nxo_tioreq_map = nw_xfer_tioreq_map,
2135 };
2136 
2137 M0_INTERNAL void nw_xfer_request_init(struct nw_xfer_request *xfer)
2138 {
2139  uint64_t bucket_nr;
2140  struct m0_op_io *ioo;
2141  struct m0_pdclust_layout *play;
2142 
2143  M0_ENTRY("nw_xfer_request : %p", xfer);
2144 
2145  M0_PRE(xfer != NULL);
2146 
2147  ioo = bob_of(xfer, struct m0_op_io, ioo_nwxfer, &ioo_bobtype);
2148  nw_xfer_request_bob_init(xfer);
2149  xfer->nxr_rc = 0;
2150  xfer->nxr_bytes = 0;
2151  m0_atomic64_set(&xfer->nxr_iofop_nr, 0);
2152  m0_atomic64_set(&xfer->nxr_rdbulk_nr, 0);
2153  xfer->nxr_state = NXS_INITIALIZED;
2154  xfer->nxr_ops = &xfer_ops;
2155  m0_mutex_init(&xfer->nxr_lock);
2156 
2157  play = pdlayout_get(ioo);
2158  bucket_nr = layout_n(play) + 2 * layout_k(play);
2159  xfer->nxr_rc = tioreqht_htable_init(&xfer->nxr_tioreqs_hash,
2160  bucket_nr);
2161 
2163  M0_LEAVE();
2164 }
2165 
2166 M0_INTERNAL void nw_xfer_request_fini(struct nw_xfer_request *xfer)
2167 {
2168  M0_ENTRY("nw_xfer_request : %p", xfer);
2169 
2170  M0_PRE(xfer != NULL);
2171  M0_PRE(M0_IN(xfer->nxr_state, (NXS_COMPLETE, NXS_INITIALIZED)));
2173  M0_LOG(M0_DEBUG, "nw_xfer_request : %p, nxr_rc = %d",
2174  xfer, xfer->nxr_rc);
2175 
2176  xfer->nxr_ops = NULL;
2177  m0_mutex_fini(&xfer->nxr_lock);
2178  nw_xfer_request_bob_fini(xfer);
2179  tioreqht_htable_fini(&xfer->nxr_tioreqs_hash);
2180 
2181  M0_LEAVE();
2182 }
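/*
 * [Editorial sketch, not part of the original file: the lifecycle of a
 * transfer request as driven by the IO request state machine (see
 * motr/io_req.c). Names below refer to the ops table above:]
 */
#if 0
nw_xfer_request_init(&ioo->ioo_nwxfer);
rc = ioo->ioo_nwxfer.nxr_ops->nxo_distribute(&ioo->ioo_nwxfer);
if (rc == 0)
        rc = ioo->ioo_nwxfer.nxr_ops->nxo_dispatch(&ioo->ioo_nwxfer);
/* ... bottom halves drain nxr_iofop_nr/nxr_rdbulk_nr ... */
ioo->ioo_nwxfer.nxr_ops->nxo_complete(&ioo->ioo_nwxfer, rmw);
nw_xfer_request_fini(&ioo->ioo_nwxfer);
#endif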
2183 
2184 #undef M0_TRACE_SUBSYSTEM
2185 
2186 /*
2187  * Local variables:
2188  * c-indentation-style: "K&R"
2189 
2190  * c-basic-offset: 8
2191  * tab-width: 8
2192  * fill-column: 80
2193  * scroll-step: 1
2194  * End:
2195  */
2196 /*
2197  * vim: tabstop=8 shiftwidth=8 noexpandtab textwidth=80 nowrap
2198  */
Definition: pool_machine.c:816
static int void * buf
Definition: dir.c:1019
static struct m0_rpc_session session
Definition: formation2.c:38
#define M0_SET0(obj)
Definition: misc.h:64
M0_INTERNAL m0_bcount_t m0_extent_get_checksum_nob(m0_bindex_t ext_start, m0_bindex_t ext_length, m0_bindex_t unit_sz, m0_bcount_t cs_size)
Definition: cksum_utils.c:85
Definition: di.h:92
M0_ADDB2_ADD(M0_AVI_FS_CREATE, new_fid.f_container, new_fid.f_key, mode, rc)
M0_INTERNAL int m0_fid_cmp(const struct m0_fid *fid0, const struct m0_fid *fid1)
Definition: fid.c:170
struct m0_fid crw_pver
Definition: io_fops.h:389
static struct m0_rpc_item * item
Definition: item.c:56
void ** ov_buf
Definition: vec.h:149
void target_ioreq_cancel(struct target_ioreq *ti)
Definition: io_nw_xfer.c:423
int ioo_addb2_mapped
const char * bt_name
Definition: bob.h:73
M0_INTERNAL uint64_t m0__obj_lid(struct m0_obj *obj)
Definition: obj.c:126
struct m0_indexvec pi_ivec
Definition: pg.h:340
Definition: sock.c:887
static m0_bcount_t count
Definition: xcode.c:167
#define ITEM_ARG(item)
Definition: item.h:618
M0_INTERNAL void m0_rpc_bulk_buflist_empty(struct m0_rpc_bulk *rbulk)
Definition: bulk.c:279
struct m0_sm ioo_sm
struct m0_buf crw_di_data_cksum
Definition: io_fops.h:416
struct target_ioreq * dr_tioreq
#define SEG_NR(ivec)
Definition: file.c:393
enum m0_pool_nd_state ti_state
#define m0_tl_endfor
Definition: tlist.h:700
struct m0_fid fid
Definition: di.c:46
struct m0_vec iv_vec
Definition: vec.h:139
M0_INTERNAL int ioreq_fop_init(struct ioreq_fop *fop, struct target_ioreq *ti, enum page_attr pattr)
Definition: io_req_fop.c:971
return M0_RC(rc)
op
Definition: libdemo.c:64
unsigned int op_code
Definition: client.h:656
static struct target_ioreq * target_ioreq_locate(struct nw_xfer_request *xfer, struct m0_fid *fid)
Definition: io_nw_xfer.c:435
static uint32_t unit_size
Definition: layout.c:53
#define M0_ENTRY(...)
Definition: trace.h:170
uint64_t osr_xid
Definition: onwire.h:105
m0_bindex_t * iv_index
Definition: vec.h:141
Definition: filter.py:1
int opcode
Definition: crate.c:301
int m0_obj_layout_id_to_unit_size(uint64_t layout_id)
Definition: obj.c:851
int i
Definition: dir.c:1033
void m0_fop_rpc_machine_set(struct m0_fop *fop, struct m0_rpc_machine *mach)
Definition: fop.c:351
M0_INTERNAL m0_bcount_t m0_rpc_session_get_max_item_payload_size(const struct m0_rpc_session *session)
Definition: session.c:775
m0_pdclust_unit_type
Definition: pdclust.h:89
enum page_attr db_flags
const struct m0_bob_type ioo_bobtype
Definition: io_req.c:153
#define PRIu64
Definition: types.h:58
struct m0_rpc_machine * c_rpc_machine
Definition: conn.h:278
struct m0_fid crw_fid
Definition: io_fops.h:383
Definition: client.h:647
static uint32_t rows_nr(struct m0_pdclust_layout *play)
Definition: file.c:691
M0_INTERNAL bool m0_ext_is_valid(const struct m0_ext *ext)
Definition: ext.c:90
struct nw_xfer_request ioo_nwxfer
#define M0_ERR_INFO(rc, fmt,...)
Definition: trace.h:215
int(* nxo_distribute)(struct nw_xfer_request *xfer)
uint64_t ti_parbytes
return M0_ERR(-EOPNOTSUPP)
static int target_ioreq_init(struct target_ioreq *ti, struct nw_xfer_request *xfer, const struct m0_fid *cobfid, uint64_t ta_obj, struct m0_rpc_session *session, uint64_t size)
Definition: io_nw_xfer.c:1210
M0_INTERNAL bool nw_xfer_request_invariant(const struct nw_xfer_request *xfer)
Definition: io_nw_xfer.c:340
struct m0_op_obj ioo_oo
M0_INTERNAL struct m0_poolmach * ioo_to_poolmach(struct m0_op_io *ioo)
Definition: io.c:75
struct m0_fop if_fop
Definition: io_fops.h:172
Definition: trace.h:482
M0_INTERNAL struct m0_client * m0__op_instance(const struct m0_op *op)
Definition: client.c:236
Definition: cnt.h:36
static int key
Definition: locality.c:283
enum sns_repair_state ioo_sns_state
struct m0_indexvec ioo_ext
void m0_rpc_item_cancel(struct m0_rpc_item *item)
Definition: item.c:932
M0_INTERNAL void ioreq_sm_state_set_locked(struct m0_op_io *ioo, int state)
Definition: io_req.c:193
#define m0_tl_teardown(name, head, obj)
Definition: tlist.h:708
int(* tio_cc_fops_prepare)(struct target_ioreq *ti)
static int nw_xfer_tioreq_get(struct nw_xfer_request *xfer, struct m0_fid *fid, uint64_t ta_obj, struct m0_rpc_session *session, uint64_t size, struct target_ioreq **out)
Definition: io_nw_xfer.c:1366
static uint32_t io_di_size(struct m0_op_io *ioo)
Definition: io_nw_xfer.c:98
struct m0_net_buffer * bb_nbuf
Definition: bulk.h:177
M0_INTERNAL void nw_xfer_request_fini(struct nw_xfer_request *xfer)
Definition: io_nw_xfer.c:2166
enum pargrp_iomap_state pi_state
#define m0_free0(pptr)
Definition: memory.h:77
void(* tio_seg_add)(struct target_ioreq *ti, const struct m0_pdclust_src_addr *src, const struct m0_pdclust_tgt_addr *tgt, m0_bindex_t gob_offset, m0_bcount_t count, struct pargrp_iomap *map)
static uint64_t page_nr(m0_bcount_t size)
Definition: file.c:492
uint32_t * ti_cksum_seg_b_nob
Definition: pg.h:814
M0_INTERNAL size_t m0_io_fop_size_get(struct m0_fop *fop)
Definition: io_fops.c:1589
struct m0_net_transfer_mc rm_tm
Definition: rpc_machine.h:88
m0_bcount_t b_nob
Definition: buf.h:38
static uint64_t page_id(m0_bindex_t offset)
Definition: file.c:686
#define M0_ASSERT(cond)
struct m0_fid ioo_pver
m0_time_t m0_time_now(void)
Definition: time.c:134
struct m0_rpc_item_header2 ri_header
Definition: item.h:193
m0_pool_nd_state
Definition: pool_machine.h:57
static struct m0_pdclust_instance * pdlayout_instance(const struct m0_layout_instance *li)
Definition: file.c:504
const struct nw_xfer_ops * nxr_ops
uint64_t ta_frame
Definition: pdclust.h:254
#define m0_htable_forall(name, var, htable,...)
Definition: hash.h:465
#define bob_of(ptr, type, field, bt)
Definition: bob.h:140
struct m0_bufvec ioo_data
static struct m0_bufvec bvec
Definition: xcode.c:169
M0_INTERNAL int ioreq_fop_async_submit(struct m0_io_fop *iofop, struct m0_rpc_session *session)
Definition: io_req_fop.c:666
static struct m0_stob_domain * dom
Definition: storage.c:38
#define ITEM_FMT
Definition: item.h:617
struct m0_varr dr_pageattrs
uint64_t pi_grpid
static void databufs_set_dgw_mode(struct pargrp_iomap *iomap, struct m0_pdclust_layout *play, struct m0_ext *ext)
Definition: io_nw_xfer.c:1421
struct m0_obj * ioo_obj
M0_INTERNAL bool ioreq_fop_invariant(const struct ioreq_fop *fop)
Definition: io_req_fop.c:62
M0_INTERNAL struct m0_obj_attr * m0_io_attr(struct m0_op_io *ioo)
Definition: utils.c:302
static int nw_xfer_req_dispatch(struct nw_xfer_request *xfer)
Definition: io_nw_xfer.c:1802
enum m0_pbuf_type ioo_pbuf_type
static void irfop_fini(struct ioreq_fop *irfop)
Definition: io_nw_xfer.c:810
const struct m0_rpc_item_type * ri_type
Definition: item.h:200
static uint64_t layout_unit_size(const struct m0_pdclust_layout *play)
Definition: file.c:525
M0_INTERNAL int m0_buf_alloc(struct m0_buf *buf, size_t size)
Definition: buf.c:43
struct m0_sm_group * sm_grp
Definition: sm.h:321
m0_bcount_t crw_cksum_size
Definition: io_fops.h:413
M0_INTERNAL uint32_t m0_fid_cob_device_id(const struct m0_fid *cob_fid)
Definition: fid_convert.c:81
M0_INTERNAL void m0_mutex_init(struct m0_mutex *mutex)
Definition: mutex.c:35
const struct m0_bob_type tioreq_bobtype
Definition: io_nw_xfer.c:49
static void to_op_io_map(const struct m0_op *op, struct m0_op_io *ioo)
Definition: io_nw_xfer.c:70
uint64_t f_container
Definition: fid.h:39
#define M0_POST(cond)
struct m0_0vec bb_zerovec
Definition: bulk.h:179
uint32_t ioo_flags
struct m0_fid oo_fid
M0_INTERNAL void m0_bitmap_set(struct m0_bitmap *map, size_t idx, bool val)
Definition: bitmap.c:139
uint32_t v_nr
Definition: vec.h:51
static m0_bindex_t offset
Definition: dump.c:173
struct m0_indexvec_varr dr_bufvec
struct m0_htable nxr_tioreqs_hash
M0_INTERNAL void m0_buf_free(struct m0_buf *buf)
Definition: buf.c:55
m0_bcount_t * v_count
Definition: vec.h:53
struct m0_rpc_session * ti_session
static const struct m0_di_ops di_ops[M0_DI_NR]
Definition: di.c:128
M0_INTERNAL bool m0_ivec_cursor_move(struct m0_ivec_cursor *cur, m0_bcount_t count)
Definition: vec.c:718
static uint64_t min64u(uint64_t a, uint64_t b)
Definition: arith.h:66
struct m0_tl ti_iofops
struct m0_op_common oo_oc
static void page_pos_get(struct pargrp_iomap *map, m0_bindex_t index, uint32_t *row, uint32_t *col)
Definition: file.c:725
M0_INTERNAL bool m0_ext_is_in(const struct m0_ext *ext, m0_bindex_t index)
Definition: ext.c:48
#define FID_P(f)
Definition: fid.h:77
static uint64_t data_size(const struct m0_pdclust_layout *play)
Definition: file.c:550
static const struct nw_xfer_ops xfer_ops
Definition: io_nw_xfer.c:2130
M0_INTERNAL bool addr_is_network_aligned(void *addr)
Definition: utils.c:29
M0_INTERNAL struct m0_op * m0__ioo_to_op(struct m0_op_io *ioo)
Definition: client.c:249
M0_INTERNAL m0_bcount_t m0_vec_count(const struct m0_vec *vec)
Definition: vec.c:53
const struct target_ioreq_ops * ti_ops
static const uint64_t k2
Definition: hash_fnc.c:35
struct m0_bufvec z_bvec
Definition: vec.h:514
static uint32_t layout_n(const struct m0_pdclust_layout *play)
Definition: file.c:515
static struct m0_pdclust_layout * pdlayout_get(const struct io_request *req)
Definition: file.c:510
static int64_t m0_atomic64_get(const struct m0_atomic64 *a)
const struct m0_bob_type nwxfer_bobtype
Definition: io_nw_xfer.c:48
M0_INTERNAL uint32_t m0_pdclust_size(const struct m0_pdclust_layout *pl)
Definition: pdclust.c:372
uint64_t sa_unit
Definition: pdclust.h:243
int32_t ioo_rc
M0_INTERNAL int m0_rpc_session_validate(struct m0_rpc_session *session)
Definition: session.c:573
M0_INTERNAL size_t m0_rpc_bulk_buf_length(struct m0_rpc_bulk *rbulk)
Definition: bulk.c:550
uint64_t ti_databytes
struct m0_pdclust_tgt_addr tgt
Definition: fd.c:110
static uint8_t fail[DATA_UNIT_COUNT_MAX+PARITY_UNIT_COUNT_MAX]
M0_INTERNAL int64_t m0_ref_read(const struct m0_ref *ref)
Definition: refs.c:44
struct target_ioreq * irf_tioreq
Definition: pg.h:881
#define M0_CNT_INC(cnt)
Definition: arith.h:226
m0_bcount_t ti_cksum_copied
Definition: pg.h:811
#define M0_FI_ENABLED(tag)
Definition: finject.h:231
struct m0_ref f_ref
Definition: fop.h:81
Definition: ext.h:37
Definition: fid.h:38
m0_bindex_t e_start
Definition: ext.h:39
M0_INTERNAL void ioreq_fop_fini(struct ioreq_fop *fop)
Definition: io_req_fop.c:1030
struct m0_fid ti_fid
static struct m0_layout_instance * layout_instance(const struct io_request *req)
Definition: file.c:498
#define M0_ALLOC_PTR(ptr)
Definition: memory.h:86
struct cc_req_fop ti_cc_fop
static bool should_unit_be_truncated(bool partial, enum m0_pdclust_unit_type unit_type, enum page_attr flags)
Definition: io_nw_xfer.c:458
struct m0_mutex nxr_lock
m0_bindex_t ti_goff
Definition: pg.h:785
M0_HT_DESCR_DEFINE(tioreqht, "Hash of target_ioreq objects", M0_INTERNAL, struct target_ioreq, ti_link, ti_magic, M0_TIOREQ_MAGIC, M0_TLIST_HEAD_MAGIC, ti_fid.f_container, tioreqs_hash_func, tioreq_key_eq)
struct m0_bufvec dr_auxbufvec
Definition: pg.h:707
enum nw_xfer_state nxr_state
static void m0_op_io_to_rpc_map(const struct m0_op_io *ioo, const struct m0_rpc_item *item)
Definition: io_nw_xfer.c:80
static int nw_xfer_io_distribute(struct nw_xfer_request *xfer)
Definition: io_nw_xfer.c:1475
struct m0_rpc_item * m0_fop_to_rpc_item(const struct m0_fop *fop)
Definition: fop.c:337
static void parity_page_pos_get(struct pargrp_iomap *map, m0_bindex_t index, uint32_t *row, uint32_t *col)
Definition: io_nw_xfer.c:124
M0_INTERNAL bool m0_bitmap_get(const struct m0_bitmap *map, size_t idx)
Definition: bitmap.c:105
M0_INTERNAL enum m0_pdclust_unit_type m0_pdclust_unit_classify(const struct m0_pdclust_layout *pl, int unit)
Definition: pdclust.c:425
m0_bcount_t size
Definition: di.c:39
page_attr
#define _0C(exp)
Definition: assert.h:311
M0_INTERNAL m0_bindex_t m0_ivec_cursor_index(const struct m0_ivec_cursor *cur)
Definition: vec.c:733
struct data_buf *** pi_databufs
M0_INTERNAL void m0_mutex_fini(struct m0_mutex *mutex)
Definition: mutex.c:42
struct m0_indexvec dr_ivec
Definition: pg.h:700
void m0_fop_put_lock(struct m0_fop *fop)
Definition: fop.c:198
struct m0_atomic64 nxr_iofop_nr
static struct m0_fop * fop
Definition: item.c:57
M0_INTERNAL int32_t m0_net_domain_get_max_buffer_segments(struct m0_net_domain *dom)
static struct m0 instance
Definition: main.c:78
M0_INTERNAL void m0_bitmap_reset(struct m0_bitmap *map)
Definition: bitmap.c:149
static struct m0_be_seg * seg
Definition: btree.c:40
uint64_t ioo_iomap_nr
static uint32_t ioreq_sm_state(const struct io_request *req)
Definition: file.c:975
static int target_ioreq_iofops_prepare(struct target_ioreq *ti, enum page_attr filter)
Definition: io_nw_xfer.c:843
M0_INTERNAL void m0_ext_intersection(const struct m0_ext *e0, const struct m0_ext *e1, struct m0_ext *result)
Definition: ext.c:81
M0_INTERNAL struct m0_fid target_fid(struct m0_op_io *ioo, struct m0_pdclust_tgt_addr *tgt)
Definition: io_nw_xfer.c:710
struct nw_xfer_request * ti_nwxfer
#define out(...)
Definition: gen.c:41
Definition: file.h:81
M0_INTERNAL uint64_t m0__page_size(const struct m0_op_io *ioo)
Definition: utils.c:41
uint64_t oa_layout_id
Definition: client.h:758
M0_INTERNAL bool m0_is_read_fop(const struct m0_fop *fop)
Definition: io_fops.c:916
static bool tioreq_key_eq(const void *key1, const void *key2)
Definition: io_nw_xfer.c:299
struct m0_fid gfid
Definition: dir.c:626
M0_INTERNAL struct m0_fop_cob_rw * io_rw_get(struct m0_fop *fop)
Definition: io_fops.c:1037
Definition: pg.h:859
static int32_t min32(int32_t a, int32_t b)
Definition: arith.h:36
M0_INTERNAL bool m0_is_write_fop(const struct m0_fop *fop)
Definition: io_fops.c:922
M0_INTERNAL bool m0_fid_is_valid(const struct m0_fid *fid)
Definition: fid.c:96
void target_ioreq_fini(struct target_ioreq *ti)
Definition: io_nw_xfer.c:364
M0_INTERNAL void m0_fd_fwd_map(struct m0_pdclust_instance *pi, const struct m0_pdclust_src_addr *src, struct m0_pdclust_tgt_addr *tgt)
Definition: fd.c:838
struct m0_indexvec ti_goff_ivec
Definition: pg.h:820
M0_INTERNAL int m0_io_fop_prepare(struct m0_fop *fop)
Definition: io_fops.c:1513
struct m0_bufvec ti_auxbufvec
Definition: pg.h:807
struct m0_rpc_machine * ri_rmachine
Definition: item.h:160
static void m0_atomic64_add(struct m0_atomic64 *a, int64_t num)
M0_INTERNAL int m0_rpc_bulk_buf_add(struct m0_rpc_bulk *rbulk, uint32_t segs_nr, m0_bcount_t length, struct m0_net_domain *netdom, struct m0_net_buffer *nb, struct m0_rpc_bulk_buf **out)
Definition: bulk.c:291
M0_INTERNAL uint64_t m0_sm_id_get(const struct m0_sm *sm)
Definition: sm.c:1021
#define m0_tl_for(name, head, obj)
Definition: tlist.h:695
void m0_free(void *data)
Definition: memory.c:146
#define m0_htable_endfor
Definition: hash.h:491
static const struct target_ioreq_ops tioreq_ops
Definition: io_nw_xfer.c:1182
struct m0_rpc_item f_item
Definition: fop.h:84
uint32_t sm_state
Definition: sm.h:307
struct m0_bufvec ioo_attr
struct m0_pdclust_src_addr src
Definition: fd.c:108
struct dgmode_rwvec * ti_dgvec
int32_t rc
Definition: trigger_fop.h:47
uint64_t h_bucket_nr
Definition: hash.h:178
static uint32_t io_desc_size(struct m0_net_domain *ndom)
Definition: file.c:6423
struct m0_indexvec ti_ivec
Definition: pg.h:793
#define M0_POST_EX(cond)
#define offsetof(typ, memb)
Definition: misc.h:29
M0_INTERNAL bool m0_sm_group_is_locked(const struct m0_sm_group *grp)
Definition: sm.c:107
M0_INTERNAL void m0_poolmach_gob2cob(struct m0_poolmach *pm, const struct m0_fid *gfid, uint32_t idx, struct m0_fid *cob_fid)
struct m0_rpc_conn * s_conn
Definition: session.h:312
static uint64_t target_offset(uint64_t frame, struct m0_pdclust_layout *play, m0_bindex_t gob_offset)
Definition: file.c:571
int(* tio_iofops_prepare)(struct target_ioreq *ti, enum page_attr filter)
Definition: fop.h:80
struct pargrp_iomap ** ioo_iomaps
const struct m0_di_ops * fi_di_ops
Definition: file.h:92
uint64_t crw_flags
Definition: io_fops.h:411
#define FID_F
Definition: fid.h:75
m0_time_t ti_start_time
Definition: pg.h:759
Definition: vec.h:145
M0_INTERNAL void m0_file_init(struct m0_file *file, const struct m0_fid *fid, struct m0_rm_domain *dom, enum m0_di_types di_type)
Definition: file.c:477
static void m0_atomic64_set(struct m0_atomic64 *a, int64_t num)
M0_INTERNAL void * m0_extent_vec_get_checksum_addr(void *cksum_buf_vec, m0_bindex_t off, void *ivec, m0_bindex_t unit_sz, m0_bcount_t cs_sz)
Definition: cksum_utils.c:107
m0_bcount_t b_nob
Definition: buf.h:230
Definition: idx_mock.c:47
#define m0_tl_forall(name, var, head,...)
Definition: tlist.h:735
static void target_ioreq_seg_add(struct target_ioreq *ti, const struct m0_pdclust_src_addr *src, const struct m0_pdclust_tgt_addr *tgt, m0_bindex_t gob_offset, m0_bcount_t count, struct pargrp_iomap *map)
Definition: io_nw_xfer.c:479