@@ -303,7 +303,7 @@ void __imr_alloc_data_region(void** region, int raid_mode, int local_data_size,
     *region = (void *) malloc(2 * local_data_size);
   } else if(raid_mode == 5){
     //We need space for our own local data, as well as space for the parity data
-    //We add two just in case the data size isn't evenly divisble by set_size-1
+    //We add two just in case the data size isn't evenly divisible by set_size-1
     // 3 is needed because making the parity one larger on some nodes requires
     // extra bits of "data" on the other nodes
     *region = (void *) malloc(local_data_size + local_data_size/(set_size-1) + 3);
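
Note: as a rough illustration of the sizing arithmetic in the hunk above, each rank in a set of set_size nodes reserves its own local_data_size bytes plus roughly local_data_size/(set_size-1) bytes of parity, with a few bytes of slack for the non-divisible case. The helper name and example values below are hypothetical, not from this commit:

/* Hypothetical sketch only: mirrors the RAID-5 allocation size used above. */
#include <stdio.h>
#include <stdlib.h>

static size_t imr_raid5_region_size(size_t local_data_size, int set_size){
    /* local data + this rank's share of parity + slack for uneven division */
    return local_data_size + local_data_size / (size_t)(set_size - 1) + 3;
}

int main(void){
    size_t local_data_size = 10;  /* assumed per-rank data size in bytes */
    int set_size = 4;             /* assumed recovery-set size */
    /* 10 + 10/3 + 3 = 16 bytes per rank */
    printf("allocate %zu bytes\n", imr_raid5_region_size(local_data_size, set_size));
    return 0;
}
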
@@ -482,14 +482,14 @@ int __imr_member_store(fenix_group_t* g, int member_id,
   // all of the data in the corresponding blocks and the parity for those blocks
   //Standard RAID does this by having one disk store parity for a given block instead of data, but this assumes
   // that there is no benefit to data locality - in our case we want each node to have a local copy of its own
-  // data, preferably in a single (virtually) continuous memory range for data movement optomization. So we'll
+  // data, preferably in a single (virtually) continuous memory range for data movement optimization. So we'll
   // store the local data, then put 1/N of the parity data at the bottom of the commit.
   //The weirdness comes from the fact that a given node CANNOT contribute to the data being checked for parity which
   // will be stored on itself. IE, a node cannot save both a portion of the data and the parity for that data portion -
   // doing so would mean if that node fails it is as if we lost two nodes for recovery semantics, making every failure
   // non-recoverable.
   // This means we need to do an XOR reduction across every node but myself, then store the result on myself - this is
-  // a little awkward with MPI's reductions which require full comm participation and do not recieve any information about
+  // a little awkward with MPI's reductions which require full comm participation and do not receive any information about
   // the source of a given chunk of data (IE we can't exclude data from node X, as we want to).
   //This is easily doable using MPI send/recvs, but doing it that way neglects all of the data/comm size optimizations,
   // as well as any block XOR optimizations from MPI's reduction operations.
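
Note: a minimal, unoptimized sketch of the "XOR everything except my own chunk" step described above. It uses MPI_Allgather rather than Fenix's actual send/recv or reduction machinery, and the function name, set_comm, and buffer parameters are assumed for illustration only:

/* Hypothetical sketch, not the Fenix implementation: computes the XOR of every
 * rank's chunk except this rank's own, so the parity stored here never depends
 * on the data stored here.  Real RAID-5 parity rotation and the data/comm size
 * optimizations discussed above are omitted. */
#include <mpi.h>
#include <stdlib.h>
#include <string.h>

static void xor_parity_excluding_self(MPI_Comm set_comm, const unsigned char* chunk,
                                      int chunk_size, unsigned char* parity_out){
    int rank, size;
    MPI_Comm_rank(set_comm, &rank);
    MPI_Comm_size(set_comm, &size);

    /* Everyone contributes its chunk; every rank sees all chunks. */
    unsigned char* all = malloc((size_t)size * (size_t)chunk_size);
    MPI_Allgather((void*)chunk, chunk_size, MPI_BYTE,
                  all, chunk_size, MPI_BYTE, set_comm);

    /* XOR together every chunk except the one this rank contributed. */
    memset(parity_out, 0, (size_t)chunk_size);
    for(int peer = 0; peer < size; peer++){
        if(peer == rank) continue;
        for(int i = 0; i < chunk_size; i++)
            parity_out[i] ^= all[(size_t)peer * chunk_size + i];
    }
    free(all);
}
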
@@ -683,7 +683,7 @@ int __imr_get_snapshot_at_position(fenix_group_t* g, int position,
     retval = FENIX_ERROR_INVALID_POSITION;
   } else {
     //Each member ought to have the same snapshots, in the same order.
-    //If this isn't true, some other bug has occured. Thus, we will just
+    //If this isn't true, some other bug has occurred. Thus, we will just
     //query the first member.
     *time_stamp = group->entries[0].timestamp[group->entries[0].current_head - 1 - position];
     retval = FENIX_SUCCESS;
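
Note: a tiny worked example (values hypothetical) of the position-to-timestamp lookup above; position counts back from the newest snapshot, so index current_head - 1 - position walks the timestamp array from newest to oldest:

/* Hypothetical illustration: with timestamps {10, 20, 30} and current_head == 3,
 * position 0 reads index 2 (timestamp 30, the newest), position 1 reads 20,
 * and position 2 reads 10 (the oldest). */
#include <assert.h>

int main(void){
    int timestamp[] = {10, 20, 30};
    int current_head = 3;
    for(int position = 0; position < current_head; position++)
        assert(timestamp[current_head - 1 - position] == 30 - 10 * position);
    return 0;
}
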
@@ -812,7 +812,7 @@ int __imr_member_restore(fenix_group_t* g, int member_id,
 
   if(recv_size > 0){
     void* recv_buf = malloc(member_data.datatype_size * recv_size);
-    //first recieve their data, so store in the resiliency section.
+    //first receive their data, so store in the resiliency section.
     MPI_Recv(recv_buf, recv_size * member_data.datatype_size, MPI_BYTE, group->partners[0],
         RECOVER_MEMBER_ENTRY_TAG^group->base.groupid, group->base.comm, NULL);
     __fenix_data_subset_deserialize(mentry->data_regions + snapshot, recv_buf,