@@ -7,7 +7,7 @@ rank = MPI.Comm_rank(comm)
77sz = MPI. Comm_size (comm)
88filename = MPI. bcast (tempname (), 0 , comm)
99
10- MPI. Barrier (comm)
10+ # TODO MPI.Barrier(comm)
1111
1212# Collective write
1313fh = MPI. File. open (comm, filename, read= true , write= true , create= true )
@@ -16,74 +16,69 @@ fh = MPI.File.open(comm, filename, read=true, write=true, create=true)
1616if ! MPI. File. get_atomicity (fh)
1717 MPI. File. set_atomicity (fh, true )
1818end
19-
2019@test MPI. File. get_atomicity (fh)
2120
22- MPI. Barrier (comm)
23- MPI. File. sync (fh)
21+ function sync ()
22+ # First ensure that all local changes are flushed ...
23+ MPI. File. sync (fh)
24+ # ... then wait for all other processes to finish doing that ...
25+ MPI. Barrier (comm)
26+ # ... then make sure we see all changes that the other processes made.
27+ MPI. File. sync (fh)
28+ end
29+
30+ sync ()
2431
2532header = " my header"
2633
2734if rank == 0
2835 MPI. File. write_shared (fh, header)
2936end
30-
31- # TODO : is there a better way to synchronise shared pointers?
32- MPI. Barrier (comm)
33- MPI. File. sync (fh)
37+ sync ()
3438
3539offset = MPI. File. get_position_shared (fh)
3640@test offset == sizeof (header)
3741byte_offset = MPI. File. get_byte_offset (fh, offset)
3842@test byte_offset == offset
3943
4044MPI. File. set_view! (fh, byte_offset, MPI. Datatype (Int64), MPI. Datatype (Int64))
41- MPI. Barrier (comm)
42- MPI. File. sync (fh)
45+ sync ()
4346@test MPI. File. get_position_shared (fh) == 0
4447
45- MPI. Barrier (comm)
46- MPI. File. sync (fh)
47-
4848MPI. File. write_ordered (fh, fill (Int64 (rank), rank+ 1 ))
49- MPI. Barrier (comm)
50- MPI. File. sync (fh)
51- # TODO : this has to be fixed: https://github.com/JuliaParallel/MPI.jl/issues/879
52- @test MPI. File. get_position_shared (fh) == sum (1 : sz) skip = Sys. isapple () || Sys. iswindows ()
49+ sync ()
50+ # TODO # TODO : this has to be fixed:
51+ # TODO # https://github.com/JuliaParallel/MPI.jl/issues/555,
52+ # TODO # https://github.com/JuliaParallel/MPI.jl/issues/579
53+ # TODO @test MPI.File.get_position_shared(fh) == sum(1:sz) skip = Sys.isapple() || Sys.iswindows()
54+ @test MPI. File. get_position_shared (fh) == sum (1 : sz)
5355
5456MPI. File. seek_shared (fh, 0 )
5557@test MPI. File. get_position_shared (fh) == 0
56-
57- MPI. Barrier (comm)
58- MPI. File. sync (fh)
58+ sync ()
5959
6060buf = zeros (Int64, rank+ 1 )
6161MPI. File. read_ordered! (fh, buf)
6262@test buf == fill (Int64 (rank), rank+ 1 )
63+ sync ()
6364
64- MPI. Barrier (comm)
65- MPI. File. sync (fh)
66- MPI. Barrier (comm)
67- # TODO : this has to be fixed: https://github.com/JuliaParallel/MPI.jl/issues/555
68- @test MPI. File. get_position_shared (fh) == sum (1 : sz) skip = Sys. iswindows ()
65+ # TODO # TODO : this has to be fixed:
66+ # TODO # https://github.com/JuliaParallel/MPI.jl/issues/555
67+ # TODO @test MPI.File.get_position_shared(fh) == sum(1:sz) skip = Sys.iswindows()
68+ @test MPI. File. get_position_shared (fh) == sum (1 : sz)
6969
7070MPI. File. set_view! (fh, 0 , MPI. Datatype (UInt8), MPI. Datatype (UInt8))
71- MPI. Barrier (comm)
72- MPI. File. sync (fh)
71+ sync ()
7372MPI. File. seek_shared (fh, 0 )
7473@test MPI. File. get_position_shared (fh) == 0
75-
76- MPI. Barrier (comm)
77- MPI. File. sync (fh)
74+ sync ()
7875
7976if rank == sz- 1
8077 buf = Array {UInt8} (undef, sizeof (header))
8178 MPI. File. read_shared! (fh, buf)
8279 @test String (buf) == header
8380end
84-
85- MPI. Barrier (comm)
86- MPI. File. sync (fh)
81+ sync ()
8782
8883@test MPI. File. get_position_shared (fh) == sizeof (header)
8984
0 commit comments