-
Notifications
You must be signed in to change notification settings - Fork 254
Expand file tree
/
Copy pathtest_robotframework.py
More file actions
861 lines (767 loc) · 32.6 KB
/
test_robotframework.py
File metadata and controls
861 lines (767 loc) · 32.6 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
import csv
import os.path
import re
import shutil
import sys
import tempfile
from contextlib import contextmanager
from pathlib import Path
from unittest import mock
from xml.etree import ElementTree as ET
import pytest
import responses
from robot.libdocpkg.robotbuilder import LibraryDocBuilder
from cumulusci.core.config import BaseProjectConfig, TaskConfig, UniversalConfig
from cumulusci.core.exceptions import RobotTestFailure, TaskOptionsError
from cumulusci.core.tests.utils import MockLoggerMixin
from cumulusci.tasks.robotframework import Robot, RobotLibDoc, RobotTestDoc
from cumulusci.tasks.robotframework.debugger import DebugListener
from cumulusci.tasks.robotframework.libdoc import KeywordFile
from cumulusci.tasks.robotframework.robotframework import KeywordLogger
from cumulusci.tasks.salesforce.tests.util import create_task
from cumulusci.utils import temporary_dir, touch
from cumulusci.utils.xml.robot_xml import log_perf_summary_from_xml
class TestRobot:
    """Tests for the Robot task: option parsing, listeners, source handling,
    and how the task invokes robot (vs. pabot)."""

    @mock.patch("cumulusci.tasks.robotframework.robotframework.robot_run")
    def test_run_task_with_failure(self, robot_run):
        """A nonzero robot return code raises RobotTestFailure."""
        robot_run.return_value = 1
        task = create_task(Robot, {"suites": "tests", "pdb": True})
        with pytest.raises(RobotTestFailure):
            task()

    @mock.patch("cumulusci.tasks.robotframework.robotframework.robot_run")
    def test_run_task_error_message(self, robot_run):
        """Each robot return code maps to the documented error message."""
        expected = {
            1: "1 test failed.",  # singular; pet peeve of mine to see "1 tests"
            2: "2 tests failed.",  # plural
            249: "249 tests failed.",
            250: "250 or more tests failed.",
            251: "Help or version information printed.",
            252: "Invalid test data or command line options.",
            253: "Test execution stopped by user.",
            255: "Unexpected internal error",
        }
        for error_code, message in expected.items():
            robot_run.return_value = error_code
            task = create_task(Robot, {"suites": "tests", "pdb": True})
            with pytest.raises(RobotTestFailure) as e:
                task()
            assert str(e.value) == message

    @mock.patch("cumulusci.tasks.robotframework.robotframework.patch_statusreporter")
    def test_pdb_arg(self, patch_statusreporter):
        """The pdb option controls whether the status reporter is patched."""
        create_task(
            Robot,
            {
                "suites": "test",  # required, or the task will raise an exception
                "pdb": "False",
            },
        )
        patch_statusreporter.assert_not_called()
        create_task(
            Robot,
            {
                "suites": "test",  # required, or the task will raise an exception
                "pdb": "True",
            },
        )
        patch_statusreporter.assert_called_once()

    def test_list_args(self):
        """Verify that certain arguments are converted to lists"""
        task = create_task(
            Robot,
            {
                "suites": "test",  # required, or the task will raise an exception
                "test": "one, two",
                "include": "foo, bar",
                "exclude": "a, b",
                "vars": "uno, dos, tres",
                "skip": "xyzzy,plugh",
            },
        )
        for option in ("test", "include", "exclude", "vars", "suites", "skip"):
            assert isinstance(task.options[option], list)

    def test_options_converted_to_dict(self):
        """The 'options' option is parsed from 'key:value,...' into a dict."""
        task = create_task(
            Robot,
            {
                "suites": "test",  # required, or the task will raise an exception
                "options": "outputdir:/tmp/example,loglevel:DEBUG",
            },
        )
        assert isinstance(task.options["options"], dict)

    def test_process_arg_requires_int(self):
        """Verify we throw a useful error for non-int "processes" option"""
        expected = "Please specify an integer for the `processes` option."
        with pytest.raises(TaskOptionsError, match=expected):
            create_task(Robot, {"suites": "tests", "processes": "bogus"})

    @mock.patch("cumulusci.tasks.robotframework.robotframework.robot_run")
    @mock.patch("cumulusci.tasks.robotframework.robotframework.subprocess.run")
    def test_pabot_arg_with_process_eq_one(self, mock_subprocess_run, mock_robot_run):
        """Verify that pabot-specific arguments are ignored if processes==1"""
        mock_robot_run.return_value = 0
        task = create_task(
            Robot,
            {
                "suites": "tests",
                # Note: the option is "processes" (plural); using the wrong
                # name would leave the default in place and pass vacuously.
                "processes": 1,
                "ordering": "robot/order.txt",
                "testlevelsplit": "true",
            },
        )
        task()
        mock_subprocess_run.assert_not_called()
        outputdir = str(Path(".").resolve())
        mock_robot_run.assert_called_once_with(
            "tests",
            listener=[],
            outputdir=outputdir,
            variable=["org:test"],
            tagstatexclude=["cci_metric_elapsed_time", "cci_metric"],
            stdout=sys.stdout,
            stderr=sys.stderr,
        )

    @mock.patch("cumulusci.tasks.robotframework.robotframework.robot_run")
    @mock.patch("cumulusci.tasks.robotframework.robotframework.subprocess.run")
    def test_process_arg_eq_one(self, mock_subprocess_run, mock_robot_run):
        """Verify that setting the processes option to 1 runs robot rather than pabot"""
        mock_robot_run.return_value = 0
        task = create_task(Robot, {"suites": "tests", "processes": 1})
        task()
        mock_subprocess_run.assert_not_called()
        outputdir = str(Path(".").resolve())
        mock_robot_run.assert_called_once_with(
            "tests",
            listener=[],
            outputdir=outputdir,
            variable=["org:test"],
            tagstatexclude=["cci_metric_elapsed_time", "cci_metric"],
            stdout=sys.stdout,
            stderr=sys.stderr,
        )

    @mock.patch("cumulusci.tasks.robotframework.robotframework.robot_run")
    def test_suites(self, mock_robot_run):
        """Verify that passing a list of suites is handled properly"""
        mock_robot_run.return_value = 0
        task = create_task(Robot, {"suites": "tests,more_tests", "processes": 1})
        task()
        outputdir = str(Path(".").resolve())
        mock_robot_run.assert_called_once_with(
            "tests",
            "more_tests",
            listener=[],
            outputdir=outputdir,
            variable=["org:test"],
            tagstatexclude=["cci_metric_elapsed_time", "cci_metric"],
            stdout=sys.stdout,
            stderr=sys.stderr,
        )

    @mock.patch("cumulusci.tasks.robotframework.robotframework.robot_run")
    def test_tagstatexclude(self, mock_robot_run):
        """Verify tagstatexclude is treated as a list"""
        mock_robot_run.return_value = 0
        task = create_task(
            Robot,
            {
                "suites": "test",  # required, or the task will raise an exception
                "options": {
                    "tagstatexclude": "this,that",
                },
            },
        )
        assert type(task.options["options"]["tagstatexclude"]) is list
        task()
        outputdir = str(Path(".").resolve())
        mock_robot_run.assert_called_once_with(
            "test",
            listener=[],
            outputdir=outputdir,
            variable=["org:test"],
            tagstatexclude=["this", "that", "cci_metric_elapsed_time", "cci_metric"],
            stdout=sys.stdout,
            stderr=sys.stderr,
        )

    def test_default_listeners(self):
        """With no listener-related options, no listeners are configured."""
        # first, verify that not specifying any listener options
        # results in no listeners...
        task = create_task(
            Robot, {"suites": "test"}  # required, or the task will raise an exception
        )
        assert len(task.options["options"]["listener"]) == 0
        # next, make sure that if we specify the options with a Falsy
        # string, the option is properly treated like a boolean
        task = create_task(
            Robot,
            {
                "suites": "test",  # required, or the task will raise an exception
                "verbose": "False",
                "robot_debug": "False",
            },
        )
        assert len(task.options["options"]["listener"]) == 0

    def test_debug_option(self):
        """Verify that setting debug to True attaches the appropriate listener"""
        task = create_task(
            Robot,
            {
                "suites": "test",  # required, or the task will raise an exception
                "robot_debug": "True",
            },
        )
        listener_classes = [
            listener.__class__ for listener in task.options["options"]["listener"]
        ]
        assert (
            DebugListener in listener_classes
        ), "DebugListener was not in task options"

    def test_verbose_option(self):
        """Verify that setting verbose to True attaches the appropriate listener"""
        task = create_task(
            Robot,
            {
                "suites": "test",  # required, or the task will raise an exception
                "verbose": "True",
            },
        )
        listener_classes = [
            listener.__class__ for listener in task.options["options"]["listener"]
        ]
        assert (
            KeywordLogger in listener_classes
        ), "KeywordLogger was not in task options"

    def test_user_defined_listeners_option(self):
        """Verify that our listeners don't replace user-defined listeners"""
        task = create_task(
            Robot,
            {
                "suites": "test",  # required, or the task will raise an exception
                "robot_debug": "True",
                "verbose": "True",
                "options": {"listener": ["FakeListener.py"]},
            },
        )
        listener_classes = [
            listener.__class__ for listener in task.options["options"]["listener"]
        ]
        assert "FakeListener.py" in task.options["options"]["listener"]
        assert DebugListener in listener_classes
        assert KeywordLogger in listener_classes

    @mock.patch("cumulusci.tasks.robotframework.robotframework.robot_run")
    @mock.patch("cumulusci.tasks.robotframework.robotframework.add_path")
    def test_sources(self, mock_add_path, mock_robot_run):
        """Verify that sources get added to PYTHONPATH when task runs"""
        universal_config = UniversalConfig()
        project_config = BaseProjectConfig(
            universal_config,
            {
                "sources": {
                    "test1": {"path": "dummy1"},
                    "test2": {"path": "dummy2"},
                }
            },
        )
        # get_namespace returns a config. The only part of the config
        # that the code uses is the repo_root property, so we don't need
        # a full blown config.
        project_config.get_namespace = mock.Mock(
            side_effect=lambda source: mock.Mock(
                repo_root=project_config.sources[source]["path"]
            )
        )
        task = create_task(
            Robot,
            {"suites": "test", "sources": ["test1", "test2"]},
            project_config=project_config,
        )
        mock_robot_run.return_value = 0
        assert "dummy1" not in sys.path
        assert "dummy2" not in sys.path
        task()
        project_config.get_namespace.assert_has_calls(
            [mock.call("test1"), mock.call("test2")]
        )
        mock_add_path.assert_has_calls(
            [mock.call("dummy1", end=True), mock.call("dummy2", end=True)]
        )
        assert "dummy1" not in sys.path
        assert "dummy2" not in sys.path
        assert (
            Path(".").resolve() == Path(task.return_values["robot_outputdir"]).resolve()
        )

    @mock.patch("cumulusci.tasks.robotframework.robotframework.robot_run")
    @mock.patch("cumulusci.tasks.robotframework.robotframework.add_path")
    def test_repo_root_in_sys_path(self, mock_add_path, mock_robot_run):
        """Verify that the repo root is added to sys.path

        Normally, the repo root is added to sys.path in the __init__
        of BaseSalesforceTask. However, if we're running a task from
        another repo, the git root of that other repo isn't added. The
        robot task will do that; this verifies that.
        """
        mock_robot_run.return_value = 0
        universal_config = UniversalConfig()
        project_config = BaseProjectConfig(universal_config)
        with temporary_dir() as d:
            project_config.repo_info["root"] = d
            task = create_task(
                Robot, {"suites": "tests"}, project_config=project_config
            )
            assert d not in sys.path
            task()
            mock_add_path.assert_called_once_with(d)
            assert d not in sys.path

    @mock.patch("cumulusci.tasks.robotframework.robotframework.robot_run")
    def test_sources_not_found(self, mock_robot_run):
        """An unknown source name raises a TaskOptionsError at run time."""
        task = create_task(
            Robot,
            {"suites": "test", "sources": ["bogus"]},
        )
        expected = "robot source 'bogus' could not be found"
        with pytest.raises(TaskOptionsError, match=expected):
            task()

    @mock.patch("cumulusci.tasks.robotframework.robotframework.robot_run")
    def test_outputdir_return_value(self, mock_run, tmpdir):
        """Ensure that the task properly sets the outputdir return value"""
        # Note: 'self' was missing from this signature, which made the
        # patch mock bind to 'tmpdir' and the test pass vacuously.
        project_config = BaseProjectConfig(UniversalConfig())
        test_dir = "test-dir"
        tmpdir.mkdir(test_dir)
        task = create_task(
            Robot,
            {
                "suites": "test",
                "options": {"outputdir": test_dir},
            },
            project_config=project_config,
        )
        mock_run.return_value = 0
        task()
        assert (Path.cwd() / test_dir).resolve() == Path(
            task.return_values["robot_outputdir"]
        ).resolve()
class TestRobotTestDoc:
    """Tests for the RobotTestDoc task."""

    @mock.patch("cumulusci.tasks.robotframework.robotframework.testdoc")
    def test_run_task(self, testdoc):
        # Running the task should delegate straight to robot's testdoc
        # function with the configured path and output file.
        options = {"path": ".", "output": "out"}
        task = create_task(RobotTestDoc, options)
        task()
        testdoc.assert_called_once_with(".", "out")
class TestRobotLibDoc(MockLoggerMixin):
    """Tests for the RobotLibDoc task: input validation, output generation
    (html and csv), glob handling, and logging."""

    # NOTE(review): maxDiff looks like a unittest.TestCase holdover; these
    # tests run under pytest, so presumably it has no effect — confirm.
    maxDiff = None

    def setup_method(self):
        # Fresh temp dir per test; the log handler comes from MockLoggerMixin.
        self.tmpdir = tempfile.mkdtemp(dir=".")
        self.task_config = TaskConfig()
        self._task_log_handler.reset()
        self.task_log = self._task_log_handler.messages
        self.datadir = os.path.dirname(__file__)

    def teardown_method(self):
        shutil.rmtree(self.tmpdir)

    def test_output_directory_not_exist(self):
        """Verify we catch an error if the output directory doesn't exist"""
        path = os.path.join(self.datadir, "TestLibrary.py")
        output = os.path.join(self.tmpdir, "bogus", "index.html")
        task = create_task(RobotLibDoc, {"path": path, "output": output})
        # on windows, the output path may have backslashes which needs
        # to be protected in the expected regex
        expected = r"Unable to create output file '{}' (.*)".format(re.escape(output))
        with pytest.raises(TaskOptionsError, match=expected):
            task()

    def test_validate_filenames(self):
        """Verify that we catch bad filenames early"""
        expected = "Unable to find the following input files: 'bogus.py', 'bogus.robot'"
        output = os.path.join(self.tmpdir, "index.html")
        with pytest.raises(TaskOptionsError, match=expected):
            create_task(RobotLibDoc, {"path": "bogus.py,bogus.robot", "output": output})
        # there's a special path through the code if only one filename is bad...
        expected = "Unable to find the input file 'bogus.py'"
        with pytest.raises(TaskOptionsError, match=expected):
            create_task(RobotLibDoc, {"path": "bogus.py", "output": output})

    def test_task_log(self):
        """Verify that the task prints out the name of the output file"""
        path = os.path.join(self.datadir, "TestLibrary.py")
        output = os.path.join(self.tmpdir, "index.html")
        task = create_task(RobotLibDoc, {"path": path, "output": output})
        task()
        assert "created {}".format(output) in self.task_log["info"]
        assert os.path.exists(output)

    def test_comma_separated_list_of_files(self):
        """Verify that we properly parse a comma-separated list of files"""
        path = "{},{}".format(
            os.path.join(self.datadir, "TestLibrary.py"),
            os.path.join(self.datadir, "TestResource.robot"),
        )
        output = os.path.join(self.tmpdir, "index.html")
        task = create_task(RobotLibDoc, {"path": path, "output": output})
        task()
        assert os.path.exists(output)
        assert len(task.result["files"]) == 2

    def test_glob_patterns(self):
        # A glob pattern should expand to the matching file(s).
        output = os.path.join(self.tmpdir, "index.html")
        path = os.path.join(self.datadir, "*Library.py")
        task = create_task(RobotLibDoc, {"path": path, "output": output})
        task()
        assert os.path.exists(output)
        assert len(task.result["files"]) == 1
        assert task.result["files"][0] == os.path.join(self.datadir, "TestLibrary.py")

    def test_remove_duplicates(self):
        # Passing the same (glob) path twice should yield a single file.
        output = os.path.join(self.tmpdir, "index.html")
        path = os.path.join(self.datadir, "*Library.py")
        task = create_task(RobotLibDoc, {"path": [path, path], "output": output})
        task()
        assert len(task.result["files"]) == 1
        assert task.result["files"][0] == os.path.join(self.datadir, "TestLibrary.py")

    def test_creates_output(self):
        # The output file is created and its creation is logged.
        path = os.path.join(self.datadir, "TestLibrary.py")
        output = os.path.join(self.tmpdir, "index.html")
        task = create_task(RobotLibDoc, {"path": path, "output": output})
        task()
        assert "created {}".format(output) in self.task_log["info"]
        assert os.path.exists(output)

    def test_pageobject(self):
        """Verify that we can parse a page object file"""
        path = os.path.join(self.datadir, "TestPageObjects.py")
        output = os.path.join(self.tmpdir, "index.html")
        task = create_task(RobotLibDoc, {"path": path, "output": output})
        task()
        assert "created {}".format(output) in self.task_log["info"]
        assert os.path.exists(output)

    def test_csv(self):
        """Verify the exact csv output for a mix of library, resource,
        and page-object input files."""
        path = ",".join(
            (
                os.path.join(self.datadir, "TestLibrary.py"),
                os.path.join(self.datadir, "TestResource.robot"),
                os.path.join(self.datadir, "TestPageObjects.py"),
            )
        )
        output = Path(self.tmpdir) / "keywords.csv"
        if output.exists():
            os.remove(output)
        task = create_task(RobotLibDoc, {"path": path, "output": output.as_posix()})
        task()
        assert os.path.exists(output)
        with open(output, "r", newline="") as csvfile:
            reader = csv.reader(csvfile)
            actual_output = [row for row in reader]
        # not only does this verify that the expected keywords are in
        # the output, but that the base class keywords are *not*
        datadir = os.path.join("cumulusci", "tasks", "robotframework", "tests", "")
        expected_output = [
            ["Name", "Source", "Line#", "po type", "po_object", "Documentation"],
            [
                "Keyword One",
                f"{datadir}TestPageObjects.py",
                "13",
                "Listing",
                "Something__c",
                "",
            ],
            [
                "Keyword One",
                f"{datadir}TestPageObjects.py",
                "24",
                "Detail",
                "Something__c",
                "",
            ],
            [
                "Keyword Three",
                f"{datadir}TestPageObjects.py",
                "30",
                "Detail",
                "Something__c",
                "",
            ],
            [
                "Keyword Two",
                f"{datadir}TestPageObjects.py",
                "16",
                "Listing",
                "Something__c",
                "",
            ],
            [
                "Keyword Two",
                f"{datadir}TestPageObjects.py",
                "27",
                "Detail",
                "Something__c",
                "",
            ],
            [
                "Library Keyword One",
                f"{datadir}TestLibrary.py",
                "13",
                "",
                "",
                "Keyword documentation with *bold* and _italics_",
            ],
            [
                "Library Keyword Two",
                f"{datadir}TestLibrary.py",
                "17",
                "",
                "",
                "",
            ],
            [
                "Resource keyword one",
                f"{datadir}TestResource.robot",
                "2",
                "",
                "",
                "",
            ],
            [
                "Resource keyword two",
                f"{datadir}TestResource.robot",
                "6",
                "",
                "",
                "",
            ],
        ]
        assert actual_output == expected_output

    @mock.patch("cumulusci.tasks.robotframework.libdoc.view_file")
    def test_preview_option(self, mock_view_file):
        """Verify that the 'preview' option results in calling the view_file method"""
        path = os.path.join(self.datadir, "TestLibrary.py")
        output = os.path.join(self.tmpdir, "index.html")
        task = create_task(
            RobotLibDoc, {"path": path, "output": output, "preview": True}
        )
        task()
        mock_view_file.assert_called_once_with(output)
class TestRobotLibDocKeywordFile:
    """Tests for the KeywordFile helper used by RobotLibDoc."""

    def setup_method(self):
        self.tmpdir = tempfile.mkdtemp(dir=".")

    def teardown_method(self):
        shutil.rmtree(self.tmpdir)

    def test_existing_file(self):
        # A KeywordFile built from a real file records its basename,
        # its full path, and starts with no keywords.
        keyword_path = os.path.join(self.tmpdir, "keywords.py")
        touch(keyword_path)
        kwfile = KeywordFile(keyword_path)
        assert kwfile.filename == "keywords.py"
        assert kwfile.path == keyword_path
        assert kwfile.keywords == {}

    def test_file_as_module(self):
        # A dotted module name is kept as-is; the last component
        # becomes the filename.
        kwfile = KeywordFile("cumulusci.robotframework.Salesforce")
        assert kwfile.filename == "Salesforce"
        assert kwfile.path == "cumulusci.robotframework.Salesforce"
        assert kwfile.keywords == {}

    def test_add_keyword(self):
        # add_keywords stores documentation keyed by the page-object tuple.
        kwfile = KeywordFile("test.TestLibrary")
        po_key = ("Detail", "Contact")
        kwfile.add_keywords("the documentation...", po_key)
        assert len(kwfile.keywords) == 1
        assert kwfile.keywords[po_key] == "the documentation..."

    def test_to_tuples(self):
        """Test that to_tuples returns relative paths when possible

        The code attempts to convert absolute paths to relative paths,
        but if it can't then the path remains unchainged. This test generates
        results with one file that is relative to cwd and one that is not.
        """
        test_dir = os.path.dirname(__file__)
        library_path = Path(test_dir) / "TestLibrary.py"
        libdoc = LibraryDocBuilder().build(str(library_path))
        # we'll set the first to a non-relative directory and leave
        # the other one relative to here (assuming that `here` is
        # relative to cwd)
        absolute_path = str(Path("/bogus/whatever.py"))
        libdoc.keywords[0].source = absolute_path
        kwfile = KeywordFile("Whatever")
        kwfile.add_keywords(libdoc)
        # The returned result is a set, so the order is indeterminate. That's
        # why the following line sorts it.
        rows = sorted(kwfile.to_tuples())
        # verify the absolute path remains absolute
        assert rows[0][1] == absolute_path
        # verify that the path to a file under cwd is relative
        assert rows[1][1] == str(library_path.relative_to(os.getcwd()))
class TestRobotLibDocOutput:
    """Tests for the generated robot keyword documentation"""

    def setup_method(self):
        # Generate the documentation once per test and parse it so
        # individual tests can make assertions against the html body.
        self.tmpdir = tempfile.mkdtemp(dir=".")
        self.datadir = os.path.dirname(__file__)
        input_files = [
            os.path.join(self.datadir, "TestLibrary.py"),
            os.path.join(self.datadir, "TestResource.robot"),
        ]
        output_file = os.path.join(self.tmpdir, "index.html")
        self.task = create_task(
            RobotLibDoc,
            {
                "path": input_files,
                "output": output_file,
                "title": "Keyword Documentation, yo.",
            },
        )
        self.task()
        docroot = ET.parse(output_file).getroot()
        self.html_body = docroot.find("body")

    def teardown_method(self):
        shutil.rmtree(self.tmpdir)

    def test_output_title(self):
        """Verify the document has the expected title"""
        title_element = self.html_body.find(
            ".//div[@class='header']/h1[@class='title']"
        )
        assert title_element is not None
        assert title_element.text.strip() == "Keyword Documentation, yo."

    def test_formatted_documentation(self):
        """Verify that markup in the documentation is rendered as html"""
        doc_element = self.html_body.find(
            ".//tr[@id='TestLibrary.py.Library-Keyword-One']//td[@class='kwdoc']"
        )
        doc_html = str(ET.tostring(doc_element, method="html").strip())
        # Asserting the full markup would be brittle (whitespace and
        # other benign differences), so just check that a couple of
        # formatted elements made it through.
        assert "<b>bold</b>" in doc_html
        assert "<i>italics</i>" in doc_html

    def test_output_sections(self):
        """Verify that the output has a section for each file"""
        sections = self.html_body.findall(".//div[@class='file']")
        section_titles = []
        for section in sections:
            section_titles.append(
                section.find(".//div[@class='file-header']/h2").text
            )
        assert len(sections) == 2, "expected to find 2 sections, found {}".format(
            len(sections)
        )
        assert section_titles == ["TestLibrary.py", "TestResource.robot"]

    def test_output_keywords(self):
        """Verify that all keywords in the libraries are represented in the output file"""
        keyword_rows = self.html_body.findall(".//tr[@class='kwrow']")
        keywords = [row.find(".//td[@class='kwname']") for row in keyword_rows]
        keyword_names = [cell.text.strip() for cell in keywords]
        assert len(keywords) == 4
        assert keyword_names == [
            "Library Keyword One",
            "Library Keyword Two",
            "Resource keyword one",
            "Resource keyword two",
        ]
class TestLibdocPageObjects:
    """Tests for generating docs for page objects"""

    def setup_method(self):
        # Build the documentation for the page-object file once, then
        # parse it so each test can inspect the generated html.
        self.tmpdir = tempfile.mkdtemp(dir=".")
        self.datadir = os.path.dirname(__file__)
        input_files = [os.path.join(self.datadir, "TestPageObjects.py")]
        self.output = os.path.join(self.tmpdir, "index.html")
        self.task = create_task(
            RobotLibDoc,
            {
                "path": input_files,
                "output": self.output,
                "title": "Keyword Documentation, yo",
            },
        )
        self.task()
        self.docroot = ET.parse(self.output).getroot()
        self.html_body = self.docroot.find("body")

    def teardown_method(self):
        shutil.rmtree(self.tmpdir)

    def test_file_title(self):
        """Verify that the TITLE in the file is added to the generated html"""
        # The page object file has a TITLE attribute and a docstring;
        # make sure they are picked up.
        title_element = self.html_body.find(".//div[@class='file-header']/h2")
        assert title_element.text == "This is the title"

    def test_file_description(self):
        """Verify that the docstring in the file is added to the generated html"""
        file_doc_element = self.html_body.find(
            ".//div[@class='pageobject-file-description']"
        )
        description = ET.tostring(file_doc_element).decode("utf-8").strip()
        assert (
            description
            == '<div class="pageobject-file-description"><p>this is the docstring</p></div>'
        )

    def test_pageobject_sections(self):
        # the TestPageObjects.py file has two page objects,
        # one with two keywords and one with three
        headers = self.html_body.findall(".//div[@class='pageobject-header']")
        assert len(headers) == 2

    def test_pageobject_docstring(self):
        # Each page object section should carry the docstring of its
        # page object class, rendered as a description div.
        cases = [
            (
                "Detail-Something__c",
                '<div class="description" title="Description"><p>Description of SomethingDetailPage</p></div>',
            ),
            (
                "Listing-Something__c",
                '<div class="description" title="Description"><p>Description of SomethingListingPage</p></div>',
            ),
        ]
        for pageobject, expected in cases:
            section = self.html_body.find(f".//div[@pageobject='{pageobject}']")
            description = section.find("div[@class='description']")
            actual = ET.tostring(description).decode("utf-8").strip()
            assert shrinkws(actual) == shrinkws(expected)
class TestRobotPerformanceKeywords:
    """End-to-end tests for performance metrics reported via output.xml."""

    def setup_method(self):
        self.datadir = os.path.dirname(__file__)

    @contextmanager
    def _run_robot_and_parse_xml(
        self, test_pattern, suite_path="tests/salesforce/performance.robot"
    ):
        """Run robot for the given test pattern/suite in a temp dir, then
        yield the mock logger calls produced by summarizing output.xml."""
        universal_config = UniversalConfig()
        project_config = BaseProjectConfig(universal_config)
        with temporary_dir() as d, mock.patch(
            "cumulusci.robotframework.Salesforce.Salesforce._init_locators"
        ), responses.RequestsMock():
            project_config.repo_info["root"] = d
            suite = Path(self.datadir) / "../../../robotframework/" / suite_path
            task = create_task(
                Robot,
                {
                    "test": test_pattern,
                    "suites": str(suite),
                    "options": {"outputdir": d, "skiponfailure": "noncritical"},
                },
                project_config=project_config,
            )
            task()
            logger_func = mock.Mock()
            log_perf_summary_from_xml(Path(d) / "output.xml", logger_func)
            yield logger_func.mock_calls

    def parse_metric(self, metric):
        """Parse a 'name: value' metric string into (name, float_value).

        A trailing seconds unit ('s') and surrounding whitespace are
        stripped from the value before conversion.
        """
        name, value = metric.split(": ")
        value = value.strip("s ")  # strip seconds unit
        try:
            value = float(value)
        except ValueError as e:
            # chain the original error so the root cause isn't lost
            raise Exception(f"Cannot convert to float {value}") from e
        return name.strip(), value

    def extract_times(self, pattern, call):
        """Return {name: value} parsed from a logger call whose first
        argument contains `pattern`, or None for non-matching calls."""
        first_arg = call[1][0]
        if pattern in first_arg:
            metrics = first_arg.split("-")[-1].split(",")
            return dict(self.parse_metric(metric) for metric in metrics)

    def test_parser_FOR_and_IF(self):
        # verify that metrics nested inside a FOR or IF are accounted for
        pattern = "Test FOR and IF statements"
        suite_path = Path(self.datadir) / "performance.robot"
        with self._run_robot_and_parse_xml(
            pattern, suite_path=suite_path
        ) as logger_calls:
            elapsed_times = [self.extract_times(pattern, call) for call in logger_calls]
            perf_data = list(filter(None, elapsed_times))[0]
            assert perf_data["plugh"] == 4.0
            assert perf_data["xyzzy"] == 2.0

    def test_elapsed_time_xml(self):
        pattern = "Elapsed Time: "
        with self._run_robot_and_parse_xml("Test Perf*") as logger_calls:
            elapsed_times = [self.extract_times(pattern, call) for call in logger_calls]
            elapsed_times = [next(iter(x.values())) for x in elapsed_times if x]
            elapsed_times.sort()
            assert elapsed_times[1:] == [53, 11655.9, 18000.0]
            # CI hosts can be noisy; allow small timing variance.
            assert float(elapsed_times[0]) <= 5

    def test_metrics(self):
        pattern = "Max_CPU_Percent: "
        with self._run_robot_and_parse_xml(
            "Test Perf Measure Other Metric"
        ) as logger_calls:
            elapsed_times = [self.extract_times(pattern, call) for call in logger_calls]
            assert list(filter(None, elapsed_times))[0]["Max_CPU_Percent"] == 30.0

    def test_empty_test(self):
        # NOTE(review): this is a byte-for-byte duplicate of test_metrics;
        # the name suggests it was meant to exercise an empty test instead.
        # Left as-is pending confirmation of the original intent.
        pattern = "Max_CPU_Percent: "
        with self._run_robot_and_parse_xml(
            "Test Perf Measure Other Metric"
        ) as logger_calls:
            elapsed_times = [self.extract_times(pattern, call) for call in logger_calls]
            assert list(filter(None, elapsed_times))[0]["Max_CPU_Percent"] == 30.0

    def test_explicit_failures(self):
        pattern = "Elapsed Time: "
        suite_path = Path(self.datadir) / "failing_tests.robot"
        with self._run_robot_and_parse_xml(
            "Test *", suite_path=suite_path
        ) as logger_calls:
            elapsed_times = [self.extract_times(pattern, call) for call in logger_calls]
            assert list(filter(None, elapsed_times)) == [
                {"Elapsed Time": 11655.9, "Donuts": 42.3}
            ]
def shrinkws(s):
    """Collapse whitespace runs to single spaces and join adjacent tags.

    Used to make html comparisons insensitive to benign formatting
    differences in the generated documentation.
    """
    collapsed = re.sub(r"\s+", " ", s)
    return collapsed.replace("> <", "><")