@@ -1392,13 +1392,20 @@ def __init__(self):
13921392 # Compile without attaching to the device.
13931393 config = IPUConfig()
13941394 config.device_connection.type = DeviceConnectionType.ON_DEMAND
1395+
1396+ If using `DeviceConnectionType.PRE_COMPILE` to compile models to run on C600
1397+ cards, then the link topology will need to be set to "line" using the
1398+ `POPLAR_TARGET_OPTIONS` environment variable. See
1399+ :external+poplar-api:ref:`environment variables` in the
1400+ :external+poplar-api:doc:`index` for more information.
13951401 """
13961402 self .type = DeviceConnectionType .ALWAYS
13971403 """
1398- Version of the IPU hardware to use (string). Must be one of "ipu1", "ipu2"
1399- or "" (default). Only required if the
1400- :ref:`connection type <device_connection.type>` provided is
1401- `DeviceConnectionType.PRE_COMPILE` or `DeviceConnectionType.NEVER`.
1404+ Version of the IPU architecture to use (string). Must be one of "ipu1",
1405+ "ipu2", "ipu21" or "" (default). A specific version is required if the
1406+ :ref:`connection type <device_connection.type>` is specified as
1407+ `DeviceConnectionType.PRE_COMPILE` or `DeviceConnectionType.NEVER`. Do
1408+ not specify a version otherwise.
14021409 """
14031410 self .version = ""
14041411 """
@@ -1474,7 +1481,7 @@ def _to_protobuf(self, pb):
14741481class _IpuAlgebraicSimplifierConfig (_ConfigBase ):
14751482 def __init__ (self ):
14761483 """
1477- Enables optimizations which allow arbitrary reassociations and
1484+ Enables optimizations which allow arbitrary re-associations and
14781485 transformations of mathematical operations with no accuracy guarantees.
14791486 Enabling this option can result in incorrect output for programs that depend
14801487 on an exact implementation of IEEE floating point for maths functions. It
@@ -1795,7 +1802,7 @@ def __init__(self):
17951802 """
17961803 self .cholesky_block_size = 0
17971804 """
1798- Enables optimizations which allow arbitrary reassociations and
1805+ Enables optimizations which allow arbitrary re-associations and
17991806 transformations of mathematical operations with no accuracy guarantees.
18001807 Enabling this option can result in incorrect output for programs that depend
18011808 on an exact implementation of IEEE floating point for maths functions. It
@@ -1913,7 +1920,7 @@ def __init__(self):
19131920 """
19141921 Configure the IPUs to be used by the session.
19151922 The configuration describes a system consisting of multiple TensorFlow
1916- devices, each with control of one of more IPUs. The devices will be labeled
1923+ devices, each with control of one or more IPUs. The devices will be labelled
19171924 ``/device:IPU:0``, ``/device:IPU:1`` and so on.
19181925
19191926 Each device can control a specific number of IPUs, given by the ``num_ipus``
@@ -1943,7 +1950,7 @@ def __init__(self):
19431950
19441951 The configuration describes a system consisting of multiple TensorFlow
19451952 devices, each with control of one or more IPUs. The TensorFlow devices will
1946- be labeled ``/device:IPU:0``, ``/device:IPU:1`` and so on.
1953+ be labelled ``/device:IPU:0``, ``/device:IPU:1`` and so on.
19471954
19481955 Each TensorFlow device uses a specific configuration consisting of one or
19491956 more IPUs from the list of devices. These can be found by running the
@@ -2256,10 +2263,11 @@ def configure_ipu_system(config, device="cpu", reset_configuration=True):
22562263
22572264def reset_ipu_configuration ():
22582265 """ Reset the IPU configuration in preparation for it to be reconfigured.
2259- Blocks until all currently configured IPU devices have finished executing.
2266+ This blocks until all currently configured IPU devices have finished
2267+ executing.
22602268
2261- Note that this function does not currently support reseting IPUs that are
2262- running in parallel python threads.
2269+ Note that this function does not currently support resetting IPUs that are
2270+ running in parallel Python threads.
22632271 """
22642272 sync_ops = []
22652273
0 commit comments