author     Jeff Vander Stoep <jeffv@google.com>   2015-06-18 07:52:02 -0700
committer  Jeff Vander Stoep <jeffv@google.com>   2015-06-18 15:45:31 -0700
commit     a8a6732ff6e9d5e49a8c6e888716ed98d64e2609 (patch)
tree       57b5a1be2dcd01bc500525ac831f3e3421f4dae5
parent     5de4f653a1cff7d7acbbb933711537ef47e2723c (diff)
SELinux: python modules for selinux tools
<tool(s)>: <required python modules>
audit2allow audit2why: sepolgen selinux
sesearch: setools setoolsgui

(cherry picked from fe63c8d5e6e841f474eec96e2dd38d8fa97a17f8)

Bug: 21928182
Change-Id: Ifc19252d8d8e178b86c51fe1f54e162a61ffc0dd
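
A quick way to check that the prebuilts satisfy the tool-to-module mapping
above is to attempt the imports directly. A minimal sketch, assuming the
prebuilt python2.7 is used and its site-packages is importable; the REQUIRED
table merely restates the commit message and is not part of this change:

    # check_selinux_modules.py -- illustrative only, not part of this commit
    import importlib

    REQUIRED = {
        "audit2allow/audit2why": ["sepolgen", "selinux"],
        "sesearch": ["setools", "setoolsgui"],
    }

    for tool, modules in REQUIRED.items():
        for mod in modules:
            try:
                importlib.import_module(mod)
                print("%s: %s OK" % (tool, mod))
            except ImportError as err:
                print("%s: %s MISSING (%s)" % (tool, mod, err))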
-rw-r--r--lib/python2.7/site-packages/selinux/__init__.py2445
-rwxr-xr-xlib/python2.7/site-packages/selinux/_selinux.sobin0 -> 333725 bytes
-rwxr-xr-xlib/python2.7/site-packages/selinux/audit2why.sobin0 -> 243205 bytes
-rw-r--r--lib/python2.7/site-packages/sepolgen/__init__.py0
-rw-r--r--lib/python2.7/site-packages/sepolgen/access.py331
-rw-r--r--lib/python2.7/site-packages/sepolgen/audit.py549
-rw-r--r--lib/python2.7/site-packages/sepolgen/classperms.py116
-rw-r--r--lib/python2.7/site-packages/sepolgen/defaults.py77
-rw-r--r--lib/python2.7/site-packages/sepolgen/interfaces.py509
-rw-r--r--lib/python2.7/site-packages/sepolgen/lex.py866
-rw-r--r--lib/python2.7/site-packages/sepolgen/matching.py255
-rw-r--r--lib/python2.7/site-packages/sepolgen/module.py213
-rw-r--r--lib/python2.7/site-packages/sepolgen/objectmodel.py172
-rw-r--r--lib/python2.7/site-packages/sepolgen/output.py173
-rw-r--r--lib/python2.7/site-packages/sepolgen/policygen.py402
-rw-r--r--lib/python2.7/site-packages/sepolgen/refparser.py1128
-rw-r--r--lib/python2.7/site-packages/sepolgen/refpolicy.py917
-rw-r--r--lib/python2.7/site-packages/sepolgen/sepolgeni18n.py26
-rw-r--r--lib/python2.7/site-packages/sepolgen/util.py87
-rw-r--r--lib/python2.7/site-packages/sepolgen/yacc.py2209
-rw-r--r--lib/python2.7/site-packages/setools/__init__.py68
-rw-r--r--lib/python2.7/site-packages/setools/boolquery.py66
-rw-r--r--lib/python2.7/site-packages/setools/categoryquery.py55
-rw-r--r--lib/python2.7/site-packages/setools/commonquery.py60
-rw-r--r--lib/python2.7/site-packages/setools/compquery.py39
-rw-r--r--lib/python2.7/site-packages/setools/constraintquery.py142
-rw-r--r--lib/python2.7/site-packages/setools/contextquery.py98
-rw-r--r--lib/python2.7/site-packages/setools/descriptors.py230
-rw-r--r--lib/python2.7/site-packages/setools/dta.py603
-rw-r--r--lib/python2.7/site-packages/setools/exception.py62
-rw-r--r--lib/python2.7/site-packages/setools/fsusequery.py87
-rw-r--r--lib/python2.7/site-packages/setools/genfsconquery.py98
-rw-r--r--lib/python2.7/site-packages/setools/infoflow.py403
-rw-r--r--lib/python2.7/site-packages/setools/initsidquery.py74
-rw-r--r--lib/python2.7/site-packages/setools/mixins.py91
-rw-r--r--lib/python2.7/site-packages/setools/mlsrulequery.py115
-rw-r--r--lib/python2.7/site-packages/setools/netifconquery.py77
-rw-r--r--lib/python2.7/site-packages/setools/nodeconquery.py148
-rw-r--r--lib/python2.7/site-packages/setools/objclassquery.py101
-rw-r--r--lib/python2.7/site-packages/setools/permmap.py363
-rw-r--r--lib/python2.7/site-packages/setools/polcapquery.py47
-rw-r--r--lib/python2.7/site-packages/setools/policyrep/__init__.py568
-rwxr-xr-xlib/python2.7/site-packages/setools/policyrep/_qpol.sobin0 -> 2151445 bytes
-rw-r--r--lib/python2.7/site-packages/setools/policyrep/boolcond.py167
-rw-r--r--lib/python2.7/site-packages/setools/policyrep/constraint.py297
-rw-r--r--lib/python2.7/site-packages/setools/policyrep/context.py68
-rw-r--r--lib/python2.7/site-packages/setools/policyrep/default.py128
-rw-r--r--lib/python2.7/site-packages/setools/policyrep/exception.py248
-rw-r--r--lib/python2.7/site-packages/setools/policyrep/fscontext.py123
-rw-r--r--lib/python2.7/site-packages/setools/policyrep/initsid.py50
-rw-r--r--lib/python2.7/site-packages/setools/policyrep/mls.py463
-rw-r--r--lib/python2.7/site-packages/setools/policyrep/mlsrule.py62
-rw-r--r--lib/python2.7/site-packages/setools/policyrep/netcontext.py167
-rw-r--r--lib/python2.7/site-packages/setools/policyrep/objclass.py110
-rw-r--r--lib/python2.7/site-packages/setools/policyrep/polcap.py40
-rw-r--r--lib/python2.7/site-packages/setools/policyrep/qpol.py1114
-rw-r--r--lib/python2.7/site-packages/setools/policyrep/rbacrule.py92
-rw-r--r--lib/python2.7/site-packages/setools/policyrep/role.py81
-rw-r--r--lib/python2.7/site-packages/setools/policyrep/rule.py72
-rw-r--r--lib/python2.7/site-packages/setools/policyrep/symbol.py74
-rw-r--r--lib/python2.7/site-packages/setools/policyrep/terule.py155
-rw-r--r--lib/python2.7/site-packages/setools/policyrep/typeattr.py174
-rw-r--r--lib/python2.7/site-packages/setools/policyrep/user.py86
-rw-r--r--lib/python2.7/site-packages/setools/portconquery.py146
-rw-r--r--lib/python2.7/site-packages/setools/query.py192
-rw-r--r--lib/python2.7/site-packages/setools/rbacrulequery.py147
-rw-r--r--lib/python2.7/site-packages/setools/rolequery.py77
-rw-r--r--lib/python2.7/site-packages/setools/sensitivityquery.py74
-rw-r--r--lib/python2.7/site-packages/setools/terulequery.py178
-rw-r--r--lib/python2.7/site-packages/setools/typeattrquery.py70
-rw-r--r--lib/python2.7/site-packages/setools/typequery.py96
-rw-r--r--lib/python2.7/site-packages/setools/userquery.py116
-rw-r--r--lib/python2.7/site-packages/setoolsgui/__init__.py21
-rw-r--r--lib/python2.7/site-packages/setoolsgui/apol/__init__.py24
-rw-r--r--lib/python2.7/site-packages/setoolsgui/apol/mainwindow.py261
-rw-r--r--lib/python2.7/site-packages/setoolsgui/apol/models.py103
-rw-r--r--lib/python2.7/site-packages/setoolsgui/apol/rulemodels.py116
-rw-r--r--lib/python2.7/site-packages/setoolsgui/apol/terulequery.py271
-rwxr-xr-xlib/python2.7/site-packages/setoolsgui/libselinux.so.1bin0 -> 172235 bytes
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/__init__.py85
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/__init__.py51
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/__init__.py6
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/clique.py97
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/dominating_set.py114
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/independent_set.py63
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/matching.py46
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/ramsey.py37
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/tests/test_clique.py41
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/tests/test_dominating_set.py53
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/tests/test_independent_set.py8
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/tests/test_matching.py8
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/tests/test_ramsey.py27
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/tests/test_vertex_cover.py39
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/vertex_cover.py65
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/__init__.py5
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/connectivity.py123
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/correlation.py298
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/mixing.py248
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/neighbor_degree.py133
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/pairs.py134
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/tests/base_test.py50
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/tests/test_connectivity.py121
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/tests/test_correlation.py101
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/tests/test_mixing.py186
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/tests/test_neighbor_degree.py82
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/tests/test_pairs.py113
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/__init__.py93
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/basic.py335
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/centrality.py266
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/cluster.py266
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/projection.py497
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/redundancy.py84
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/spectral.py88
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/tests/test_basic.py117
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/tests/test_centrality.py169
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/tests/test_cluster.py70
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/tests/test_project.py363
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/tests/test_spectral_bipartivity.py93
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/block.py115
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/boundary.py102
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/__init__.py20
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/betweenness.py334
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/betweenness_subset.py263
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/closeness.py103
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/communicability_alg.py495
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/current_flow_betweenness.py361
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/current_flow_betweenness_subset.py263
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/current_flow_closeness.py127
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/degree_alg.py131
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/eigenvector.py169
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/flow_matrix.py139
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/katz.py296
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/load.py190
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/tests/test_betweenness_centrality.py462
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/tests/test_betweenness_centrality_subset.py258
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/tests/test_closeness_centrality.py93
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/tests/test_communicability.py134
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality.py211
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality_subset.py181
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/tests/test_current_flow_closeness.py56
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/tests/test_degree_centrality.py92
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/tests/test_eigenvector_centrality.py123
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/tests/test_katz_centrality.py289
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/tests/test_load_centrality.py273
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/chordal/__init__.py3
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/chordal/chordal_alg.py347
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/chordal/tests/test_chordal.py59
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/clique.py516
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/cluster.py363
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/community/__init__.py1
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/community/kclique.py82
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/community/tests/test_kclique.py46
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/components/__init__.py5
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/components/attracting.py133
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/components/biconnected.py417
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/components/connected.py192
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/components/strongly_connected.py359
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/components/tests/test_attracting.py64
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/components/tests/test_biconnected.py191
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/components/tests/test_connected.py72
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/components/tests/test_strongly_connected.py138
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/components/tests/test_weakly_connected.py88
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/components/weakly_connected.py126
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/connectivity/__init__.py4
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/connectivity/connectivity.py607
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/connectivity/cuts.py382
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/connectivity/tests/test_connectivity.py145
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/connectivity/tests/test_cuts.py157
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/core.py324
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/cycles.py317
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/dag.py275
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/distance_measures.py170
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/distance_regular.py179
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/euler.py135
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/flow/__init__.py3
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/flow/maxflow.py477
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/flow/mincost.py802
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/flow/tests/test_maxflow.py273
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/flow/tests/test_maxflow_large_graph.py51
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/flow/tests/test_mincost.py284
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/graphical.py405
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/hierarchy.py53
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isolate.py77
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/__init__.py4
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/isomorph.py227
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/isomorphvf2.py965
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/matchhelpers.py346
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/tests/iso_r01_s80.A99bin0 -> 1442 bytes
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/tests/iso_r01_s80.B99bin0 -> 1442 bytes
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/tests/si2_b06_m200.A99bin0 -> 310 bytes
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/tests/si2_b06_m200.B99bin0 -> 1602 bytes
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/tests/test_isomorphism.py32
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/tests/test_isomorphvf2.py217
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/tests/test_vf2userfunc.py192
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/vf2userfunc.py198
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/link_analysis/__init__.py2
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/link_analysis/hits_alg.py308
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/link_analysis/pagerank_alg.py399
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/link_analysis/tests/test_hits.py93
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/link_analysis/tests/test_pagerank.py122
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/matching.py825
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/mis.py81
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/mst.py254
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/operators/__init__.py4
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/operators/all.py151
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/operators/binary.py329
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/operators/product.py330
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/operators/tests/test_all.py167
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/operators/tests/test_binary.py270
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/operators/tests/test_product.py334
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/operators/tests/test_unary.py47
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/operators/unary.py69
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/richclub.py101
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/__init__.py6
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/astar.py159
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/dense.py156
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/generic.py392
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/tests/test_astar.py137
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/tests/test_dense.py106
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/tests/test_dense_numpy.py53
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/tests/test_generic.py145
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/tests/test_unweighted.py81
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/tests/test_weighted.py246
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/unweighted.py359
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/weighted.py765
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/simple_paths.py124
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/smetric.py37
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/swap.py185
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_block.py103
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_boundary.py104
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_clique.py114
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_cluster.py195
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_core.py114
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_cycles.py122
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_dag.py163
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_distance_measures.py69
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_distance_regular.py44
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_euler.py84
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_graphical.py114
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_hierarchy.py30
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_matching.py247
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_mis.py89
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_mst.py133
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_richclub.py30
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_simple_paths.py73
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_smetric.py19
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_swap.py42
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_vitality.py35
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/traversal/__init__.py4
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/traversal/breadth_first_search.py53
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/traversal/depth_first_search.py124
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/traversal/tests/test_bfs.py36
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/traversal/tests/test_dfs.py68
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/algorithms/vitality.py84
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/classes/__init__.py5
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/classes/digraph.py1236
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/classes/function.py423
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/classes/graph.py1816
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/classes/multidigraph.py851
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/classes/multigraph.py966
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/classes/tests/historical_tests.py477
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/classes/tests/test_digraph.py257
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/classes/tests/test_digraph_historical.py119
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/classes/tests/test_function.py190
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/classes/tests/test_graph.py602
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/classes/tests/test_graph_historical.py14
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/classes/tests/test_multidigraph.py327
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/classes/tests/test_multigraph.py244
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/convert.py847
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/drawing/__init__.py20
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/drawing/layout.py540
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/drawing/nx_agraph.py447
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/drawing/nx_pydot.py287
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/drawing/nx_pylab.py896
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/drawing/tests/test_agraph.py75
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/drawing/tests/test_layout.py61
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/drawing/tests/test_pydot.py62
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/drawing/tests/test_pylab.py40
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/exception.py50
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/external/__init__.py0
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/external/decorator/__init__.py8
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/external/decorator/decorator2/__init__.py1
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/external/decorator/decorator2/_decorator2.py210
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/generators/__init__.py21
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/generators/atlas.py12336
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/generators/bipartite.py529
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/generators/classic.py508
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/generators/degree_seq.py793
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/generators/directed.py304
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/generators/ego.py70
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/generators/geometric.py352
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/generators/hybrid.py116
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/generators/intersection.py118
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/generators/line.py69
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/generators/random_clustered.py125
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/generators/random_graphs.py890
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/generators/small.py412
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/generators/social.py280
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/generators/stochastic.py46
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_atlas.py55
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_bipartite.py176
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_classic.py408
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_degree_seq.py169
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_directed.py36
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_ego.py42
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_geometric.py31
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_hybrid.py24
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_intersection.py19
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_line.py30
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_random_clustered.py28
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_random_graphs.py129
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_small.py181
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_stochastic.py33
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_threshold.py183
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/generators/threshold.py906
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/linalg/__init__.py9
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/linalg/attrmatrix.py458
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/linalg/graphmatrix.py156
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/linalg/laplacianmatrix.py277
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/linalg/spectrum.py90
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/linalg/tests/test_graphmatrix.py89
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/linalg/tests/test_laplacian.py101
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/linalg/tests/test_spectrum.py44
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/readwrite/__init__.py16
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/readwrite/adjlist.py314
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/readwrite/edgelist.py464
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/readwrite/gexf.py926
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/readwrite/gml.py410
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/readwrite/gpickle.py100
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/readwrite/graphml.py579
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/readwrite/json_graph/__init__.py10
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/readwrite/json_graph/adjacency.py123
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/readwrite/json_graph/node_link.py116
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/readwrite/json_graph/serialize.py31
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/readwrite/json_graph/tests/test_adjacency.py52
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/readwrite/json_graph/tests/test_node_link.py44
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/readwrite/json_graph/tests/test_serialize.py49
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/readwrite/json_graph/tests/test_tree.py29
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/readwrite/json_graph/tree.py113
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/readwrite/leda.py106
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/readwrite/multiline_adjlist.py390
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/readwrite/nx_shp.py224
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/readwrite/nx_yaml.py109
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/readwrite/p2g.py107
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/readwrite/pajek.py231
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/readwrite/sparsegraph6.py169
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_adjlist.py283
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_edgelist.py234
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_gexf.py306
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_gml.py135
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_gpickle.py27
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_graphml.py445
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_leda.py35
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_p2g.py64
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_pajek.py51
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_shp.py140
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_sparsegraph6.py87
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_yaml.py53
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/relabel.py205
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/release.py254
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/testing/__init__.py1
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/testing/tests/test_utils.py108
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/testing/utils.py57
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/tests/__init__.py0
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/tests/benchmark.py248
-rwxr-xr-xlib/python2.7/site-packages/setoolsgui/networkx/tests/test.py45
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/tests/test_convert.py224
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/tests/test_convert_numpy.py172
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/tests/test_convert_scipy.py179
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/tests/test_exceptions.py33
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/tests/test_relabel.py163
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/utils/__init__.py5
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/utils/decorators.py270
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/utils/misc.py151
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/utils/random_sequence.py222
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/utils/rcm.py150
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/utils/tests/test_decorators.py160
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/utils/tests/test_misc.py72
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/utils/tests/test_random_sequence.py33
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/utils/tests/test_rcm.py13
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/utils/union_find.py75
-rw-r--r--lib/python2.7/site-packages/setoolsgui/networkx/version.py25
-rw-r--r--lib/python2.7/site-packages/setoolsgui/selinux/__init__.py2445
-rwxr-xr-xlib/python2.7/site-packages/setoolsgui/selinux/_selinux.sobin0 -> 333725 bytes
-rwxr-xr-xlib/python2.7/site-packages/setoolsgui/selinux/audit2why.sobin0 -> 243205 bytes
-rw-r--r--lib/python2.7/site-packages/setoolsgui/sepolgen/__init__.py0
-rw-r--r--lib/python2.7/site-packages/setoolsgui/sepolgen/access.py331
-rw-r--r--lib/python2.7/site-packages/setoolsgui/sepolgen/audit.py549
-rw-r--r--lib/python2.7/site-packages/setoolsgui/sepolgen/classperms.py116
-rw-r--r--lib/python2.7/site-packages/setoolsgui/sepolgen/defaults.py77
-rw-r--r--lib/python2.7/site-packages/setoolsgui/sepolgen/interfaces.py509
-rw-r--r--lib/python2.7/site-packages/setoolsgui/sepolgen/lex.py866
-rw-r--r--lib/python2.7/site-packages/setoolsgui/sepolgen/matching.py255
-rw-r--r--lib/python2.7/site-packages/setoolsgui/sepolgen/module.py213
-rw-r--r--lib/python2.7/site-packages/setoolsgui/sepolgen/objectmodel.py172
-rw-r--r--lib/python2.7/site-packages/setoolsgui/sepolgen/output.py173
-rw-r--r--lib/python2.7/site-packages/setoolsgui/sepolgen/policygen.py402
-rw-r--r--lib/python2.7/site-packages/setoolsgui/sepolgen/refparser.py1128
-rw-r--r--lib/python2.7/site-packages/setoolsgui/sepolgen/refpolicy.py917
-rw-r--r--lib/python2.7/site-packages/setoolsgui/sepolgen/sepolgeni18n.py26
-rw-r--r--lib/python2.7/site-packages/setoolsgui/sepolgen/util.py87
-rw-r--r--lib/python2.7/site-packages/setoolsgui/sepolgen/yacc.py2209
-rwxr-xr-xlib/python2.7/site-packages/setoolsgui/sesearch206
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/__init__.py68
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/boolquery.py66
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/categoryquery.py55
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/commonquery.py60
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/compquery.py39
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/constraintquery.py142
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/contextquery.py98
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/descriptors.py230
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/dta.py603
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/exception.py62
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/fsusequery.py87
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/genfsconquery.py98
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/infoflow.py403
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/initsidquery.py74
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/mixins.py91
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/mlsrulequery.py115
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/netifconquery.py77
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/nodeconquery.py148
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/objclassquery.py101
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/permmap.py363
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/polcapquery.py47
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/policyrep/__init__.py568
-rwxr-xr-xlib/python2.7/site-packages/setoolsgui/setools/policyrep/_qpol.sobin0 -> 2151445 bytes
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/policyrep/boolcond.py167
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/policyrep/constraint.py297
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/policyrep/context.py68
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/policyrep/default.py128
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/policyrep/exception.py248
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/policyrep/fscontext.py123
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/policyrep/initsid.py50
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/policyrep/mls.py463
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/policyrep/mlsrule.py62
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/policyrep/netcontext.py167
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/policyrep/objclass.py110
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/policyrep/polcap.py40
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/policyrep/qpol.py1114
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/policyrep/rbacrule.py92
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/policyrep/role.py81
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/policyrep/rule.py72
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/policyrep/symbol.py74
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/policyrep/terule.py155
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/policyrep/typeattr.py174
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/policyrep/user.py86
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/portconquery.py146
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/query.py192
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/rbacrulequery.py147
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/rolequery.py77
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/sensitivityquery.py74
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/terulequery.py178
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/typeattrquery.py70
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/typequery.py96
-rw-r--r--lib/python2.7/site-packages/setoolsgui/setools/userquery.py116
-rw-r--r--lib/python2.7/site-packages/setoolsgui/widget.py37
456 files changed, 109752 insertions, 0 deletions
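
Most of the import is SWIG-generated bindings, but selinux/__init__.py (first
hunk below) also carries a few hand-written convenience helpers: restorecon,
chcon, copytree and install. A minimal usage sketch, assuming an
SELinux-enabled kernel with a loaded policy; the paths and context string are
illustrative assumptions, not part of this change:

    import selinux

    # Relabel a file explicitly, then reset it to the policy default.
    selinux.chcon("/data/local/tmp/demo", "u:object_r:shell_data_file:s0")
    selinux.restorecon("/data/local/tmp/demo")

    # shutil-style copy/move that restore labels on the destination.
    selinux.copytree("/data/local/tmp/src", "/data/local/tmp/dst")
    selinux.install("/data/local/tmp/dst", "/data/local/tmp/final")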
diff --git a/lib/python2.7/site-packages/selinux/__init__.py b/lib/python2.7/site-packages/selinux/__init__.py
new file mode 100644
index 0000000..b81b031
--- /dev/null
+++ b/lib/python2.7/site-packages/selinux/__init__.py
@@ -0,0 +1,2445 @@
+# This file was automatically generated by SWIG (http://www.swig.org).
+# Version 2.0.11
+#
+# Do not make changes to this file unless you know what you are doing--modify
+# the SWIG interface file instead.
+
+
+
+
+
+from sys import version_info
+if version_info >= (2,6,0):
+    def swig_import_helper():
+        from os.path import dirname
+        import imp
+        fp = None
+        try:
+            fp, pathname, description = imp.find_module('_selinux', [dirname(__file__)])
+        except ImportError:
+            import _selinux
+            return _selinux
+        if fp is not None:
+            try:
+                _mod = imp.load_module('_selinux', fp, pathname, description)
+            finally:
+                fp.close()
+            return _mod
+    _selinux = swig_import_helper()
+    del swig_import_helper
+else:
+    import _selinux
+del version_info
+try:
+    _swig_property = property
+except NameError:
+    pass # Python < 2.2 doesn't have 'property'.
+def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
+    if (name == "thisown"): return self.this.own(value)
+    if (name == "this"):
+        if type(value).__name__ == 'SwigPyObject':
+            self.__dict__[name] = value
+            return
+    method = class_type.__swig_setmethods__.get(name,None)
+    if method: return method(self,value)
+    if (not static):
+        self.__dict__[name] = value
+    else:
+        raise AttributeError("You cannot add attributes to %s" % self)
+
+def _swig_setattr(self,class_type,name,value):
+    return _swig_setattr_nondynamic(self,class_type,name,value,0)
+
+def _swig_getattr(self,class_type,name):
+    if (name == "thisown"): return self.this.own()
+    method = class_type.__swig_getmethods__.get(name,None)
+    if method: return method(self)
+    raise AttributeError(name)
+
+def _swig_repr(self):
+    try: strthis = "proxy of " + self.this.__repr__()
+    except: strthis = ""
+    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
+
+try:
+    _object = object
+    _newclass = 1
+except AttributeError:
+    class _object : pass
+    _newclass = 0
+
+
+import shutil, os, stat
+
+DISABLED = -1
+PERMISSIVE = 0
+ENFORCING = 1
+
+def restorecon(path, recursive=False):
+    """ Restore SELinux context on a given path """
+
+    try:
+        mode = os.lstat(path)[stat.ST_MODE]
+        status, context = matchpathcon(path, mode)
+    except OSError:
+        path = os.path.realpath(os.path.expanduser(path))
+        mode = os.lstat(path)[stat.ST_MODE]
+        status, context = matchpathcon(path, mode)
+
+    if status == 0:
+        status, oldcontext = lgetfilecon(path)
+        if context != oldcontext:
+            lsetfilecon(path, context)
+
+        if recursive:
+            for root, dirs, files in os.walk(path):
+                for name in files + dirs:
+                    restorecon(os.path.join(root, name))
+
+def chcon(path, context, recursive=False):
+    """ Set the SELinux context on a given path """
+    lsetfilecon(path, context)
+    if recursive:
+        for root, dirs, files in os.walk(path):
+            for name in files + dirs:
+                lsetfilecon(os.path.join(root,name), context)
+
+def copytree(src, dest):
+    """ An SELinux-friendly shutil.copytree method """
+    shutil.copytree(src, dest)
+    restorecon(dest, recursive=True)
+
+def install(src, dest):
+    """ An SELinux-friendly shutil.move method """
+    shutil.move(src, dest)
+    restorecon(dest, recursive=True)
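+
+# Illustrative note (not part of the generated file): restorecon() resolves
+# the expected label via matchpathcon() and only calls lsetfilecon() when the
+# current label differs, e.g.:
+#
+#   status, expected = matchpathcon("/etc/passwd", os.lstat("/etc/passwd")[stat.ST_MODE])
+#   status, current = lgetfilecon("/etc/passwd")
+#   # restorecon("/etc/passwd") relabels only if expected != current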
+
+class security_id(_object):
+    __swig_setmethods__ = {}
+    __setattr__ = lambda self, name, value: _swig_setattr(self, security_id, name, value)
+    __swig_getmethods__ = {}
+    __getattr__ = lambda self, name: _swig_getattr(self, security_id, name)
+    __repr__ = _swig_repr
+    __swig_setmethods__["ctx"] = _selinux.security_id_ctx_set
+    __swig_getmethods__["ctx"] = _selinux.security_id_ctx_get
+    if _newclass:ctx = _swig_property(_selinux.security_id_ctx_get, _selinux.security_id_ctx_set)
+    __swig_setmethods__["refcnt"] = _selinux.security_id_refcnt_set
+    __swig_getmethods__["refcnt"] = _selinux.security_id_refcnt_get
+    if _newclass:refcnt = _swig_property(_selinux.security_id_refcnt_get, _selinux.security_id_refcnt_set)
+    def __init__(self):
+        this = _selinux.new_security_id()
+        try: self.this.append(this)
+        except: self.this = this
+    __swig_destroy__ = _selinux.delete_security_id
+    __del__ = lambda self : None;
+security_id_swigregister = _selinux.security_id_swigregister
+security_id_swigregister(security_id)
+
+
+def avc_sid_to_context(*args):
+    return _selinux.avc_sid_to_context(*args)
+avc_sid_to_context = _selinux.avc_sid_to_context
+
+def avc_sid_to_context_raw(*args):
+    return _selinux.avc_sid_to_context_raw(*args)
+avc_sid_to_context_raw = _selinux.avc_sid_to_context_raw
+
+def avc_context_to_sid(*args):
+    return _selinux.avc_context_to_sid(*args)
+avc_context_to_sid = _selinux.avc_context_to_sid
+
+def avc_context_to_sid_raw(*args):
+    return _selinux.avc_context_to_sid_raw(*args)
+avc_context_to_sid_raw = _selinux.avc_context_to_sid_raw
+
+def sidget(*args):
+    return _selinux.sidget(*args)
+sidget = _selinux.sidget
+
+def sidput(*args):
+    return _selinux.sidput(*args)
+sidput = _selinux.sidput
+
+def avc_get_initial_sid(*args):
+    return _selinux.avc_get_initial_sid(*args)
+avc_get_initial_sid = _selinux.avc_get_initial_sid
+class avc_entry_ref(_object):
+    __swig_setmethods__ = {}
+    __setattr__ = lambda self, name, value: _swig_setattr(self, avc_entry_ref, name, value)
+    __swig_getmethods__ = {}
+    __getattr__ = lambda self, name: _swig_getattr(self, avc_entry_ref, name)
+    __repr__ = _swig_repr
+    __swig_setmethods__["ae"] = _selinux.avc_entry_ref_ae_set
+    __swig_getmethods__["ae"] = _selinux.avc_entry_ref_ae_get
+    if _newclass:ae = _swig_property(_selinux.avc_entry_ref_ae_get, _selinux.avc_entry_ref_ae_set)
+    def __init__(self):
+        this = _selinux.new_avc_entry_ref()
+        try: self.this.append(this)
+        except: self.this = this
+    __swig_destroy__ = _selinux.delete_avc_entry_ref
+    __del__ = lambda self : None;
+avc_entry_ref_swigregister = _selinux.avc_entry_ref_swigregister
+avc_entry_ref_swigregister(avc_entry_ref)
+
+class avc_memory_callback(_object):
+    __swig_setmethods__ = {}
+    __setattr__ = lambda self, name, value: _swig_setattr(self, avc_memory_callback, name, value)
+    __swig_getmethods__ = {}
+    __getattr__ = lambda self, name: _swig_getattr(self, avc_memory_callback, name)
+    __repr__ = _swig_repr
+    __swig_setmethods__["func_malloc"] = _selinux.avc_memory_callback_func_malloc_set
+    __swig_getmethods__["func_malloc"] = _selinux.avc_memory_callback_func_malloc_get
+    if _newclass:func_malloc = _swig_property(_selinux.avc_memory_callback_func_malloc_get, _selinux.avc_memory_callback_func_malloc_set)
+    __swig_setmethods__["func_free"] = _selinux.avc_memory_callback_func_free_set
+    __swig_getmethods__["func_free"] = _selinux.avc_memory_callback_func_free_get
+    if _newclass:func_free = _swig_property(_selinux.avc_memory_callback_func_free_get, _selinux.avc_memory_callback_func_free_set)
+    def __init__(self):
+        this = _selinux.new_avc_memory_callback()
+        try: self.this.append(this)
+        except: self.this = this
+    __swig_destroy__ = _selinux.delete_avc_memory_callback
+    __del__ = lambda self : None;
+avc_memory_callback_swigregister = _selinux.avc_memory_callback_swigregister
+avc_memory_callback_swigregister(avc_memory_callback)
+
+class avc_log_callback(_object):
+    __swig_setmethods__ = {}
+    __setattr__ = lambda self, name, value: _swig_setattr(self, avc_log_callback, name, value)
+    __swig_getmethods__ = {}
+    __getattr__ = lambda self, name: _swig_getattr(self, avc_log_callback, name)
+    __repr__ = _swig_repr
+    __swig_setmethods__["func_log"] = _selinux.avc_log_callback_func_log_set
+    __swig_getmethods__["func_log"] = _selinux.avc_log_callback_func_log_get
+    if _newclass:func_log = _swig_property(_selinux.avc_log_callback_func_log_get, _selinux.avc_log_callback_func_log_set)
+    __swig_setmethods__["func_audit"] = _selinux.avc_log_callback_func_audit_set
+    __swig_getmethods__["func_audit"] = _selinux.avc_log_callback_func_audit_get
+    if _newclass:func_audit = _swig_property(_selinux.avc_log_callback_func_audit_get, _selinux.avc_log_callback_func_audit_set)
+    def __init__(self):
+        this = _selinux.new_avc_log_callback()
+        try: self.this.append(this)
+        except: self.this = this
+    __swig_destroy__ = _selinux.delete_avc_log_callback
+    __del__ = lambda self : None;
+avc_log_callback_swigregister = _selinux.avc_log_callback_swigregister
+avc_log_callback_swigregister(avc_log_callback)
+
+class avc_thread_callback(_object):
+    __swig_setmethods__ = {}
+    __setattr__ = lambda self, name, value: _swig_setattr(self, avc_thread_callback, name, value)
+    __swig_getmethods__ = {}
+    __getattr__ = lambda self, name: _swig_getattr(self, avc_thread_callback, name)
+    __repr__ = _swig_repr
+    __swig_setmethods__["func_create_thread"] = _selinux.avc_thread_callback_func_create_thread_set
+    __swig_getmethods__["func_create_thread"] = _selinux.avc_thread_callback_func_create_thread_get
+    if _newclass:func_create_thread = _swig_property(_selinux.avc_thread_callback_func_create_thread_get, _selinux.avc_thread_callback_func_create_thread_set)
+    __swig_setmethods__["func_stop_thread"] = _selinux.avc_thread_callback_func_stop_thread_set
+    __swig_getmethods__["func_stop_thread"] = _selinux.avc_thread_callback_func_stop_thread_get
+    if _newclass:func_stop_thread = _swig_property(_selinux.avc_thread_callback_func_stop_thread_get, _selinux.avc_thread_callback_func_stop_thread_set)
+    def __init__(self):
+        this = _selinux.new_avc_thread_callback()
+        try: self.this.append(this)
+        except: self.this = this
+    __swig_destroy__ = _selinux.delete_avc_thread_callback
+    __del__ = lambda self : None;
+avc_thread_callback_swigregister = _selinux.avc_thread_callback_swigregister
+avc_thread_callback_swigregister(avc_thread_callback)
+
+class avc_lock_callback(_object):
+    __swig_setmethods__ = {}
+    __setattr__ = lambda self, name, value: _swig_setattr(self, avc_lock_callback, name, value)
+    __swig_getmethods__ = {}
+    __getattr__ = lambda self, name: _swig_getattr(self, avc_lock_callback, name)
+    __repr__ = _swig_repr
+    __swig_setmethods__["func_alloc_lock"] = _selinux.avc_lock_callback_func_alloc_lock_set
+    __swig_getmethods__["func_alloc_lock"] = _selinux.avc_lock_callback_func_alloc_lock_get
+    if _newclass:func_alloc_lock = _swig_property(_selinux.avc_lock_callback_func_alloc_lock_get, _selinux.avc_lock_callback_func_alloc_lock_set)
+    __swig_setmethods__["func_get_lock"] = _selinux.avc_lock_callback_func_get_lock_set
+    __swig_getmethods__["func_get_lock"] = _selinux.avc_lock_callback_func_get_lock_get
+    if _newclass:func_get_lock = _swig_property(_selinux.avc_lock_callback_func_get_lock_get, _selinux.avc_lock_callback_func_get_lock_set)
+    __swig_setmethods__["func_release_lock"] = _selinux.avc_lock_callback_func_release_lock_set
+    __swig_getmethods__["func_release_lock"] = _selinux.avc_lock_callback_func_release_lock_get
+    if _newclass:func_release_lock = _swig_property(_selinux.avc_lock_callback_func_release_lock_get, _selinux.avc_lock_callback_func_release_lock_set)
+    __swig_setmethods__["func_free_lock"] = _selinux.avc_lock_callback_func_free_lock_set
+    __swig_getmethods__["func_free_lock"] = _selinux.avc_lock_callback_func_free_lock_get
+    if _newclass:func_free_lock = _swig_property(_selinux.avc_lock_callback_func_free_lock_get, _selinux.avc_lock_callback_func_free_lock_set)
+    def __init__(self):
+        this = _selinux.new_avc_lock_callback()
+        try: self.this.append(this)
+        except: self.this = this
+    __swig_destroy__ = _selinux.delete_avc_lock_callback
+    __del__ = lambda self : None;
+avc_lock_callback_swigregister = _selinux.avc_lock_callback_swigregister
+avc_lock_callback_swigregister(avc_lock_callback)
+
+AVC_OPT_UNUSED = _selinux.AVC_OPT_UNUSED
+AVC_OPT_SETENFORCE = _selinux.AVC_OPT_SETENFORCE
+
+def avc_init(*args):
+    return _selinux.avc_init(*args)
+avc_init = _selinux.avc_init
+
+def avc_open(*args):
+    return _selinux.avc_open(*args)
+avc_open = _selinux.avc_open
+
+def avc_cleanup():
+    return _selinux.avc_cleanup()
+avc_cleanup = _selinux.avc_cleanup
+
+def avc_reset():
+    return _selinux.avc_reset()
+avc_reset = _selinux.avc_reset
+
+def avc_destroy():
+    return _selinux.avc_destroy()
+avc_destroy = _selinux.avc_destroy
+
+def avc_has_perm_noaudit(*args):
+    return _selinux.avc_has_perm_noaudit(*args)
+avc_has_perm_noaudit = _selinux.avc_has_perm_noaudit
+
+def avc_has_perm(*args):
+    return _selinux.avc_has_perm(*args)
+avc_has_perm = _selinux.avc_has_perm
+
+def avc_audit(*args):
+    return _selinux.avc_audit(*args)
+avc_audit = _selinux.avc_audit
+
+def avc_compute_create(*args):
+    return _selinux.avc_compute_create(*args)
+avc_compute_create = _selinux.avc_compute_create
+
+def avc_compute_member(*args):
+    return _selinux.avc_compute_member(*args)
+avc_compute_member = _selinux.avc_compute_member
+AVC_CALLBACK_GRANT = _selinux.AVC_CALLBACK_GRANT
+AVC_CALLBACK_TRY_REVOKE = _selinux.AVC_CALLBACK_TRY_REVOKE
+AVC_CALLBACK_REVOKE = _selinux.AVC_CALLBACK_REVOKE
+AVC_CALLBACK_RESET = _selinux.AVC_CALLBACK_RESET
+AVC_CALLBACK_AUDITALLOW_ENABLE = _selinux.AVC_CALLBACK_AUDITALLOW_ENABLE
+AVC_CALLBACK_AUDITALLOW_DISABLE = _selinux.AVC_CALLBACK_AUDITALLOW_DISABLE
+AVC_CALLBACK_AUDITDENY_ENABLE = _selinux.AVC_CALLBACK_AUDITDENY_ENABLE
+AVC_CALLBACK_AUDITDENY_DISABLE = _selinux.AVC_CALLBACK_AUDITDENY_DISABLE
+AVC_CACHE_STATS = _selinux.AVC_CACHE_STATS
+class avc_cache_stats(_object):
+    __swig_setmethods__ = {}
+    __setattr__ = lambda self, name, value: _swig_setattr(self, avc_cache_stats, name, value)
+    __swig_getmethods__ = {}
+    __getattr__ = lambda self, name: _swig_getattr(self, avc_cache_stats, name)
+    __repr__ = _swig_repr
+    __swig_setmethods__["entry_lookups"] = _selinux.avc_cache_stats_entry_lookups_set
+    __swig_getmethods__["entry_lookups"] = _selinux.avc_cache_stats_entry_lookups_get
+    if _newclass:entry_lookups = _swig_property(_selinux.avc_cache_stats_entry_lookups_get, _selinux.avc_cache_stats_entry_lookups_set)
+    __swig_setmethods__["entry_hits"] = _selinux.avc_cache_stats_entry_hits_set
+    __swig_getmethods__["entry_hits"] = _selinux.avc_cache_stats_entry_hits_get
+    if _newclass:entry_hits = _swig_property(_selinux.avc_cache_stats_entry_hits_get, _selinux.avc_cache_stats_entry_hits_set)
+    __swig_setmethods__["entry_misses"] = _selinux.avc_cache_stats_entry_misses_set
+    __swig_getmethods__["entry_misses"] = _selinux.avc_cache_stats_entry_misses_get
+    if _newclass:entry_misses = _swig_property(_selinux.avc_cache_stats_entry_misses_get, _selinux.avc_cache_stats_entry_misses_set)
+    __swig_setmethods__["entry_discards"] = _selinux.avc_cache_stats_entry_discards_set
+    __swig_getmethods__["entry_discards"] = _selinux.avc_cache_stats_entry_discards_get
+    if _newclass:entry_discards = _swig_property(_selinux.avc_cache_stats_entry_discards_get, _selinux.avc_cache_stats_entry_discards_set)
+    __swig_setmethods__["cav_lookups"] = _selinux.avc_cache_stats_cav_lookups_set
+    __swig_getmethods__["cav_lookups"] = _selinux.avc_cache_stats_cav_lookups_get
+    if _newclass:cav_lookups = _swig_property(_selinux.avc_cache_stats_cav_lookups_get, _selinux.avc_cache_stats_cav_lookups_set)
+    __swig_setmethods__["cav_hits"] = _selinux.avc_cache_stats_cav_hits_set
+    __swig_getmethods__["cav_hits"] = _selinux.avc_cache_stats_cav_hits_get
+    if _newclass:cav_hits = _swig_property(_selinux.avc_cache_stats_cav_hits_get, _selinux.avc_cache_stats_cav_hits_set)
+    __swig_setmethods__["cav_probes"] = _selinux.avc_cache_stats_cav_probes_set
+    __swig_getmethods__["cav_probes"] = _selinux.avc_cache_stats_cav_probes_get
+    if _newclass:cav_probes = _swig_property(_selinux.avc_cache_stats_cav_probes_get, _selinux.avc_cache_stats_cav_probes_set)
+    __swig_setmethods__["cav_misses"] = _selinux.avc_cache_stats_cav_misses_set
+    __swig_getmethods__["cav_misses"] = _selinux.avc_cache_stats_cav_misses_get
+    if _newclass:cav_misses = _swig_property(_selinux.avc_cache_stats_cav_misses_get, _selinux.avc_cache_stats_cav_misses_set)
+    def __init__(self):
+        this = _selinux.new_avc_cache_stats()
+        try: self.this.append(this)
+        except: self.this = this
+    __swig_destroy__ = _selinux.delete_avc_cache_stats
+    __del__ = lambda self : None;
+avc_cache_stats_swigregister = _selinux.avc_cache_stats_swigregister
+avc_cache_stats_swigregister(avc_cache_stats)
+
+
+def avc_av_stats():
+ return _selinux.avc_av_stats()
+avc_av_stats = _selinux.avc_av_stats
+
+def avc_sid_stats():
+ return _selinux.avc_sid_stats()
+avc_sid_stats = _selinux.avc_sid_stats
+
+def avc_netlink_open(*args):
+    return _selinux.avc_netlink_open(*args)
+avc_netlink_open = _selinux.avc_netlink_open
+
+def avc_netlink_loop():
+    return _selinux.avc_netlink_loop()
+avc_netlink_loop = _selinux.avc_netlink_loop
+
+def avc_netlink_close():
+    return _selinux.avc_netlink_close()
+avc_netlink_close = _selinux.avc_netlink_close
+
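These three wrappers track libselinux's avc_netlink_open(int blocking), avc_netlink_loop() and avc_netlink_close(). A hedged monitoring sketch following the C API's conventions (avc_netlink_loop() dispatches enforcing-mode and policy-load notifications and normally does not return):

    import selinux

    # 1 selects blocking mode, per the C avc_netlink_open(int blocking).
    if selinux.avc_netlink_open(1) == 0:
        try:
            selinux.avc_netlink_loop()   # blocks, dispatching notifications
        finally:
            selinux.avc_netlink_close()
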
+def selinux_status_open(*args):
+    return _selinux.selinux_status_open(*args)
+selinux_status_open = _selinux.selinux_status_open
+
+def selinux_status_close():
+    return _selinux.selinux_status_close()
+selinux_status_close = _selinux.selinux_status_close
+
+def selinux_status_updated():
+    return _selinux.selinux_status_updated()
+selinux_status_updated = _selinux.selinux_status_updated
+
+def selinux_status_getenforce():
+    return _selinux.selinux_status_getenforce()
+selinux_status_getenforce = _selinux.selinux_status_getenforce
+
+def selinux_status_policyload():
+    return _selinux.selinux_status_policyload()
+selinux_status_policyload = _selinux.selinux_status_policyload
+
+def selinux_status_deny_unknown():
+    return _selinux.selinux_status_deny_unknown()
+selinux_status_deny_unknown = _selinux.selinux_status_deny_unknown
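The selinux_status_* wrappers cover libselinux's mmap'ed kernel status page. A minimal polling sketch, assuming the C return conventions (selinux_status_open() returns 0 on success, 1 when it falls back to netlink emulation, -1 on error; selinux_status_updated() is non-zero when state has changed since the last check):

    import selinux

    if selinux.selinux_status_open(1) >= 0:   # 1 = allow the fallback path
        try:
            if selinux.selinux_status_updated():
                print("enforcing:", selinux.selinux_status_getenforce())
                print("policy loads:", selinux.selinux_status_policyload())
                print("deny_unknown:", selinux.selinux_status_deny_unknown())
        finally:
            selinux.selinux_status_close()
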
+COMMON_FILE__IOCTL = _selinux.COMMON_FILE__IOCTL
+COMMON_FILE__READ = _selinux.COMMON_FILE__READ
+COMMON_FILE__WRITE = _selinux.COMMON_FILE__WRITE
+COMMON_FILE__CREATE = _selinux.COMMON_FILE__CREATE
+COMMON_FILE__GETATTR = _selinux.COMMON_FILE__GETATTR
+COMMON_FILE__SETATTR = _selinux.COMMON_FILE__SETATTR
+COMMON_FILE__LOCK = _selinux.COMMON_FILE__LOCK
+COMMON_FILE__RELABELFROM = _selinux.COMMON_FILE__RELABELFROM
+COMMON_FILE__RELABELTO = _selinux.COMMON_FILE__RELABELTO
+COMMON_FILE__APPEND = _selinux.COMMON_FILE__APPEND
+COMMON_FILE__UNLINK = _selinux.COMMON_FILE__UNLINK
+COMMON_FILE__LINK = _selinux.COMMON_FILE__LINK
+COMMON_FILE__RENAME = _selinux.COMMON_FILE__RENAME
+COMMON_FILE__EXECUTE = _selinux.COMMON_FILE__EXECUTE
+COMMON_FILE__SWAPON = _selinux.COMMON_FILE__SWAPON
+COMMON_FILE__QUOTAON = _selinux.COMMON_FILE__QUOTAON
+COMMON_FILE__MOUNTON = _selinux.COMMON_FILE__MOUNTON
+COMMON_SOCKET__IOCTL = _selinux.COMMON_SOCKET__IOCTL
+COMMON_SOCKET__READ = _selinux.COMMON_SOCKET__READ
+COMMON_SOCKET__WRITE = _selinux.COMMON_SOCKET__WRITE
+COMMON_SOCKET__CREATE = _selinux.COMMON_SOCKET__CREATE
+COMMON_SOCKET__GETATTR = _selinux.COMMON_SOCKET__GETATTR
+COMMON_SOCKET__SETATTR = _selinux.COMMON_SOCKET__SETATTR
+COMMON_SOCKET__LOCK = _selinux.COMMON_SOCKET__LOCK
+COMMON_SOCKET__RELABELFROM = _selinux.COMMON_SOCKET__RELABELFROM
+COMMON_SOCKET__RELABELTO = _selinux.COMMON_SOCKET__RELABELTO
+COMMON_SOCKET__APPEND = _selinux.COMMON_SOCKET__APPEND
+COMMON_SOCKET__BIND = _selinux.COMMON_SOCKET__BIND
+COMMON_SOCKET__CONNECT = _selinux.COMMON_SOCKET__CONNECT
+COMMON_SOCKET__LISTEN = _selinux.COMMON_SOCKET__LISTEN
+COMMON_SOCKET__ACCEPT = _selinux.COMMON_SOCKET__ACCEPT
+COMMON_SOCKET__GETOPT = _selinux.COMMON_SOCKET__GETOPT
+COMMON_SOCKET__SETOPT = _selinux.COMMON_SOCKET__SETOPT
+COMMON_SOCKET__SHUTDOWN = _selinux.COMMON_SOCKET__SHUTDOWN
+COMMON_SOCKET__RECVFROM = _selinux.COMMON_SOCKET__RECVFROM
+COMMON_SOCKET__SENDTO = _selinux.COMMON_SOCKET__SENDTO
+COMMON_SOCKET__RECV_MSG = _selinux.COMMON_SOCKET__RECV_MSG
+COMMON_SOCKET__SEND_MSG = _selinux.COMMON_SOCKET__SEND_MSG
+COMMON_SOCKET__NAME_BIND = _selinux.COMMON_SOCKET__NAME_BIND
+COMMON_IPC__CREATE = _selinux.COMMON_IPC__CREATE
+COMMON_IPC__DESTROY = _selinux.COMMON_IPC__DESTROY
+COMMON_IPC__GETATTR = _selinux.COMMON_IPC__GETATTR
+COMMON_IPC__SETATTR = _selinux.COMMON_IPC__SETATTR
+COMMON_IPC__READ = _selinux.COMMON_IPC__READ
+COMMON_IPC__WRITE = _selinux.COMMON_IPC__WRITE
+COMMON_IPC__ASSOCIATE = _selinux.COMMON_IPC__ASSOCIATE
+COMMON_IPC__UNIX_READ = _selinux.COMMON_IPC__UNIX_READ
+COMMON_IPC__UNIX_WRITE = _selinux.COMMON_IPC__UNIX_WRITE
+COMMON_DATABASE__CREATE = _selinux.COMMON_DATABASE__CREATE
+COMMON_DATABASE__DROP = _selinux.COMMON_DATABASE__DROP
+COMMON_DATABASE__GETATTR = _selinux.COMMON_DATABASE__GETATTR
+COMMON_DATABASE__SETATTR = _selinux.COMMON_DATABASE__SETATTR
+COMMON_DATABASE__RELABELFROM = _selinux.COMMON_DATABASE__RELABELFROM
+COMMON_DATABASE__RELABELTO = _selinux.COMMON_DATABASE__RELABELTO
+FILESYSTEM__MOUNT = _selinux.FILESYSTEM__MOUNT
+FILESYSTEM__REMOUNT = _selinux.FILESYSTEM__REMOUNT
+FILESYSTEM__UNMOUNT = _selinux.FILESYSTEM__UNMOUNT
+FILESYSTEM__GETATTR = _selinux.FILESYSTEM__GETATTR
+FILESYSTEM__RELABELFROM = _selinux.FILESYSTEM__RELABELFROM
+FILESYSTEM__RELABELTO = _selinux.FILESYSTEM__RELABELTO
+FILESYSTEM__TRANSITION = _selinux.FILESYSTEM__TRANSITION
+FILESYSTEM__ASSOCIATE = _selinux.FILESYSTEM__ASSOCIATE
+FILESYSTEM__QUOTAMOD = _selinux.FILESYSTEM__QUOTAMOD
+FILESYSTEM__QUOTAGET = _selinux.FILESYSTEM__QUOTAGET
+DIR__IOCTL = _selinux.DIR__IOCTL
+DIR__READ = _selinux.DIR__READ
+DIR__WRITE = _selinux.DIR__WRITE
+DIR__CREATE = _selinux.DIR__CREATE
+DIR__GETATTR = _selinux.DIR__GETATTR
+DIR__SETATTR = _selinux.DIR__SETATTR
+DIR__LOCK = _selinux.DIR__LOCK
+DIR__RELABELFROM = _selinux.DIR__RELABELFROM
+DIR__RELABELTO = _selinux.DIR__RELABELTO
+DIR__APPEND = _selinux.DIR__APPEND
+DIR__UNLINK = _selinux.DIR__UNLINK
+DIR__LINK = _selinux.DIR__LINK
+DIR__RENAME = _selinux.DIR__RENAME
+DIR__EXECUTE = _selinux.DIR__EXECUTE
+DIR__SWAPON = _selinux.DIR__SWAPON
+DIR__QUOTAON = _selinux.DIR__QUOTAON
+DIR__MOUNTON = _selinux.DIR__MOUNTON
+DIR__ADD_NAME = _selinux.DIR__ADD_NAME
+DIR__REMOVE_NAME = _selinux.DIR__REMOVE_NAME
+DIR__REPARENT = _selinux.DIR__REPARENT
+DIR__SEARCH = _selinux.DIR__SEARCH
+DIR__RMDIR = _selinux.DIR__RMDIR
+DIR__OPEN = _selinux.DIR__OPEN
+FILE__IOCTL = _selinux.FILE__IOCTL
+FILE__READ = _selinux.FILE__READ
+FILE__WRITE = _selinux.FILE__WRITE
+FILE__CREATE = _selinux.FILE__CREATE
+FILE__GETATTR = _selinux.FILE__GETATTR
+FILE__SETATTR = _selinux.FILE__SETATTR
+FILE__LOCK = _selinux.FILE__LOCK
+FILE__RELABELFROM = _selinux.FILE__RELABELFROM
+FILE__RELABELTO = _selinux.FILE__RELABELTO
+FILE__APPEND = _selinux.FILE__APPEND
+FILE__UNLINK = _selinux.FILE__UNLINK
+FILE__LINK = _selinux.FILE__LINK
+FILE__RENAME = _selinux.FILE__RENAME
+FILE__EXECUTE = _selinux.FILE__EXECUTE
+FILE__SWAPON = _selinux.FILE__SWAPON
+FILE__QUOTAON = _selinux.FILE__QUOTAON
+FILE__MOUNTON = _selinux.FILE__MOUNTON
+FILE__EXECUTE_NO_TRANS = _selinux.FILE__EXECUTE_NO_TRANS
+FILE__ENTRYPOINT = _selinux.FILE__ENTRYPOINT
+FILE__EXECMOD = _selinux.FILE__EXECMOD
+FILE__OPEN = _selinux.FILE__OPEN
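Each constant from COMMON_FILE__IOCTL onward is a single access-vector permission bit, so a requested-permissions mask is built with bitwise OR. An illustrative sketch using only names defined in this block:

    import selinux

    # Ask for read + stat + open on the "file" object class.
    requested = selinux.FILE__READ | selinux.FILE__GETATTR | selinux.FILE__OPEN

    # Each constant is a distinct bit, so membership tests are bitwise too.
    assert requested & selinux.FILE__READ
    assert not (requested & selinux.FILE__WRITE)
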
+LNK_FILE__IOCTL = _selinux.LNK_FILE__IOCTL
+LNK_FILE__READ = _selinux.LNK_FILE__READ
+LNK_FILE__WRITE = _selinux.LNK_FILE__WRITE
+LNK_FILE__CREATE = _selinux.LNK_FILE__CREATE
+LNK_FILE__GETATTR = _selinux.LNK_FILE__GETATTR
+LNK_FILE__SETATTR = _selinux.LNK_FILE__SETATTR
+LNK_FILE__LOCK = _selinux.LNK_FILE__LOCK
+LNK_FILE__RELABELFROM = _selinux.LNK_FILE__RELABELFROM
+LNK_FILE__RELABELTO = _selinux.LNK_FILE__RELABELTO
+LNK_FILE__APPEND = _selinux.LNK_FILE__APPEND
+LNK_FILE__UNLINK = _selinux.LNK_FILE__UNLINK
+LNK_FILE__LINK = _selinux.LNK_FILE__LINK
+LNK_FILE__RENAME = _selinux.LNK_FILE__RENAME
+LNK_FILE__EXECUTE = _selinux.LNK_FILE__EXECUTE
+LNK_FILE__SWAPON = _selinux.LNK_FILE__SWAPON
+LNK_FILE__QUOTAON = _selinux.LNK_FILE__QUOTAON
+LNK_FILE__MOUNTON = _selinux.LNK_FILE__MOUNTON
+CHR_FILE__IOCTL = _selinux.CHR_FILE__IOCTL
+CHR_FILE__READ = _selinux.CHR_FILE__READ
+CHR_FILE__WRITE = _selinux.CHR_FILE__WRITE
+CHR_FILE__CREATE = _selinux.CHR_FILE__CREATE
+CHR_FILE__GETATTR = _selinux.CHR_FILE__GETATTR
+CHR_FILE__SETATTR = _selinux.CHR_FILE__SETATTR
+CHR_FILE__LOCK = _selinux.CHR_FILE__LOCK
+CHR_FILE__RELABELFROM = _selinux.CHR_FILE__RELABELFROM
+CHR_FILE__RELABELTO = _selinux.CHR_FILE__RELABELTO
+CHR_FILE__APPEND = _selinux.CHR_FILE__APPEND
+CHR_FILE__UNLINK = _selinux.CHR_FILE__UNLINK
+CHR_FILE__LINK = _selinux.CHR_FILE__LINK
+CHR_FILE__RENAME = _selinux.CHR_FILE__RENAME
+CHR_FILE__EXECUTE = _selinux.CHR_FILE__EXECUTE
+CHR_FILE__SWAPON = _selinux.CHR_FILE__SWAPON
+CHR_FILE__QUOTAON = _selinux.CHR_FILE__QUOTAON
+CHR_FILE__MOUNTON = _selinux.CHR_FILE__MOUNTON
+CHR_FILE__EXECUTE_NO_TRANS = _selinux.CHR_FILE__EXECUTE_NO_TRANS
+CHR_FILE__ENTRYPOINT = _selinux.CHR_FILE__ENTRYPOINT
+CHR_FILE__EXECMOD = _selinux.CHR_FILE__EXECMOD
+CHR_FILE__OPEN = _selinux.CHR_FILE__OPEN
+BLK_FILE__IOCTL = _selinux.BLK_FILE__IOCTL
+BLK_FILE__READ = _selinux.BLK_FILE__READ
+BLK_FILE__WRITE = _selinux.BLK_FILE__WRITE
+BLK_FILE__CREATE = _selinux.BLK_FILE__CREATE
+BLK_FILE__GETATTR = _selinux.BLK_FILE__GETATTR
+BLK_FILE__SETATTR = _selinux.BLK_FILE__SETATTR
+BLK_FILE__LOCK = _selinux.BLK_FILE__LOCK
+BLK_FILE__RELABELFROM = _selinux.BLK_FILE__RELABELFROM
+BLK_FILE__RELABELTO = _selinux.BLK_FILE__RELABELTO
+BLK_FILE__APPEND = _selinux.BLK_FILE__APPEND
+BLK_FILE__UNLINK = _selinux.BLK_FILE__UNLINK
+BLK_FILE__LINK = _selinux.BLK_FILE__LINK
+BLK_FILE__RENAME = _selinux.BLK_FILE__RENAME
+BLK_FILE__EXECUTE = _selinux.BLK_FILE__EXECUTE
+BLK_FILE__SWAPON = _selinux.BLK_FILE__SWAPON
+BLK_FILE__QUOTAON = _selinux.BLK_FILE__QUOTAON
+BLK_FILE__MOUNTON = _selinux.BLK_FILE__MOUNTON
+BLK_FILE__OPEN = _selinux.BLK_FILE__OPEN
+SOCK_FILE__IOCTL = _selinux.SOCK_FILE__IOCTL
+SOCK_FILE__READ = _selinux.SOCK_FILE__READ
+SOCK_FILE__WRITE = _selinux.SOCK_FILE__WRITE
+SOCK_FILE__CREATE = _selinux.SOCK_FILE__CREATE
+SOCK_FILE__GETATTR = _selinux.SOCK_FILE__GETATTR
+SOCK_FILE__SETATTR = _selinux.SOCK_FILE__SETATTR
+SOCK_FILE__LOCK = _selinux.SOCK_FILE__LOCK
+SOCK_FILE__RELABELFROM = _selinux.SOCK_FILE__RELABELFROM
+SOCK_FILE__RELABELTO = _selinux.SOCK_FILE__RELABELTO
+SOCK_FILE__APPEND = _selinux.SOCK_FILE__APPEND
+SOCK_FILE__UNLINK = _selinux.SOCK_FILE__UNLINK
+SOCK_FILE__LINK = _selinux.SOCK_FILE__LINK
+SOCK_FILE__RENAME = _selinux.SOCK_FILE__RENAME
+SOCK_FILE__EXECUTE = _selinux.SOCK_FILE__EXECUTE
+SOCK_FILE__SWAPON = _selinux.SOCK_FILE__SWAPON
+SOCK_FILE__QUOTAON = _selinux.SOCK_FILE__QUOTAON
+SOCK_FILE__MOUNTON = _selinux.SOCK_FILE__MOUNTON
+FIFO_FILE__IOCTL = _selinux.FIFO_FILE__IOCTL
+FIFO_FILE__READ = _selinux.FIFO_FILE__READ
+FIFO_FILE__WRITE = _selinux.FIFO_FILE__WRITE
+FIFO_FILE__CREATE = _selinux.FIFO_FILE__CREATE
+FIFO_FILE__GETATTR = _selinux.FIFO_FILE__GETATTR
+FIFO_FILE__SETATTR = _selinux.FIFO_FILE__SETATTR
+FIFO_FILE__LOCK = _selinux.FIFO_FILE__LOCK
+FIFO_FILE__RELABELFROM = _selinux.FIFO_FILE__RELABELFROM
+FIFO_FILE__RELABELTO = _selinux.FIFO_FILE__RELABELTO
+FIFO_FILE__APPEND = _selinux.FIFO_FILE__APPEND
+FIFO_FILE__UNLINK = _selinux.FIFO_FILE__UNLINK
+FIFO_FILE__LINK = _selinux.FIFO_FILE__LINK
+FIFO_FILE__RENAME = _selinux.FIFO_FILE__RENAME
+FIFO_FILE__EXECUTE = _selinux.FIFO_FILE__EXECUTE
+FIFO_FILE__SWAPON = _selinux.FIFO_FILE__SWAPON
+FIFO_FILE__QUOTAON = _selinux.FIFO_FILE__QUOTAON
+FIFO_FILE__MOUNTON = _selinux.FIFO_FILE__MOUNTON
+FIFO_FILE__OPEN = _selinux.FIFO_FILE__OPEN
+FD__USE = _selinux.FD__USE
+SOCKET__IOCTL = _selinux.SOCKET__IOCTL
+SOCKET__READ = _selinux.SOCKET__READ
+SOCKET__WRITE = _selinux.SOCKET__WRITE
+SOCKET__CREATE = _selinux.SOCKET__CREATE
+SOCKET__GETATTR = _selinux.SOCKET__GETATTR
+SOCKET__SETATTR = _selinux.SOCKET__SETATTR
+SOCKET__LOCK = _selinux.SOCKET__LOCK
+SOCKET__RELABELFROM = _selinux.SOCKET__RELABELFROM
+SOCKET__RELABELTO = _selinux.SOCKET__RELABELTO
+SOCKET__APPEND = _selinux.SOCKET__APPEND
+SOCKET__BIND = _selinux.SOCKET__BIND
+SOCKET__CONNECT = _selinux.SOCKET__CONNECT
+SOCKET__LISTEN = _selinux.SOCKET__LISTEN
+SOCKET__ACCEPT = _selinux.SOCKET__ACCEPT
+SOCKET__GETOPT = _selinux.SOCKET__GETOPT
+SOCKET__SETOPT = _selinux.SOCKET__SETOPT
+SOCKET__SHUTDOWN = _selinux.SOCKET__SHUTDOWN
+SOCKET__RECVFROM = _selinux.SOCKET__RECVFROM
+SOCKET__SENDTO = _selinux.SOCKET__SENDTO
+SOCKET__RECV_MSG = _selinux.SOCKET__RECV_MSG
+SOCKET__SEND_MSG = _selinux.SOCKET__SEND_MSG
+SOCKET__NAME_BIND = _selinux.SOCKET__NAME_BIND
+TCP_SOCKET__IOCTL = _selinux.TCP_SOCKET__IOCTL
+TCP_SOCKET__READ = _selinux.TCP_SOCKET__READ
+TCP_SOCKET__WRITE = _selinux.TCP_SOCKET__WRITE
+TCP_SOCKET__CREATE = _selinux.TCP_SOCKET__CREATE
+TCP_SOCKET__GETATTR = _selinux.TCP_SOCKET__GETATTR
+TCP_SOCKET__SETATTR = _selinux.TCP_SOCKET__SETATTR
+TCP_SOCKET__LOCK = _selinux.TCP_SOCKET__LOCK
+TCP_SOCKET__RELABELFROM = _selinux.TCP_SOCKET__RELABELFROM
+TCP_SOCKET__RELABELTO = _selinux.TCP_SOCKET__RELABELTO
+TCP_SOCKET__APPEND = _selinux.TCP_SOCKET__APPEND
+TCP_SOCKET__BIND = _selinux.TCP_SOCKET__BIND
+TCP_SOCKET__CONNECT = _selinux.TCP_SOCKET__CONNECT
+TCP_SOCKET__LISTEN = _selinux.TCP_SOCKET__LISTEN
+TCP_SOCKET__ACCEPT = _selinux.TCP_SOCKET__ACCEPT
+TCP_SOCKET__GETOPT = _selinux.TCP_SOCKET__GETOPT
+TCP_SOCKET__SETOPT = _selinux.TCP_SOCKET__SETOPT
+TCP_SOCKET__SHUTDOWN = _selinux.TCP_SOCKET__SHUTDOWN
+TCP_SOCKET__RECVFROM = _selinux.TCP_SOCKET__RECVFROM
+TCP_SOCKET__SENDTO = _selinux.TCP_SOCKET__SENDTO
+TCP_SOCKET__RECV_MSG = _selinux.TCP_SOCKET__RECV_MSG
+TCP_SOCKET__SEND_MSG = _selinux.TCP_SOCKET__SEND_MSG
+TCP_SOCKET__NAME_BIND = _selinux.TCP_SOCKET__NAME_BIND
+TCP_SOCKET__CONNECTTO = _selinux.TCP_SOCKET__CONNECTTO
+TCP_SOCKET__NEWCONN = _selinux.TCP_SOCKET__NEWCONN
+TCP_SOCKET__ACCEPTFROM = _selinux.TCP_SOCKET__ACCEPTFROM
+TCP_SOCKET__NODE_BIND = _selinux.TCP_SOCKET__NODE_BIND
+TCP_SOCKET__NAME_CONNECT = _selinux.TCP_SOCKET__NAME_CONNECT
+UDP_SOCKET__IOCTL = _selinux.UDP_SOCKET__IOCTL
+UDP_SOCKET__READ = _selinux.UDP_SOCKET__READ
+UDP_SOCKET__WRITE = _selinux.UDP_SOCKET__WRITE
+UDP_SOCKET__CREATE = _selinux.UDP_SOCKET__CREATE
+UDP_SOCKET__GETATTR = _selinux.UDP_SOCKET__GETATTR
+UDP_SOCKET__SETATTR = _selinux.UDP_SOCKET__SETATTR
+UDP_SOCKET__LOCK = _selinux.UDP_SOCKET__LOCK
+UDP_SOCKET__RELABELFROM = _selinux.UDP_SOCKET__RELABELFROM
+UDP_SOCKET__RELABELTO = _selinux.UDP_SOCKET__RELABELTO
+UDP_SOCKET__APPEND = _selinux.UDP_SOCKET__APPEND
+UDP_SOCKET__BIND = _selinux.UDP_SOCKET__BIND
+UDP_SOCKET__CONNECT = _selinux.UDP_SOCKET__CONNECT
+UDP_SOCKET__LISTEN = _selinux.UDP_SOCKET__LISTEN
+UDP_SOCKET__ACCEPT = _selinux.UDP_SOCKET__ACCEPT
+UDP_SOCKET__GETOPT = _selinux.UDP_SOCKET__GETOPT
+UDP_SOCKET__SETOPT = _selinux.UDP_SOCKET__SETOPT
+UDP_SOCKET__SHUTDOWN = _selinux.UDP_SOCKET__SHUTDOWN
+UDP_SOCKET__RECVFROM = _selinux.UDP_SOCKET__RECVFROM
+UDP_SOCKET__SENDTO = _selinux.UDP_SOCKET__SENDTO
+UDP_SOCKET__RECV_MSG = _selinux.UDP_SOCKET__RECV_MSG
+UDP_SOCKET__SEND_MSG = _selinux.UDP_SOCKET__SEND_MSG
+UDP_SOCKET__NAME_BIND = _selinux.UDP_SOCKET__NAME_BIND
+UDP_SOCKET__NODE_BIND = _selinux.UDP_SOCKET__NODE_BIND
+RAWIP_SOCKET__IOCTL = _selinux.RAWIP_SOCKET__IOCTL
+RAWIP_SOCKET__READ = _selinux.RAWIP_SOCKET__READ
+RAWIP_SOCKET__WRITE = _selinux.RAWIP_SOCKET__WRITE
+RAWIP_SOCKET__CREATE = _selinux.RAWIP_SOCKET__CREATE
+RAWIP_SOCKET__GETATTR = _selinux.RAWIP_SOCKET__GETATTR
+RAWIP_SOCKET__SETATTR = _selinux.RAWIP_SOCKET__SETATTR
+RAWIP_SOCKET__LOCK = _selinux.RAWIP_SOCKET__LOCK
+RAWIP_SOCKET__RELABELFROM = _selinux.RAWIP_SOCKET__RELABELFROM
+RAWIP_SOCKET__RELABELTO = _selinux.RAWIP_SOCKET__RELABELTO
+RAWIP_SOCKET__APPEND = _selinux.RAWIP_SOCKET__APPEND
+RAWIP_SOCKET__BIND = _selinux.RAWIP_SOCKET__BIND
+RAWIP_SOCKET__CONNECT = _selinux.RAWIP_SOCKET__CONNECT
+RAWIP_SOCKET__LISTEN = _selinux.RAWIP_SOCKET__LISTEN
+RAWIP_SOCKET__ACCEPT = _selinux.RAWIP_SOCKET__ACCEPT
+RAWIP_SOCKET__GETOPT = _selinux.RAWIP_SOCKET__GETOPT
+RAWIP_SOCKET__SETOPT = _selinux.RAWIP_SOCKET__SETOPT
+RAWIP_SOCKET__SHUTDOWN = _selinux.RAWIP_SOCKET__SHUTDOWN
+RAWIP_SOCKET__RECVFROM = _selinux.RAWIP_SOCKET__RECVFROM
+RAWIP_SOCKET__SENDTO = _selinux.RAWIP_SOCKET__SENDTO
+RAWIP_SOCKET__RECV_MSG = _selinux.RAWIP_SOCKET__RECV_MSG
+RAWIP_SOCKET__SEND_MSG = _selinux.RAWIP_SOCKET__SEND_MSG
+RAWIP_SOCKET__NAME_BIND = _selinux.RAWIP_SOCKET__NAME_BIND
+RAWIP_SOCKET__NODE_BIND = _selinux.RAWIP_SOCKET__NODE_BIND
+NODE__TCP_RECV = _selinux.NODE__TCP_RECV
+NODE__TCP_SEND = _selinux.NODE__TCP_SEND
+NODE__UDP_RECV = _selinux.NODE__UDP_RECV
+NODE__UDP_SEND = _selinux.NODE__UDP_SEND
+NODE__RAWIP_RECV = _selinux.NODE__RAWIP_RECV
+NODE__RAWIP_SEND = _selinux.NODE__RAWIP_SEND
+NODE__ENFORCE_DEST = _selinux.NODE__ENFORCE_DEST
+NODE__DCCP_RECV = _selinux.NODE__DCCP_RECV
+NODE__DCCP_SEND = _selinux.NODE__DCCP_SEND
+NODE__RECVFROM = _selinux.NODE__RECVFROM
+NODE__SENDTO = _selinux.NODE__SENDTO
+NETIF__TCP_RECV = _selinux.NETIF__TCP_RECV
+NETIF__TCP_SEND = _selinux.NETIF__TCP_SEND
+NETIF__UDP_RECV = _selinux.NETIF__UDP_RECV
+NETIF__UDP_SEND = _selinux.NETIF__UDP_SEND
+NETIF__RAWIP_RECV = _selinux.NETIF__RAWIP_RECV
+NETIF__RAWIP_SEND = _selinux.NETIF__RAWIP_SEND
+NETIF__DCCP_RECV = _selinux.NETIF__DCCP_RECV
+NETIF__DCCP_SEND = _selinux.NETIF__DCCP_SEND
+NETIF__INGRESS = _selinux.NETIF__INGRESS
+NETIF__EGRESS = _selinux.NETIF__EGRESS
+NETLINK_SOCKET__IOCTL = _selinux.NETLINK_SOCKET__IOCTL
+NETLINK_SOCKET__READ = _selinux.NETLINK_SOCKET__READ
+NETLINK_SOCKET__WRITE = _selinux.NETLINK_SOCKET__WRITE
+NETLINK_SOCKET__CREATE = _selinux.NETLINK_SOCKET__CREATE
+NETLINK_SOCKET__GETATTR = _selinux.NETLINK_SOCKET__GETATTR
+NETLINK_SOCKET__SETATTR = _selinux.NETLINK_SOCKET__SETATTR
+NETLINK_SOCKET__LOCK = _selinux.NETLINK_SOCKET__LOCK
+NETLINK_SOCKET__RELABELFROM = _selinux.NETLINK_SOCKET__RELABELFROM
+NETLINK_SOCKET__RELABELTO = _selinux.NETLINK_SOCKET__RELABELTO
+NETLINK_SOCKET__APPEND = _selinux.NETLINK_SOCKET__APPEND
+NETLINK_SOCKET__BIND = _selinux.NETLINK_SOCKET__BIND
+NETLINK_SOCKET__CONNECT = _selinux.NETLINK_SOCKET__CONNECT
+NETLINK_SOCKET__LISTEN = _selinux.NETLINK_SOCKET__LISTEN
+NETLINK_SOCKET__ACCEPT = _selinux.NETLINK_SOCKET__ACCEPT
+NETLINK_SOCKET__GETOPT = _selinux.NETLINK_SOCKET__GETOPT
+NETLINK_SOCKET__SETOPT = _selinux.NETLINK_SOCKET__SETOPT
+NETLINK_SOCKET__SHUTDOWN = _selinux.NETLINK_SOCKET__SHUTDOWN
+NETLINK_SOCKET__RECVFROM = _selinux.NETLINK_SOCKET__RECVFROM
+NETLINK_SOCKET__SENDTO = _selinux.NETLINK_SOCKET__SENDTO
+NETLINK_SOCKET__RECV_MSG = _selinux.NETLINK_SOCKET__RECV_MSG
+NETLINK_SOCKET__SEND_MSG = _selinux.NETLINK_SOCKET__SEND_MSG
+NETLINK_SOCKET__NAME_BIND = _selinux.NETLINK_SOCKET__NAME_BIND
+PACKET_SOCKET__IOCTL = _selinux.PACKET_SOCKET__IOCTL
+PACKET_SOCKET__READ = _selinux.PACKET_SOCKET__READ
+PACKET_SOCKET__WRITE = _selinux.PACKET_SOCKET__WRITE
+PACKET_SOCKET__CREATE = _selinux.PACKET_SOCKET__CREATE
+PACKET_SOCKET__GETATTR = _selinux.PACKET_SOCKET__GETATTR
+PACKET_SOCKET__SETATTR = _selinux.PACKET_SOCKET__SETATTR
+PACKET_SOCKET__LOCK = _selinux.PACKET_SOCKET__LOCK
+PACKET_SOCKET__RELABELFROM = _selinux.PACKET_SOCKET__RELABELFROM
+PACKET_SOCKET__RELABELTO = _selinux.PACKET_SOCKET__RELABELTO
+PACKET_SOCKET__APPEND = _selinux.PACKET_SOCKET__APPEND
+PACKET_SOCKET__BIND = _selinux.PACKET_SOCKET__BIND
+PACKET_SOCKET__CONNECT = _selinux.PACKET_SOCKET__CONNECT
+PACKET_SOCKET__LISTEN = _selinux.PACKET_SOCKET__LISTEN
+PACKET_SOCKET__ACCEPT = _selinux.PACKET_SOCKET__ACCEPT
+PACKET_SOCKET__GETOPT = _selinux.PACKET_SOCKET__GETOPT
+PACKET_SOCKET__SETOPT = _selinux.PACKET_SOCKET__SETOPT
+PACKET_SOCKET__SHUTDOWN = _selinux.PACKET_SOCKET__SHUTDOWN
+PACKET_SOCKET__RECVFROM = _selinux.PACKET_SOCKET__RECVFROM
+PACKET_SOCKET__SENDTO = _selinux.PACKET_SOCKET__SENDTO
+PACKET_SOCKET__RECV_MSG = _selinux.PACKET_SOCKET__RECV_MSG
+PACKET_SOCKET__SEND_MSG = _selinux.PACKET_SOCKET__SEND_MSG
+PACKET_SOCKET__NAME_BIND = _selinux.PACKET_SOCKET__NAME_BIND
+KEY_SOCKET__IOCTL = _selinux.KEY_SOCKET__IOCTL
+KEY_SOCKET__READ = _selinux.KEY_SOCKET__READ
+KEY_SOCKET__WRITE = _selinux.KEY_SOCKET__WRITE
+KEY_SOCKET__CREATE = _selinux.KEY_SOCKET__CREATE
+KEY_SOCKET__GETATTR = _selinux.KEY_SOCKET__GETATTR
+KEY_SOCKET__SETATTR = _selinux.KEY_SOCKET__SETATTR
+KEY_SOCKET__LOCK = _selinux.KEY_SOCKET__LOCK
+KEY_SOCKET__RELABELFROM = _selinux.KEY_SOCKET__RELABELFROM
+KEY_SOCKET__RELABELTO = _selinux.KEY_SOCKET__RELABELTO
+KEY_SOCKET__APPEND = _selinux.KEY_SOCKET__APPEND
+KEY_SOCKET__BIND = _selinux.KEY_SOCKET__BIND
+KEY_SOCKET__CONNECT = _selinux.KEY_SOCKET__CONNECT
+KEY_SOCKET__LISTEN = _selinux.KEY_SOCKET__LISTEN
+KEY_SOCKET__ACCEPT = _selinux.KEY_SOCKET__ACCEPT
+KEY_SOCKET__GETOPT = _selinux.KEY_SOCKET__GETOPT
+KEY_SOCKET__SETOPT = _selinux.KEY_SOCKET__SETOPT
+KEY_SOCKET__SHUTDOWN = _selinux.KEY_SOCKET__SHUTDOWN
+KEY_SOCKET__RECVFROM = _selinux.KEY_SOCKET__RECVFROM
+KEY_SOCKET__SENDTO = _selinux.KEY_SOCKET__SENDTO
+KEY_SOCKET__RECV_MSG = _selinux.KEY_SOCKET__RECV_MSG
+KEY_SOCKET__SEND_MSG = _selinux.KEY_SOCKET__SEND_MSG
+KEY_SOCKET__NAME_BIND = _selinux.KEY_SOCKET__NAME_BIND
+UNIX_STREAM_SOCKET__IOCTL = _selinux.UNIX_STREAM_SOCKET__IOCTL
+UNIX_STREAM_SOCKET__READ = _selinux.UNIX_STREAM_SOCKET__READ
+UNIX_STREAM_SOCKET__WRITE = _selinux.UNIX_STREAM_SOCKET__WRITE
+UNIX_STREAM_SOCKET__CREATE = _selinux.UNIX_STREAM_SOCKET__CREATE
+UNIX_STREAM_SOCKET__GETATTR = _selinux.UNIX_STREAM_SOCKET__GETATTR
+UNIX_STREAM_SOCKET__SETATTR = _selinux.UNIX_STREAM_SOCKET__SETATTR
+UNIX_STREAM_SOCKET__LOCK = _selinux.UNIX_STREAM_SOCKET__LOCK
+UNIX_STREAM_SOCKET__RELABELFROM = _selinux.UNIX_STREAM_SOCKET__RELABELFROM
+UNIX_STREAM_SOCKET__RELABELTO = _selinux.UNIX_STREAM_SOCKET__RELABELTO
+UNIX_STREAM_SOCKET__APPEND = _selinux.UNIX_STREAM_SOCKET__APPEND
+UNIX_STREAM_SOCKET__BIND = _selinux.UNIX_STREAM_SOCKET__BIND
+UNIX_STREAM_SOCKET__CONNECT = _selinux.UNIX_STREAM_SOCKET__CONNECT
+UNIX_STREAM_SOCKET__LISTEN = _selinux.UNIX_STREAM_SOCKET__LISTEN
+UNIX_STREAM_SOCKET__ACCEPT = _selinux.UNIX_STREAM_SOCKET__ACCEPT
+UNIX_STREAM_SOCKET__GETOPT = _selinux.UNIX_STREAM_SOCKET__GETOPT
+UNIX_STREAM_SOCKET__SETOPT = _selinux.UNIX_STREAM_SOCKET__SETOPT
+UNIX_STREAM_SOCKET__SHUTDOWN = _selinux.UNIX_STREAM_SOCKET__SHUTDOWN
+UNIX_STREAM_SOCKET__RECVFROM = _selinux.UNIX_STREAM_SOCKET__RECVFROM
+UNIX_STREAM_SOCKET__SENDTO = _selinux.UNIX_STREAM_SOCKET__SENDTO
+UNIX_STREAM_SOCKET__RECV_MSG = _selinux.UNIX_STREAM_SOCKET__RECV_MSG
+UNIX_STREAM_SOCKET__SEND_MSG = _selinux.UNIX_STREAM_SOCKET__SEND_MSG
+UNIX_STREAM_SOCKET__NAME_BIND = _selinux.UNIX_STREAM_SOCKET__NAME_BIND
+UNIX_STREAM_SOCKET__CONNECTTO = _selinux.UNIX_STREAM_SOCKET__CONNECTTO
+UNIX_STREAM_SOCKET__NEWCONN = _selinux.UNIX_STREAM_SOCKET__NEWCONN
+UNIX_STREAM_SOCKET__ACCEPTFROM = _selinux.UNIX_STREAM_SOCKET__ACCEPTFROM
+UNIX_DGRAM_SOCKET__IOCTL = _selinux.UNIX_DGRAM_SOCKET__IOCTL
+UNIX_DGRAM_SOCKET__READ = _selinux.UNIX_DGRAM_SOCKET__READ
+UNIX_DGRAM_SOCKET__WRITE = _selinux.UNIX_DGRAM_SOCKET__WRITE
+UNIX_DGRAM_SOCKET__CREATE = _selinux.UNIX_DGRAM_SOCKET__CREATE
+UNIX_DGRAM_SOCKET__GETATTR = _selinux.UNIX_DGRAM_SOCKET__GETATTR
+UNIX_DGRAM_SOCKET__SETATTR = _selinux.UNIX_DGRAM_SOCKET__SETATTR
+UNIX_DGRAM_SOCKET__LOCK = _selinux.UNIX_DGRAM_SOCKET__LOCK
+UNIX_DGRAM_SOCKET__RELABELFROM = _selinux.UNIX_DGRAM_SOCKET__RELABELFROM
+UNIX_DGRAM_SOCKET__RELABELTO = _selinux.UNIX_DGRAM_SOCKET__RELABELTO
+UNIX_DGRAM_SOCKET__APPEND = _selinux.UNIX_DGRAM_SOCKET__APPEND
+UNIX_DGRAM_SOCKET__BIND = _selinux.UNIX_DGRAM_SOCKET__BIND
+UNIX_DGRAM_SOCKET__CONNECT = _selinux.UNIX_DGRAM_SOCKET__CONNECT
+UNIX_DGRAM_SOCKET__LISTEN = _selinux.UNIX_DGRAM_SOCKET__LISTEN
+UNIX_DGRAM_SOCKET__ACCEPT = _selinux.UNIX_DGRAM_SOCKET__ACCEPT
+UNIX_DGRAM_SOCKET__GETOPT = _selinux.UNIX_DGRAM_SOCKET__GETOPT
+UNIX_DGRAM_SOCKET__SETOPT = _selinux.UNIX_DGRAM_SOCKET__SETOPT
+UNIX_DGRAM_SOCKET__SHUTDOWN = _selinux.UNIX_DGRAM_SOCKET__SHUTDOWN
+UNIX_DGRAM_SOCKET__RECVFROM = _selinux.UNIX_DGRAM_SOCKET__RECVFROM
+UNIX_DGRAM_SOCKET__SENDTO = _selinux.UNIX_DGRAM_SOCKET__SENDTO
+UNIX_DGRAM_SOCKET__RECV_MSG = _selinux.UNIX_DGRAM_SOCKET__RECV_MSG
+UNIX_DGRAM_SOCKET__SEND_MSG = _selinux.UNIX_DGRAM_SOCKET__SEND_MSG
+UNIX_DGRAM_SOCKET__NAME_BIND = _selinux.UNIX_DGRAM_SOCKET__NAME_BIND
+PROCESS__FORK = _selinux.PROCESS__FORK
+PROCESS__TRANSITION = _selinux.PROCESS__TRANSITION
+PROCESS__SIGCHLD = _selinux.PROCESS__SIGCHLD
+PROCESS__SIGKILL = _selinux.PROCESS__SIGKILL
+PROCESS__SIGSTOP = _selinux.PROCESS__SIGSTOP
+PROCESS__SIGNULL = _selinux.PROCESS__SIGNULL
+PROCESS__SIGNAL = _selinux.PROCESS__SIGNAL
+PROCESS__PTRACE = _selinux.PROCESS__PTRACE
+PROCESS__GETSCHED = _selinux.PROCESS__GETSCHED
+PROCESS__SETSCHED = _selinux.PROCESS__SETSCHED
+PROCESS__GETSESSION = _selinux.PROCESS__GETSESSION
+PROCESS__GETPGID = _selinux.PROCESS__GETPGID
+PROCESS__SETPGID = _selinux.PROCESS__SETPGID
+PROCESS__GETCAP = _selinux.PROCESS__GETCAP
+PROCESS__SETCAP = _selinux.PROCESS__SETCAP
+PROCESS__SHARE = _selinux.PROCESS__SHARE
+PROCESS__GETATTR = _selinux.PROCESS__GETATTR
+PROCESS__SETEXEC = _selinux.PROCESS__SETEXEC
+PROCESS__SETFSCREATE = _selinux.PROCESS__SETFSCREATE
+PROCESS__NOATSECURE = _selinux.PROCESS__NOATSECURE
+PROCESS__SIGINH = _selinux.PROCESS__SIGINH
+PROCESS__SETRLIMIT = _selinux.PROCESS__SETRLIMIT
+PROCESS__RLIMITINH = _selinux.PROCESS__RLIMITINH
+PROCESS__DYNTRANSITION = _selinux.PROCESS__DYNTRANSITION
+PROCESS__SETCURRENT = _selinux.PROCESS__SETCURRENT
+PROCESS__EXECMEM = _selinux.PROCESS__EXECMEM
+PROCESS__EXECSTACK = _selinux.PROCESS__EXECSTACK
+PROCESS__EXECHEAP = _selinux.PROCESS__EXECHEAP
+PROCESS__SETKEYCREATE = _selinux.PROCESS__SETKEYCREATE
+PROCESS__SETSOCKCREATE = _selinux.PROCESS__SETSOCKCREATE
+IPC__CREATE = _selinux.IPC__CREATE
+IPC__DESTROY = _selinux.IPC__DESTROY
+IPC__GETATTR = _selinux.IPC__GETATTR
+IPC__SETATTR = _selinux.IPC__SETATTR
+IPC__READ = _selinux.IPC__READ
+IPC__WRITE = _selinux.IPC__WRITE
+IPC__ASSOCIATE = _selinux.IPC__ASSOCIATE
+IPC__UNIX_READ = _selinux.IPC__UNIX_READ
+IPC__UNIX_WRITE = _selinux.IPC__UNIX_WRITE
+SEM__CREATE = _selinux.SEM__CREATE
+SEM__DESTROY = _selinux.SEM__DESTROY
+SEM__GETATTR = _selinux.SEM__GETATTR
+SEM__SETATTR = _selinux.SEM__SETATTR
+SEM__READ = _selinux.SEM__READ
+SEM__WRITE = _selinux.SEM__WRITE
+SEM__ASSOCIATE = _selinux.SEM__ASSOCIATE
+SEM__UNIX_READ = _selinux.SEM__UNIX_READ
+SEM__UNIX_WRITE = _selinux.SEM__UNIX_WRITE
+MSGQ__CREATE = _selinux.MSGQ__CREATE
+MSGQ__DESTROY = _selinux.MSGQ__DESTROY
+MSGQ__GETATTR = _selinux.MSGQ__GETATTR
+MSGQ__SETATTR = _selinux.MSGQ__SETATTR
+MSGQ__READ = _selinux.MSGQ__READ
+MSGQ__WRITE = _selinux.MSGQ__WRITE
+MSGQ__ASSOCIATE = _selinux.MSGQ__ASSOCIATE
+MSGQ__UNIX_READ = _selinux.MSGQ__UNIX_READ
+MSGQ__UNIX_WRITE = _selinux.MSGQ__UNIX_WRITE
+MSGQ__ENQUEUE = _selinux.MSGQ__ENQUEUE
+MSG__SEND = _selinux.MSG__SEND
+MSG__RECEIVE = _selinux.MSG__RECEIVE
+SHM__CREATE = _selinux.SHM__CREATE
+SHM__DESTROY = _selinux.SHM__DESTROY
+SHM__GETATTR = _selinux.SHM__GETATTR
+SHM__SETATTR = _selinux.SHM__SETATTR
+SHM__READ = _selinux.SHM__READ
+SHM__WRITE = _selinux.SHM__WRITE
+SHM__ASSOCIATE = _selinux.SHM__ASSOCIATE
+SHM__UNIX_READ = _selinux.SHM__UNIX_READ
+SHM__UNIX_WRITE = _selinux.SHM__UNIX_WRITE
+SHM__LOCK = _selinux.SHM__LOCK
+SECURITY__COMPUTE_AV = _selinux.SECURITY__COMPUTE_AV
+SECURITY__COMPUTE_CREATE = _selinux.SECURITY__COMPUTE_CREATE
+SECURITY__COMPUTE_MEMBER = _selinux.SECURITY__COMPUTE_MEMBER
+SECURITY__CHECK_CONTEXT = _selinux.SECURITY__CHECK_CONTEXT
+SECURITY__LOAD_POLICY = _selinux.SECURITY__LOAD_POLICY
+SECURITY__COMPUTE_RELABEL = _selinux.SECURITY__COMPUTE_RELABEL
+SECURITY__COMPUTE_USER = _selinux.SECURITY__COMPUTE_USER
+SECURITY__SETENFORCE = _selinux.SECURITY__SETENFORCE
+SECURITY__SETBOOL = _selinux.SECURITY__SETBOOL
+SECURITY__SETSECPARAM = _selinux.SECURITY__SETSECPARAM
+SECURITY__SETCHECKREQPROT = _selinux.SECURITY__SETCHECKREQPROT
+SYSTEM__IPC_INFO = _selinux.SYSTEM__IPC_INFO
+SYSTEM__SYSLOG_READ = _selinux.SYSTEM__SYSLOG_READ
+SYSTEM__SYSLOG_MOD = _selinux.SYSTEM__SYSLOG_MOD
+SYSTEM__SYSLOG_CONSOLE = _selinux.SYSTEM__SYSLOG_CONSOLE
+CAPABILITY__CHOWN = _selinux.CAPABILITY__CHOWN
+CAPABILITY__DAC_OVERRIDE = _selinux.CAPABILITY__DAC_OVERRIDE
+CAPABILITY__DAC_READ_SEARCH = _selinux.CAPABILITY__DAC_READ_SEARCH
+CAPABILITY__FOWNER = _selinux.CAPABILITY__FOWNER
+CAPABILITY__FSETID = _selinux.CAPABILITY__FSETID
+CAPABILITY__KILL = _selinux.CAPABILITY__KILL
+CAPABILITY__SETGID = _selinux.CAPABILITY__SETGID
+CAPABILITY__SETUID = _selinux.CAPABILITY__SETUID
+CAPABILITY__SETPCAP = _selinux.CAPABILITY__SETPCAP
+CAPABILITY__LINUX_IMMUTABLE = _selinux.CAPABILITY__LINUX_IMMUTABLE
+CAPABILITY__NET_BIND_SERVICE = _selinux.CAPABILITY__NET_BIND_SERVICE
+CAPABILITY__NET_BROADCAST = _selinux.CAPABILITY__NET_BROADCAST
+CAPABILITY__NET_ADMIN = _selinux.CAPABILITY__NET_ADMIN
+CAPABILITY__NET_RAW = _selinux.CAPABILITY__NET_RAW
+CAPABILITY__IPC_LOCK = _selinux.CAPABILITY__IPC_LOCK
+CAPABILITY__IPC_OWNER = _selinux.CAPABILITY__IPC_OWNER
+CAPABILITY__SYS_MODULE = _selinux.CAPABILITY__SYS_MODULE
+CAPABILITY__SYS_RAWIO = _selinux.CAPABILITY__SYS_RAWIO
+CAPABILITY__SYS_CHROOT = _selinux.CAPABILITY__SYS_CHROOT
+CAPABILITY__SYS_PTRACE = _selinux.CAPABILITY__SYS_PTRACE
+CAPABILITY__SYS_PACCT = _selinux.CAPABILITY__SYS_PACCT
+CAPABILITY__SYS_ADMIN = _selinux.CAPABILITY__SYS_ADMIN
+CAPABILITY__SYS_BOOT = _selinux.CAPABILITY__SYS_BOOT
+CAPABILITY__SYS_NICE = _selinux.CAPABILITY__SYS_NICE
+CAPABILITY__SYS_RESOURCE = _selinux.CAPABILITY__SYS_RESOURCE
+CAPABILITY__SYS_TIME = _selinux.CAPABILITY__SYS_TIME
+CAPABILITY__SYS_TTY_CONFIG = _selinux.CAPABILITY__SYS_TTY_CONFIG
+CAPABILITY__MKNOD = _selinux.CAPABILITY__MKNOD
+CAPABILITY__LEASE = _selinux.CAPABILITY__LEASE
+CAPABILITY__AUDIT_WRITE = _selinux.CAPABILITY__AUDIT_WRITE
+CAPABILITY__AUDIT_CONTROL = _selinux.CAPABILITY__AUDIT_CONTROL
+CAPABILITY__SETFCAP = _selinux.CAPABILITY__SETFCAP
+CAPABILITY2__MAC_OVERRIDE = _selinux.CAPABILITY2__MAC_OVERRIDE
+CAPABILITY2__MAC_ADMIN = _selinux.CAPABILITY2__MAC_ADMIN
+PASSWD__PASSWD = _selinux.PASSWD__PASSWD
+PASSWD__CHFN = _selinux.PASSWD__CHFN
+PASSWD__CHSH = _selinux.PASSWD__CHSH
+PASSWD__ROOTOK = _selinux.PASSWD__ROOTOK
+PASSWD__CRONTAB = _selinux.PASSWD__CRONTAB
+X_DRAWABLE__CREATE = _selinux.X_DRAWABLE__CREATE
+X_DRAWABLE__DESTROY = _selinux.X_DRAWABLE__DESTROY
+X_DRAWABLE__READ = _selinux.X_DRAWABLE__READ
+X_DRAWABLE__WRITE = _selinux.X_DRAWABLE__WRITE
+X_DRAWABLE__BLEND = _selinux.X_DRAWABLE__BLEND
+X_DRAWABLE__GETATTR = _selinux.X_DRAWABLE__GETATTR
+X_DRAWABLE__SETATTR = _selinux.X_DRAWABLE__SETATTR
+X_DRAWABLE__LIST_CHILD = _selinux.X_DRAWABLE__LIST_CHILD
+X_DRAWABLE__ADD_CHILD = _selinux.X_DRAWABLE__ADD_CHILD
+X_DRAWABLE__REMOVE_CHILD = _selinux.X_DRAWABLE__REMOVE_CHILD
+X_DRAWABLE__LIST_PROPERTY = _selinux.X_DRAWABLE__LIST_PROPERTY
+X_DRAWABLE__GET_PROPERTY = _selinux.X_DRAWABLE__GET_PROPERTY
+X_DRAWABLE__SET_PROPERTY = _selinux.X_DRAWABLE__SET_PROPERTY
+X_DRAWABLE__MANAGE = _selinux.X_DRAWABLE__MANAGE
+X_DRAWABLE__OVERRIDE = _selinux.X_DRAWABLE__OVERRIDE
+X_DRAWABLE__SHOW = _selinux.X_DRAWABLE__SHOW
+X_DRAWABLE__HIDE = _selinux.X_DRAWABLE__HIDE
+X_DRAWABLE__SEND = _selinux.X_DRAWABLE__SEND
+X_DRAWABLE__RECEIVE = _selinux.X_DRAWABLE__RECEIVE
+X_SCREEN__GETATTR = _selinux.X_SCREEN__GETATTR
+X_SCREEN__SETATTR = _selinux.X_SCREEN__SETATTR
+X_SCREEN__HIDE_CURSOR = _selinux.X_SCREEN__HIDE_CURSOR
+X_SCREEN__SHOW_CURSOR = _selinux.X_SCREEN__SHOW_CURSOR
+X_SCREEN__SAVER_GETATTR = _selinux.X_SCREEN__SAVER_GETATTR
+X_SCREEN__SAVER_SETATTR = _selinux.X_SCREEN__SAVER_SETATTR
+X_SCREEN__SAVER_HIDE = _selinux.X_SCREEN__SAVER_HIDE
+X_SCREEN__SAVER_SHOW = _selinux.X_SCREEN__SAVER_SHOW
+X_GC__CREATE = _selinux.X_GC__CREATE
+X_GC__DESTROY = _selinux.X_GC__DESTROY
+X_GC__GETATTR = _selinux.X_GC__GETATTR
+X_GC__SETATTR = _selinux.X_GC__SETATTR
+X_GC__USE = _selinux.X_GC__USE
+X_FONT__CREATE = _selinux.X_FONT__CREATE
+X_FONT__DESTROY = _selinux.X_FONT__DESTROY
+X_FONT__GETATTR = _selinux.X_FONT__GETATTR
+X_FONT__ADD_GLYPH = _selinux.X_FONT__ADD_GLYPH
+X_FONT__REMOVE_GLYPH = _selinux.X_FONT__REMOVE_GLYPH
+X_FONT__USE = _selinux.X_FONT__USE
+X_COLORMAP__CREATE = _selinux.X_COLORMAP__CREATE
+X_COLORMAP__DESTROY = _selinux.X_COLORMAP__DESTROY
+X_COLORMAP__READ = _selinux.X_COLORMAP__READ
+X_COLORMAP__WRITE = _selinux.X_COLORMAP__WRITE
+X_COLORMAP__GETATTR = _selinux.X_COLORMAP__GETATTR
+X_COLORMAP__ADD_COLOR = _selinux.X_COLORMAP__ADD_COLOR
+X_COLORMAP__REMOVE_COLOR = _selinux.X_COLORMAP__REMOVE_COLOR
+X_COLORMAP__INSTALL = _selinux.X_COLORMAP__INSTALL
+X_COLORMAP__UNINSTALL = _selinux.X_COLORMAP__UNINSTALL
+X_COLORMAP__USE = _selinux.X_COLORMAP__USE
+X_PROPERTY__CREATE = _selinux.X_PROPERTY__CREATE
+X_PROPERTY__DESTROY = _selinux.X_PROPERTY__DESTROY
+X_PROPERTY__READ = _selinux.X_PROPERTY__READ
+X_PROPERTY__WRITE = _selinux.X_PROPERTY__WRITE
+X_PROPERTY__APPEND = _selinux.X_PROPERTY__APPEND
+X_PROPERTY__GETATTR = _selinux.X_PROPERTY__GETATTR
+X_PROPERTY__SETATTR = _selinux.X_PROPERTY__SETATTR
+X_SELECTION__READ = _selinux.X_SELECTION__READ
+X_SELECTION__WRITE = _selinux.X_SELECTION__WRITE
+X_SELECTION__GETATTR = _selinux.X_SELECTION__GETATTR
+X_SELECTION__SETATTR = _selinux.X_SELECTION__SETATTR
+X_CURSOR__CREATE = _selinux.X_CURSOR__CREATE
+X_CURSOR__DESTROY = _selinux.X_CURSOR__DESTROY
+X_CURSOR__READ = _selinux.X_CURSOR__READ
+X_CURSOR__WRITE = _selinux.X_CURSOR__WRITE
+X_CURSOR__GETATTR = _selinux.X_CURSOR__GETATTR
+X_CURSOR__SETATTR = _selinux.X_CURSOR__SETATTR
+X_CURSOR__USE = _selinux.X_CURSOR__USE
+X_CLIENT__DESTROY = _selinux.X_CLIENT__DESTROY
+X_CLIENT__GETATTR = _selinux.X_CLIENT__GETATTR
+X_CLIENT__SETATTR = _selinux.X_CLIENT__SETATTR
+X_CLIENT__MANAGE = _selinux.X_CLIENT__MANAGE
+X_DEVICE__GETATTR = _selinux.X_DEVICE__GETATTR
+X_DEVICE__SETATTR = _selinux.X_DEVICE__SETATTR
+X_DEVICE__USE = _selinux.X_DEVICE__USE
+X_DEVICE__READ = _selinux.X_DEVICE__READ
+X_DEVICE__WRITE = _selinux.X_DEVICE__WRITE
+X_DEVICE__GETFOCUS = _selinux.X_DEVICE__GETFOCUS
+X_DEVICE__SETFOCUS = _selinux.X_DEVICE__SETFOCUS
+X_DEVICE__BELL = _selinux.X_DEVICE__BELL
+X_DEVICE__FORCE_CURSOR = _selinux.X_DEVICE__FORCE_CURSOR
+X_DEVICE__FREEZE = _selinux.X_DEVICE__FREEZE
+X_DEVICE__GRAB = _selinux.X_DEVICE__GRAB
+X_DEVICE__MANAGE = _selinux.X_DEVICE__MANAGE
+X_SERVER__GETATTR = _selinux.X_SERVER__GETATTR
+X_SERVER__SETATTR = _selinux.X_SERVER__SETATTR
+X_SERVER__RECORD = _selinux.X_SERVER__RECORD
+X_SERVER__DEBUG = _selinux.X_SERVER__DEBUG
+X_SERVER__GRAB = _selinux.X_SERVER__GRAB
+X_SERVER__MANAGE = _selinux.X_SERVER__MANAGE
+X_EXTENSION__QUERY = _selinux.X_EXTENSION__QUERY
+X_EXTENSION__USE = _selinux.X_EXTENSION__USE
+X_RESOURCE__READ = _selinux.X_RESOURCE__READ
+X_RESOURCE__WRITE = _selinux.X_RESOURCE__WRITE
+X_EVENT__SEND = _selinux.X_EVENT__SEND
+X_EVENT__RECEIVE = _selinux.X_EVENT__RECEIVE
+X_SYNTHETIC_EVENT__SEND = _selinux.X_SYNTHETIC_EVENT__SEND
+X_SYNTHETIC_EVENT__RECEIVE = _selinux.X_SYNTHETIC_EVENT__RECEIVE
+NETLINK_ROUTE_SOCKET__IOCTL = _selinux.NETLINK_ROUTE_SOCKET__IOCTL
+NETLINK_ROUTE_SOCKET__READ = _selinux.NETLINK_ROUTE_SOCKET__READ
+NETLINK_ROUTE_SOCKET__WRITE = _selinux.NETLINK_ROUTE_SOCKET__WRITE
+NETLINK_ROUTE_SOCKET__CREATE = _selinux.NETLINK_ROUTE_SOCKET__CREATE
+NETLINK_ROUTE_SOCKET__GETATTR = _selinux.NETLINK_ROUTE_SOCKET__GETATTR
+NETLINK_ROUTE_SOCKET__SETATTR = _selinux.NETLINK_ROUTE_SOCKET__SETATTR
+NETLINK_ROUTE_SOCKET__LOCK = _selinux.NETLINK_ROUTE_SOCKET__LOCK
+NETLINK_ROUTE_SOCKET__RELABELFROM = _selinux.NETLINK_ROUTE_SOCKET__RELABELFROM
+NETLINK_ROUTE_SOCKET__RELABELTO = _selinux.NETLINK_ROUTE_SOCKET__RELABELTO
+NETLINK_ROUTE_SOCKET__APPEND = _selinux.NETLINK_ROUTE_SOCKET__APPEND
+NETLINK_ROUTE_SOCKET__BIND = _selinux.NETLINK_ROUTE_SOCKET__BIND
+NETLINK_ROUTE_SOCKET__CONNECT = _selinux.NETLINK_ROUTE_SOCKET__CONNECT
+NETLINK_ROUTE_SOCKET__LISTEN = _selinux.NETLINK_ROUTE_SOCKET__LISTEN
+NETLINK_ROUTE_SOCKET__ACCEPT = _selinux.NETLINK_ROUTE_SOCKET__ACCEPT
+NETLINK_ROUTE_SOCKET__GETOPT = _selinux.NETLINK_ROUTE_SOCKET__GETOPT
+NETLINK_ROUTE_SOCKET__SETOPT = _selinux.NETLINK_ROUTE_SOCKET__SETOPT
+NETLINK_ROUTE_SOCKET__SHUTDOWN = _selinux.NETLINK_ROUTE_SOCKET__SHUTDOWN
+NETLINK_ROUTE_SOCKET__RECVFROM = _selinux.NETLINK_ROUTE_SOCKET__RECVFROM
+NETLINK_ROUTE_SOCKET__SENDTO = _selinux.NETLINK_ROUTE_SOCKET__SENDTO
+NETLINK_ROUTE_SOCKET__RECV_MSG = _selinux.NETLINK_ROUTE_SOCKET__RECV_MSG
+NETLINK_ROUTE_SOCKET__SEND_MSG = _selinux.NETLINK_ROUTE_SOCKET__SEND_MSG
+NETLINK_ROUTE_SOCKET__NAME_BIND = _selinux.NETLINK_ROUTE_SOCKET__NAME_BIND
+NETLINK_ROUTE_SOCKET__NLMSG_READ = _selinux.NETLINK_ROUTE_SOCKET__NLMSG_READ
+NETLINK_ROUTE_SOCKET__NLMSG_WRITE = _selinux.NETLINK_ROUTE_SOCKET__NLMSG_WRITE
+NETLINK_FIREWALL_SOCKET__IOCTL = _selinux.NETLINK_FIREWALL_SOCKET__IOCTL
+NETLINK_FIREWALL_SOCKET__READ = _selinux.NETLINK_FIREWALL_SOCKET__READ
+NETLINK_FIREWALL_SOCKET__WRITE = _selinux.NETLINK_FIREWALL_SOCKET__WRITE
+NETLINK_FIREWALL_SOCKET__CREATE = _selinux.NETLINK_FIREWALL_SOCKET__CREATE
+NETLINK_FIREWALL_SOCKET__GETATTR = _selinux.NETLINK_FIREWALL_SOCKET__GETATTR
+NETLINK_FIREWALL_SOCKET__SETATTR = _selinux.NETLINK_FIREWALL_SOCKET__SETATTR
+NETLINK_FIREWALL_SOCKET__LOCK = _selinux.NETLINK_FIREWALL_SOCKET__LOCK
+NETLINK_FIREWALL_SOCKET__RELABELFROM = _selinux.NETLINK_FIREWALL_SOCKET__RELABELFROM
+NETLINK_FIREWALL_SOCKET__RELABELTO = _selinux.NETLINK_FIREWALL_SOCKET__RELABELTO
+NETLINK_FIREWALL_SOCKET__APPEND = _selinux.NETLINK_FIREWALL_SOCKET__APPEND
+NETLINK_FIREWALL_SOCKET__BIND = _selinux.NETLINK_FIREWALL_SOCKET__BIND
+NETLINK_FIREWALL_SOCKET__CONNECT = _selinux.NETLINK_FIREWALL_SOCKET__CONNECT
+NETLINK_FIREWALL_SOCKET__LISTEN = _selinux.NETLINK_FIREWALL_SOCKET__LISTEN
+NETLINK_FIREWALL_SOCKET__ACCEPT = _selinux.NETLINK_FIREWALL_SOCKET__ACCEPT
+NETLINK_FIREWALL_SOCKET__GETOPT = _selinux.NETLINK_FIREWALL_SOCKET__GETOPT
+NETLINK_FIREWALL_SOCKET__SETOPT = _selinux.NETLINK_FIREWALL_SOCKET__SETOPT
+NETLINK_FIREWALL_SOCKET__SHUTDOWN = _selinux.NETLINK_FIREWALL_SOCKET__SHUTDOWN
+NETLINK_FIREWALL_SOCKET__RECVFROM = _selinux.NETLINK_FIREWALL_SOCKET__RECVFROM
+NETLINK_FIREWALL_SOCKET__SENDTO = _selinux.NETLINK_FIREWALL_SOCKET__SENDTO
+NETLINK_FIREWALL_SOCKET__RECV_MSG = _selinux.NETLINK_FIREWALL_SOCKET__RECV_MSG
+NETLINK_FIREWALL_SOCKET__SEND_MSG = _selinux.NETLINK_FIREWALL_SOCKET__SEND_MSG
+NETLINK_FIREWALL_SOCKET__NAME_BIND = _selinux.NETLINK_FIREWALL_SOCKET__NAME_BIND
+NETLINK_FIREWALL_SOCKET__NLMSG_READ = _selinux.NETLINK_FIREWALL_SOCKET__NLMSG_READ
+NETLINK_FIREWALL_SOCKET__NLMSG_WRITE = _selinux.NETLINK_FIREWALL_SOCKET__NLMSG_WRITE
+NETLINK_TCPDIAG_SOCKET__IOCTL = _selinux.NETLINK_TCPDIAG_SOCKET__IOCTL
+NETLINK_TCPDIAG_SOCKET__READ = _selinux.NETLINK_TCPDIAG_SOCKET__READ
+NETLINK_TCPDIAG_SOCKET__WRITE = _selinux.NETLINK_TCPDIAG_SOCKET__WRITE
+NETLINK_TCPDIAG_SOCKET__CREATE = _selinux.NETLINK_TCPDIAG_SOCKET__CREATE
+NETLINK_TCPDIAG_SOCKET__GETATTR = _selinux.NETLINK_TCPDIAG_SOCKET__GETATTR
+NETLINK_TCPDIAG_SOCKET__SETATTR = _selinux.NETLINK_TCPDIAG_SOCKET__SETATTR
+NETLINK_TCPDIAG_SOCKET__LOCK = _selinux.NETLINK_TCPDIAG_SOCKET__LOCK
+NETLINK_TCPDIAG_SOCKET__RELABELFROM = _selinux.NETLINK_TCPDIAG_SOCKET__RELABELFROM
+NETLINK_TCPDIAG_SOCKET__RELABELTO = _selinux.NETLINK_TCPDIAG_SOCKET__RELABELTO
+NETLINK_TCPDIAG_SOCKET__APPEND = _selinux.NETLINK_TCPDIAG_SOCKET__APPEND
+NETLINK_TCPDIAG_SOCKET__BIND = _selinux.NETLINK_TCPDIAG_SOCKET__BIND
+NETLINK_TCPDIAG_SOCKET__CONNECT = _selinux.NETLINK_TCPDIAG_SOCKET__CONNECT
+NETLINK_TCPDIAG_SOCKET__LISTEN = _selinux.NETLINK_TCPDIAG_SOCKET__LISTEN
+NETLINK_TCPDIAG_SOCKET__ACCEPT = _selinux.NETLINK_TCPDIAG_SOCKET__ACCEPT
+NETLINK_TCPDIAG_SOCKET__GETOPT = _selinux.NETLINK_TCPDIAG_SOCKET__GETOPT
+NETLINK_TCPDIAG_SOCKET__SETOPT = _selinux.NETLINK_TCPDIAG_SOCKET__SETOPT
+NETLINK_TCPDIAG_SOCKET__SHUTDOWN = _selinux.NETLINK_TCPDIAG_SOCKET__SHUTDOWN
+NETLINK_TCPDIAG_SOCKET__RECVFROM = _selinux.NETLINK_TCPDIAG_SOCKET__RECVFROM
+NETLINK_TCPDIAG_SOCKET__SENDTO = _selinux.NETLINK_TCPDIAG_SOCKET__SENDTO
+NETLINK_TCPDIAG_SOCKET__RECV_MSG = _selinux.NETLINK_TCPDIAG_SOCKET__RECV_MSG
+NETLINK_TCPDIAG_SOCKET__SEND_MSG = _selinux.NETLINK_TCPDIAG_SOCKET__SEND_MSG
+NETLINK_TCPDIAG_SOCKET__NAME_BIND = _selinux.NETLINK_TCPDIAG_SOCKET__NAME_BIND
+NETLINK_TCPDIAG_SOCKET__NLMSG_READ = _selinux.NETLINK_TCPDIAG_SOCKET__NLMSG_READ
+NETLINK_TCPDIAG_SOCKET__NLMSG_WRITE = _selinux.NETLINK_TCPDIAG_SOCKET__NLMSG_WRITE
+NETLINK_NFLOG_SOCKET__IOCTL = _selinux.NETLINK_NFLOG_SOCKET__IOCTL
+NETLINK_NFLOG_SOCKET__READ = _selinux.NETLINK_NFLOG_SOCKET__READ
+NETLINK_NFLOG_SOCKET__WRITE = _selinux.NETLINK_NFLOG_SOCKET__WRITE
+NETLINK_NFLOG_SOCKET__CREATE = _selinux.NETLINK_NFLOG_SOCKET__CREATE
+NETLINK_NFLOG_SOCKET__GETATTR = _selinux.NETLINK_NFLOG_SOCKET__GETATTR
+NETLINK_NFLOG_SOCKET__SETATTR = _selinux.NETLINK_NFLOG_SOCKET__SETATTR
+NETLINK_NFLOG_SOCKET__LOCK = _selinux.NETLINK_NFLOG_SOCKET__LOCK
+NETLINK_NFLOG_SOCKET__RELABELFROM = _selinux.NETLINK_NFLOG_SOCKET__RELABELFROM
+NETLINK_NFLOG_SOCKET__RELABELTO = _selinux.NETLINK_NFLOG_SOCKET__RELABELTO
+NETLINK_NFLOG_SOCKET__APPEND = _selinux.NETLINK_NFLOG_SOCKET__APPEND
+NETLINK_NFLOG_SOCKET__BIND = _selinux.NETLINK_NFLOG_SOCKET__BIND
+NETLINK_NFLOG_SOCKET__CONNECT = _selinux.NETLINK_NFLOG_SOCKET__CONNECT
+NETLINK_NFLOG_SOCKET__LISTEN = _selinux.NETLINK_NFLOG_SOCKET__LISTEN
+NETLINK_NFLOG_SOCKET__ACCEPT = _selinux.NETLINK_NFLOG_SOCKET__ACCEPT
+NETLINK_NFLOG_SOCKET__GETOPT = _selinux.NETLINK_NFLOG_SOCKET__GETOPT
+NETLINK_NFLOG_SOCKET__SETOPT = _selinux.NETLINK_NFLOG_SOCKET__SETOPT
+NETLINK_NFLOG_SOCKET__SHUTDOWN = _selinux.NETLINK_NFLOG_SOCKET__SHUTDOWN
+NETLINK_NFLOG_SOCKET__RECVFROM = _selinux.NETLINK_NFLOG_SOCKET__RECVFROM
+NETLINK_NFLOG_SOCKET__SENDTO = _selinux.NETLINK_NFLOG_SOCKET__SENDTO
+NETLINK_NFLOG_SOCKET__RECV_MSG = _selinux.NETLINK_NFLOG_SOCKET__RECV_MSG
+NETLINK_NFLOG_SOCKET__SEND_MSG = _selinux.NETLINK_NFLOG_SOCKET__SEND_MSG
+NETLINK_NFLOG_SOCKET__NAME_BIND = _selinux.NETLINK_NFLOG_SOCKET__NAME_BIND
+NETLINK_XFRM_SOCKET__IOCTL = _selinux.NETLINK_XFRM_SOCKET__IOCTL
+NETLINK_XFRM_SOCKET__READ = _selinux.NETLINK_XFRM_SOCKET__READ
+NETLINK_XFRM_SOCKET__WRITE = _selinux.NETLINK_XFRM_SOCKET__WRITE
+NETLINK_XFRM_SOCKET__CREATE = _selinux.NETLINK_XFRM_SOCKET__CREATE
+NETLINK_XFRM_SOCKET__GETATTR = _selinux.NETLINK_XFRM_SOCKET__GETATTR
+NETLINK_XFRM_SOCKET__SETATTR = _selinux.NETLINK_XFRM_SOCKET__SETATTR
+NETLINK_XFRM_SOCKET__LOCK = _selinux.NETLINK_XFRM_SOCKET__LOCK
+NETLINK_XFRM_SOCKET__RELABELFROM = _selinux.NETLINK_XFRM_SOCKET__RELABELFROM
+NETLINK_XFRM_SOCKET__RELABELTO = _selinux.NETLINK_XFRM_SOCKET__RELABELTO
+NETLINK_XFRM_SOCKET__APPEND = _selinux.NETLINK_XFRM_SOCKET__APPEND
+NETLINK_XFRM_SOCKET__BIND = _selinux.NETLINK_XFRM_SOCKET__BIND
+NETLINK_XFRM_SOCKET__CONNECT = _selinux.NETLINK_XFRM_SOCKET__CONNECT
+NETLINK_XFRM_SOCKET__LISTEN = _selinux.NETLINK_XFRM_SOCKET__LISTEN
+NETLINK_XFRM_SOCKET__ACCEPT = _selinux.NETLINK_XFRM_SOCKET__ACCEPT
+NETLINK_XFRM_SOCKET__GETOPT = _selinux.NETLINK_XFRM_SOCKET__GETOPT
+NETLINK_XFRM_SOCKET__SETOPT = _selinux.NETLINK_XFRM_SOCKET__SETOPT
+NETLINK_XFRM_SOCKET__SHUTDOWN = _selinux.NETLINK_XFRM_SOCKET__SHUTDOWN
+NETLINK_XFRM_SOCKET__RECVFROM = _selinux.NETLINK_XFRM_SOCKET__RECVFROM
+NETLINK_XFRM_SOCKET__SENDTO = _selinux.NETLINK_XFRM_SOCKET__SENDTO
+NETLINK_XFRM_SOCKET__RECV_MSG = _selinux.NETLINK_XFRM_SOCKET__RECV_MSG
+NETLINK_XFRM_SOCKET__SEND_MSG = _selinux.NETLINK_XFRM_SOCKET__SEND_MSG
+NETLINK_XFRM_SOCKET__NAME_BIND = _selinux.NETLINK_XFRM_SOCKET__NAME_BIND
+NETLINK_XFRM_SOCKET__NLMSG_READ = _selinux.NETLINK_XFRM_SOCKET__NLMSG_READ
+NETLINK_XFRM_SOCKET__NLMSG_WRITE = _selinux.NETLINK_XFRM_SOCKET__NLMSG_WRITE
+NETLINK_SELINUX_SOCKET__IOCTL = _selinux.NETLINK_SELINUX_SOCKET__IOCTL
+NETLINK_SELINUX_SOCKET__READ = _selinux.NETLINK_SELINUX_SOCKET__READ
+NETLINK_SELINUX_SOCKET__WRITE = _selinux.NETLINK_SELINUX_SOCKET__WRITE
+NETLINK_SELINUX_SOCKET__CREATE = _selinux.NETLINK_SELINUX_SOCKET__CREATE
+NETLINK_SELINUX_SOCKET__GETATTR = _selinux.NETLINK_SELINUX_SOCKET__GETATTR
+NETLINK_SELINUX_SOCKET__SETATTR = _selinux.NETLINK_SELINUX_SOCKET__SETATTR
+NETLINK_SELINUX_SOCKET__LOCK = _selinux.NETLINK_SELINUX_SOCKET__LOCK
+NETLINK_SELINUX_SOCKET__RELABELFROM = _selinux.NETLINK_SELINUX_SOCKET__RELABELFROM
+NETLINK_SELINUX_SOCKET__RELABELTO = _selinux.NETLINK_SELINUX_SOCKET__RELABELTO
+NETLINK_SELINUX_SOCKET__APPEND = _selinux.NETLINK_SELINUX_SOCKET__APPEND
+NETLINK_SELINUX_SOCKET__BIND = _selinux.NETLINK_SELINUX_SOCKET__BIND
+NETLINK_SELINUX_SOCKET__CONNECT = _selinux.NETLINK_SELINUX_SOCKET__CONNECT
+NETLINK_SELINUX_SOCKET__LISTEN = _selinux.NETLINK_SELINUX_SOCKET__LISTEN
+NETLINK_SELINUX_SOCKET__ACCEPT = _selinux.NETLINK_SELINUX_SOCKET__ACCEPT
+NETLINK_SELINUX_SOCKET__GETOPT = _selinux.NETLINK_SELINUX_SOCKET__GETOPT
+NETLINK_SELINUX_SOCKET__SETOPT = _selinux.NETLINK_SELINUX_SOCKET__SETOPT
+NETLINK_SELINUX_SOCKET__SHUTDOWN = _selinux.NETLINK_SELINUX_SOCKET__SHUTDOWN
+NETLINK_SELINUX_SOCKET__RECVFROM = _selinux.NETLINK_SELINUX_SOCKET__RECVFROM
+NETLINK_SELINUX_SOCKET__SENDTO = _selinux.NETLINK_SELINUX_SOCKET__SENDTO
+NETLINK_SELINUX_SOCKET__RECV_MSG = _selinux.NETLINK_SELINUX_SOCKET__RECV_MSG
+NETLINK_SELINUX_SOCKET__SEND_MSG = _selinux.NETLINK_SELINUX_SOCKET__SEND_MSG
+NETLINK_SELINUX_SOCKET__NAME_BIND = _selinux.NETLINK_SELINUX_SOCKET__NAME_BIND
+NETLINK_AUDIT_SOCKET__IOCTL = _selinux.NETLINK_AUDIT_SOCKET__IOCTL
+NETLINK_AUDIT_SOCKET__READ = _selinux.NETLINK_AUDIT_SOCKET__READ
+NETLINK_AUDIT_SOCKET__WRITE = _selinux.NETLINK_AUDIT_SOCKET__WRITE
+NETLINK_AUDIT_SOCKET__CREATE = _selinux.NETLINK_AUDIT_SOCKET__CREATE
+NETLINK_AUDIT_SOCKET__GETATTR = _selinux.NETLINK_AUDIT_SOCKET__GETATTR
+NETLINK_AUDIT_SOCKET__SETATTR = _selinux.NETLINK_AUDIT_SOCKET__SETATTR
+NETLINK_AUDIT_SOCKET__LOCK = _selinux.NETLINK_AUDIT_SOCKET__LOCK
+NETLINK_AUDIT_SOCKET__RELABELFROM = _selinux.NETLINK_AUDIT_SOCKET__RELABELFROM
+NETLINK_AUDIT_SOCKET__RELABELTO = _selinux.NETLINK_AUDIT_SOCKET__RELABELTO
+NETLINK_AUDIT_SOCKET__APPEND = _selinux.NETLINK_AUDIT_SOCKET__APPEND
+NETLINK_AUDIT_SOCKET__BIND = _selinux.NETLINK_AUDIT_SOCKET__BIND
+NETLINK_AUDIT_SOCKET__CONNECT = _selinux.NETLINK_AUDIT_SOCKET__CONNECT
+NETLINK_AUDIT_SOCKET__LISTEN = _selinux.NETLINK_AUDIT_SOCKET__LISTEN
+NETLINK_AUDIT_SOCKET__ACCEPT = _selinux.NETLINK_AUDIT_SOCKET__ACCEPT
+NETLINK_AUDIT_SOCKET__GETOPT = _selinux.NETLINK_AUDIT_SOCKET__GETOPT
+NETLINK_AUDIT_SOCKET__SETOPT = _selinux.NETLINK_AUDIT_SOCKET__SETOPT
+NETLINK_AUDIT_SOCKET__SHUTDOWN = _selinux.NETLINK_AUDIT_SOCKET__SHUTDOWN
+NETLINK_AUDIT_SOCKET__RECVFROM = _selinux.NETLINK_AUDIT_SOCKET__RECVFROM
+NETLINK_AUDIT_SOCKET__SENDTO = _selinux.NETLINK_AUDIT_SOCKET__SENDTO
+NETLINK_AUDIT_SOCKET__RECV_MSG = _selinux.NETLINK_AUDIT_SOCKET__RECV_MSG
+NETLINK_AUDIT_SOCKET__SEND_MSG = _selinux.NETLINK_AUDIT_SOCKET__SEND_MSG
+NETLINK_AUDIT_SOCKET__NAME_BIND = _selinux.NETLINK_AUDIT_SOCKET__NAME_BIND
+NETLINK_AUDIT_SOCKET__NLMSG_READ = _selinux.NETLINK_AUDIT_SOCKET__NLMSG_READ
+NETLINK_AUDIT_SOCKET__NLMSG_WRITE = _selinux.NETLINK_AUDIT_SOCKET__NLMSG_WRITE
+NETLINK_AUDIT_SOCKET__NLMSG_RELAY = _selinux.NETLINK_AUDIT_SOCKET__NLMSG_RELAY
+NETLINK_AUDIT_SOCKET__NLMSG_READPRIV = _selinux.NETLINK_AUDIT_SOCKET__NLMSG_READPRIV
+NETLINK_AUDIT_SOCKET__NLMSG_TTY_AUDIT = _selinux.NETLINK_AUDIT_SOCKET__NLMSG_TTY_AUDIT
+NETLINK_IP6FW_SOCKET__IOCTL = _selinux.NETLINK_IP6FW_SOCKET__IOCTL
+NETLINK_IP6FW_SOCKET__READ = _selinux.NETLINK_IP6FW_SOCKET__READ
+NETLINK_IP6FW_SOCKET__WRITE = _selinux.NETLINK_IP6FW_SOCKET__WRITE
+NETLINK_IP6FW_SOCKET__CREATE = _selinux.NETLINK_IP6FW_SOCKET__CREATE
+NETLINK_IP6FW_SOCKET__GETATTR = _selinux.NETLINK_IP6FW_SOCKET__GETATTR
+NETLINK_IP6FW_SOCKET__SETATTR = _selinux.NETLINK_IP6FW_SOCKET__SETATTR
+NETLINK_IP6FW_SOCKET__LOCK = _selinux.NETLINK_IP6FW_SOCKET__LOCK
+NETLINK_IP6FW_SOCKET__RELABELFROM = _selinux.NETLINK_IP6FW_SOCKET__RELABELFROM
+NETLINK_IP6FW_SOCKET__RELABELTO = _selinux.NETLINK_IP6FW_SOCKET__RELABELTO
+NETLINK_IP6FW_SOCKET__APPEND = _selinux.NETLINK_IP6FW_SOCKET__APPEND
+NETLINK_IP6FW_SOCKET__BIND = _selinux.NETLINK_IP6FW_SOCKET__BIND
+NETLINK_IP6FW_SOCKET__CONNECT = _selinux.NETLINK_IP6FW_SOCKET__CONNECT
+NETLINK_IP6FW_SOCKET__LISTEN = _selinux.NETLINK_IP6FW_SOCKET__LISTEN
+NETLINK_IP6FW_SOCKET__ACCEPT = _selinux.NETLINK_IP6FW_SOCKET__ACCEPT
+NETLINK_IP6FW_SOCKET__GETOPT = _selinux.NETLINK_IP6FW_SOCKET__GETOPT
+NETLINK_IP6FW_SOCKET__SETOPT = _selinux.NETLINK_IP6FW_SOCKET__SETOPT
+NETLINK_IP6FW_SOCKET__SHUTDOWN = _selinux.NETLINK_IP6FW_SOCKET__SHUTDOWN
+NETLINK_IP6FW_SOCKET__RECVFROM = _selinux.NETLINK_IP6FW_SOCKET__RECVFROM
+NETLINK_IP6FW_SOCKET__SENDTO = _selinux.NETLINK_IP6FW_SOCKET__SENDTO
+NETLINK_IP6FW_SOCKET__RECV_MSG = _selinux.NETLINK_IP6FW_SOCKET__RECV_MSG
+NETLINK_IP6FW_SOCKET__SEND_MSG = _selinux.NETLINK_IP6FW_SOCKET__SEND_MSG
+NETLINK_IP6FW_SOCKET__NAME_BIND = _selinux.NETLINK_IP6FW_SOCKET__NAME_BIND
+NETLINK_IP6FW_SOCKET__NLMSG_READ = _selinux.NETLINK_IP6FW_SOCKET__NLMSG_READ
+NETLINK_IP6FW_SOCKET__NLMSG_WRITE = _selinux.NETLINK_IP6FW_SOCKET__NLMSG_WRITE
+NETLINK_DNRT_SOCKET__IOCTL = _selinux.NETLINK_DNRT_SOCKET__IOCTL
+NETLINK_DNRT_SOCKET__READ = _selinux.NETLINK_DNRT_SOCKET__READ
+NETLINK_DNRT_SOCKET__WRITE = _selinux.NETLINK_DNRT_SOCKET__WRITE
+NETLINK_DNRT_SOCKET__CREATE = _selinux.NETLINK_DNRT_SOCKET__CREATE
+NETLINK_DNRT_SOCKET__GETATTR = _selinux.NETLINK_DNRT_SOCKET__GETATTR
+NETLINK_DNRT_SOCKET__SETATTR = _selinux.NETLINK_DNRT_SOCKET__SETATTR
+NETLINK_DNRT_SOCKET__LOCK = _selinux.NETLINK_DNRT_SOCKET__LOCK
+NETLINK_DNRT_SOCKET__RELABELFROM = _selinux.NETLINK_DNRT_SOCKET__RELABELFROM
+NETLINK_DNRT_SOCKET__RELABELTO = _selinux.NETLINK_DNRT_SOCKET__RELABELTO
+NETLINK_DNRT_SOCKET__APPEND = _selinux.NETLINK_DNRT_SOCKET__APPEND
+NETLINK_DNRT_SOCKET__BIND = _selinux.NETLINK_DNRT_SOCKET__BIND
+NETLINK_DNRT_SOCKET__CONNECT = _selinux.NETLINK_DNRT_SOCKET__CONNECT
+NETLINK_DNRT_SOCKET__LISTEN = _selinux.NETLINK_DNRT_SOCKET__LISTEN
+NETLINK_DNRT_SOCKET__ACCEPT = _selinux.NETLINK_DNRT_SOCKET__ACCEPT
+NETLINK_DNRT_SOCKET__GETOPT = _selinux.NETLINK_DNRT_SOCKET__GETOPT
+NETLINK_DNRT_SOCKET__SETOPT = _selinux.NETLINK_DNRT_SOCKET__SETOPT
+NETLINK_DNRT_SOCKET__SHUTDOWN = _selinux.NETLINK_DNRT_SOCKET__SHUTDOWN
+NETLINK_DNRT_SOCKET__RECVFROM = _selinux.NETLINK_DNRT_SOCKET__RECVFROM
+NETLINK_DNRT_SOCKET__SENDTO = _selinux.NETLINK_DNRT_SOCKET__SENDTO
+NETLINK_DNRT_SOCKET__RECV_MSG = _selinux.NETLINK_DNRT_SOCKET__RECV_MSG
+NETLINK_DNRT_SOCKET__SEND_MSG = _selinux.NETLINK_DNRT_SOCKET__SEND_MSG
+NETLINK_DNRT_SOCKET__NAME_BIND = _selinux.NETLINK_DNRT_SOCKET__NAME_BIND
+DBUS__ACQUIRE_SVC = _selinux.DBUS__ACQUIRE_SVC
+DBUS__SEND_MSG = _selinux.DBUS__SEND_MSG
+NSCD__GETPWD = _selinux.NSCD__GETPWD
+NSCD__GETGRP = _selinux.NSCD__GETGRP
+NSCD__GETHOST = _selinux.NSCD__GETHOST
+NSCD__GETSTAT = _selinux.NSCD__GETSTAT
+NSCD__ADMIN = _selinux.NSCD__ADMIN
+NSCD__SHMEMPWD = _selinux.NSCD__SHMEMPWD
+NSCD__SHMEMGRP = _selinux.NSCD__SHMEMGRP
+NSCD__SHMEMHOST = _selinux.NSCD__SHMEMHOST
+NSCD__GETSERV = _selinux.NSCD__GETSERV
+NSCD__SHMEMSERV = _selinux.NSCD__SHMEMSERV
+ASSOCIATION__SENDTO = _selinux.ASSOCIATION__SENDTO
+ASSOCIATION__RECVFROM = _selinux.ASSOCIATION__RECVFROM
+ASSOCIATION__SETCONTEXT = _selinux.ASSOCIATION__SETCONTEXT
+ASSOCIATION__POLMATCH = _selinux.ASSOCIATION__POLMATCH
+NETLINK_KOBJECT_UEVENT_SOCKET__IOCTL = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__IOCTL
+NETLINK_KOBJECT_UEVENT_SOCKET__READ = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__READ
+NETLINK_KOBJECT_UEVENT_SOCKET__WRITE = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__WRITE
+NETLINK_KOBJECT_UEVENT_SOCKET__CREATE = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__CREATE
+NETLINK_KOBJECT_UEVENT_SOCKET__GETATTR = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__GETATTR
+NETLINK_KOBJECT_UEVENT_SOCKET__SETATTR = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__SETATTR
+NETLINK_KOBJECT_UEVENT_SOCKET__LOCK = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__LOCK
+NETLINK_KOBJECT_UEVENT_SOCKET__RELABELFROM = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__RELABELFROM
+NETLINK_KOBJECT_UEVENT_SOCKET__RELABELTO = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__RELABELTO
+NETLINK_KOBJECT_UEVENT_SOCKET__APPEND = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__APPEND
+NETLINK_KOBJECT_UEVENT_SOCKET__BIND = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__BIND
+NETLINK_KOBJECT_UEVENT_SOCKET__CONNECT = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__CONNECT
+NETLINK_KOBJECT_UEVENT_SOCKET__LISTEN = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__LISTEN
+NETLINK_KOBJECT_UEVENT_SOCKET__ACCEPT = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__ACCEPT
+NETLINK_KOBJECT_UEVENT_SOCKET__GETOPT = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__GETOPT
+NETLINK_KOBJECT_UEVENT_SOCKET__SETOPT = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__SETOPT
+NETLINK_KOBJECT_UEVENT_SOCKET__SHUTDOWN = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__SHUTDOWN
+NETLINK_KOBJECT_UEVENT_SOCKET__RECVFROM = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__RECVFROM
+NETLINK_KOBJECT_UEVENT_SOCKET__SENDTO = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__SENDTO
+NETLINK_KOBJECT_UEVENT_SOCKET__RECV_MSG = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__RECV_MSG
+NETLINK_KOBJECT_UEVENT_SOCKET__SEND_MSG = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__SEND_MSG
+NETLINK_KOBJECT_UEVENT_SOCKET__NAME_BIND = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__NAME_BIND
+APPLETALK_SOCKET__IOCTL = _selinux.APPLETALK_SOCKET__IOCTL
+APPLETALK_SOCKET__READ = _selinux.APPLETALK_SOCKET__READ
+APPLETALK_SOCKET__WRITE = _selinux.APPLETALK_SOCKET__WRITE
+APPLETALK_SOCKET__CREATE = _selinux.APPLETALK_SOCKET__CREATE
+APPLETALK_SOCKET__GETATTR = _selinux.APPLETALK_SOCKET__GETATTR
+APPLETALK_SOCKET__SETATTR = _selinux.APPLETALK_SOCKET__SETATTR
+APPLETALK_SOCKET__LOCK = _selinux.APPLETALK_SOCKET__LOCK
+APPLETALK_SOCKET__RELABELFROM = _selinux.APPLETALK_SOCKET__RELABELFROM
+APPLETALK_SOCKET__RELABELTO = _selinux.APPLETALK_SOCKET__RELABELTO
+APPLETALK_SOCKET__APPEND = _selinux.APPLETALK_SOCKET__APPEND
+APPLETALK_SOCKET__BIND = _selinux.APPLETALK_SOCKET__BIND
+APPLETALK_SOCKET__CONNECT = _selinux.APPLETALK_SOCKET__CONNECT
+APPLETALK_SOCKET__LISTEN = _selinux.APPLETALK_SOCKET__LISTEN
+APPLETALK_SOCKET__ACCEPT = _selinux.APPLETALK_SOCKET__ACCEPT
+APPLETALK_SOCKET__GETOPT = _selinux.APPLETALK_SOCKET__GETOPT
+APPLETALK_SOCKET__SETOPT = _selinux.APPLETALK_SOCKET__SETOPT
+APPLETALK_SOCKET__SHUTDOWN = _selinux.APPLETALK_SOCKET__SHUTDOWN
+APPLETALK_SOCKET__RECVFROM = _selinux.APPLETALK_SOCKET__RECVFROM
+APPLETALK_SOCKET__SENDTO = _selinux.APPLETALK_SOCKET__SENDTO
+APPLETALK_SOCKET__RECV_MSG = _selinux.APPLETALK_SOCKET__RECV_MSG
+APPLETALK_SOCKET__SEND_MSG = _selinux.APPLETALK_SOCKET__SEND_MSG
+APPLETALK_SOCKET__NAME_BIND = _selinux.APPLETALK_SOCKET__NAME_BIND
+PACKET__SEND = _selinux.PACKET__SEND
+PACKET__RECV = _selinux.PACKET__RECV
+PACKET__RELABELTO = _selinux.PACKET__RELABELTO
+PACKET__FLOW_IN = _selinux.PACKET__FLOW_IN
+PACKET__FLOW_OUT = _selinux.PACKET__FLOW_OUT
+PACKET__FORWARD_IN = _selinux.PACKET__FORWARD_IN
+PACKET__FORWARD_OUT = _selinux.PACKET__FORWARD_OUT
+KEY__VIEW = _selinux.KEY__VIEW
+KEY__READ = _selinux.KEY__READ
+KEY__WRITE = _selinux.KEY__WRITE
+KEY__SEARCH = _selinux.KEY__SEARCH
+KEY__LINK = _selinux.KEY__LINK
+KEY__SETATTR = _selinux.KEY__SETATTR
+KEY__CREATE = _selinux.KEY__CREATE
+CONTEXT__TRANSLATE = _selinux.CONTEXT__TRANSLATE
+CONTEXT__CONTAINS = _selinux.CONTEXT__CONTAINS
+DCCP_SOCKET__IOCTL = _selinux.DCCP_SOCKET__IOCTL
+DCCP_SOCKET__READ = _selinux.DCCP_SOCKET__READ
+DCCP_SOCKET__WRITE = _selinux.DCCP_SOCKET__WRITE
+DCCP_SOCKET__CREATE = _selinux.DCCP_SOCKET__CREATE
+DCCP_SOCKET__GETATTR = _selinux.DCCP_SOCKET__GETATTR
+DCCP_SOCKET__SETATTR = _selinux.DCCP_SOCKET__SETATTR
+DCCP_SOCKET__LOCK = _selinux.DCCP_SOCKET__LOCK
+DCCP_SOCKET__RELABELFROM = _selinux.DCCP_SOCKET__RELABELFROM
+DCCP_SOCKET__RELABELTO = _selinux.DCCP_SOCKET__RELABELTO
+DCCP_SOCKET__APPEND = _selinux.DCCP_SOCKET__APPEND
+DCCP_SOCKET__BIND = _selinux.DCCP_SOCKET__BIND
+DCCP_SOCKET__CONNECT = _selinux.DCCP_SOCKET__CONNECT
+DCCP_SOCKET__LISTEN = _selinux.DCCP_SOCKET__LISTEN
+DCCP_SOCKET__ACCEPT = _selinux.DCCP_SOCKET__ACCEPT
+DCCP_SOCKET__GETOPT = _selinux.DCCP_SOCKET__GETOPT
+DCCP_SOCKET__SETOPT = _selinux.DCCP_SOCKET__SETOPT
+DCCP_SOCKET__SHUTDOWN = _selinux.DCCP_SOCKET__SHUTDOWN
+DCCP_SOCKET__RECVFROM = _selinux.DCCP_SOCKET__RECVFROM
+DCCP_SOCKET__SENDTO = _selinux.DCCP_SOCKET__SENDTO
+DCCP_SOCKET__RECV_MSG = _selinux.DCCP_SOCKET__RECV_MSG
+DCCP_SOCKET__SEND_MSG = _selinux.DCCP_SOCKET__SEND_MSG
+DCCP_SOCKET__NAME_BIND = _selinux.DCCP_SOCKET__NAME_BIND
+DCCP_SOCKET__NODE_BIND = _selinux.DCCP_SOCKET__NODE_BIND
+DCCP_SOCKET__NAME_CONNECT = _selinux.DCCP_SOCKET__NAME_CONNECT
+MEMPROTECT__MMAP_ZERO = _selinux.MEMPROTECT__MMAP_ZERO
+DB_DATABASE__CREATE = _selinux.DB_DATABASE__CREATE
+DB_DATABASE__DROP = _selinux.DB_DATABASE__DROP
+DB_DATABASE__GETATTR = _selinux.DB_DATABASE__GETATTR
+DB_DATABASE__SETATTR = _selinux.DB_DATABASE__SETATTR
+DB_DATABASE__RELABELFROM = _selinux.DB_DATABASE__RELABELFROM
+DB_DATABASE__RELABELTO = _selinux.DB_DATABASE__RELABELTO
+DB_DATABASE__ACCESS = _selinux.DB_DATABASE__ACCESS
+DB_DATABASE__INSTALL_MODULE = _selinux.DB_DATABASE__INSTALL_MODULE
+DB_DATABASE__LOAD_MODULE = _selinux.DB_DATABASE__LOAD_MODULE
+DB_DATABASE__GET_PARAM = _selinux.DB_DATABASE__GET_PARAM
+DB_DATABASE__SET_PARAM = _selinux.DB_DATABASE__SET_PARAM
+DB_TABLE__CREATE = _selinux.DB_TABLE__CREATE
+DB_TABLE__DROP = _selinux.DB_TABLE__DROP
+DB_TABLE__GETATTR = _selinux.DB_TABLE__GETATTR
+DB_TABLE__SETATTR = _selinux.DB_TABLE__SETATTR
+DB_TABLE__RELABELFROM = _selinux.DB_TABLE__RELABELFROM
+DB_TABLE__RELABELTO = _selinux.DB_TABLE__RELABELTO
+DB_TABLE__USE = _selinux.DB_TABLE__USE
+DB_TABLE__SELECT = _selinux.DB_TABLE__SELECT
+DB_TABLE__UPDATE = _selinux.DB_TABLE__UPDATE
+DB_TABLE__INSERT = _selinux.DB_TABLE__INSERT
+DB_TABLE__DELETE = _selinux.DB_TABLE__DELETE
+DB_TABLE__LOCK = _selinux.DB_TABLE__LOCK
+DB_PROCEDURE__CREATE = _selinux.DB_PROCEDURE__CREATE
+DB_PROCEDURE__DROP = _selinux.DB_PROCEDURE__DROP
+DB_PROCEDURE__GETATTR = _selinux.DB_PROCEDURE__GETATTR
+DB_PROCEDURE__SETATTR = _selinux.DB_PROCEDURE__SETATTR
+DB_PROCEDURE__RELABELFROM = _selinux.DB_PROCEDURE__RELABELFROM
+DB_PROCEDURE__RELABELTO = _selinux.DB_PROCEDURE__RELABELTO
+DB_PROCEDURE__EXECUTE = _selinux.DB_PROCEDURE__EXECUTE
+DB_PROCEDURE__ENTRYPOINT = _selinux.DB_PROCEDURE__ENTRYPOINT
+DB_COLUMN__CREATE = _selinux.DB_COLUMN__CREATE
+DB_COLUMN__DROP = _selinux.DB_COLUMN__DROP
+DB_COLUMN__GETATTR = _selinux.DB_COLUMN__GETATTR
+DB_COLUMN__SETATTR = _selinux.DB_COLUMN__SETATTR
+DB_COLUMN__RELABELFROM = _selinux.DB_COLUMN__RELABELFROM
+DB_COLUMN__RELABELTO = _selinux.DB_COLUMN__RELABELTO
+DB_COLUMN__USE = _selinux.DB_COLUMN__USE
+DB_COLUMN__SELECT = _selinux.DB_COLUMN__SELECT
+DB_COLUMN__UPDATE = _selinux.DB_COLUMN__UPDATE
+DB_COLUMN__INSERT = _selinux.DB_COLUMN__INSERT
+DB_TUPLE__RELABELFROM = _selinux.DB_TUPLE__RELABELFROM
+DB_TUPLE__RELABELTO = _selinux.DB_TUPLE__RELABELTO
+DB_TUPLE__USE = _selinux.DB_TUPLE__USE
+DB_TUPLE__SELECT = _selinux.DB_TUPLE__SELECT
+DB_TUPLE__UPDATE = _selinux.DB_TUPLE__UPDATE
+DB_TUPLE__INSERT = _selinux.DB_TUPLE__INSERT
+DB_TUPLE__DELETE = _selinux.DB_TUPLE__DELETE
+DB_BLOB__CREATE = _selinux.DB_BLOB__CREATE
+DB_BLOB__DROP = _selinux.DB_BLOB__DROP
+DB_BLOB__GETATTR = _selinux.DB_BLOB__GETATTR
+DB_BLOB__SETATTR = _selinux.DB_BLOB__SETATTR
+DB_BLOB__RELABELFROM = _selinux.DB_BLOB__RELABELFROM
+DB_BLOB__RELABELTO = _selinux.DB_BLOB__RELABELTO
+DB_BLOB__READ = _selinux.DB_BLOB__READ
+DB_BLOB__WRITE = _selinux.DB_BLOB__WRITE
+DB_BLOB__IMPORT = _selinux.DB_BLOB__IMPORT
+DB_BLOB__EXPORT = _selinux.DB_BLOB__EXPORT
+PEER__RECV = _selinux.PEER__RECV
+X_APPLICATION_DATA__PASTE = _selinux.X_APPLICATION_DATA__PASTE
+X_APPLICATION_DATA__PASTE_AFTER_CONFIRM = _selinux.X_APPLICATION_DATA__PASTE_AFTER_CONFIRM
+X_APPLICATION_DATA__COPY = _selinux.X_APPLICATION_DATA__COPY
+class context_s_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, context_s_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, context_s_t, name)
+ __repr__ = _swig_repr
+ __swig_setmethods__["ptr"] = _selinux.context_s_t_ptr_set
+ __swig_getmethods__["ptr"] = _selinux.context_s_t_ptr_get
+ if _newclass:ptr = _swig_property(_selinux.context_s_t_ptr_get, _selinux.context_s_t_ptr_set)
+ def __init__(self):
+ this = _selinux.new_context_s_t()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _selinux.delete_context_s_t
+ __del__ = lambda self : None;
+context_s_t_swigregister = _selinux.context_s_t_swigregister
+context_s_t_swigregister(context_s_t)
+
+
+def context_new(*args):
+ return _selinux.context_new(*args)
+context_new = _selinux.context_new
+
+def context_str(*args):
+ return _selinux.context_str(*args)
+context_str = _selinux.context_str
+
+def context_free(*args):
+ return _selinux.context_free(*args)
+context_free = _selinux.context_free
+
+def context_type_get(*args):
+ return _selinux.context_type_get(*args)
+context_type_get = _selinux.context_type_get
+
+def context_range_get(*args):
+ return _selinux.context_range_get(*args)
+context_range_get = _selinux.context_range_get
+
+def context_role_get(*args):
+ return _selinux.context_role_get(*args)
+context_role_get = _selinux.context_role_get
+
+def context_user_get(*args):
+ return _selinux.context_user_get(*args)
+context_user_get = _selinux.context_user_get
+
+def context_type_set(*args):
+ return _selinux.context_type_set(*args)
+context_type_set = _selinux.context_type_set
+
+def context_range_set(*args):
+ return _selinux.context_range_set(*args)
+context_range_set = _selinux.context_range_set
+
+def context_role_set(*args):
+ return _selinux.context_role_set(*args)
+context_role_set = _selinux.context_role_set
+
+def context_user_set(*args):
+ return _selinux.context_user_set(*args)
+context_user_set = _selinux.context_user_set
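+
+# A usage sketch for the context_* wrappers above. The calls mirror the
+# libselinux context(3) API; the exact return conventions of the SWIG
+# wrappers are an assumption here.
+#
+# con = context_new("user_u:object_r:etc_t:s0")
+# context_type_set(con, "samba_share_t")
+# print context_str(con)   # user_u:object_r:samba_share_t:s0
+# context_free(con)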
+SECCLASS_SECURITY = _selinux.SECCLASS_SECURITY
+SECCLASS_PROCESS = _selinux.SECCLASS_PROCESS
+SECCLASS_SYSTEM = _selinux.SECCLASS_SYSTEM
+SECCLASS_CAPABILITY = _selinux.SECCLASS_CAPABILITY
+SECCLASS_FILESYSTEM = _selinux.SECCLASS_FILESYSTEM
+SECCLASS_FILE = _selinux.SECCLASS_FILE
+SECCLASS_DIR = _selinux.SECCLASS_DIR
+SECCLASS_FD = _selinux.SECCLASS_FD
+SECCLASS_LNK_FILE = _selinux.SECCLASS_LNK_FILE
+SECCLASS_CHR_FILE = _selinux.SECCLASS_CHR_FILE
+SECCLASS_BLK_FILE = _selinux.SECCLASS_BLK_FILE
+SECCLASS_SOCK_FILE = _selinux.SECCLASS_SOCK_FILE
+SECCLASS_FIFO_FILE = _selinux.SECCLASS_FIFO_FILE
+SECCLASS_SOCKET = _selinux.SECCLASS_SOCKET
+SECCLASS_TCP_SOCKET = _selinux.SECCLASS_TCP_SOCKET
+SECCLASS_UDP_SOCKET = _selinux.SECCLASS_UDP_SOCKET
+SECCLASS_RAWIP_SOCKET = _selinux.SECCLASS_RAWIP_SOCKET
+SECCLASS_NODE = _selinux.SECCLASS_NODE
+SECCLASS_NETIF = _selinux.SECCLASS_NETIF
+SECCLASS_NETLINK_SOCKET = _selinux.SECCLASS_NETLINK_SOCKET
+SECCLASS_PACKET_SOCKET = _selinux.SECCLASS_PACKET_SOCKET
+SECCLASS_KEY_SOCKET = _selinux.SECCLASS_KEY_SOCKET
+SECCLASS_UNIX_STREAM_SOCKET = _selinux.SECCLASS_UNIX_STREAM_SOCKET
+SECCLASS_UNIX_DGRAM_SOCKET = _selinux.SECCLASS_UNIX_DGRAM_SOCKET
+SECCLASS_SEM = _selinux.SECCLASS_SEM
+SECCLASS_MSG = _selinux.SECCLASS_MSG
+SECCLASS_MSGQ = _selinux.SECCLASS_MSGQ
+SECCLASS_SHM = _selinux.SECCLASS_SHM
+SECCLASS_IPC = _selinux.SECCLASS_IPC
+SECCLASS_PASSWD = _selinux.SECCLASS_PASSWD
+SECCLASS_X_DRAWABLE = _selinux.SECCLASS_X_DRAWABLE
+SECCLASS_X_SCREEN = _selinux.SECCLASS_X_SCREEN
+SECCLASS_X_GC = _selinux.SECCLASS_X_GC
+SECCLASS_X_FONT = _selinux.SECCLASS_X_FONT
+SECCLASS_X_COLORMAP = _selinux.SECCLASS_X_COLORMAP
+SECCLASS_X_PROPERTY = _selinux.SECCLASS_X_PROPERTY
+SECCLASS_X_SELECTION = _selinux.SECCLASS_X_SELECTION
+SECCLASS_X_CURSOR = _selinux.SECCLASS_X_CURSOR
+SECCLASS_X_CLIENT = _selinux.SECCLASS_X_CLIENT
+SECCLASS_X_DEVICE = _selinux.SECCLASS_X_DEVICE
+SECCLASS_X_SERVER = _selinux.SECCLASS_X_SERVER
+SECCLASS_X_EXTENSION = _selinux.SECCLASS_X_EXTENSION
+SECCLASS_NETLINK_ROUTE_SOCKET = _selinux.SECCLASS_NETLINK_ROUTE_SOCKET
+SECCLASS_NETLINK_FIREWALL_SOCKET = _selinux.SECCLASS_NETLINK_FIREWALL_SOCKET
+SECCLASS_NETLINK_TCPDIAG_SOCKET = _selinux.SECCLASS_NETLINK_TCPDIAG_SOCKET
+SECCLASS_NETLINK_NFLOG_SOCKET = _selinux.SECCLASS_NETLINK_NFLOG_SOCKET
+SECCLASS_NETLINK_XFRM_SOCKET = _selinux.SECCLASS_NETLINK_XFRM_SOCKET
+SECCLASS_NETLINK_SELINUX_SOCKET = _selinux.SECCLASS_NETLINK_SELINUX_SOCKET
+SECCLASS_NETLINK_AUDIT_SOCKET = _selinux.SECCLASS_NETLINK_AUDIT_SOCKET
+SECCLASS_NETLINK_IP6FW_SOCKET = _selinux.SECCLASS_NETLINK_IP6FW_SOCKET
+SECCLASS_NETLINK_DNRT_SOCKET = _selinux.SECCLASS_NETLINK_DNRT_SOCKET
+SECCLASS_DBUS = _selinux.SECCLASS_DBUS
+SECCLASS_NSCD = _selinux.SECCLASS_NSCD
+SECCLASS_ASSOCIATION = _selinux.SECCLASS_ASSOCIATION
+SECCLASS_NETLINK_KOBJECT_UEVENT_SOCKET = _selinux.SECCLASS_NETLINK_KOBJECT_UEVENT_SOCKET
+SECCLASS_APPLETALK_SOCKET = _selinux.SECCLASS_APPLETALK_SOCKET
+SECCLASS_PACKET = _selinux.SECCLASS_PACKET
+SECCLASS_KEY = _selinux.SECCLASS_KEY
+SECCLASS_CONTEXT = _selinux.SECCLASS_CONTEXT
+SECCLASS_DCCP_SOCKET = _selinux.SECCLASS_DCCP_SOCKET
+SECCLASS_MEMPROTECT = _selinux.SECCLASS_MEMPROTECT
+SECCLASS_DB_DATABASE = _selinux.SECCLASS_DB_DATABASE
+SECCLASS_DB_TABLE = _selinux.SECCLASS_DB_TABLE
+SECCLASS_DB_PROCEDURE = _selinux.SECCLASS_DB_PROCEDURE
+SECCLASS_DB_COLUMN = _selinux.SECCLASS_DB_COLUMN
+SECCLASS_DB_TUPLE = _selinux.SECCLASS_DB_TUPLE
+SECCLASS_DB_BLOB = _selinux.SECCLASS_DB_BLOB
+SECCLASS_PEER = _selinux.SECCLASS_PEER
+SECCLASS_CAPABILITY2 = _selinux.SECCLASS_CAPABILITY2
+SECCLASS_X_RESOURCE = _selinux.SECCLASS_X_RESOURCE
+SECCLASS_X_EVENT = _selinux.SECCLASS_X_EVENT
+SECCLASS_X_SYNTHETIC_EVENT = _selinux.SECCLASS_X_SYNTHETIC_EVENT
+SECCLASS_X_APPLICATION_DATA = _selinux.SECCLASS_X_APPLICATION_DATA
+SECINITSID_KERNEL = _selinux.SECINITSID_KERNEL
+SECINITSID_SECURITY = _selinux.SECINITSID_SECURITY
+SECINITSID_UNLABELED = _selinux.SECINITSID_UNLABELED
+SECINITSID_FS = _selinux.SECINITSID_FS
+SECINITSID_FILE = _selinux.SECINITSID_FILE
+SECINITSID_FILE_LABELS = _selinux.SECINITSID_FILE_LABELS
+SECINITSID_INIT = _selinux.SECINITSID_INIT
+SECINITSID_ANY_SOCKET = _selinux.SECINITSID_ANY_SOCKET
+SECINITSID_PORT = _selinux.SECINITSID_PORT
+SECINITSID_NETIF = _selinux.SECINITSID_NETIF
+SECINITSID_NETMSG = _selinux.SECINITSID_NETMSG
+SECINITSID_NODE = _selinux.SECINITSID_NODE
+SECINITSID_IGMP_PACKET = _selinux.SECINITSID_IGMP_PACKET
+SECINITSID_ICMP_SOCKET = _selinux.SECINITSID_ICMP_SOCKET
+SECINITSID_TCP_SOCKET = _selinux.SECINITSID_TCP_SOCKET
+SECINITSID_SYSCTL_MODPROBE = _selinux.SECINITSID_SYSCTL_MODPROBE
+SECINITSID_SYSCTL = _selinux.SECINITSID_SYSCTL
+SECINITSID_SYSCTL_FS = _selinux.SECINITSID_SYSCTL_FS
+SECINITSID_SYSCTL_KERNEL = _selinux.SECINITSID_SYSCTL_KERNEL
+SECINITSID_SYSCTL_NET = _selinux.SECINITSID_SYSCTL_NET
+SECINITSID_SYSCTL_NET_UNIX = _selinux.SECINITSID_SYSCTL_NET_UNIX
+SECINITSID_SYSCTL_VM = _selinux.SECINITSID_SYSCTL_VM
+SECINITSID_SYSCTL_DEV = _selinux.SECINITSID_SYSCTL_DEV
+SECINITSID_KMOD = _selinux.SECINITSID_KMOD
+SECINITSID_POLICY = _selinux.SECINITSID_POLICY
+SECINITSID_SCMP_PACKET = _selinux.SECINITSID_SCMP_PACKET
+SECINITSID_DEVNULL = _selinux.SECINITSID_DEVNULL
+SECINITSID_NUM = _selinux.SECINITSID_NUM
+SELINUX_DEFAULTUSER = _selinux.SELINUX_DEFAULTUSER
+
+def get_ordered_context_list(*args):
+ return _selinux.get_ordered_context_list(*args)
+get_ordered_context_list = _selinux.get_ordered_context_list
+
+def get_ordered_context_list_with_level(*args):
+ return _selinux.get_ordered_context_list_with_level(*args)
+get_ordered_context_list_with_level = _selinux.get_ordered_context_list_with_level
+
+def get_default_context(*args):
+ return _selinux.get_default_context(*args)
+get_default_context = _selinux.get_default_context
+
+def get_default_context_with_level(*args):
+ return _selinux.get_default_context_with_level(*args)
+get_default_context_with_level = _selinux.get_default_context_with_level
+
+def get_default_context_with_role(*args):
+ return _selinux.get_default_context_with_role(*args)
+get_default_context_with_role = _selinux.get_default_context_with_role
+
+def get_default_context_with_rolelevel(*args):
+ return _selinux.get_default_context_with_rolelevel(*args)
+get_default_context_with_rolelevel = _selinux.get_default_context_with_rolelevel
+
+def query_user_context():
+ return _selinux.query_user_context()
+query_user_context = _selinux.query_user_context
+
+def manual_user_enter_context(*args):
+ return _selinux.manual_user_enter_context(*args)
+manual_user_enter_context = _selinux.manual_user_enter_context
+
+def selinux_default_type_path():
+ return _selinux.selinux_default_type_path()
+selinux_default_type_path = _selinux.selinux_default_type_path
+
+def get_default_type(*args):
+ return _selinux.get_default_type(*args)
+get_default_type = _selinux.get_default_type
+SELABEL_CTX_FILE = _selinux.SELABEL_CTX_FILE
+SELABEL_CTX_MEDIA = _selinux.SELABEL_CTX_MEDIA
+SELABEL_CTX_X = _selinux.SELABEL_CTX_X
+SELABEL_CTX_DB = _selinux.SELABEL_CTX_DB
+SELABEL_CTX_ANDROID_PROP = _selinux.SELABEL_CTX_ANDROID_PROP
+SELABEL_OPT_UNUSED = _selinux.SELABEL_OPT_UNUSED
+SELABEL_OPT_VALIDATE = _selinux.SELABEL_OPT_VALIDATE
+SELABEL_OPT_BASEONLY = _selinux.SELABEL_OPT_BASEONLY
+SELABEL_OPT_PATH = _selinux.SELABEL_OPT_PATH
+SELABEL_OPT_SUBSET = _selinux.SELABEL_OPT_SUBSET
+SELABEL_NOPT = _selinux.SELABEL_NOPT
+
+def selabel_open(*args):
+ return _selinux.selabel_open(*args)
+selabel_open = _selinux.selabel_open
+
+def selabel_close(*args):
+ return _selinux.selabel_close(*args)
+selabel_close = _selinux.selabel_close
+
+def selabel_lookup(*args):
+ return _selinux.selabel_lookup(*args)
+selabel_lookup = _selinux.selabel_lookup
+
+def selabel_lookup_raw(*args):
+ return _selinux.selabel_lookup_raw(*args)
+selabel_lookup_raw = _selinux.selabel_lookup_raw
+
+def selabel_partial_match(*args):
+ return _selinux.selabel_partial_match(*args)
+selabel_partial_match = _selinux.selabel_partial_match
+
+def selabel_lookup_best_match(*args):
+ return _selinux.selabel_lookup_best_match(*args)
+selabel_lookup_best_match = _selinux.selabel_lookup_best_match
+
+def selabel_lookup_best_match_raw(*args):
+ return _selinux.selabel_lookup_best_match_raw(*args)
+selabel_lookup_best_match_raw = _selinux.selabel_lookup_best_match_raw
+
+def selabel_stats(*args):
+ return _selinux.selabel_stats(*args)
+selabel_stats = _selinux.selabel_stats
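+
+# Sketch of a file-context lookup through the selabel interface (the
+# argument marshalling is an assumption; the wrappers follow selabel_open(3),
+# with output parameters returned as values):
+#
+# hnd = selabel_open(SELABEL_CTX_FILE, None, 0)
+# rc, con = selabel_lookup(hnd, "/etc/passwd", 0)
+# selabel_close(hnd)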
+SELABEL_X_PROP = _selinux.SELABEL_X_PROP
+SELABEL_X_EXT = _selinux.SELABEL_X_EXT
+SELABEL_X_CLIENT = _selinux.SELABEL_X_CLIENT
+SELABEL_X_EVENT = _selinux.SELABEL_X_EVENT
+SELABEL_X_SELN = _selinux.SELABEL_X_SELN
+SELABEL_X_POLYPROP = _selinux.SELABEL_X_POLYPROP
+SELABEL_X_POLYSELN = _selinux.SELABEL_X_POLYSELN
+SELABEL_DB_DATABASE = _selinux.SELABEL_DB_DATABASE
+SELABEL_DB_SCHEMA = _selinux.SELABEL_DB_SCHEMA
+SELABEL_DB_TABLE = _selinux.SELABEL_DB_TABLE
+SELABEL_DB_COLUMN = _selinux.SELABEL_DB_COLUMN
+SELABEL_DB_SEQUENCE = _selinux.SELABEL_DB_SEQUENCE
+SELABEL_DB_VIEW = _selinux.SELABEL_DB_VIEW
+SELABEL_DB_PROCEDURE = _selinux.SELABEL_DB_PROCEDURE
+SELABEL_DB_BLOB = _selinux.SELABEL_DB_BLOB
+SELABEL_DB_TUPLE = _selinux.SELABEL_DB_TUPLE
+SELABEL_DB_LANGUAGE = _selinux.SELABEL_DB_LANGUAGE
+SELABEL_DB_EXCEPTION = _selinux.SELABEL_DB_EXCEPTION
+SELABEL_DB_DATATYPE = _selinux.SELABEL_DB_DATATYPE
+
+def is_selinux_enabled():
+ return _selinux.is_selinux_enabled()
+is_selinux_enabled = _selinux.is_selinux_enabled
+
+def is_selinux_mls_enabled():
+ return _selinux.is_selinux_mls_enabled()
+is_selinux_mls_enabled = _selinux.is_selinux_mls_enabled
+
+def getcon():
+ return _selinux.getcon()
+getcon = _selinux.getcon
+
+def getcon_raw():
+ return _selinux.getcon_raw()
+getcon_raw = _selinux.getcon_raw
+
+def setcon(*args):
+ return _selinux.setcon(*args)
+setcon = _selinux.setcon
+
+def setcon_raw(*args):
+ return _selinux.setcon_raw(*args)
+setcon_raw = _selinux.setcon_raw
+
+def getpidcon(*args):
+ return _selinux.getpidcon(*args)
+getpidcon = _selinux.getpidcon
+
+def getpidcon_raw(*args):
+ return _selinux.getpidcon_raw(*args)
+getpidcon_raw = _selinux.getpidcon_raw
+
+def getprevcon():
+ return _selinux.getprevcon()
+getprevcon = _selinux.getprevcon
+
+def getprevcon_raw():
+ return _selinux.getprevcon_raw()
+getprevcon_raw = _selinux.getprevcon_raw
+
+def getexeccon():
+ return _selinux.getexeccon()
+getexeccon = _selinux.getexeccon
+
+def getexeccon_raw():
+ return _selinux.getexeccon_raw()
+getexeccon_raw = _selinux.getexeccon_raw
+
+def setexeccon(*args):
+ return _selinux.setexeccon(*args)
+setexeccon = _selinux.setexeccon
+
+def setexeccon_raw(*args):
+ return _selinux.setexeccon_raw(*args)
+setexeccon_raw = _selinux.setexeccon_raw
+
+def getfscreatecon():
+ return _selinux.getfscreatecon()
+getfscreatecon = _selinux.getfscreatecon
+
+def getfscreatecon_raw():
+ return _selinux.getfscreatecon_raw()
+getfscreatecon_raw = _selinux.getfscreatecon_raw
+
+def setfscreatecon(*args):
+ return _selinux.setfscreatecon(*args)
+setfscreatecon = _selinux.setfscreatecon
+
+def setfscreatecon_raw(*args):
+ return _selinux.setfscreatecon_raw(*args)
+setfscreatecon_raw = _selinux.setfscreatecon_raw
+
+def getkeycreatecon():
+ return _selinux.getkeycreatecon()
+getkeycreatecon = _selinux.getkeycreatecon
+
+def getkeycreatecon_raw():
+ return _selinux.getkeycreatecon_raw()
+getkeycreatecon_raw = _selinux.getkeycreatecon_raw
+
+def setkeycreatecon(*args):
+ return _selinux.setkeycreatecon(*args)
+setkeycreatecon = _selinux.setkeycreatecon
+
+def setkeycreatecon_raw(*args):
+ return _selinux.setkeycreatecon_raw(*args)
+setkeycreatecon_raw = _selinux.setkeycreatecon_raw
+
+def getsockcreatecon():
+ return _selinux.getsockcreatecon()
+getsockcreatecon = _selinux.getsockcreatecon
+
+def getsockcreatecon_raw():
+ return _selinux.getsockcreatecon_raw()
+getsockcreatecon_raw = _selinux.getsockcreatecon_raw
+
+def setsockcreatecon(*args):
+ return _selinux.setsockcreatecon(*args)
+setsockcreatecon = _selinux.setsockcreatecon
+
+def setsockcreatecon_raw(*args):
+ return _selinux.setsockcreatecon_raw(*args)
+setsockcreatecon_raw = _selinux.setsockcreatecon_raw
+
+def getfilecon(*args):
+ return _selinux.getfilecon(*args)
+getfilecon = _selinux.getfilecon
+
+def getfilecon_raw(*args):
+ return _selinux.getfilecon_raw(*args)
+getfilecon_raw = _selinux.getfilecon_raw
+
+def lgetfilecon(*args):
+ return _selinux.lgetfilecon(*args)
+lgetfilecon = _selinux.lgetfilecon
+
+def lgetfilecon_raw(*args):
+ return _selinux.lgetfilecon_raw(*args)
+lgetfilecon_raw = _selinux.lgetfilecon_raw
+
+def fgetfilecon(*args):
+ return _selinux.fgetfilecon(*args)
+fgetfilecon = _selinux.fgetfilecon
+
+def fgetfilecon_raw(*args):
+ return _selinux.fgetfilecon_raw(*args)
+fgetfilecon_raw = _selinux.fgetfilecon_raw
+
+def setfilecon(*args):
+ return _selinux.setfilecon(*args)
+setfilecon = _selinux.setfilecon
+
+def setfilecon_raw(*args):
+ return _selinux.setfilecon_raw(*args)
+setfilecon_raw = _selinux.setfilecon_raw
+
+def lsetfilecon(*args):
+ return _selinux.lsetfilecon(*args)
+lsetfilecon = _selinux.lsetfilecon
+
+def lsetfilecon_raw(*args):
+ return _selinux.lsetfilecon_raw(*args)
+lsetfilecon_raw = _selinux.lsetfilecon_raw
+
+def fsetfilecon(*args):
+ return _selinux.fsetfilecon(*args)
+fsetfilecon = _selinux.fsetfilecon
+
+def fsetfilecon_raw(*args):
+ return _selinux.fsetfilecon_raw(*args)
+fsetfilecon_raw = _selinux.fsetfilecon_raw
+
+def getpeercon(*args):
+ return _selinux.getpeercon(*args)
+getpeercon = _selinux.getpeercon
+
+def getpeercon_raw(*args):
+ return _selinux.getpeercon_raw(*args)
+getpeercon_raw = _selinux.getpeercon_raw
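+
+# The get*con wrappers return a (result, context) pair, for example (a sketch):
+#
+# rc, con = getcon()                   # context of the current process
+# rc, con = getpidcon(1)               # context of pid 1
+# rc, con = getfilecon("/etc/passwd")  # context of a file
+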
+class av_decision(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, av_decision, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, av_decision, name)
+ __repr__ = _swig_repr
+ __swig_setmethods__["allowed"] = _selinux.av_decision_allowed_set
+ __swig_getmethods__["allowed"] = _selinux.av_decision_allowed_get
+ if _newclass:allowed = _swig_property(_selinux.av_decision_allowed_get, _selinux.av_decision_allowed_set)
+ __swig_setmethods__["decided"] = _selinux.av_decision_decided_set
+ __swig_getmethods__["decided"] = _selinux.av_decision_decided_get
+ if _newclass:decided = _swig_property(_selinux.av_decision_decided_get, _selinux.av_decision_decided_set)
+ __swig_setmethods__["auditallow"] = _selinux.av_decision_auditallow_set
+ __swig_getmethods__["auditallow"] = _selinux.av_decision_auditallow_get
+ if _newclass:auditallow = _swig_property(_selinux.av_decision_auditallow_get, _selinux.av_decision_auditallow_set)
+ __swig_setmethods__["auditdeny"] = _selinux.av_decision_auditdeny_set
+ __swig_getmethods__["auditdeny"] = _selinux.av_decision_auditdeny_get
+ if _newclass:auditdeny = _swig_property(_selinux.av_decision_auditdeny_get, _selinux.av_decision_auditdeny_set)
+ __swig_setmethods__["seqno"] = _selinux.av_decision_seqno_set
+ __swig_getmethods__["seqno"] = _selinux.av_decision_seqno_get
+ if _newclass:seqno = _swig_property(_selinux.av_decision_seqno_get, _selinux.av_decision_seqno_set)
+ __swig_setmethods__["flags"] = _selinux.av_decision_flags_set
+ __swig_getmethods__["flags"] = _selinux.av_decision_flags_get
+ if _newclass:flags = _swig_property(_selinux.av_decision_flags_get, _selinux.av_decision_flags_set)
+ def __init__(self):
+ this = _selinux.new_av_decision()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _selinux.delete_av_decision
+ __del__ = lambda self : None;
+av_decision_swigregister = _selinux.av_decision_swigregister
+av_decision_swigregister(av_decision)
+
+SELINUX_AVD_FLAGS_PERMISSIVE = _selinux.SELINUX_AVD_FLAGS_PERMISSIVE
+class selinux_opt(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, selinux_opt, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, selinux_opt, name)
+ __repr__ = _swig_repr
+ __swig_setmethods__["type"] = _selinux.selinux_opt_type_set
+ __swig_getmethods__["type"] = _selinux.selinux_opt_type_get
+ if _newclass:type = _swig_property(_selinux.selinux_opt_type_get, _selinux.selinux_opt_type_set)
+ __swig_setmethods__["value"] = _selinux.selinux_opt_value_set
+ __swig_getmethods__["value"] = _selinux.selinux_opt_value_get
+ if _newclass:value = _swig_property(_selinux.selinux_opt_value_get, _selinux.selinux_opt_value_set)
+ def __init__(self):
+ this = _selinux.new_selinux_opt()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _selinux.delete_selinux_opt
+ __del__ = lambda self : None;
+selinux_opt_swigregister = _selinux.selinux_opt_swigregister
+selinux_opt_swigregister(selinux_opt)
+
+class selinux_callback(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, selinux_callback, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, selinux_callback, name)
+ __repr__ = _swig_repr
+ __swig_setmethods__["func_log"] = _selinux.selinux_callback_func_log_set
+ __swig_getmethods__["func_log"] = _selinux.selinux_callback_func_log_get
+ if _newclass:func_log = _swig_property(_selinux.selinux_callback_func_log_get, _selinux.selinux_callback_func_log_set)
+ __swig_setmethods__["func_audit"] = _selinux.selinux_callback_func_audit_set
+ __swig_getmethods__["func_audit"] = _selinux.selinux_callback_func_audit_get
+ if _newclass:func_audit = _swig_property(_selinux.selinux_callback_func_audit_get, _selinux.selinux_callback_func_audit_set)
+ __swig_setmethods__["func_validate"] = _selinux.selinux_callback_func_validate_set
+ __swig_getmethods__["func_validate"] = _selinux.selinux_callback_func_validate_get
+ if _newclass:func_validate = _swig_property(_selinux.selinux_callback_func_validate_get, _selinux.selinux_callback_func_validate_set)
+ __swig_setmethods__["func_setenforce"] = _selinux.selinux_callback_func_setenforce_set
+ __swig_getmethods__["func_setenforce"] = _selinux.selinux_callback_func_setenforce_get
+ if _newclass:func_setenforce = _swig_property(_selinux.selinux_callback_func_setenforce_get, _selinux.selinux_callback_func_setenforce_set)
+ __swig_setmethods__["func_policyload"] = _selinux.selinux_callback_func_policyload_set
+ __swig_getmethods__["func_policyload"] = _selinux.selinux_callback_func_policyload_get
+ if _newclass:func_policyload = _swig_property(_selinux.selinux_callback_func_policyload_get, _selinux.selinux_callback_func_policyload_set)
+ def __init__(self):
+ this = _selinux.new_selinux_callback()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _selinux.delete_selinux_callback
+ __del__ = lambda self : None;
+selinux_callback_swigregister = _selinux.selinux_callback_swigregister
+selinux_callback_swigregister(selinux_callback)
+
+SELINUX_CB_LOG = _selinux.SELINUX_CB_LOG
+SELINUX_CB_AUDIT = _selinux.SELINUX_CB_AUDIT
+SELINUX_CB_VALIDATE = _selinux.SELINUX_CB_VALIDATE
+SELINUX_CB_SETENFORCE = _selinux.SELINUX_CB_SETENFORCE
+SELINUX_CB_POLICYLOAD = _selinux.SELINUX_CB_POLICYLOAD
+
+def selinux_get_callback(*args):
+ return _selinux.selinux_get_callback(*args)
+selinux_get_callback = _selinux.selinux_get_callback
+
+def selinux_set_callback(*args):
+ return _selinux.selinux_set_callback(*args)
+selinux_set_callback = _selinux.selinux_set_callback
+SELINUX_ERROR = _selinux.SELINUX_ERROR
+SELINUX_WARNING = _selinux.SELINUX_WARNING
+SELINUX_INFO = _selinux.SELINUX_INFO
+SELINUX_AVC = _selinux.SELINUX_AVC
+SELINUX_TRANS_DIR = _selinux.SELINUX_TRANS_DIR
+
+def security_compute_av(*args):
+ return _selinux.security_compute_av(*args)
+security_compute_av = _selinux.security_compute_av
+
+def security_compute_av_raw(*args):
+ return _selinux.security_compute_av_raw(*args)
+security_compute_av_raw = _selinux.security_compute_av_raw
+
+def security_compute_av_flags(*args):
+ return _selinux.security_compute_av_flags(*args)
+security_compute_av_flags = _selinux.security_compute_av_flags
+
+def security_compute_av_flags_raw(*args):
+ return _selinux.security_compute_av_flags_raw(*args)
+security_compute_av_flags_raw = _selinux.security_compute_av_flags_raw
+
+def security_compute_create(*args):
+ return _selinux.security_compute_create(*args)
+security_compute_create = _selinux.security_compute_create
+
+def security_compute_create_raw(*args):
+ return _selinux.security_compute_create_raw(*args)
+security_compute_create_raw = _selinux.security_compute_create_raw
+
+def security_compute_create_name(*args):
+ return _selinux.security_compute_create_name(*args)
+security_compute_create_name = _selinux.security_compute_create_name
+
+def security_compute_create_name_raw(*args):
+ return _selinux.security_compute_create_name_raw(*args)
+security_compute_create_name_raw = _selinux.security_compute_create_name_raw
+
+def security_compute_relabel(*args):
+ return _selinux.security_compute_relabel(*args)
+security_compute_relabel = _selinux.security_compute_relabel
+
+def security_compute_relabel_raw(*args):
+ return _selinux.security_compute_relabel_raw(*args)
+security_compute_relabel_raw = _selinux.security_compute_relabel_raw
+
+def security_compute_member(*args):
+ return _selinux.security_compute_member(*args)
+security_compute_member = _selinux.security_compute_member
+
+def security_compute_member_raw(*args):
+ return _selinux.security_compute_member_raw(*args)
+security_compute_member_raw = _selinux.security_compute_member_raw
+
+def security_compute_user(*args):
+ return _selinux.security_compute_user(*args)
+security_compute_user = _selinux.security_compute_user
+
+def security_compute_user_raw(*args):
+ return _selinux.security_compute_user_raw(*args)
+security_compute_user_raw = _selinux.security_compute_user_raw
+
+def security_load_policy(*args):
+ return _selinux.security_load_policy(*args)
+security_load_policy = _selinux.security_load_policy
+
+def security_get_initial_context(*args):
+ return _selinux.security_get_initial_context(*args)
+security_get_initial_context = _selinux.security_get_initial_context
+
+def security_get_initial_context_raw(*args):
+ return _selinux.security_get_initial_context_raw(*args)
+security_get_initial_context_raw = _selinux.security_get_initial_context_raw
+
+def selinux_mkload_policy(*args):
+ return _selinux.selinux_mkload_policy(*args)
+selinux_mkload_policy = _selinux.selinux_mkload_policy
+
+def selinux_init_load_policy():
+ return _selinux.selinux_init_load_policy()
+selinux_init_load_policy = _selinux.selinux_init_load_policy
+class SELboolean(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, SELboolean, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, SELboolean, name)
+ __repr__ = _swig_repr
+ __swig_setmethods__["name"] = _selinux.SELboolean_name_set
+ __swig_getmethods__["name"] = _selinux.SELboolean_name_get
+ if _newclass:name = _swig_property(_selinux.SELboolean_name_get, _selinux.SELboolean_name_set)
+ __swig_setmethods__["value"] = _selinux.SELboolean_value_set
+ __swig_getmethods__["value"] = _selinux.SELboolean_value_get
+ if _newclass:value = _swig_property(_selinux.SELboolean_value_get, _selinux.SELboolean_value_set)
+ def __init__(self):
+ this = _selinux.new_SELboolean()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _selinux.delete_SELboolean
+ __del__ = lambda self : None;
+SELboolean_swigregister = _selinux.SELboolean_swigregister
+SELboolean_swigregister(SELboolean)
+
+
+def security_set_boolean_list(*args):
+ return _selinux.security_set_boolean_list(*args)
+security_set_boolean_list = _selinux.security_set_boolean_list
+
+def security_load_booleans(*args):
+ return _selinux.security_load_booleans(*args)
+security_load_booleans = _selinux.security_load_booleans
+
+def security_check_context(*args):
+ return _selinux.security_check_context(*args)
+security_check_context = _selinux.security_check_context
+
+def security_check_context_raw(*args):
+ return _selinux.security_check_context_raw(*args)
+security_check_context_raw = _selinux.security_check_context_raw
+
+def security_canonicalize_context(*args):
+ return _selinux.security_canonicalize_context(*args)
+security_canonicalize_context = _selinux.security_canonicalize_context
+
+def security_canonicalize_context_raw(*args):
+ return _selinux.security_canonicalize_context_raw(*args)
+security_canonicalize_context_raw = _selinux.security_canonicalize_context_raw
+
+def security_getenforce():
+ return _selinux.security_getenforce()
+security_getenforce = _selinux.security_getenforce
+
+def security_setenforce(*args):
+ return _selinux.security_setenforce(*args)
+security_setenforce = _selinux.security_setenforce
+
+def security_deny_unknown():
+ return _selinux.security_deny_unknown()
+security_deny_unknown = _selinux.security_deny_unknown
+
+def security_disable():
+ return _selinux.security_disable()
+security_disable = _selinux.security_disable
+
+def security_policyvers():
+ return _selinux.security_policyvers()
+security_policyvers = _selinux.security_policyvers
+
+def security_get_boolean_names():
+ return _selinux.security_get_boolean_names()
+security_get_boolean_names = _selinux.security_get_boolean_names
+
+def security_get_boolean_pending(*args):
+ return _selinux.security_get_boolean_pending(*args)
+security_get_boolean_pending = _selinux.security_get_boolean_pending
+
+def security_get_boolean_active(*args):
+ return _selinux.security_get_boolean_active(*args)
+security_get_boolean_active = _selinux.security_get_boolean_active
+
+def security_set_boolean(*args):
+ return _selinux.security_set_boolean(*args)
+security_set_boolean = _selinux.security_set_boolean
+
+def security_commit_booleans():
+ return _selinux.security_commit_booleans()
+security_commit_booleans = _selinux.security_commit_booleans
+class security_class_mapping(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, security_class_mapping, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, security_class_mapping, name)
+ __repr__ = _swig_repr
+ __swig_setmethods__["name"] = _selinux.security_class_mapping_name_set
+ __swig_getmethods__["name"] = _selinux.security_class_mapping_name_get
+ if _newclass:name = _swig_property(_selinux.security_class_mapping_name_get, _selinux.security_class_mapping_name_set)
+ __swig_setmethods__["perms"] = _selinux.security_class_mapping_perms_set
+ __swig_getmethods__["perms"] = _selinux.security_class_mapping_perms_get
+ if _newclass:perms = _swig_property(_selinux.security_class_mapping_perms_get, _selinux.security_class_mapping_perms_set)
+ def __init__(self):
+ this = _selinux.new_security_class_mapping()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _selinux.delete_security_class_mapping
+ __del__ = lambda self : None;
+security_class_mapping_swigregister = _selinux.security_class_mapping_swigregister
+security_class_mapping_swigregister(security_class_mapping)
+
+
+def selinux_set_mapping(*args):
+ return _selinux.selinux_set_mapping(*args)
+selinux_set_mapping = _selinux.selinux_set_mapping
+
+def mode_to_security_class(*args):
+ return _selinux.mode_to_security_class(*args)
+mode_to_security_class = _selinux.mode_to_security_class
+
+def string_to_security_class(*args):
+ return _selinux.string_to_security_class(*args)
+string_to_security_class = _selinux.string_to_security_class
+
+def security_class_to_string(*args):
+ return _selinux.security_class_to_string(*args)
+security_class_to_string = _selinux.security_class_to_string
+
+def security_av_perm_to_string(*args):
+ return _selinux.security_av_perm_to_string(*args)
+security_av_perm_to_string = _selinux.security_av_perm_to_string
+
+def string_to_av_perm(*args):
+ return _selinux.string_to_av_perm(*args)
+string_to_av_perm = _selinux.string_to_av_perm
+
+def security_av_string(*args):
+ return _selinux.security_av_string(*args)
+security_av_string = _selinux.security_av_string
+
+def print_access_vector(*args):
+ return _selinux.print_access_vector(*args)
+print_access_vector = _selinux.print_access_vector
+MATCHPATHCON_BASEONLY = _selinux.MATCHPATHCON_BASEONLY
+MATCHPATHCON_NOTRANS = _selinux.MATCHPATHCON_NOTRANS
+MATCHPATHCON_VALIDATE = _selinux.MATCHPATHCON_VALIDATE
+
+def set_matchpathcon_flags(*args):
+ return _selinux.set_matchpathcon_flags(*args)
+set_matchpathcon_flags = _selinux.set_matchpathcon_flags
+
+def matchpathcon_init(*args):
+ return _selinux.matchpathcon_init(*args)
+matchpathcon_init = _selinux.matchpathcon_init
+
+def matchpathcon_init_prefix(*args):
+ return _selinux.matchpathcon_init_prefix(*args)
+matchpathcon_init_prefix = _selinux.matchpathcon_init_prefix
+
+def matchpathcon_fini():
+ return _selinux.matchpathcon_fini()
+matchpathcon_fini = _selinux.matchpathcon_fini
+
+def realpath_not_final(*args):
+ return _selinux.realpath_not_final(*args)
+realpath_not_final = _selinux.realpath_not_final
+
+def matchpathcon(*args):
+ return _selinux.matchpathcon(*args)
+matchpathcon = _selinux.matchpathcon
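+
+# Sketch: matchpathcon maps a path plus a stat(2) mode (0 for any) to the
+# context the loaded file-context configuration assigns to it:
+#
+# rc, con = matchpathcon("/etc/shadow", 0)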
+
+def matchpathcon_index(*args):
+ return _selinux.matchpathcon_index(*args)
+matchpathcon_index = _selinux.matchpathcon_index
+
+def matchpathcon_filespec_add(*args):
+ return _selinux.matchpathcon_filespec_add(*args)
+matchpathcon_filespec_add = _selinux.matchpathcon_filespec_add
+
+def matchpathcon_filespec_destroy():
+ return _selinux.matchpathcon_filespec_destroy()
+matchpathcon_filespec_destroy = _selinux.matchpathcon_filespec_destroy
+
+def matchpathcon_filespec_eval():
+ return _selinux.matchpathcon_filespec_eval()
+matchpathcon_filespec_eval = _selinux.matchpathcon_filespec_eval
+
+def matchpathcon_checkmatches(*args):
+ return _selinux.matchpathcon_checkmatches(*args)
+matchpathcon_checkmatches = _selinux.matchpathcon_checkmatches
+
+def matchmediacon(*args):
+ return _selinux.matchmediacon(*args)
+matchmediacon = _selinux.matchmediacon
+
+def selinux_getenforcemode():
+ return _selinux.selinux_getenforcemode()
+selinux_getenforcemode = _selinux.selinux_getenforcemode
+
+def selinux_boolean_sub(*args):
+ return _selinux.selinux_boolean_sub(*args)
+selinux_boolean_sub = _selinux.selinux_boolean_sub
+
+def selinux_getpolicytype():
+ return _selinux.selinux_getpolicytype()
+selinux_getpolicytype = _selinux.selinux_getpolicytype
+
+def selinux_policy_root():
+ return _selinux.selinux_policy_root()
+selinux_policy_root = _selinux.selinux_policy_root
+
+def selinux_set_policy_root(*args):
+ return _selinux.selinux_set_policy_root(*args)
+selinux_set_policy_root = _selinux.selinux_set_policy_root
+
+def selinux_current_policy_path():
+ return _selinux.selinux_current_policy_path()
+selinux_current_policy_path = _selinux.selinux_current_policy_path
+
+def selinux_binary_policy_path():
+ return _selinux.selinux_binary_policy_path()
+selinux_binary_policy_path = _selinux.selinux_binary_policy_path
+
+def selinux_failsafe_context_path():
+ return _selinux.selinux_failsafe_context_path()
+selinux_failsafe_context_path = _selinux.selinux_failsafe_context_path
+
+def selinux_removable_context_path():
+ return _selinux.selinux_removable_context_path()
+selinux_removable_context_path = _selinux.selinux_removable_context_path
+
+def selinux_default_context_path():
+ return _selinux.selinux_default_context_path()
+selinux_default_context_path = _selinux.selinux_default_context_path
+
+def selinux_user_contexts_path():
+ return _selinux.selinux_user_contexts_path()
+selinux_user_contexts_path = _selinux.selinux_user_contexts_path
+
+def selinux_file_context_path():
+ return _selinux.selinux_file_context_path()
+selinux_file_context_path = _selinux.selinux_file_context_path
+
+def selinux_file_context_homedir_path():
+ return _selinux.selinux_file_context_homedir_path()
+selinux_file_context_homedir_path = _selinux.selinux_file_context_homedir_path
+
+def selinux_file_context_local_path():
+ return _selinux.selinux_file_context_local_path()
+selinux_file_context_local_path = _selinux.selinux_file_context_local_path
+
+def selinux_file_context_subs_path():
+ return _selinux.selinux_file_context_subs_path()
+selinux_file_context_subs_path = _selinux.selinux_file_context_subs_path
+
+def selinux_file_context_subs_dist_path():
+ return _selinux.selinux_file_context_subs_dist_path()
+selinux_file_context_subs_dist_path = _selinux.selinux_file_context_subs_dist_path
+
+def selinux_homedir_context_path():
+ return _selinux.selinux_homedir_context_path()
+selinux_homedir_context_path = _selinux.selinux_homedir_context_path
+
+def selinux_media_context_path():
+ return _selinux.selinux_media_context_path()
+selinux_media_context_path = _selinux.selinux_media_context_path
+
+def selinux_virtual_domain_context_path():
+ return _selinux.selinux_virtual_domain_context_path()
+selinux_virtual_domain_context_path = _selinux.selinux_virtual_domain_context_path
+
+def selinux_virtual_image_context_path():
+ return _selinux.selinux_virtual_image_context_path()
+selinux_virtual_image_context_path = _selinux.selinux_virtual_image_context_path
+
+def selinux_lxc_contexts_path():
+ return _selinux.selinux_lxc_contexts_path()
+selinux_lxc_contexts_path = _selinux.selinux_lxc_contexts_path
+
+def selinux_x_context_path():
+ return _selinux.selinux_x_context_path()
+selinux_x_context_path = _selinux.selinux_x_context_path
+
+def selinux_sepgsql_context_path():
+ return _selinux.selinux_sepgsql_context_path()
+selinux_sepgsql_context_path = _selinux.selinux_sepgsql_context_path
+
+def selinux_systemd_contexts_path():
+ return _selinux.selinux_systemd_contexts_path()
+selinux_systemd_contexts_path = _selinux.selinux_systemd_contexts_path
+
+def selinux_contexts_path():
+ return _selinux.selinux_contexts_path()
+selinux_contexts_path = _selinux.selinux_contexts_path
+
+def selinux_securetty_types_path():
+ return _selinux.selinux_securetty_types_path()
+selinux_securetty_types_path = _selinux.selinux_securetty_types_path
+
+def selinux_booleans_subs_path():
+ return _selinux.selinux_booleans_subs_path()
+selinux_booleans_subs_path = _selinux.selinux_booleans_subs_path
+
+def selinux_booleans_path():
+ return _selinux.selinux_booleans_path()
+selinux_booleans_path = _selinux.selinux_booleans_path
+
+def selinux_customizable_types_path():
+ return _selinux.selinux_customizable_types_path()
+selinux_customizable_types_path = _selinux.selinux_customizable_types_path
+
+def selinux_users_path():
+ return _selinux.selinux_users_path()
+selinux_users_path = _selinux.selinux_users_path
+
+def selinux_usersconf_path():
+ return _selinux.selinux_usersconf_path()
+selinux_usersconf_path = _selinux.selinux_usersconf_path
+
+def selinux_translations_path():
+ return _selinux.selinux_translations_path()
+selinux_translations_path = _selinux.selinux_translations_path
+
+def selinux_colors_path():
+ return _selinux.selinux_colors_path()
+selinux_colors_path = _selinux.selinux_colors_path
+
+def selinux_netfilter_context_path():
+ return _selinux.selinux_netfilter_context_path()
+selinux_netfilter_context_path = _selinux.selinux_netfilter_context_path
+
+def selinux_path():
+ return _selinux.selinux_path()
+selinux_path = _selinux.selinux_path
+
+def selinux_check_access(*args):
+ return _selinux.selinux_check_access(*args)
+selinux_check_access = _selinux.selinux_check_access
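+
+# Sketch of an access check between two contexts; how the wrapper handles
+# the C API's trailing audit-data argument is an assumption here:
+#
+# rc = selinux_check_access(scon, tcon, "file", "read")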
+
+def selinux_check_passwd_access(*args):
+ return _selinux.selinux_check_passwd_access(*args)
+selinux_check_passwd_access = _selinux.selinux_check_passwd_access
+
+def checkPasswdAccess(*args):
+ return _selinux.checkPasswdAccess(*args)
+checkPasswdAccess = _selinux.checkPasswdAccess
+
+def selinux_check_securetty_context(*args):
+ return _selinux.selinux_check_securetty_context(*args)
+selinux_check_securetty_context = _selinux.selinux_check_securetty_context
+
+def set_selinuxmnt(*args):
+ return _selinux.set_selinuxmnt(*args)
+set_selinuxmnt = _selinux.set_selinuxmnt
+
+def selinuxfs_exists():
+ return _selinux.selinuxfs_exists()
+selinuxfs_exists = _selinux.selinuxfs_exists
+
+def fini_selinuxmnt():
+ return _selinux.fini_selinuxmnt()
+fini_selinuxmnt = _selinux.fini_selinuxmnt
+
+def setexecfilecon(*args):
+ return _selinux.setexecfilecon(*args)
+setexecfilecon = _selinux.setexecfilecon
+
+def rpm_execcon(*args):
+ return _selinux.rpm_execcon(*args)
+rpm_execcon = _selinux.rpm_execcon
+
+def is_context_customizable(*args):
+ return _selinux.is_context_customizable(*args)
+is_context_customizable = _selinux.is_context_customizable
+
+def selinux_trans_to_raw_context(*args):
+ return _selinux.selinux_trans_to_raw_context(*args)
+selinux_trans_to_raw_context = _selinux.selinux_trans_to_raw_context
+
+def selinux_raw_to_trans_context(*args):
+ return _selinux.selinux_raw_to_trans_context(*args)
+selinux_raw_to_trans_context = _selinux.selinux_raw_to_trans_context
+
+def selinux_raw_context_to_color(*args):
+ return _selinux.selinux_raw_context_to_color(*args)
+selinux_raw_context_to_color = _selinux.selinux_raw_context_to_color
+
+def getseuserbyname(*args):
+ return _selinux.getseuserbyname(*args)
+getseuserbyname = _selinux.getseuserbyname
+
+def getseuser(*args):
+ return _selinux.getseuser(*args)
+getseuser = _selinux.getseuser
+
+def selinux_file_context_cmp(*args):
+ return _selinux.selinux_file_context_cmp(*args)
+selinux_file_context_cmp = _selinux.selinux_file_context_cmp
+
+def selinux_file_context_verify(*args):
+ return _selinux.selinux_file_context_verify(*args)
+selinux_file_context_verify = _selinux.selinux_file_context_verify
+
+def selinux_lsetfilecon_default(*args):
+ return _selinux.selinux_lsetfilecon_default(*args)
+selinux_lsetfilecon_default = _selinux.selinux_lsetfilecon_default
+
+def selinux_reset_config():
+ return _selinux.selinux_reset_config()
+selinux_reset_config = _selinux.selinux_reset_config
+# This file is compatible with both classic and new-style classes.
+
+
diff --git a/lib/python2.7/site-packages/selinux/_selinux.so b/lib/python2.7/site-packages/selinux/_selinux.so
new file mode 100755
index 0000000..f4a045f
--- /dev/null
+++ b/lib/python2.7/site-packages/selinux/_selinux.so
Binary files differ
diff --git a/lib/python2.7/site-packages/selinux/audit2why.so b/lib/python2.7/site-packages/selinux/audit2why.so
new file mode 100755
index 0000000..e7daaab
--- /dev/null
+++ b/lib/python2.7/site-packages/selinux/audit2why.so
Binary files differ
diff --git a/lib/python2.7/site-packages/sepolgen/__init__.py b/lib/python2.7/site-packages/sepolgen/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/python2.7/site-packages/sepolgen/__init__.py
diff --git a/lib/python2.7/site-packages/sepolgen/access.py b/lib/python2.7/site-packages/sepolgen/access.py
new file mode 100644
index 0000000..cf13210
--- /dev/null
+++ b/lib/python2.7/site-packages/sepolgen/access.py
@@ -0,0 +1,331 @@
+# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
+#
+# Copyright (C) 2006 Red Hat
+# see file 'COPYING' for use and warranty information
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2 only
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+
+"""
+Classes representing basic access.
+
+SELinux - at the most basic level - represents access as
+the 4-tuple subject (type or context), target (type or context),
+object class, permission. The policy language elaborates this basic
+access to facilitate more concise rules (e.g., allow rules can have multiple
+source or target types - see refpolicy for more information).
+
+This module has objects for representing the most basic access (AccessVector)
+and sets of that access (AccessVectorSet). These objects are used in Madison
+in a variety of ways, but they are the fundamental representation of access.
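+
+For example, the basic 4-tuple (httpd_t, etc_t, file, read) - the type
+names are hypothetical - maps onto an AccessVector like this (a sketch):
+
+ av = AccessVector(["httpd_t", "etc_t", "file", "read"])
+ str(av)   # e.g. 'allow httpd_t etc_t:file read;'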
+"""
+
+import refpolicy
+from selinux import audit2why
+
+def is_idparam(id):
+ """Determine if an id is a paramater in the form $N, where N is
+ an integer.
+
+ Returns:
+ True if the id is a paramater
+ False if the id is not a paramater
+ """
+ if len(id) > 1 and id[0] == '$':
+ try:
+ int(id[1:])
+ except ValueError:
+ return False
+ return True
+ else:
+ return False
+
+class AccessVector:
+ """
+ An access vector is the basic unit of access in SELinux.
+
+ Access vectors are the most basic representation of access within
+ SELinux. It represents the access a source type has to a target
+ type in terms of an object class and a set of permissions.
+
+ Access vectors are distinct from AVRules in that they can only
+ store a single source type, target type, and object class. The
+ simplicity of AccessVectors makes them useful for storing access
+ in a form that is easy to search and compare.
+
+ The source, target, and object are stored as strings. No checking is
+ done to verify that the strings are valid SELinux identifiers.
+ Identifiers in the form $N (where N is an integer) are reserved as
+ interface parameters and are treated as wild cards in many
+ circumstances.
+
+ Properties:
+ .src_type - The source type allowed access. [String or None]
+ .tgt_type - The target type to which access is allowed. [String or None]
+ .obj_class - The object class to which access is allowed. [String or None]
+ .perms - The permissions allowed to the object class. [IdSet]
+ .audit_msgs - The audit messages that generated this access vector [List of strings]
+ """
+ def __init__(self, init_list=None):
+ # Establish defaults first so that every attribute exists even when
+ # the vector is initialized from a list (from_list only fills in
+ # src_type, tgt_type, obj_class, and perms).
+ self.src_type = None
+ self.tgt_type = None
+ self.obj_class = None
+ self.perms = refpolicy.IdSet()
+ self.audit_msgs = []
+ self.type = audit2why.TERULE
+ self.data = []
+
+ # The direction of the information flow represented by this
+ # access vector - used for matching
+ self.info_flow_dir = None
+
+ if init_list:
+ self.from_list(init_list)
+
+ def from_list(self, list):
+ """Initialize an access vector from a list.
+
+ Initialize an access vector from a list treating the list as
+ positional arguments - i.e., 0 = src_type, 1 = tgt_type, etc.
+ All of the list elements 3 and greater are treated as perms.
+ For example, the list ['foo_t', 'bar_t', 'file', 'read', 'write']
+ would create an access vector list with the source type 'foo_t',
+ target type 'bar_t', object class 'file', and permissions 'read'
+ and 'write'.
+
+ This format is useful for very simple storage to strings or disc
+ (see to_list) and for initializing access vectors.
+ """
+ if len(list) < 4:
+ raise ValueError("List must contain at least four elements %s" % str(list))
+ self.src_type = list[0]
+ self.tgt_type = list[1]
+ self.obj_class = list[2]
+ self.perms = refpolicy.IdSet(list[3:])
+
+ def to_list(self):
+ """
+ Convert an access vector to a list.
+
+ Convert an access vector to a list treating the list as positional
+ values. See from_list for more information on how an access vector
+ is represented in a list.
+ """
+ l = [self.src_type, self.tgt_type, self.obj_class]
+ l.extend(self.perms)
+ return l
+
+ def __str__(self):
+ return self.to_string()
+
+ def to_string(self):
+ return "allow %s %s:%s %s;" % (self.src_type, self.tgt_type,
+ self.obj_class, self.perms.to_space_str())
+
+ def __cmp__(self, other):
+ if self.src_type != other.src_type:
+ return cmp(self.src_type, other.src_type)
+ if self.tgt_type != other.tgt_type:
+ return cmp(self.tgt_type, other.tgt_type)
+ if self.obj_class != other.obj_class:
+ return cmp(self.obj_class, other.obj_class)
+ if len(self.perms) != len(other.perms):
+ return cmp(len(self.perms), len(other.perms))
+ x = list(self.perms)
+ x.sort()
+ y = list(other.perms)
+ y.sort()
+ for pa, pb in zip(x, y):
+ if pa != pb:
+ return cmp(pa, pb)
+ return 0
+
+def avrule_to_access_vectors(avrule):
+ """Convert an avrule into a list of access vectors.
+
+ AccessVectors and AVRules are similar, but differ in that
+ an AVRule can have more than one source type, target type, and
+ object class. This function expands a single avrule into a
+ list of one or more AccessVectors representing the access
+ defined in the AVRule.
+ """
+ if isinstance(avrule, AccessVector):
+ return [avrule]
+ a = []
+ for src_type in avrule.src_types:
+ for tgt_type in avrule.tgt_types:
+ for obj_class in avrule.obj_classes:
+ access = AccessVector()
+ access.src_type = src_type
+ access.tgt_type = tgt_type
+ access.obj_class = obj_class
+ access.perms = avrule.perms.copy()
+ a.append(access)
+ return a
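+# For example (a sketch), an AVRule equivalent to
+#   allow { a_t b_t } c_t : file read;
+# expands into two AccessVectors:
+#   allow a_t c_t:file read;
+#   allow b_t c_t:file read;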
+
+class AccessVectorSet:
+ """A non-overlapping set of access vectors.
+
+ An AccessVectorSet is designed to store one or more access vectors
+ that are non-overlapping. Access can be added to the set
+ incrementally and access vectors will be added or merged as
+ necessary. For example, adding the following access vectors using
+ add_av:
+ allow $1 etc_t : read;
+ allow $1 etc_t : write;
+ allow $1 var_log_t : read;
+ Would result in an access vector set with the access vectors:
+ allow $1 etc_t : { read write};
+ allow $1 var_log_t : read;
+ """
+ def __init__(self):
+ """Initialize an access vector set.
+ """
+ self.src = {}
+ # The information flow direction of this access vector
+ # set - see objectmodel.py for more information. This is
+ # stored here to speed up searching - see matching.py.
+ self.info_dir = None
+
+ def __iter__(self):
+ """Iterate over all of the unique access vectors in the set."""
+ for tgts in self.src.values():
+ for objs in tgts.values():
+ for av in objs.values():
+ yield av
+
+ def __len__(self):
+ """Return the number of unique access vectors in the set.
+
+ Because of the internal representation of the access vector set,
+ __len__ is not a constant time operation. Worst case is O(N)
+ where N is the number of unique access vectors, but the common
+ case is probably better.
+ """
+ l = 0
+ for tgts in self.src.values():
+ for objs in tgts.values():
+ l += len(objs)
+ return l
+
+ def to_list(self):
+ """Return the unique access vectors in the set as a list.
+
+ The format of the returned list is a set of nested lists,
+ each access vector represented by a list. This format is
+ designed to be simply serializable to a file.
+
+ For example, consider an access vector set with the following
+ access vectors:
+ allow $1 user_t : file read;
+ allow $1 etc_t : file { read write};
+ to_list would return the following:
+ [[$1, user_t, file, read]
+ [$1, etc_t, file, read, write]]
+
+ See AccessVector.to_list for more information.
+ """
+ l = []
+ for av in self:
+ l.append(av.to_list())
+
+ return l
+
+ def from_list(self, l):
+ """Add access vectors stored in a list.
+
+ See to_list for more information on the list format that this
+ method accepts.
+
+ This will add all of the access from the list. Any existing
+ access vectors in the set will be retained.
+ """
+ for av in l:
+ self.add_av(AccessVector(av))
+
+ def add(self, src_type, tgt_type, obj_class, perms, audit_msg=None, avc_type=audit2why.TERULE, data=[]):
+ """Add an access vector to the set.
+ """
+ tgt = self.src.setdefault(src_type, { })
+ cls = tgt.setdefault(tgt_type, { })
+
+ if cls.has_key((obj_class, avc_type)):
+ access = cls[obj_class, avc_type]
+ else:
+ access = AccessVector()
+ access.src_type = src_type
+ access.tgt_type = tgt_type
+ access.obj_class = obj_class
+ access.data = data
+ access.type = avc_type
+ cls[obj_class, avc_type] = access
+
+ access.perms.update(perms)
+ if audit_msg:
+ access.audit_msgs.append(audit_msg)
+
+ def add_av(self, av, audit_msg=None):
+ """Add an access vector to the set."""
+ # Forward the audit message and the vector's rule type and data
+ # rather than silently dropping them.
+ self.add(av.src_type, av.tgt_type, av.obj_class, av.perms,
+ audit_msg, av.type, av.data)
+
+
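+# A merge sketch matching the AccessVectorSet docstring above ($1 is the
+# interface-parameter placeholder; etc_t is a hypothetical type):
+#
+# s = AccessVectorSet()
+# s.add("$1", "etc_t", "file", ["read"])
+# s.add("$1", "etc_t", "file", ["write"])
+# len(s)   # 1 - both adds merged into a single { read write } vector
+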
+def avs_extract_types(avs):
+ types = refpolicy.IdSet()
+ for av in avs:
+ types.add(av.src_type)
+ types.add(av.tgt_type)
+
+ return types
+
+def avs_extract_obj_perms(avs):
+ perms = { }
+ for av in avs:
+ if perms.has_key(av.obj_class):
+ s = perms[av.obj_class]
+ else:
+ s = refpolicy.IdSet()
+ perms[av.obj_class] = s
+ s.update(av.perms)
+ return perms
+
+class RoleTypeSet:
+ """A non-overlapping set of role type statements.
+
+ This class allows the incremental addition of role type statements and
+ maintains a non-overlapping list of statements.
+ """
+ def __init__(self):
+ """Initialize an access vector set."""
+ self.role_types = {}
+
+ def __iter__(self):
+ """Iterate over all of the unique role allows statements in the set."""
+ for role_type in self.role_types.values():
+ yield role_type
+
+ def __len__(self):
+ """Return the unique number of role allow statements."""
+ return len(self.role_types.keys())
+
+ def add(self, role, type):
+ if self.role_types.has_key(role):
+ role_type = self.role_types[role]
+ else:
+ role_type = refpolicy.RoleType()
+ role_type.role = role
+ self.role_types[role] = role_type
+
+ role_type.types.add(type)
diff --git a/lib/python2.7/site-packages/sepolgen/audit.py b/lib/python2.7/site-packages/sepolgen/audit.py
new file mode 100644
index 0000000..56919be
--- /dev/null
+++ b/lib/python2.7/site-packages/sepolgen/audit.py
@@ -0,0 +1,549 @@
+# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
+#
+# Copyright (C) 2006 Red Hat
+# see file 'COPYING' for use and warranty information
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2 only
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+
+import refpolicy
+import access
+import re
+import sys
+
+# Convenience functions
+
+def get_audit_boot_msgs():
+ """Obtain all of the avc and policy load messages from the audit
+ log. This function uses ausearch and requires that the current
+ process have sufficient rights to run ausearch.
+
+ Returns:
+ string containing all of the audit messages returned by ausearch.
+ """
+ import subprocess
+ import time
+ fd=open("/proc/uptime", "r")
+ off=float(fd.read().split()[0])
+ fd.close()
+ s = time.localtime(time.time() - off)
+ bootdate = time.strftime("%x", s)
+ boottime = time.strftime("%X", s)
+ output = subprocess.Popen(["/sbin/ausearch", "-m", "AVC,USER_AVC,MAC_POLICY_LOAD,DAEMON_START,SELINUX_ERR", "-ts", bootdate, boottime],
+ stdout=subprocess.PIPE).communicate()[0]
+ return output
+
+def get_audit_msgs():
+ """Obtain all of the avc and policy load messages from the audit
+ log. This function uses ausearch and requires that the current
+ process have sufficient rights to run ausearch.
+
+ Returns:
+ string containing all of the audit messages returned by ausearch.
+ """
+ import subprocess
+ output = subprocess.Popen(["/sbin/ausearch", "-m", "AVC,USER_AVC,MAC_POLICY_LOAD,DAEMON_START,SELINUX_ERR"],
+ stdout=subprocess.PIPE).communicate()[0]
+ return output
+
+def get_dmesg_msgs():
+ """Obtain all of the avc and policy load messages from /bin/dmesg.
+
+ Returns:
+ string containing all of the audit messages returned by dmesg.
+ """
+ import subprocess
+ output = subprocess.Popen(["/bin/dmesg"],
+ stdout=subprocess.PIPE).communicate()[0]
+ return output
+
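+# These helpers are typically chained straight into the log parser (a
+# sketch; AuditParser is defined later in this module):
+#
+# parser = AuditParser()
+# parser.parse_string(get_audit_msgs())
+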
+# Classes representing audit messages
+
+class AuditMessage:
+ """Base class for all objects representing audit messages.
+
+ AuditMessage is a base class for all audit messages and only
+ provides storage for the raw message (as a string) and a
+ parsing function that does nothing.
+ """
+ def __init__(self, message):
+ self.message = message
+ self.header = ""
+
+ def from_split_string(self, recs):
+ """Parse a string that has been split into records by space into
+ an audit message.
+
+ This method should be overridden by subclasses. Error reporting
+ should be done by raising ValueError exceptions.
+ """
+ for msg in recs:
+ fields = msg.split("=")
+ if len(fields) != 2:
+ if msg[:6] == "audit(":
+ self.header = msg
+ return
+ else:
+ continue
+
+ if fields[0] == "msg":
+ self.header = fields[1]
+ return
+
+
+class InvalidMessage(AuditMessage):
+ """Class representing invalid audit messages. This is used to differentiate
+ between audit messages that aren't recognized (that should return None from
+ the audit message parser) and a message that is recognized but is malformed
+ in some way.
+ """
+ def __init__(self, message):
+ AuditMessage.__init__(self, message)
+
+class PathMessage(AuditMessage):
+ """Class representing a path message"""
+ def __init__(self, message):
+ AuditMessage.__init__(self, message)
+ self.path = ""
+
+ def from_split_string(self, recs):
+ AuditMessage.from_split_string(self, recs)
+
+ for msg in recs:
+ fields = msg.split("=")
+ if len(fields) != 2:
+ continue
+ if fields[0] == "path":
+ self.path = fields[1][1:-1]
+ return
+
+import selinux.audit2why as audit2why
+
+avcdict = {}
+
+class AVCMessage(AuditMessage):
+ """AVC message representing an access denial or granted message.
+
+ This is a very basic class and does not represent all possible fields
+ in an avc message. Currently the fields are:
+ scontext - context for the source (process) that generated the message
+ tcontext - context for the target
+ tclass - object class for the target (only one)
+ comm - the process name
+ exe - the on-disc binary
+ path - the path of the target
+ access - list of accesses that were allowed or denied
+ denial - boolean indicating whether this was a denial (True) or granted
+ (False) message.
+
+ An example audit message generated from the audit daemon looks like (line breaks
+ added):
+ 'type=AVC msg=audit(1155568085.407:10877): avc: denied { search } for
+ pid=677 comm="python" name="modules" dev=dm-0 ino=13716388
+ scontext=user_u:system_r:setroubleshootd_t:s0
+ tcontext=system_u:object_r:modules_object_t:s0 tclass=dir'
+
+ An example audit message stored in syslog (not processed by the audit daemon - line
+ breaks added):
+       'Sep 12 08:26:43 dhcp83-5 kernel: audit(1158064002.046:4): avc: denied { read }
+       for pid=2496 comm="bluez-pin" name=".gdm1K3IFT" dev=dm-0 ino=3601333
+       scontext=user_u:system_r:bluetooth_helper_t:s0-s0:c0
+       tcontext=system_u:object_r:xdm_tmp_t:s0 tclass=file'
+ """
+ def __init__(self, message):
+ AuditMessage.__init__(self, message)
+ self.scontext = refpolicy.SecurityContext()
+ self.tcontext = refpolicy.SecurityContext()
+ self.tclass = ""
+ self.comm = ""
+ self.exe = ""
+ self.path = ""
+ self.name = ""
+ self.accesses = []
+ self.denial = True
+ self.type = audit2why.TERULE
+
+ def __parse_access(self, recs, start):
+        # The access is in a space separated list like '{ read write }',
+        # which doesn't fit particularly well with splitting the string on
+        # spaces. This function takes the list of recs and a starting
+        # position one beyond the open brace. It then adds the accesses until it finds
+        # the close brace or the end of the list (which is an error if reached without
+        # seeing a close brace).
+ found_close = False
+ i = start
+ if i == (len(recs) - 1):
+ raise ValueError("AVC message in invalid format [%s]\n" % self.message)
+ while i < len(recs):
+ if recs[i] == "}":
+ found_close = True
+ break
+ self.accesses.append(recs[i])
+ i = i + 1
+ if not found_close:
+ raise ValueError("AVC message in invalid format [%s]\n" % self.message)
+ return i + 1
+
+
+ def from_split_string(self, recs):
+ AuditMessage.from_split_string(self, recs)
+ # FUTURE - fully parse avc messages and store all possible fields
+ # Required fields
+ found_src = False
+ found_tgt = False
+ found_class = False
+ found_access = False
+
+ for i in range(len(recs)):
+ if recs[i] == "{":
+ i = self.__parse_access(recs, i + 1)
+ found_access = True
+ continue
+ elif recs[i] == "granted":
+ self.denial = False
+
+ fields = recs[i].split("=")
+ if len(fields) != 2:
+ continue
+ if fields[0] == "scontext":
+ self.scontext = refpolicy.SecurityContext(fields[1])
+ found_src = True
+ elif fields[0] == "tcontext":
+ self.tcontext = refpolicy.SecurityContext(fields[1])
+ found_tgt = True
+ elif fields[0] == "tclass":
+ self.tclass = fields[1]
+ found_class = True
+ elif fields[0] == "comm":
+ self.comm = fields[1][1:-1]
+ elif fields[0] == "exe":
+ self.exe = fields[1][1:-1]
+ elif fields[0] == "name":
+ self.name = fields[1][1:-1]
+
+ if not found_src or not found_tgt or not found_class or not found_access:
+ raise ValueError("AVC message in invalid format [%s]\n" % self.message)
+ self.analyze()
+
+ def analyze(self):
+ tcontext = self.tcontext.to_string()
+ scontext = self.scontext.to_string()
+        access_tuple = tuple(self.accesses)
+        self.data = []
+
+        if (scontext, tcontext, self.tclass, access_tuple) in avcdict:
+            self.type, self.data = avcdict[(scontext, tcontext, self.tclass, access_tuple)]
+        else:
+            self.type, self.data = audit2why.analyze(scontext, tcontext, self.tclass, self.accesses)
+ if self.type == audit2why.NOPOLICY:
+ self.type = audit2why.TERULE
+ if self.type == audit2why.BADTCON:
+ raise ValueError("Invalid Target Context %s\n" % tcontext)
+ if self.type == audit2why.BADSCON:
+ raise ValueError("Invalid Source Context %s\n" % scontext)
+            if self.type == audit2why.BADTCLASS:
+                raise ValueError("Invalid Type Class %s\n" % self.tclass)
+ if self.type == audit2why.BADPERM:
+ raise ValueError("Invalid permission %s\n" % " ".join(self.accesses))
+ if self.type == audit2why.BADCOMPUTE:
+ raise ValueError("Error during access vector computation")
+
+ if self.type == audit2why.CONSTRAINT:
+ self.data = [ self.data ]
+ if self.scontext.user != self.tcontext.user:
+ self.data.append(("user (%s)" % self.scontext.user, 'user (%s)' % self.tcontext.user))
+ if self.scontext.role != self.tcontext.role and self.tcontext.role != "object_r":
+ self.data.append(("role (%s)" % self.scontext.role, 'role (%s)' % self.tcontext.role))
+ if self.scontext.level != self.tcontext.level:
+ self.data.append(("level (%s)" % self.scontext.level, 'level (%s)' % self.tcontext.level))
+
+ avcdict[(scontext, tcontext, self.tclass, access_tuple)] = (self.type, self.data)
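+
+# Illustrative sketch (not part of the original module): parsing the example
+# denial from the AVCMessage docstring. Note that from_split_string() ends by
+# calling analyze(), which consults the loaded policy through audit2why, so
+# this only works on a host with an SELinux policy loaded.
+#
+#   line = ('type=AVC msg=audit(1155568085.407:10877): avc: denied '
+#           '{ search } for pid=677 comm="python" name="modules" dev=dm-0 '
+#           'ino=13716388 scontext=user_u:system_r:setroubleshootd_t:s0 '
+#           'tcontext=system_u:object_r:modules_object_t:s0 tclass=dir')
+#   msg = AVCMessage(line)
+#   msg.from_split_string(line.split())
+#   assert msg.scontext.type == "setroubleshootd_t"
+#   assert msg.tclass == "dir" and msg.accesses == ["search"]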
+
+class PolicyLoadMessage(AuditMessage):
+ """Audit message indicating that the policy was reloaded."""
+ def __init__(self, message):
+ AuditMessage.__init__(self, message)
+
+class DaemonStartMessage(AuditMessage):
+ """Audit message indicating that a daemon was started."""
+ def __init__(self, message):
+ AuditMessage.__init__(self, message)
+ self.auditd = False
+
+ def from_split_string(self, recs):
+ AuditMessage.from_split_string(self, recs)
+ if "auditd" in recs:
+ self.auditd = True
+
+
+class ComputeSidMessage(AuditMessage):
+ """Audit message indicating that a sid was not valid.
+
+ Compute sid messages are generated on attempting to create a security
+ context that is not valid. Security contexts are invalid if the role is
+ not authorized for the user or the type is not authorized for the role.
+
+    This class does not store all of the fields from the compute sid message -
+    just the invalid, source, and target contexts and the object class.
+ """
+ def __init__(self, message):
+ AuditMessage.__init__(self, message)
+ self.invalid_context = refpolicy.SecurityContext()
+ self.scontext = refpolicy.SecurityContext()
+ self.tcontext = refpolicy.SecurityContext()
+ self.tclass = ""
+
+ def from_split_string(self, recs):
+ AuditMessage.from_split_string(self, recs)
+ if len(recs) < 10:
+ raise ValueError("Split string does not represent a valid compute sid message")
+
+ try:
+ self.invalid_context = refpolicy.SecurityContext(recs[5])
+ self.scontext = refpolicy.SecurityContext(recs[7].split("=")[1])
+ self.tcontext = refpolicy.SecurityContext(recs[8].split("=")[1])
+ self.tclass = recs[9].split("=")[1]
+        except (IndexError, ValueError):
+            raise ValueError("Split string does not represent a valid compute sid message")
+
+    def output(self):
+        return "role %s types %s;\n" % (self.invalid_context.role, self.invalid_context.type)
+
+# Parser for audit messages
+
+class AuditParser:
+ """Parser for audit messages.
+
+ This class parses audit messages and stores them according to their message
+ type. This is not a general purpose audit message parser - it only extracts
+ selinux related messages.
+
+    Each audit message is stored in one of several lists:
+       avc_msgs - avc denial or granted messages. Messages are stored in
+          AVCMessage objects.
+       compute_sid_msgs - invalid sid messages. Messages are stored in
+          ComputeSidMessage objects.
+       invalid_msgs - selinux related messages that are not valid. Messages
+          are stored in InvalidMessage objects.
+       policy_load_msgs - policy load messages. Messages are stored in
+          PolicyLoadMessage objects.
+       path_msgs - AVC_PATH messages. Messages are stored in PathMessage
+          objects.
+
+ These lists will be reset when a policy load message is seen if
+ AuditParser.last_load_only is set to true. It is assumed that messages
+ are fed to the parser in chronological order - time stamps are not
+ parsed.
+ """
+ def __init__(self, last_load_only=False):
+ self.__initialize()
+ self.last_load_only = last_load_only
+
+ def __initialize(self):
+ self.avc_msgs = []
+ self.compute_sid_msgs = []
+ self.invalid_msgs = []
+ self.policy_load_msgs = []
+ self.path_msgs = []
+ self.by_header = { }
+ self.check_input_file = False
+
+ # Low-level parsing function - tries to determine if this audit
+ # message is an SELinux related message and then parses it into
+ # the appropriate AuditMessage subclass. This function deliberately
+    # does not impose policy (e.g., on policy load messages) or store
+    # messages, to make it as simple and reusable as possible.
+ #
+ # Return values:
+ # None - no recognized audit message found in this line
+ #
+ # InvalidMessage - a recognized but invalid message was found.
+ #
+ # AuditMessage (or subclass) - object representing a parsed
+ # and valid audit message.
+ def __parse_line(self, line):
+ rec = line.split()
+ for i in rec:
+ found = False
+ if i == "avc:" or i == "message=avc:" or i == "msg='avc:":
+ msg = AVCMessage(line)
+ found = True
+ elif i == "security_compute_sid:":
+ msg = ComputeSidMessage(line)
+ found = True
+ elif i == "type=MAC_POLICY_LOAD" or i == "type=1403":
+ msg = PolicyLoadMessage(line)
+ found = True
+ elif i == "type=AVC_PATH":
+ msg = PathMessage(line)
+ found = True
+ elif i == "type=DAEMON_START":
+                msg = DaemonStartMessage(line)
+ found = True
+
+ if found:
+ self.check_input_file = True
+ try:
+ msg.from_split_string(rec)
+ except ValueError:
+ msg = InvalidMessage(line)
+ return msg
+ return None
+
+ # Higher-level parse function - take a line, parse it into an
+ # AuditMessage object, and store it in the appropriate list.
+ # This function will optionally reset all of the lists when
+ # it sees a load policy message depending on the value of
+ # self.last_load_only.
+ def __parse(self, line):
+ msg = self.__parse_line(line)
+ if msg is None:
+ return
+
+ # Append to the correct list
+ if isinstance(msg, PolicyLoadMessage):
+ if self.last_load_only:
+ self.__initialize()
+ elif isinstance(msg, DaemonStartMessage):
+ # We initialize every time the auditd is started. This
+ # is less than ideal, but unfortunately it is the only
+ # way to catch reboots since the initial policy load
+ # by init is not stored in the audit log.
+ if msg.auditd and self.last_load_only:
+ self.__initialize()
+ self.policy_load_msgs.append(msg)
+ elif isinstance(msg, AVCMessage):
+ self.avc_msgs.append(msg)
+ elif isinstance(msg, ComputeSidMessage):
+ self.compute_sid_msgs.append(msg)
+ elif isinstance(msg, InvalidMessage):
+ self.invalid_msgs.append(msg)
+ elif isinstance(msg, PathMessage):
+ self.path_msgs.append(msg)
+
+ # Group by audit header
+ if msg.header != "":
+ if self.by_header.has_key(msg.header):
+ self.by_header[msg.header].append(msg)
+ else:
+ self.by_header[msg.header] = [msg]
+
+
+ # Post processing will add additional information from AVC messages
+ # from related messages - only works on messages generated by
+ # the audit system.
+ def __post_process(self):
+ for value in self.by_header.values():
+ avc = []
+ path = None
+ for msg in value:
+ if isinstance(msg, PathMessage):
+ path = msg
+ elif isinstance(msg, AVCMessage):
+ avc.append(msg)
+ if len(avc) > 0 and path:
+ for a in avc:
+ a.path = path.path
+
+ def parse_file(self, input):
+ """Parse the contents of a file object. This method can be called
+ multiple times (along with parse_string)."""
+ line = input.readline()
+ while line:
+ self.__parse(line)
+ line = input.readline()
+ if not self.check_input_file:
+ sys.stderr.write("Nothing to do\n")
+ sys.exit(0)
+ self.__post_process()
+
+ def parse_string(self, input):
+ """Parse a string containing audit messages - messages should
+ be separated by new lines. This method can be called multiple
+ times (along with parse_file)."""
+ lines = input.split('\n')
+ for l in lines:
+ self.__parse(l)
+ self.__post_process()
+
+ def to_role(self, role_filter=None):
+ """Return RoleAllowSet statements matching the specified filter
+
+ Filter out types that match the filer, or all roles
+
+ Params:
+ role_filter - [optional] Filter object used to filter the
+ output.
+ Returns:
+ Access vector set representing the denied access in the
+ audit logs parsed by this object.
+ """
+ role_types = access.RoleTypeSet()
+ for cs in self.compute_sid_msgs:
+ if not role_filter or role_filter.filter(cs):
+ role_types.add(cs.invalid_context.role, cs.invalid_context.type)
+
+ return role_types
+
+ def to_access(self, avc_filter=None, only_denials=True):
+ """Convert the audit logs access into a an access vector set.
+
+ Convert the audit logs into an access vector set, optionally
+ filtering the restults with the passed in filter object.
+
+ Filter objects are object instances with a .filter method
+ that takes and access vector and returns True if the message
+ should be included in the final output and False otherwise.
+
+ Params:
+ avc_filter - [optional] Filter object used to filter the
+ output.
+ Returns:
+ Access vector set representing the denied access in the
+ audit logs parsed by this object.
+ """
+ av_set = access.AccessVectorSet()
+ for avc in self.avc_msgs:
+            if only_denials and not avc.denial:
+                continue
+            if avc_filter and not avc_filter.filter(avc):
+                continue
+            av_set.add(avc.scontext.type, avc.tcontext.type, avc.tclass,
+                       avc.accesses, avc, avc_type=avc.type, data=avc.data)
+ return av_set
+
+class AVCTypeFilter:
+ def __init__(self, regex):
+ self.regex = re.compile(regex)
+
+ def filter(self, avc):
+ if self.regex.match(avc.scontext.type):
+ return True
+ if self.regex.match(avc.tcontext.type):
+ return True
+ return False
+
+class ComputeSidTypeFilter:
+ def __init__(self, regex):
+ self.regex = re.compile(regex)
+
+ def filter(self, avc):
+ if self.regex.match(avc.invalid_context.type):
+ return True
+ if self.regex.match(avc.scontext.type):
+ return True
+ if self.regex.match(avc.tcontext.type):
+ return True
+ return False
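+
+# An end-to-end sketch tying the parser and filters together (the domain
+# names here are made up for illustration):
+#
+#   parser = AuditParser(last_load_only=True)
+#   parser.parse_string(get_audit_msgs())
+#   av_set = parser.to_access(AVCTypeFilter("^httpd"), only_denials=True)
+#   role_types = parser.to_role(ComputeSidTypeFilter("^httpd"))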
+
+
diff --git a/lib/python2.7/site-packages/sepolgen/classperms.py b/lib/python2.7/site-packages/sepolgen/classperms.py
new file mode 100644
index 0000000..c925dee
--- /dev/null
+++ b/lib/python2.7/site-packages/sepolgen/classperms.py
@@ -0,0 +1,116 @@
+# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
+#
+# Copyright (C) 2006 Red Hat
+# see file 'COPYING' for use and warranty information
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2 only
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+import sys
+
+tokens = ('DEFINE',
+ 'NAME',
+ 'TICK',
+ 'SQUOTE',
+ 'OBRACE',
+ 'CBRACE',
+ 'SEMI',
+ 'OPAREN',
+ 'CPAREN',
+ 'COMMA')
+
+reserved = {
+ 'define' : 'DEFINE' }
+
+t_TICK = r'\`'
+t_SQUOTE = r'\''
+t_OBRACE = r'\{'
+t_CBRACE = r'\}'
+t_SEMI = r'\;'
+t_OPAREN = r'\('
+t_CPAREN = r'\)'
+t_COMMA = r'\,'
+
+t_ignore = " \t\n"
+
+def t_NAME(t):
+ r'[a-zA-Z_][a-zA-Z0-9_]*'
+ t.type = reserved.get(t.value,'NAME')
+ return t
+
+def t_error(t):
+ print "Illegal character '%s'" % t.value[0]
+ t.skip(1)
+
+import lex
+lex.lex()
+
+def p_statements(p):
+ '''statements : define_stmt
+ | define_stmt statements
+ '''
+ if len(p) == 2:
+ p[0] = [p[1]]
+ else:
+        p[0] = [p[1]] + p[2]
+
+def p_define_stmt(p):
+    # Corresponds to: define(`foo',`{ read write }')
+ '''define_stmt : DEFINE OPAREN TICK NAME SQUOTE COMMA TICK list SQUOTE CPAREN
+ '''
+
+ p[0] = [p[4], p[8]]
+
+def p_list(p):
+ '''list : NAME
+ | OBRACE names CBRACE
+ '''
+ if p[1] == "{":
+ p[0] = p[2]
+ else:
+ p[0] = [p[1]]
+
+def p_names(p):
+ '''names : NAME
+ | NAME names
+ '''
+ if len(p) == 2:
+ p[0] = [p[1]]
+ else:
+ p[0] = [p[1]] + p[2]
+
+def p_error(p):
+ print "Syntax error on line %d %s [type=%s]" % (p.lineno, p.value, p.type)
+
+import yacc
+yacc.yacc()
+
+
+f = open("all_perms.spt")
+txt = f.read()
+f.close()
+
+#lex.input(txt)
+#while 1:
+# tok = lex.token()
+# if not tok:
+# break
+# print tok
+
+test = "define(`foo',`{ read write append }')"
+test2 = """define(`all_filesystem_perms',`{ mount remount unmount getattr relabelfrom relabelto transition associate quotamod quotaget }')
+define(`all_security_perms',`{ compute_av compute_create compute_member check_context load_policy compute_relabel compute_user setenforce setbool setsecparam setcheckreqprot }')
+"""
+result = yacc.parse(txt)
+print result
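+
+# For the small 'test' string above, yacc.parse(test) is expected to yield a
+# nested list of the form (expected shape, shown for illustration):
+#
+#   [['foo', ['read', 'write', 'append']]]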
+
diff --git a/lib/python2.7/site-packages/sepolgen/defaults.py b/lib/python2.7/site-packages/sepolgen/defaults.py
new file mode 100644
index 0000000..218bc7c
--- /dev/null
+++ b/lib/python2.7/site-packages/sepolgen/defaults.py
@@ -0,0 +1,77 @@
+# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
+#
+# Copyright (C) 2006 Red Hat
+# see file 'COPYING' for use and warranty information
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2 only
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+
+import os
+import re
+
+# Select the correct location for the development files based on a
+# path variable (optionally read from a configuration file)
+class PathChoooser(object):
+ def __init__(self, pathname):
+ self.config = dict()
+ if not os.path.exists(pathname):
+ self.config_pathname = "(defaults)"
+ self.config["SELINUX_DEVEL_PATH"] = "/usr/share/selinux/default:/usr/share/selinux/mls:/usr/share/selinux/devel"
+ return
+ self.config_pathname = pathname
+ ignore = re.compile(r"^\s*(?:#.+)?$")
+ consider = re.compile(r"^\s*(\w+)\s*=\s*(.+?)\s*$")
+ for lineno, line in enumerate(open(pathname)):
+ if ignore.match(line): continue
+ mo = consider.match(line)
+ if not mo:
+ raise ValueError, "%s:%d: line is not in key = value format" % (pathname, lineno+1)
+ self.config[mo.group(1)] = mo.group(2)
+
+    # We only export one useful operation, so make the object itself callable
+ def __call__(self, testfilename, pathset="SELINUX_DEVEL_PATH"):
+ paths = self.config.get(pathset, None)
+ if paths is None:
+ raise ValueError, "%s was not in %s" % (pathset, self.config_pathname)
+ paths = paths.split(":")
+ for p in paths:
+ target = os.path.join(p, testfilename)
+ if os.path.exists(target): return target
+ return os.path.join(paths[0], testfilename)
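+
+# Illustrative use (assumes a config file in the 'key = value' format parsed
+# above; the path and value below are examples only):
+#
+#   chooser = PathChoooser("/etc/selinux/sepolgen.conf")
+#   # given a line such as: SELINUX_DEVEL_PATH = /usr/share/selinux/devel
+#   makefile = chooser("Makefile")   # first existing <path>/Makefile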
+
+
+"""
+Various default settings, including file and directory locations.
+"""
+
+def data_dir():
+ return "/var/lib/sepolgen"
+
+def perm_map():
+ return data_dir() + "/perm_map"
+
+def interface_info():
+ return data_dir() + "/interface_info"
+
+def attribute_info():
+ return data_dir() + "/attribute_info"
+
+def refpolicy_makefile():
+ chooser = PathChoooser("/etc/selinux/sepolgen.conf")
+ return chooser("Makefile")
+
+def headers():
+ chooser = PathChoooser("/etc/selinux/sepolgen.conf")
+ return chooser("include")
+
diff --git a/lib/python2.7/site-packages/sepolgen/interfaces.py b/lib/python2.7/site-packages/sepolgen/interfaces.py
new file mode 100644
index 0000000..88a6dc3
--- /dev/null
+++ b/lib/python2.7/site-packages/sepolgen/interfaces.py
@@ -0,0 +1,509 @@
+# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
+#
+# Copyright (C) 2006 Red Hat
+# see file 'COPYING' for use and warranty information
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2 only
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+
+"""
+Classes for representing and manipulating interfaces.
+"""
+
+import access
+import refpolicy
+import itertools
+import objectmodel
+import matching
+
+from sepolgeni18n import _
+
+import copy
+
+class Param:
+ """
+    Object representing a parameter for an interface.
+ """
+ def __init__(self):
+ self.__name = ""
+ self.type = refpolicy.SRC_TYPE
+ self.obj_classes = refpolicy.IdSet()
+ self.required = True
+
+ def set_name(self, name):
+ if not access.is_idparam(name):
+ raise ValueError("Name [%s] is not a param" % name)
+ self.__name = name
+
+ def get_name(self):
+ return self.__name
+
+ name = property(get_name, set_name)
+
+ num = property(fget=lambda self: int(self.name[1:]))
+
+ def __repr__(self):
+ return "<sepolgen.policygen.Param instance [%s, %s, %s]>" % \
+ (self.name, refpolicy.field_to_str[self.type], " ".join(self.obj_classes))
+
+
+# Helper for extract perms
+def __param_insert(name, type, av, params):
+ ret = 0
+ if name in params:
+ p = params[name]
+ # The entries are identical - we're done
+ if type == p.type:
+ return
+        # Handle implicitly typed objects (like process)
+ if (type == refpolicy.SRC_TYPE or type == refpolicy.TGT_TYPE) and \
+ (p.type == refpolicy.TGT_TYPE or p.type == refpolicy.SRC_TYPE):
+ #print name, refpolicy.field_to_str[p.type]
+ # If the object is not implicitly typed, tell the
+ # caller there is a likely conflict.
+ ret = 1
+ if av:
+ avobjs = [av.obj_class]
+ else:
+ avobjs = []
+ for obj in itertools.chain(p.obj_classes, avobjs):
+ if obj in objectmodel.implicitly_typed_objects:
+ ret = 0
+ break
+ # "Promote" to a SRC_TYPE as this is the likely usage.
+ # We do this even if the above test fails on purpose
+ # as there is really no sane way to resolve the conflict
+ # here. The caller can take other actions if needed.
+ p.type = refpolicy.SRC_TYPE
+ else:
+ # There is some conflict - no way to resolve it really
+ # so we just leave the first entry and tell the caller
+ # there was a conflict.
+ ret = 1
+ else:
+ p = Param()
+ p.name = name
+ p.type = type
+ params[p.name] = p
+
+ if av:
+ p.obj_classes.add(av.obj_class)
+ return ret
+
+
+
+def av_extract_params(av, params):
+ """Extract the paramaters from an access vector.
+
+ Extract the paramaters (in the form $N) from an access
+ vector, storing them as Param objects in a dictionary.
+ Some attempt is made at resolving conflicts with other
+ entries in the dict, but if an unresolvable conflict is
+ found it is reported to the caller.
+
+ The goal here is to figure out how interface paramaters are
+ actually used in the interface - e.g., that $1 is a domain used as
+ a SRC_TYPE. In general an interface will look like this:
+
+ interface(`foo', `
+ allow $1 foo : file read;
+ ')
+
+ This is simple to figure out - $1 is a SRC_TYPE. A few interfaces
+ are more complex, for example:
+
+ interface(`foo_trans',`
+ domain_auto_trans($1,fingerd_exec_t,fingerd_t)
+
+ allow $1 fingerd_t:fd use;
+ allow fingerd_t $1:fd use;
+ allow fingerd_t $1:fifo_file rw_file_perms;
+ allow fingerd_t $1:process sigchld;
+ ')
+
+    Here the usage seems ambiguous, but it is not. $1 is still a domain
+ and therefore should be returned as a SRC_TYPE.
+
+ Returns:
+ 0 - success
+ 1 - conflict found
+ """
+    ret = 0
+ if access.is_idparam(av.src_type):
+ if __param_insert(av.src_type, refpolicy.SRC_TYPE, av, params) == 1:
+ ret = 1
+
+ if access.is_idparam(av.tgt_type):
+ if __param_insert(av.tgt_type, refpolicy.TGT_TYPE, av, params) == 1:
+ ret = 1
+
+ if access.is_idparam(av.obj_class):
+ if __param_insert(av.obj_class, refpolicy.OBJ_CLASS, av, params) == 1:
+ ret = 1
+
+ for perm in av.perms:
+ if access.is_idparam(perm):
+            if __param_insert(perm, refpolicy.PERM, av, params) == 1:
+ ret = 1
+
+ return ret
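+
+# Sketch of the expected behavior (illustrative; AccessVector accepts a
+# [src, tgt, obj_class, perms...] list, the same format that
+# InterfaceSet.from_file reads below):
+#
+#   params = {}
+#   av = access.AccessVector(["$1", "foo_t", "file", "read"])
+#   av_extract_params(av, params)
+#   assert params["$1"].type == refpolicy.SRC_TYPE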
+
+def role_extract_params(role, params):
+ if access.is_idparam(role.role):
+ return __param_insert(role.role, refpolicy.ROLE, None, params)
+
+def type_rule_extract_params(rule, params):
+ def extract_from_set(set, type):
+ ret = 0
+ for x in set:
+ if access.is_idparam(x):
+ if __param_insert(x, type, None, params):
+ ret = 1
+ return ret
+
+ ret = 0
+ if extract_from_set(rule.src_types, refpolicy.SRC_TYPE):
+ ret = 1
+
+ if extract_from_set(rule.tgt_types, refpolicy.TGT_TYPE):
+ ret = 1
+
+ if extract_from_set(rule.obj_classes, refpolicy.OBJ_CLASS):
+ ret = 1
+
+ if access.is_idparam(rule.dest_type):
+ if __param_insert(rule.dest_type, refpolicy.DEST_TYPE, None, params):
+ ret = 1
+
+ return ret
+
+def ifcall_extract_params(ifcall, params):
+ ret = 0
+ for arg in ifcall.args:
+ if access.is_idparam(arg):
+ # Assume interface arguments are source types. Fairly safe
+ # assumption for most interfaces
+ if __param_insert(arg, refpolicy.SRC_TYPE, None, params):
+ ret = 1
+
+ return ret
+
+class AttributeVector:
+ def __init__(self):
+ self.name = ""
+ self.access = access.AccessVectorSet()
+
+ def add_av(self, av):
+ self.access.add_av(av)
+
+class AttributeSet:
+ def __init__(self):
+ self.attributes = { }
+
+ def add_attr(self, attr):
+ self.attributes[attr.name] = attr
+
+ def from_file(self, fd):
+ def parse_attr(line):
+ fields = line[1:-1].split()
+ if len(fields) != 2 or fields[0] != "Attribute":
+ raise SyntaxError("Syntax error Attribute statement %s" % line)
+ a = AttributeVector()
+ a.name = fields[1]
+
+ return a
+
+ a = None
+ for line in fd:
+ line = line[:-1]
+ if line[0] == "[":
+ if a:
+ self.add_attr(a)
+ a = parse_attr(line)
+ elif a:
+ l = line.split(",")
+ av = access.AccessVector(l)
+ a.add_av(av)
+ if a:
+ self.add_attr(a)
+
+class InterfaceVector:
+ def __init__(self, interface=None, attributes={}):
+        # Enabled is a loose concept currently - we essentially do not
+        # enable interfaces that we cannot handle yet. See
+        # InterfaceSet.add_ifv for more information.
+ self.enabled = True
+ self.name = ""
+ # The access that is enabled by this interface - eventually
+ # this will include indirect access from typeattribute
+ # statements.
+ self.access = access.AccessVectorSet()
+        # Parameters are stored in a dictionary (key: param name
+ # value: Param object).
+ self.params = { }
+ if interface:
+ self.from_interface(interface, attributes)
+ self.expanded = False
+
+ def from_interface(self, interface, attributes={}):
+ self.name = interface.name
+
+ # Add allow rules
+ for avrule in interface.avrules():
+ if avrule.rule_type != refpolicy.AVRule.ALLOW:
+ continue
+ # Handle some policy bugs
+ if "dontaudit" in interface.name:
+ #print "allow rule in interface: %s" % interface
+ continue
+ avs = access.avrule_to_access_vectors(avrule)
+ for av in avs:
+ self.add_av(av)
+
+ # Add typeattribute access
+ if attributes:
+ for typeattribute in interface.typeattributes():
+ for attr in typeattribute.attributes:
+ if not attributes.attributes.has_key(attr):
+ # print "missing attribute " + attr
+ continue
+ attr_vec = attributes.attributes[attr]
+ for a in attr_vec.access:
+ av = copy.copy(a)
+ if av.src_type == attr_vec.name:
+ av.src_type = typeattribute.type
+ if av.tgt_type == attr_vec.name:
+ av.tgt_type = typeattribute.type
+ self.add_av(av)
+
+
+        # Extract parameters from roles
+ for role in interface.roles():
+ if role_extract_params(role, self.params):
+ pass
+ #print "found conflicting role param %s for interface %s" % \
+ # (role.name, interface.name)
+        # Extract parameters from type rules
+ for rule in interface.typerules():
+ if type_rule_extract_params(rule, self.params):
+ pass
+ #print "found conflicting params in rule %s in interface %s" % \
+ # (str(rule), interface.name)
+
+ for ifcall in interface.interface_calls():
+ if ifcall_extract_params(ifcall, self.params):
+ pass
+ #print "found conflicting params in ifcall %s in interface %s" % \
+ # (str(ifcall), interface.name)
+
+
+ def add_av(self, av):
+ if av_extract_params(av, self.params) == 1:
+ pass
+ #print "found conflicting perms [%s]" % str(av)
+ self.access.add_av(av)
+
+ def to_string(self):
+ s = []
+ s.append("[InterfaceVector %s]" % self.name)
+ for av in self.access:
+ s.append(str(av))
+ return "\n".join(s)
+
+ def __str__(self):
+ return self.__repr__()
+
+ def __repr__(self):
+ return "<InterfaceVector %s:%s>" % (self.name, self.enabled)
+
+
+class InterfaceSet:
+ def __init__(self, output=None):
+ self.interfaces = { }
+ self.tgt_type_map = { }
+ self.tgt_type_all = []
+ self.output = output
+
+ def o(self, str):
+ if self.output:
+ self.output.write(str + "\n")
+
+ def to_file(self, fd):
+ for iv in self.interfaces.values():
+ fd.write("[InterfaceVector %s " % iv.name)
+ for param in iv.params.values():
+ fd.write("%s:%s " % (param.name, refpolicy.field_to_str[param.type]))
+ fd.write("]\n")
+ avl = iv.access.to_list()
+ for av in avl:
+ fd.write(",".join(av))
+ fd.write("\n")
+
+ def from_file(self, fd):
+ def parse_ifv(line):
+ fields = line[1:-1].split()
+ if len(fields) < 2 or fields[0] != "InterfaceVector":
+ raise SyntaxError("Syntax error InterfaceVector statement %s" % line)
+ ifv = InterfaceVector()
+ ifv.name = fields[1]
+ if len(fields) == 2:
+                return ifv
+ for field in fields[2:]:
+ p = field.split(":")
+ if len(p) != 2:
+ raise SyntaxError("Invalid param in InterfaceVector statement %s" % line)
+ param = Param()
+ param.name = p[0]
+ param.type = refpolicy.str_to_field[p[1]]
+ ifv.params[param.name] = param
+ return ifv
+
+ ifv = None
+ for line in fd:
+ line = line[:-1]
+ if line[0] == "[":
+ if ifv:
+ self.add_ifv(ifv)
+ ifv = parse_ifv(line)
+ elif ifv:
+ l = line.split(",")
+ av = access.AccessVector(l)
+ ifv.add_av(av)
+ if ifv:
+ self.add_ifv(ifv)
+
+ self.index()
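+
+    # The serialized form written by to_file() and read back by from_file()
+    # looks roughly like this (an illustrative sketch; the param type token
+    # comes from refpolicy.field_to_str):
+    #
+    #   [InterfaceVector foo_read_config $1:<type token> ]
+    #   $1,foo_conf_t,file,read,getattr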
+
+ def add_ifv(self, ifv):
+ self.interfaces[ifv.name] = ifv
+
+ def index(self):
+ for ifv in self.interfaces.values():
+ tgt_types = set()
+ for av in ifv.access:
+ if access.is_idparam(av.tgt_type):
+ self.tgt_type_all.append(ifv)
+ tgt_types = set()
+ break
+ tgt_types.add(av.tgt_type)
+
+ for type in tgt_types:
+ l = self.tgt_type_map.setdefault(type, [])
+ l.append(ifv)
+
+ def add(self, interface, attributes={}):
+ ifv = InterfaceVector(interface, attributes)
+ self.add_ifv(ifv)
+
+ def add_headers(self, headers, output=None, attributes={}):
+ for i in itertools.chain(headers.interfaces(), headers.templates()):
+ self.add(i, attributes)
+
+ self.expand_ifcalls(headers)
+ self.index()
+
+ def map_param(self, id, ifcall):
+ if access.is_idparam(id):
+ num = int(id[1:])
+ if num > len(ifcall.args):
+ # Tell caller to drop this because it must have
+ # been generated from an optional param.
+ return None
+ else:
+ arg = ifcall.args[num - 1]
+ if isinstance(arg, list):
+ return arg
+ else:
+ return [arg]
+ else:
+ return [id]
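+
+    # For illustration (hypothetical values): with ifcall.args == ["foo_t"],
+    # map_param("$1", ifcall) returns ["foo_t"]; map_param("$2", ifcall)
+    # returns None (an optional param that was not supplied); and a concrete
+    # id such as "bar_t" passes through unchanged as ["bar_t"].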
+
+ def map_add_av(self, ifv, av, ifcall):
+ src_types = self.map_param(av.src_type, ifcall)
+ if src_types is None:
+ return
+
+ tgt_types = self.map_param(av.tgt_type, ifcall)
+ if tgt_types is None:
+ return
+
+ obj_classes = self.map_param(av.obj_class, ifcall)
+ if obj_classes is None:
+ return
+
+ new_perms = refpolicy.IdSet()
+ for perm in av.perms:
+ p = self.map_param(perm, ifcall)
+ if p is None:
+ continue
+ else:
+ new_perms.update(p)
+ if len(new_perms) == 0:
+ return
+
+ for src_type in src_types:
+ for tgt_type in tgt_types:
+ for obj_class in obj_classes:
+ ifv.access.add(src_type, tgt_type, obj_class, new_perms)
+
+ def do_expand_ifcalls(self, interface, if_by_name):
+ # Descend an interface call tree adding the access
+ # from each interface. This is a depth first walk
+ # of the tree.
+
+ stack = [(interface, None)]
+ ifv = self.interfaces[interface.name]
+ ifv.expanded = True
+
+ while len(stack) > 0:
+ cur, cur_ifcall = stack.pop(-1)
+
+ cur_ifv = self.interfaces[cur.name]
+ if cur != interface:
+
+ for av in cur_ifv.access:
+ self.map_add_av(ifv, av, cur_ifcall)
+
+ # If we have already fully expanded this interface
+ # there is no reason to descend further.
+ if cur_ifv.expanded:
+ continue
+
+ for ifcall in cur.interface_calls():
+ if ifcall.ifname == interface.name:
+ self.o(_("Found circular interface class"))
+ return
+ try:
+ newif = if_by_name[ifcall.ifname]
+ except KeyError:
+ self.o(_("Missing interface definition for %s" % ifcall.ifname))
+ continue
+
+ stack.append((newif, ifcall))
+
+
+ def expand_ifcalls(self, headers):
+ # Create a map of interface names to interfaces -
+ # this mirrors the interface vector map we already
+ # have.
+ if_by_name = { }
+
+ for i in itertools.chain(headers.interfaces(), headers.templates()):
+ if_by_name[i.name] = i
+
+
+ for interface in itertools.chain(headers.interfaces(), headers.templates()):
+ self.do_expand_ifcalls(interface, if_by_name)
+
diff --git a/lib/python2.7/site-packages/sepolgen/lex.py b/lib/python2.7/site-packages/sepolgen/lex.py
new file mode 100644
index 0000000..c149366
--- /dev/null
+++ b/lib/python2.7/site-packages/sepolgen/lex.py
@@ -0,0 +1,866 @@
+#-----------------------------------------------------------------------------
+# ply: lex.py
+#
+# Author: David M. Beazley (dave@dabeaz.com)
+#
+# Copyright (C) 2001-2006, David M. Beazley
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+# See the file COPYING for a complete copy of the LGPL.
+#-----------------------------------------------------------------------------
+
+__version__ = "2.2"
+
+import re, sys, types
+
+# Regular expression used to match valid token names
+_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
+
+# Available instance types. This is used when lexers are defined by a class.
+# It's a little funky because I want to preserve backwards compatibility
+# with Python 2.0 where types.ObjectType is undefined.
+
+try:
+ _INSTANCETYPE = (types.InstanceType, types.ObjectType)
+except AttributeError:
+ _INSTANCETYPE = types.InstanceType
+ class object: pass # Note: needed if no new-style classes present
+
+# Exception thrown when invalid token encountered and no default error
+# handler is defined.
+class LexError(Exception):
+ def __init__(self,message,s):
+ self.args = (message,)
+ self.text = s
+
+# Token class
+class LexToken(object):
+ def __str__(self):
+ return "LexToken(%s,%r,%d,%d)" % (self.type,self.value,self.lineno,self.lexpos)
+ def __repr__(self):
+ return str(self)
+ def skip(self,n):
+ self.lexer.skip(n)
+
+# -----------------------------------------------------------------------------
+# Lexer class
+#
+# This class encapsulates all of the methods and data associated with a lexer.
+#
+# input() - Store a new string in the lexer
+# token() - Get the next token
+# -----------------------------------------------------------------------------
+
+class Lexer:
+ def __init__(self):
+ self.lexre = None # Master regular expression. This is a list of
+ # tuples (re,findex) where re is a compiled
+ # regular expression and findex is a list
+ # mapping regex group numbers to rules
+ self.lexretext = None # Current regular expression strings
+ self.lexstatere = {} # Dictionary mapping lexer states to master regexs
+ self.lexstateretext = {} # Dictionary mapping lexer states to regex strings
+ self.lexstate = "INITIAL" # Current lexer state
+ self.lexstatestack = [] # Stack of lexer states
+ self.lexstateinfo = None # State information
+ self.lexstateignore = {} # Dictionary of ignored characters for each state
+ self.lexstateerrorf = {} # Dictionary of error functions for each state
+ self.lexreflags = 0 # Optional re compile flags
+ self.lexdata = None # Actual input data (as a string)
+ self.lexpos = 0 # Current position in input text
+ self.lexlen = 0 # Length of the input text
+ self.lexerrorf = None # Error rule (if any)
+ self.lextokens = None # List of valid tokens
+ self.lexignore = "" # Ignored characters
+ self.lexliterals = "" # Literal characters that can be passed through
+ self.lexmodule = None # Module
+ self.lineno = 1 # Current line number
+ self.lexdebug = 0 # Debugging mode
+ self.lexoptimize = 0 # Optimized mode
+
+ def clone(self,object=None):
+ c = Lexer()
+ c.lexstatere = self.lexstatere
+ c.lexstateinfo = self.lexstateinfo
+ c.lexstateretext = self.lexstateretext
+ c.lexstate = self.lexstate
+ c.lexstatestack = self.lexstatestack
+ c.lexstateignore = self.lexstateignore
+ c.lexstateerrorf = self.lexstateerrorf
+ c.lexreflags = self.lexreflags
+ c.lexdata = self.lexdata
+ c.lexpos = self.lexpos
+ c.lexlen = self.lexlen
+ c.lextokens = self.lextokens
+ c.lexdebug = self.lexdebug
+ c.lineno = self.lineno
+ c.lexoptimize = self.lexoptimize
+ c.lexliterals = self.lexliterals
+ c.lexmodule = self.lexmodule
+
+ # If the object parameter has been supplied, it means we are attaching the
+ # lexer to a new object. In this case, we have to rebind all methods in
+ # the lexstatere and lexstateerrorf tables.
+
+ if object:
+ newtab = { }
+ for key, ritem in self.lexstatere.items():
+ newre = []
+ for cre, findex in ritem:
+ newfindex = []
+ for f in findex:
+ if not f or not f[0]:
+ newfindex.append(f)
+ continue
+ newfindex.append((getattr(object,f[0].__name__),f[1]))
+ newre.append((cre,newfindex))
+ newtab[key] = newre
+ c.lexstatere = newtab
+ c.lexstateerrorf = { }
+ for key, ef in self.lexstateerrorf.items():
+ c.lexstateerrorf[key] = getattr(object,ef.__name__)
+ c.lexmodule = object
+
+ # Set up other attributes
+ c.begin(c.lexstate)
+ return c
+
+ # ------------------------------------------------------------
+ # writetab() - Write lexer information to a table file
+ # ------------------------------------------------------------
+ def writetab(self,tabfile):
+ tf = open(tabfile+".py","w")
+ tf.write("# %s.py. This file automatically created by PLY (version %s). Don't edit!\n" % (tabfile,__version__))
+ tf.write("_lextokens = %s\n" % repr(self.lextokens))
+ tf.write("_lexreflags = %s\n" % repr(self.lexreflags))
+ tf.write("_lexliterals = %s\n" % repr(self.lexliterals))
+ tf.write("_lexstateinfo = %s\n" % repr(self.lexstateinfo))
+
+ tabre = { }
+ for key, lre in self.lexstatere.items():
+ titem = []
+ for i in range(len(lre)):
+ titem.append((self.lexstateretext[key][i],_funcs_to_names(lre[i][1])))
+ tabre[key] = titem
+
+ tf.write("_lexstatere = %s\n" % repr(tabre))
+ tf.write("_lexstateignore = %s\n" % repr(self.lexstateignore))
+
+ taberr = { }
+ for key, ef in self.lexstateerrorf.items():
+ if ef:
+ taberr[key] = ef.__name__
+ else:
+ taberr[key] = None
+ tf.write("_lexstateerrorf = %s\n" % repr(taberr))
+ tf.close()
+
+ # ------------------------------------------------------------
+ # readtab() - Read lexer information from a tab file
+ # ------------------------------------------------------------
+ def readtab(self,tabfile,fdict):
+ exec "import %s as lextab" % tabfile
+ self.lextokens = lextab._lextokens
+ self.lexreflags = lextab._lexreflags
+ self.lexliterals = lextab._lexliterals
+ self.lexstateinfo = lextab._lexstateinfo
+ self.lexstateignore = lextab._lexstateignore
+ self.lexstatere = { }
+ self.lexstateretext = { }
+ for key,lre in lextab._lexstatere.items():
+ titem = []
+ txtitem = []
+ for i in range(len(lre)):
+ titem.append((re.compile(lre[i][0],lextab._lexreflags),_names_to_funcs(lre[i][1],fdict)))
+ txtitem.append(lre[i][0])
+ self.lexstatere[key] = titem
+ self.lexstateretext[key] = txtitem
+ self.lexstateerrorf = { }
+ for key,ef in lextab._lexstateerrorf.items():
+ self.lexstateerrorf[key] = fdict[ef]
+ self.begin('INITIAL')
+
+ # ------------------------------------------------------------
+ # input() - Push a new string into the lexer
+ # ------------------------------------------------------------
+ def input(self,s):
+ if not (isinstance(s,types.StringType) or isinstance(s,types.UnicodeType)):
+ raise ValueError, "Expected a string"
+ self.lexdata = s
+ self.lexpos = 0
+ self.lexlen = len(s)
+
+ # ------------------------------------------------------------
+ # begin() - Changes the lexing state
+ # ------------------------------------------------------------
+ def begin(self,state):
+ if not self.lexstatere.has_key(state):
+ raise ValueError, "Undefined state"
+ self.lexre = self.lexstatere[state]
+ self.lexretext = self.lexstateretext[state]
+ self.lexignore = self.lexstateignore.get(state,"")
+ self.lexerrorf = self.lexstateerrorf.get(state,None)
+ self.lexstate = state
+
+ # ------------------------------------------------------------
+ # push_state() - Changes the lexing state and saves old on stack
+ # ------------------------------------------------------------
+ def push_state(self,state):
+ self.lexstatestack.append(self.lexstate)
+ self.begin(state)
+
+ # ------------------------------------------------------------
+ # pop_state() - Restores the previous state
+ # ------------------------------------------------------------
+ def pop_state(self):
+ self.begin(self.lexstatestack.pop())
+
+ # ------------------------------------------------------------
+ # current_state() - Returns the current lexing state
+ # ------------------------------------------------------------
+ def current_state(self):
+ return self.lexstate
+
+ # ------------------------------------------------------------
+ # skip() - Skip ahead n characters
+ # ------------------------------------------------------------
+ def skip(self,n):
+ self.lexpos += n
+
+ # ------------------------------------------------------------
+ # token() - Return the next token from the Lexer
+ #
+ # Note: This function has been carefully implemented to be as fast
+ # as possible. Don't make changes unless you really know what
+ # you are doing
+ # ------------------------------------------------------------
+ def token(self):
+ # Make local copies of frequently referenced attributes
+ lexpos = self.lexpos
+ lexlen = self.lexlen
+ lexignore = self.lexignore
+ lexdata = self.lexdata
+
+ while lexpos < lexlen:
+ # This code provides some short-circuit code for whitespace, tabs, and other ignored characters
+ if lexdata[lexpos] in lexignore:
+ lexpos += 1
+ continue
+
+ # Look for a regular expression match
+ for lexre,lexindexfunc in self.lexre:
+ m = lexre.match(lexdata,lexpos)
+ if not m: continue
+
+ # Set last match in lexer so that rules can access it if they want
+ self.lexmatch = m
+
+ # Create a token for return
+ tok = LexToken()
+ tok.value = m.group()
+ tok.lineno = self.lineno
+ tok.lexpos = lexpos
+ tok.lexer = self
+
+ lexpos = m.end()
+ i = m.lastindex
+ func,tok.type = lexindexfunc[i]
+ self.lexpos = lexpos
+
+ if not func:
+ # If no token type was set, it's an ignored token
+ if tok.type: return tok
+ break
+
+ # if func not callable, it means it's an ignored token
+ if not callable(func):
+ break
+
+ # If token is processed by a function, call it
+ newtok = func(tok)
+
+                # Every function must return a token; if it returns nothing, we just move to the next token
+ if not newtok:
+ lexpos = self.lexpos # This is here in case user has updated lexpos.
+ break
+
+ # Verify type of the token. If not in the token map, raise an error
+ if not self.lexoptimize:
+ if not self.lextokens.has_key(newtok.type):
+ raise LexError, ("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
+ func.func_code.co_filename, func.func_code.co_firstlineno,
+ func.__name__, newtok.type),lexdata[lexpos:])
+
+ return newtok
+ else:
+ # No match, see if in literals
+ if lexdata[lexpos] in self.lexliterals:
+ tok = LexToken()
+ tok.value = lexdata[lexpos]
+ tok.lineno = self.lineno
+ tok.lexer = self
+ tok.type = tok.value
+ tok.lexpos = lexpos
+ self.lexpos = lexpos + 1
+ return tok
+
+ # No match. Call t_error() if defined.
+ if self.lexerrorf:
+ tok = LexToken()
+ tok.value = self.lexdata[lexpos:]
+ tok.lineno = self.lineno
+ tok.type = "error"
+ tok.lexer = self
+ tok.lexpos = lexpos
+ self.lexpos = lexpos
+ newtok = self.lexerrorf(tok)
+ if lexpos == self.lexpos:
+ # Error method didn't change text position at all. This is an error.
+ raise LexError, ("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
+ lexpos = self.lexpos
+ if not newtok: continue
+ return newtok
+
+ self.lexpos = lexpos
+ raise LexError, ("Illegal character '%s' at index %d" % (lexdata[lexpos],lexpos), lexdata[lexpos:])
+
+ self.lexpos = lexpos + 1
+ if self.lexdata is None:
+ raise RuntimeError, "No input string given with input()"
+ return None
+
+# -----------------------------------------------------------------------------
+# _validate_file()
+#
+# This checks to see if there are duplicated t_rulename() functions or strings
+# in the parser input file. This is done using a simple regular expression
+# match on each line in the filename.
+# -----------------------------------------------------------------------------
+
+def _validate_file(filename):
+ import os.path
+ base,ext = os.path.splitext(filename)
+ if ext != '.py': return 1 # No idea what the file is. Return OK
+
+ try:
+ f = open(filename)
+ lines = f.readlines()
+ f.close()
+ except IOError:
+ return 1 # Oh well
+
+ fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
+ sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
+ counthash = { }
+ linen = 1
+ noerror = 1
+ for l in lines:
+ m = fre.match(l)
+ if not m:
+ m = sre.match(l)
+ if m:
+ name = m.group(1)
+ prev = counthash.get(name)
+ if not prev:
+ counthash[name] = linen
+ else:
+ print "%s:%d: Rule %s redefined. Previously defined on line %d" % (filename,linen,name,prev)
+ noerror = 0
+ linen += 1
+ return noerror
+
+# -----------------------------------------------------------------------------
+# _funcs_to_names()
+#
+# Given a list of regular expression functions, this converts it to a list
+# suitable for output to a table file
+# -----------------------------------------------------------------------------
+
+def _funcs_to_names(funclist):
+ result = []
+ for f in funclist:
+ if f and f[0]:
+ result.append((f[0].__name__,f[1]))
+ else:
+ result.append(f)
+ return result
+
+# -----------------------------------------------------------------------------
+# _names_to_funcs()
+#
+# Given a list of regular expression function names, this converts it back to
+# functions.
+# -----------------------------------------------------------------------------
+
+def _names_to_funcs(namelist,fdict):
+ result = []
+ for n in namelist:
+ if n and n[0]:
+ result.append((fdict[n[0]],n[1]))
+ else:
+ result.append(n)
+ return result
+
+# -----------------------------------------------------------------------------
+# _form_master_re()
+#
+# This function takes a list of all of the regex components and attempts to
+# form the master regular expression. Given limitations in the Python re
+# module, it may be necessary to break the master regex into separate expressions.
+# -----------------------------------------------------------------------------
+
+def _form_master_re(relist,reflags,ldict):
+ if not relist: return []
+ regex = "|".join(relist)
+ try:
+ lexre = re.compile(regex,re.VERBOSE | reflags)
+
+ # Build the index to function map for the matching engine
+ lexindexfunc = [ None ] * (max(lexre.groupindex.values())+1)
+ for f,i in lexre.groupindex.items():
+ handle = ldict.get(f,None)
+ if type(handle) in (types.FunctionType, types.MethodType):
+ lexindexfunc[i] = (handle,handle.__name__[2:])
+ elif handle is not None:
+ # If rule was specified as a string, we build an anonymous
+ # callback function to carry out the action
+ if f.find("ignore_") > 0:
+ lexindexfunc[i] = (None,None)
+ print "IGNORE", f
+ else:
+ lexindexfunc[i] = (None, f[2:])
+
+ return [(lexre,lexindexfunc)],[regex]
+ except Exception,e:
+ m = int(len(relist)/2)
+ if m == 0: m = 1
+ llist, lre = _form_master_re(relist[:m],reflags,ldict)
+ rlist, rre = _form_master_re(relist[m:],reflags,ldict)
+ return llist+rlist, lre+rre
+
+# -----------------------------------------------------------------------------
+# def _statetoken(s,names)
+#
+# Given a declaration name s of the form "t_" and a dictionary whose keys are
+# state names, this function returns a tuple (states,tokenname) where states
+# is a tuple of state names and tokenname is the name of the token. For example,
+# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
+# -----------------------------------------------------------------------------
+
+def _statetoken(s,names):
+ nonstate = 1
+ parts = s.split("_")
+ for i in range(1,len(parts)):
+ if not names.has_key(parts[i]) and parts[i] != 'ANY': break
+ if i > 1:
+ states = tuple(parts[1:i])
+ else:
+ states = ('INITIAL',)
+
+ if 'ANY' in states:
+ states = tuple(names.keys())
+
+ tokenname = "_".join(parts[i:])
+ return (states,tokenname)
+
+# -----------------------------------------------------------------------------
+# lex(module)
+#
+# Build all of the regular expression rules from definitions in the supplied module
+# -----------------------------------------------------------------------------
+def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0):
+ global lexer
+ ldict = None
+ stateinfo = { 'INITIAL' : 'inclusive'}
+ error = 0
+ files = { }
+ lexobj = Lexer()
+ lexobj.lexdebug = debug
+ lexobj.lexoptimize = optimize
+ global token,input
+
+ if nowarn: warn = 0
+ else: warn = 1
+
+ if object: module = object
+
+ if module:
+ # User supplied a module object.
+ if isinstance(module, types.ModuleType):
+ ldict = module.__dict__
+ elif isinstance(module, _INSTANCETYPE):
+ _items = [(k,getattr(module,k)) for k in dir(module)]
+ ldict = { }
+ for (i,v) in _items:
+ ldict[i] = v
+ else:
+ raise ValueError,"Expected a module or instance"
+ lexobj.lexmodule = module
+
+ else:
+ # No module given. We might be able to get information from the caller.
+ try:
+ raise RuntimeError
+ except RuntimeError:
+ e,b,t = sys.exc_info()
+ f = t.tb_frame
+ f = f.f_back # Walk out to our calling function
+ ldict = f.f_globals # Grab its globals dictionary
+
+ if optimize and lextab:
+ try:
+ lexobj.readtab(lextab,ldict)
+ token = lexobj.token
+ input = lexobj.input
+ lexer = lexobj
+ return lexobj
+
+ except ImportError:
+ pass
+
+ # Get the tokens, states, and literals variables (if any)
+ if (module and isinstance(module,_INSTANCETYPE)):
+ tokens = getattr(module,"tokens",None)
+ states = getattr(module,"states",None)
+ literals = getattr(module,"literals","")
+ else:
+ tokens = ldict.get("tokens",None)
+ states = ldict.get("states",None)
+ literals = ldict.get("literals","")
+
+ if not tokens:
+ raise SyntaxError,"lex: module does not define 'tokens'"
+ if not (isinstance(tokens,types.ListType) or isinstance(tokens,types.TupleType)):
+ raise SyntaxError,"lex: tokens must be a list or tuple."
+
+ # Build a dictionary of valid token names
+ lexobj.lextokens = { }
+ if not optimize:
+ for n in tokens:
+ if not _is_identifier.match(n):
+ print "lex: Bad token name '%s'" % n
+ error = 1
+ if warn and lexobj.lextokens.has_key(n):
+ print "lex: Warning. Token '%s' multiply defined." % n
+ lexobj.lextokens[n] = None
+ else:
+ for n in tokens: lexobj.lextokens[n] = None
+
+ if debug:
+ print "lex: tokens = '%s'" % lexobj.lextokens.keys()
+
+ try:
+ for c in literals:
+ if not (isinstance(c,types.StringType) or isinstance(c,types.UnicodeType)) or len(c) > 1:
+ print "lex: Invalid literal %s. Must be a single character" % repr(c)
+ error = 1
+ continue
+
+ except TypeError:
+ print "lex: Invalid literals specification. literals must be a sequence of characters."
+ error = 1
+
+ lexobj.lexliterals = literals
+
+ # Build statemap
+ if states:
+ if not (isinstance(states,types.TupleType) or isinstance(states,types.ListType)):
+ print "lex: states must be defined as a tuple or list."
+ error = 1
+ else:
+ for s in states:
+ if not isinstance(s,types.TupleType) or len(s) != 2:
+ print "lex: invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')" % repr(s)
+ error = 1
+ continue
+ name, statetype = s
+ if not isinstance(name,types.StringType):
+ print "lex: state name %s must be a string" % repr(name)
+ error = 1
+ continue
+ if not (statetype == 'inclusive' or statetype == 'exclusive'):
+ print "lex: state type for state %s must be 'inclusive' or 'exclusive'" % name
+ error = 1
+ continue
+ if stateinfo.has_key(name):
+ print "lex: state '%s' already defined." % name
+ error = 1
+ continue
+ stateinfo[name] = statetype
+
+ # Get a list of symbols with the t_ or s_ prefix
+ tsymbols = [f for f in ldict.keys() if f[:2] == 't_' ]
+
+ # Now build up a list of functions and a list of strings
+
+ funcsym = { } # Symbols defined as functions
+ strsym = { } # Symbols defined as strings
+ toknames = { } # Mapping of symbols to token names
+
+ for s in stateinfo.keys():
+ funcsym[s] = []
+ strsym[s] = []
+
+ ignore = { } # Ignore strings by state
+ errorf = { } # Error functions by state
+
+ if len(tsymbols) == 0:
+ raise SyntaxError,"lex: no rules of the form t_rulename are defined."
+
+ for f in tsymbols:
+ t = ldict[f]
+ states, tokname = _statetoken(f,stateinfo)
+ toknames[f] = tokname
+
+ if callable(t):
+ for s in states: funcsym[s].append((f,t))
+ elif (isinstance(t, types.StringType) or isinstance(t,types.UnicodeType)):
+ for s in states: strsym[s].append((f,t))
+ else:
+ print "lex: %s not defined as a function or string" % f
+ error = 1
+
+ # Sort the functions by line number
+ for f in funcsym.values():
+ f.sort(lambda x,y: cmp(x[1].func_code.co_firstlineno,y[1].func_code.co_firstlineno))
+
+ # Sort the strings by regular expression length
+ for s in strsym.values():
+ s.sort(lambda x,y: (len(x[1]) < len(y[1])) - (len(x[1]) > len(y[1])))
+
+ regexs = { }
+
+ # Build the master regular expressions
+ for state in stateinfo.keys():
+ regex_list = []
+
+ # Add rules defined by functions first
+ for fname, f in funcsym[state]:
+ line = f.func_code.co_firstlineno
+ file = f.func_code.co_filename
+ files[file] = None
+ tokname = toknames[fname]
+
+ ismethod = isinstance(f, types.MethodType)
+
+ if not optimize:
+ nargs = f.func_code.co_argcount
+ if ismethod:
+ reqargs = 2
+ else:
+ reqargs = 1
+ if nargs > reqargs:
+ print "%s:%d: Rule '%s' has too many arguments." % (file,line,f.__name__)
+ error = 1
+ continue
+
+ if nargs < reqargs:
+ print "%s:%d: Rule '%s' requires an argument." % (file,line,f.__name__)
+ error = 1
+ continue
+
+ if tokname == 'ignore':
+ print "%s:%d: Rule '%s' must be defined as a string." % (file,line,f.__name__)
+ error = 1
+ continue
+
+ if tokname == 'error':
+ errorf[state] = f
+ continue
+
+ if f.__doc__:
+ if not optimize:
+ try:
+ c = re.compile("(?P<%s>%s)" % (f.__name__,f.__doc__), re.VERBOSE | reflags)
+ if c.match(""):
+ print "%s:%d: Regular expression for rule '%s' matches empty string." % (file,line,f.__name__)
+ error = 1
+ continue
+ except re.error,e:
+ print "%s:%d: Invalid regular expression for rule '%s'. %s" % (file,line,f.__name__,e)
+ if '#' in f.__doc__:
+ print "%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'." % (file,line, f.__name__)
+ error = 1
+ continue
+
+ if debug:
+ print "lex: Adding rule %s -> '%s' (state '%s')" % (f.__name__,f.__doc__, state)
+
+ # Okay. The regular expression seemed okay. Let's append it to the master regular
+ # expression we're building
+
+ regex_list.append("(?P<%s>%s)" % (f.__name__,f.__doc__))
+ else:
+ print "%s:%d: No regular expression defined for rule '%s'" % (file,line,f.__name__)
+
+ # Now add all of the simple rules
+ for name,r in strsym[state]:
+ tokname = toknames[name]
+
+ if tokname == 'ignore':
+ ignore[state] = r
+ continue
+
+ if not optimize:
+ if tokname == 'error':
+ raise SyntaxError,"lex: Rule '%s' must be defined as a function" % name
+
+ if not lexobj.lextokens.has_key(tokname) and tokname.find("ignore_") < 0:
+ print "lex: Rule '%s' defined for an unspecified token %s." % (name,tokname)
+ error = 1
+ continue
+ try:
+ c = re.compile("(?P<%s>%s)" % (name,r),re.VERBOSE | reflags)
+ if (c.match("")):
+ print "lex: Regular expression for rule '%s' matches empty string." % name
+ error = 1
+ continue
+ except re.error,e:
+ print "lex: Invalid regular expression for rule '%s'. %s" % (name,e)
+ if '#' in r:
+ print "lex: Make sure '#' in rule '%s' is escaped with '\\#'." % name
+
+ error = 1
+ continue
+ if debug:
+ print "lex: Adding rule %s -> '%s' (state '%s')" % (name,r,state)
+
+ regex_list.append("(?P<%s>%s)" % (name,r))
+
+ if not regex_list:
+ print "lex: No rules defined for state '%s'" % state
+ error = 1
+
+ regexs[state] = regex_list
+
+
+ if not optimize:
+ for f in files.keys():
+ if not _validate_file(f):
+ error = 1
+
+ if error:
+ raise SyntaxError,"lex: Unable to build lexer."
+
+ # From this point forward, we're reasonably confident that we can build the lexer.
+ # No more errors will be generated, but there might be some warning messages.
+
+ # Build the master regular expressions
+
+ for state in regexs.keys():
+ lexre, re_text = _form_master_re(regexs[state],reflags,ldict)
+ lexobj.lexstatere[state] = lexre
+ lexobj.lexstateretext[state] = re_text
+ if debug:
+ for i in range(len(re_text)):
+ print "lex: state '%s'. regex[%d] = '%s'" % (state, i, re_text[i])
+
+ # For inclusive states, we need to add the INITIAL state
+ for state,type in stateinfo.items():
+ if state != "INITIAL" and type == 'inclusive':
+ lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
+ lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
+
+ lexobj.lexstateinfo = stateinfo
+ lexobj.lexre = lexobj.lexstatere["INITIAL"]
+ lexobj.lexretext = lexobj.lexstateretext["INITIAL"]
+
+ # Set up ignore variables
+ lexobj.lexstateignore = ignore
+ lexobj.lexignore = lexobj.lexstateignore.get("INITIAL","")
+
+ # Set up error functions
+ lexobj.lexstateerrorf = errorf
+ lexobj.lexerrorf = errorf.get("INITIAL",None)
+ if warn and not lexobj.lexerrorf:
+ print "lex: Warning. no t_error rule is defined."
+
+ # Check state information for ignore and error rules
+ for s,stype in stateinfo.items():
+ if stype == 'exclusive':
+ if warn and not errorf.has_key(s):
+ print "lex: Warning. no error rule is defined for exclusive state '%s'" % s
+ if warn and not ignore.has_key(s) and lexobj.lexignore:
+ print "lex: Warning. no ignore rule is defined for exclusive state '%s'" % s
+ elif stype == 'inclusive':
+ if not errorf.has_key(s):
+ errorf[s] = errorf.get("INITIAL",None)
+ if not ignore.has_key(s):
+ ignore[s] = ignore.get("INITIAL","")
+
+
+ # Create global versions of the token() and input() functions
+ token = lexobj.token
+ input = lexobj.input
+ lexer = lexobj
+
+ # If in optimize mode, we write the lextab
+ if lextab and optimize:
+ lexobj.writetab(lextab)
+
+ return lexobj
+
+# -----------------------------------------------------------------------------
+# runmain()
+#
+# This runs the lexer as a main program
+# -----------------------------------------------------------------------------
+
+def runmain(lexer=None,data=None):
+ if not data:
+ try:
+ filename = sys.argv[1]
+ f = open(filename)
+ data = f.read()
+ f.close()
+ except IndexError:
+ print "Reading from standard input (type EOF to end):"
+ data = sys.stdin.read()
+
+ if lexer:
+ _input = lexer.input
+ else:
+ _input = input
+ _input(data)
+ if lexer:
+ _token = lexer.token
+ else:
+ _token = token
+
+ while 1:
+ tok = _token()
+ if not tok: break
+ print "(%s,%r,%d,%d)" % (tok.type, tok.value, tok.lineno,tok.lexpos)
+
+
+# -----------------------------------------------------------------------------
+# @TOKEN(regex)
+#
+# This decorator function can be used to attach a regular expression to a
+# rule function as its docstring, for cases where the docstring cannot be
+# written directly (e.g., when the pattern is built from other strings)
+# -----------------------------------------------------------------------------
+
+def TOKEN(r):
+ def set_doc(f):
+ f.__doc__ = r
+ return f
+ return set_doc
+
+# Alternative spelling of the TOKEN decorator
+Token = TOKEN
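+
+# A minimal usage sketch (illustrative, mirroring the Ply documentation):
+# build a pattern from fragments, then attach it with @TOKEN since a
+# docstring cannot be a computed expression:
+#
+#   digit      = r'([0-9])'
+#   nondigit   = r'([_A-Za-z])'
+#   identifier = r'(' + nondigit + r'(' + digit + r'|' + nondigit + r')*)'
+#
+#   @TOKEN(identifier)
+#   def t_ID(t):
+#       return t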
+
diff --git a/lib/python2.7/site-packages/sepolgen/matching.py b/lib/python2.7/site-packages/sepolgen/matching.py
new file mode 100644
index 0000000..d56dd92
--- /dev/null
+++ b/lib/python2.7/site-packages/sepolgen/matching.py
@@ -0,0 +1,255 @@
+# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
+#
+# Copyright (C) 2006 Red Hat
+# see file 'COPYING' for use and warranty information
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2 only
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+
+"""
+Classes and algorithms for matching requested access to access vectors.
+"""
+
+import access
+import objectmodel
+import itertools
+
+class Match:
+ def __init__(self, interface=None, dist=0):
+ self.interface = interface
+ self.dist = dist
+ self.info_dir_change = False
+
+ def __cmp__(self, other):
+ if self.dist == other.dist:
+ if self.info_dir_change:
+ if other.info_dir_change:
+ return 0
+ else:
+ return 1
+ else:
+ if other.info_dir_change:
+ return -1
+ else:
+ return 0
+ else:
+ if self.dist < other.dist:
+ return -1
+ else:
+ return 1
+
+class MatchList:
+ DEFAULT_THRESHOLD = 150
+ def __init__(self):
+ # Match objects that pass the threshold
+ self.children = []
+ # Match objects over the threshold
+ self.bastards = []
+ self.threshold = self.DEFAULT_THRESHOLD
+ self.allow_info_dir_change = False
+ self.av = None
+
+ def best(self):
+ if len(self.children):
+ return self.children[0]
+ if len(self.bastards):
+ return self.bastards[0]
+ return None
+
+ def __len__(self):
+ # Only return the length of the matches so
+ # that this can be used to test if there is
+ # a match.
+ return len(self.children) + len(self.bastards)
+
+ def __iter__(self):
+ return iter(self.children)
+
+ def all(self):
+ return itertools.chain(self.children, self.bastards)
+
+ def append(self, match):
+ if match.dist <= self.threshold:
+ if not match.info_dir_change or self.allow_info_dir_change:
+ self.children.append(match)
+ else:
+ self.bastards.append(match)
+ else:
+ self.bastards.append(match)
+
+ def sort(self):
+ self.children.sort()
+ self.bastards.sort()
+
+
+class AccessMatcher:
+ def __init__(self, perm_maps=None):
+ self.type_penalty = 10
+ self.obj_penalty = 10
+ if perm_maps:
+ self.perm_maps = perm_maps
+ else:
+ self.perm_maps = objectmodel.PermMappings()
+ # We want a change in the information flow direction
+ # to be a strong penalty - stronger than access to
+ # a few unrelated types.
+ self.info_dir_penalty = 100
+
+ def type_distance(self, a, b):
+ if a == b or access.is_idparam(b):
+ return 0
+ else:
+ return -self.type_penalty
+
+
+ def perm_distance(self, av_req, av_prov):
+ # First check that we have enough perms
+ diff = av_req.perms.difference(av_prov.perms)
+
+ if len(diff) != 0:
+ total = self.perm_maps.getdefault_distance(av_req.obj_class, diff)
+ return -total
+ else:
+ diff = av_prov.perms.difference(av_req.perms)
+ return self.perm_maps.getdefault_distance(av_req.obj_class, diff)
+
+ def av_distance(self, req, prov):
+ """Determine the 'distance' between 2 access vectors.
+
+ This function is used to find an access vector that matches
+        a 'required' access. To do this we compute a signed numeric
+ value that indicates how close the req access is to the
+ 'provided' access vector. The closer the value is to 0
+ the closer the match, with 0 being an exact match.
+
+        A value over 0 indicates that the prov access vector provides more
+        access than req (in practice, this means that the source type,
+        target type, and object class are the same and the perms in prov
+        are a superset of those in req).
+
+        A value under 0 indicates that the prov av provides less - or
+        unrelated - access than req. A different type or object class
+        will result in a very low value.
+
+ The values other than 0 should only be interpreted relative to
+ one another - they have no exact meaning and are likely to
+ change.
+
+ Params:
+ req - [AccessVector] The access that is required. This is the
+ access being matched.
+ prov - [AccessVector] The access provided. This is the potential
+ match that is being evaluated for req.
+ Returns:
+          0  : Exact match between the access vectors.
+
+ < 0 : The prov av does not provide all of the access in req.
+                A smaller value indicates that the access is further from matching.
+
+ > 0 : The prov av provides more access than req. The larger
+ the value the more access over req.
+ """
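+        # Illustrative example (hypothetical types): matching a request
+        # for {read} against a provided av with the same source, target,
+        # and class but perms {read write} yields a positive distance
+        # (extra access); a provided av with a different target type
+        # yields a negative distance dominated by type_penalty.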
+ # FUTURE - this is _very_ expensive and probably needs some
+ # thorough performance work. This version is meant to give
+ # meaningful results relatively simply.
+ dist = 0
+
+ # Get the difference between the types. The addition is safe
+ # here because type_distance only returns 0 or negative.
+ dist += self.type_distance(req.src_type, prov.src_type)
+ dist += self.type_distance(req.tgt_type, prov.tgt_type)
+
+ # Object class distance
+ if req.obj_class != prov.obj_class and not access.is_idparam(prov.obj_class):
+ dist -= self.obj_penalty
+
+ # Permission distance
+
+ # If this av doesn't have a matching source type, target type, and object class
+ # count all of the permissions against it. Otherwise determine the perm
+ # distance and dir.
+ if dist < 0:
+ pdist = self.perm_maps.getdefault_distance(prov.obj_class, prov.perms)
+ else:
+ pdist = self.perm_distance(req, prov)
+
+ # Combine the perm and other distance
+ if dist < 0:
+ if pdist < 0:
+ return dist + pdist
+ else:
+ return dist - pdist
+ elif dist >= 0:
+ if pdist < 0:
+ return pdist - dist
+ else:
+ return dist + pdist
+
+ def av_set_match(self, av_set, av):
+        """Determine the distance between an access vector set and a
+        single access vector by summing the av_distance of each member,
+        with an extra penalty if matching would add information flow
+        in the write direction.
+        """
+ dist = None
+
+ # Get the distance for each access vector
+ for x in av_set:
+ tmp = self.av_distance(av, x)
+ if dist is None:
+ dist = tmp
+ elif tmp >= 0:
+ if dist >= 0:
+ dist += tmp
+ else:
+ dist = tmp + -dist
+ else:
+ if dist < 0:
+ dist += tmp
+ else:
+ dist -= tmp
+
+ # Penalize for information flow - we want to prevent the
+ # addition of a write if the requested is read none. We are
+ # much less concerned about the reverse.
+ av_dir = self.perm_maps.getdefault_direction(av.obj_class, av.perms)
+
+ if av_set.info_dir is None:
+ av_set.info_dir = objectmodel.FLOW_NONE
+ for x in av_set:
+ av_set.info_dir = av_set.info_dir | \
+ self.perm_maps.getdefault_direction(x.obj_class, x.perms)
+ if (av_dir & objectmodel.FLOW_WRITE == 0) and (av_set.info_dir & objectmodel.FLOW_WRITE):
+ if dist < 0:
+ dist -= self.info_dir_penalty
+ else:
+ dist += self.info_dir_penalty
+
+ return dist
+
+ def search_ifs(self, ifset, av, match_list):
+ match_list.av = av
+ for iv in itertools.chain(ifset.tgt_type_all,
+ ifset.tgt_type_map.get(av.tgt_type, [])):
+ if not iv.enabled:
+ #print "iv %s not enabled" % iv.name
+ continue
+
+ dist = self.av_set_match(iv.access, av)
+ if dist >= 0:
+ m = Match(iv, dist)
+ match_list.append(m)
+
+
+ match_list.sort()
+
+
diff --git a/lib/python2.7/site-packages/sepolgen/module.py b/lib/python2.7/site-packages/sepolgen/module.py
new file mode 100644
index 0000000..7fc9443
--- /dev/null
+++ b/lib/python2.7/site-packages/sepolgen/module.py
@@ -0,0 +1,213 @@
+# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
+#
+# Copyright (C) 2006 Red Hat
+# see file 'COPYING' for use and warranty information
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2 only
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+
+"""
+Utilities for dealing with the compilation of modules and creation
+of module trees.
+"""
+
+import defaults
+
+import selinux
+
+import re
+import tempfile
+import commands
+import os
+import os.path
+import subprocess
+import shutil
+
+def is_valid_name(modname):
+ """Check that a module name is valid.
+ """
+ m = re.findall("[^a-zA-Z0-9_\-\.]", modname)
+ if len(m) == 0 and modname[0].isalpha():
+ return True
+ else:
+ return False
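+
+# Illustrative examples: is_valid_name("myapp") returns True, while
+# is_valid_name("1app") and is_valid_name("my app") return False.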
+
+class ModuleTree:
+ def __init__(self, modname):
+ self.modname = modname
+ self.dirname = None
+
+ def dir_name(self):
+ return self.dirname
+
+ def te_name(self):
+ return self.dirname + "/" + self.modname + ".te"
+
+ def fc_name(self):
+ return self.dirname + "/" + self.modname + ".fc"
+
+ def if_name(self):
+ return self.dirname + "/" + self.modname + ".if"
+
+ def package_name(self):
+ return self.dirname + "/" + self.modname + ".pp"
+
+ def makefile_name(self):
+ return self.dirname + "/Makefile"
+
+ def create(self, parent_dirname, makefile_include=None):
+ self.dirname = parent_dirname + "/" + self.modname
+ os.mkdir(self.dirname)
+ fd = open(self.makefile_name(), "w")
+ if makefile_include:
+ fd.write("include " + makefile_include)
+ else:
+ fd.write("include " + defaults.refpolicy_makefile())
+ fd.close()
+
+ # Create empty files for the standard refpolicy
+ # module files
+ open(self.te_name(), "w").close()
+ open(self.fc_name(), "w").close()
+ open(self.if_name(), "w").close()
+
+def modname_from_sourcename(sourcename):
+ return os.path.splitext(os.path.split(sourcename)[1])[0]
+
+class ModuleCompiler:
+ """ModuleCompiler eases running of the module compiler.
+
+ The ModuleCompiler class encapsulates running the commandline
+ module compiler (checkmodule) and module packager (semodule_package).
+ You are likely interested in the create_module_package method.
+
+    Several options are controlled via parameters (these only affect
+    the non-refpolicy builds):
+
+    .mls            [boolean] Generate an MLS module (by passing -M to
+ checkmodule). True to generate an MLS module, false
+ otherwise.
+
+ .module [boolean] Generate a module instead of a base module.
+ True to generate a module, false to generate a base.
+
+ .checkmodule [string] Fully qualified path to the module compiler.
+ Default is /usr/bin/checkmodule.
+
+ .semodule_package [string] Fully qualified path to the module
+ packager. Defaults to /usr/bin/semodule_package.
+ .output [file object] File object used to write verbose
+               output of the compilation and packaging process.
+ """
+ def __init__(self, output=None):
+ """Create a ModuleCompiler instance, optionally with an
+ output file object for verbose output of the compilation process.
+ """
+ self.mls = selinux.is_selinux_mls_enabled()
+ self.module = True
+ self.checkmodule = "/usr/bin/checkmodule"
+ self.semodule_package = "/usr/bin/semodule_package"
+ self.output = output
+ self.last_output = ""
+ self.refpol_makefile = defaults.refpolicy_makefile()
+ self.make = "/usr/bin/make"
+
+ def o(self, str):
+ if self.output:
+ self.output.write(str + "\n")
+ self.last_output = str
+
+ def run(self, command):
+ self.o(command)
+ rc, output = commands.getstatusoutput(command)
+ self.o(output)
+
+ return rc
+
+ def gen_filenames(self, sourcename):
+ """Generate the module and policy package filenames from
+ a source file name. The source file must be in the form
+ of "foo.te". This will generate "foo.mod" and "foo.pp".
+
+ Returns a tuple with (modname, policypackage).
+ """
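+        # e.g. gen_filenames("foo.bar.te") returns ("foo.bar.mod", "foo.bar.pp")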
+ splitname = sourcename.split(".")
+ if len(splitname) < 2:
+            raise RuntimeError("invalid sourcefile name %s (must end in .te)" % sourcename)
+ # Handle other periods in the filename correctly
+ basename = ".".join(splitname[0:-1])
+ modname = basename + ".mod"
+ packagename = basename + ".pp"
+
+ return (modname, packagename)
+
+ def create_module_package(self, sourcename, refpolicy=True):
+ """Create a module package saved in a packagename from a
+ sourcename.
+
+ The create_module_package creates a module package saved in a
+ file named sourcename (.pp is the standard extension) from a
+ source file (.te is the standard extension). The source file
+ should contain SELinux policy statements appropriate for a
+ base or non-base module (depending on the setting of .module).
+
+ Only file names are accepted, not open file objects or
+ descriptors because the command line SELinux tools are used.
+
+ On error a RuntimeError will be raised with a descriptive
+ error message.
+ """
+ if refpolicy:
+ self.refpol_build(sourcename)
+ else:
+ modname, packagename = self.gen_filenames(sourcename)
+ self.compile(sourcename, modname)
+ self.package(modname, packagename)
+ os.unlink(modname)
+
+ def refpol_build(self, sourcename):
+ # Compile
+ command = self.make + " -f " + self.refpol_makefile
+ rc = self.run(command)
+
+ # Raise an error if the process failed
+ if rc != 0:
+ raise RuntimeError("compilation failed:\n%s" % self.last_output)
+
+ def compile(self, sourcename, modname):
+ s = [self.checkmodule]
+ if self.mls:
+ s.append("-M")
+ if self.module:
+ s.append("-m")
+ s.append("-o")
+ s.append(modname)
+ s.append(sourcename)
+
+ rc = self.run(" ".join(s))
+ if rc != 0:
+ raise RuntimeError("compilation failed:\n%s" % self.last_output)
+
+ def package(self, modname, packagename):
+ s = [self.semodule_package]
+ s.append("-o")
+ s.append(packagename)
+ s.append("-m")
+ s.append(modname)
+
+ rc = self.run(" ".join(s))
+ if rc != 0:
+ raise RuntimeError("packaging failed [%s]" % self.last_output)
+
+
diff --git a/lib/python2.7/site-packages/sepolgen/objectmodel.py b/lib/python2.7/site-packages/sepolgen/objectmodel.py
new file mode 100644
index 0000000..88c8a1f
--- /dev/null
+++ b/lib/python2.7/site-packages/sepolgen/objectmodel.py
@@ -0,0 +1,172 @@
+# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
+#
+# Copyright (C) 2006 Red Hat
+# see file 'COPYING' for use and warranty information
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2 only
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+
+"""
+This module provides knowledge of object classes and permissions. It should
+be used to keep this knowledge from leaking into the more generic parts of
+the policy generation.
+"""
+
+# Objects that can be implicitly typed - these objects do
+# not _have_ to be implicitly typed (e.g., sockets can be
+# explicitly labeled), but they often are.
+#
+# File is in this list for /proc/self
+#
+# This list is useful when dealing with rules that have a
+# type (or param) used as both a subject and object. For
+# example:
+#
+# allow httpd_t httpd_t : socket read;
+#
+# This rule makes sense because the socket was (presumably) created
+# by a process with the type httpd_t.
+implicitly_typed_objects = ["socket", "fd", "process", "file", "lnk_file", "fifo_file",
+ "dbus", "capability", "unix_stream_socket"]
+
+#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+#
+#Information Flow
+#
+# All of the permissions in SELinux can be described in terms of
+# information flow. For example, a read of a file is a flow of
+# information from that file to the process reading. Viewing
+# permissions in these terms can be used to model a variety of
+# security properties.
+#
+# Here we have some infrastructure for understanding permissions
+# in terms of information flow
+#
+#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+
+# Information flow deals with information either flowing from a subject
+# to an object ("write") or to a subject from an object ("read"). Read
+# or write is described from the subject's point-of-view. It is also possible
+# for a permission to represent both a read and a write (though the flow is
+# typically asymmetric in terms of bandwidth). It is also possible for a
+# permission to not flow information (meaning that the result is a pure
+# side-effect).
+#
+# The following constants are for representing the directionality
+# of information flow.
+FLOW_NONE = 0
+FLOW_READ = 1
+FLOW_WRITE = 2
+FLOW_BOTH = FLOW_READ | FLOW_WRITE
+
+# These are used by the parser and for nice display of the directions
+str_to_dir = { "n" : FLOW_NONE, "r" : FLOW_READ, "w" : FLOW_WRITE, "b" : FLOW_BOTH }
+dir_to_str = { FLOW_NONE : "n", FLOW_READ : "r", FLOW_WRITE : "w", FLOW_BOTH : "b" }
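+
+# For example, FLOW_READ | FLOW_WRITE == FLOW_BOTH, and
+# str_to_dir[dir_to_str[FLOW_NONE]] == FLOW_NONE.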
+
+class PermMap:
+ """A mapping between a permission and its information flow properties.
+
+ PermMap represents the information flow properties of a single permission
+ including the direction (read, write, etc.) and an abstract representation
+ of the bandwidth of the flow (weight).
+ """
+ def __init__(self, perm, dir, weight):
+ self.perm = perm
+ self.dir = dir
+ self.weight = weight
+
+ def __repr__(self):
+ return "<sepolgen.objectmodel.PermMap %s %s %d>" % (self.perm,
+ dir_to_str[self.dir],
+ self.weight)
+
+class PermMappings:
+ """The information flow properties of a set of object classes and permissions.
+
+ PermMappings maps one or more classes and permissions to their PermMap objects
+    describing their information flow characteristics.
+ """
+ def __init__(self):
+ self.classes = { }
+ self.default_weight = 5
+ self.default_dir = FLOW_BOTH
+
+ def from_file(self, fd):
+ """Read the permission mappings from a file. This reads the format used
+ by Apol in the setools suite.
+ """
+        # This parsing is deliberately picky and bails at the least error. It
+ # is assumed that the permission map file will be shipped as part
+ # of sepolgen and not user modified, so this is a reasonable design
+ # choice. If user supplied permission mappings are needed the parser
+ # should be made a little more robust and give better error messages.
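+        # The accepted format looks like this illustrative snippet:
+        #
+        #   class file
+        #       read    r   10
+        #       write   w   10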
+ cur = None
+ for line in fd:
+ fields = line.split()
+ if len(fields) == 0 or len(fields) == 1 or fields[0] == "#":
+ continue
+ if fields[0] == "class":
+ c = fields[1]
+ if self.classes.has_key(c):
+ raise ValueError("duplicate class in perm map")
+ self.classes[c] = { }
+ cur = self.classes[c]
+ else:
+ if len(fields) != 3:
+                    raise ValueError("error in object class permissions")
+ if cur is None:
+ raise ValueError("permission outside of class")
+ pm = PermMap(fields[0], str_to_dir[fields[1]], int(fields[2]))
+ cur[pm.perm] = pm
+
+ def get(self, obj, perm):
+ """Get the permission map for the object permission.
+
+ Returns:
+ PermMap representing the permission
+ Raises:
+ KeyError if the object or permission is not defined
+ """
+ return self.classes[obj][perm]
+
+ def getdefault(self, obj, perm):
+ """Get the permission map for the object permission or a default.
+
+ getdefault is the same as get except that a default PermMap is
+ returned if the object class or permission is not defined. The
+ default is FLOW_BOTH with a weight of 5.
+ """
+ try:
+ pm = self.classes[obj][perm]
+ except KeyError:
+ return PermMap(perm, self.default_dir, self.default_weight)
+ return pm
+
+ def getdefault_direction(self, obj, perms):
+ dir = FLOW_NONE
+ for perm in perms:
+ pm = self.getdefault(obj, perm)
+ dir = dir | pm.dir
+ return dir
+
+ def getdefault_distance(self, obj, perms):
+ total = 0
+ for perm in perms:
+ pm = self.getdefault(obj, perm)
+ total += pm.weight
+
+ return total
+
+
+
diff --git a/lib/python2.7/site-packages/sepolgen/output.py b/lib/python2.7/site-packages/sepolgen/output.py
new file mode 100644
index 0000000..739452d
--- /dev/null
+++ b/lib/python2.7/site-packages/sepolgen/output.py
@@ -0,0 +1,173 @@
+# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
+#
+# Copyright (C) 2006 Red Hat
+# see file 'COPYING' for use and warranty information
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2 only
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+
+"""
+Classes and functions for the output of reference policy modules.
+
+This module takes a refpolicy.Module object and formats it for
+output using the ModuleWriter object. By separating the output
+in this way the other parts of Madison can focus solely on
+generating policy. This keeps the semantic / syntactic issues
+cleanly separated from the formatting issues.
+"""
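+
+# Typical use (an illustrative sketch): create a ModuleWriter and pass it
+# a refpolicy.Module plus any writable file object:
+#
+#   mw = ModuleWriter()
+#   mw.write(module, sys.stdout)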
+
+import refpolicy
+import util
+
+class ModuleWriter:
+ def __init__(self):
+ self.fd = None
+ self.module = None
+ self.sort = True
+ self.requires = True
+
+ def write(self, module, fd):
+ self.module = module
+
+ if self.sort:
+ sort_filter(self.module)
+
+ # FIXME - make this handle nesting
+ for node, depth in refpolicy.walktree(self.module, showdepth=True):
+ fd.write("%s\n" % str(node))
+
+# Helper functions for sort_filter - this is all done old school
+# C style rather than with polymorphic methods because this sorting
+# is specific to output. It is not necessarily the comparison you
+# want generally.
+
+# Compare two IdSets - we could probably do something clever
+# with set difference here, but this works.
+def id_set_cmp(x, y):
+ xl = util.set_to_list(x)
+ xl.sort()
+ yl = util.set_to_list(y)
+ yl.sort()
+
+ if len(xl) != len(yl):
+ return cmp(xl[0], yl[0])
+ for v in zip(xl, yl):
+ if v[0] != v[1]:
+ return cmp(v[0], v[1])
+ return 0
+
+# Compare two avrules
+def avrule_cmp(a, b):
+ ret = id_set_cmp(a.src_types, b.src_types)
+    if ret != 0:
+ return ret
+ ret = id_set_cmp(a.tgt_types, b.tgt_types)
+    if ret != 0:
+ return ret
+ ret = id_set_cmp(a.obj_classes, b.obj_classes)
+    if ret != 0:
+ return ret
+
+ # At this point, who cares - just return something
+ return cmp(len(a.perms), len(b.perms))
+
+# Compare two interface calls
+def ifcall_cmp(a, b):
+ if a.args[0] != b.args[0]:
+ return cmp(a.args[0], b.args[0])
+ return cmp(a.ifname, b.ifname)
+
+# Compare two avrules or interface calls
+def rule_cmp(a, b):
+ if isinstance(a, refpolicy.InterfaceCall):
+ if isinstance(b, refpolicy.InterfaceCall):
+ return ifcall_cmp(a, b)
+ else:
+ return id_set_cmp([a.args[0]], b.src_types)
+ else:
+ if isinstance(b, refpolicy.AVRule):
+ return avrule_cmp(a,b)
+ else:
+ return id_set_cmp(a.src_types, [b.args[0]])
+
+def role_type_cmp(a, b):
+ return cmp(a.role, b.role)
+
+def sort_filter(module):
+ """Sort and group the output for readability.
+ """
+ def sort_node(node):
+ c = []
+
+ # Module statement
+ for mod in node.module_declarations():
+ c.append(mod)
+ c.append(refpolicy.Comment())
+
+ # Requires
+ for require in node.requires():
+ c.append(require)
+ c.append(refpolicy.Comment())
+
+ # Rules
+ #
+ # We are going to group output by source type (which
+ # we assume is the first argument for interfaces).
+ rules = []
+ rules.extend(node.avrules())
+ rules.extend(node.interface_calls())
+ rules.sort(rule_cmp)
+
+ cur = None
+ sep_rules = []
+ for rule in rules:
+ if isinstance(rule, refpolicy.InterfaceCall):
+ x = rule.args[0]
+ else:
+ x = util.first(rule.src_types)
+
+ if cur != x:
+ if cur:
+ sep_rules.append(refpolicy.Comment())
+ cur = x
+ comment = refpolicy.Comment()
+ comment.lines.append("============= %s ==============" % cur)
+ sep_rules.append(comment)
+ sep_rules.append(rule)
+
+ c.extend(sep_rules)
+
+
+ ras = []
+ ras.extend(node.role_types())
+ ras.sort(role_type_cmp)
+ if len(ras):
+ comment = refpolicy.Comment()
+ comment.lines.append("============= ROLES ==============")
+ c.append(comment)
+
+
+ c.extend(ras)
+
+ # Everything else
+ for child in node.children:
+ if child not in c:
+ c.append(child)
+
+ node.children = c
+
+ for node in module.nodes():
+ sort_node(node)
+
+
diff --git a/lib/python2.7/site-packages/sepolgen/policygen.py b/lib/python2.7/site-packages/sepolgen/policygen.py
new file mode 100644
index 0000000..5f38577
--- /dev/null
+++ b/lib/python2.7/site-packages/sepolgen/policygen.py
@@ -0,0 +1,402 @@
+# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
+#
+# Copyright (C) 2006 Red Hat
+# see file 'COPYING' for use and warranty information
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2 only
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+
+"""
+Classes and algorithms for the generation of SELinux policy.
+"""
+
+import itertools
+import textwrap
+
+import refpolicy
+import objectmodel
+import access
+import interfaces
+import matching
+import selinux.audit2why as audit2why
+try:
+ from setools import *
+except:
+ pass
+
+# Constants for the level of explanation from the generation
+# routines
+NO_EXPLANATION = 0
+SHORT_EXPLANATION = 1
+LONG_EXPLANATION = 2
+
+class PolicyGenerator:
+ """Generate a reference policy module from access vectors.
+
+ PolicyGenerator generates a new reference policy module
+ or updates an existing module based on requested access
+ in the form of access vectors.
+
+ It generates allow rules and optionally module require
+ statements and reference policy interfaces. By default
+ only allow rules are generated. The methods .set_gen_refpol
+    and .set_gen_requires turn on interface generation and
+ requires generation respectively.
+
+ PolicyGenerator can also optionally add comments explaining
+ why a particular access was allowed based on the audit
+ messages that generated the access. The access vectors
+ passed in must have the .audit_msgs field set correctly
+ and .explain set to SHORT|LONG_EXPLANATION to enable this
+ feature.
+
+ The module created by PolicyGenerator can be passed to
+ output.ModuleWriter to output a text representation.
+ """
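+    # A minimal usage sketch (assumes an access vector set 'avs' built
+    # elsewhere, e.g. from parsed audit messages):
+    #
+    #   g = PolicyGenerator()
+    #   g.set_module_name("myapp")
+    #   g.add_access(avs)
+    #   output.ModuleWriter().write(g.get_module(), sys.stdout)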
+ def __init__(self, module=None):
+ """Initialize a PolicyGenerator with an optional
+ existing module.
+
+        If the module parameter is not None then access
+ will be added to the passed in module. Otherwise
+ a new reference policy module will be created.
+ """
+ self.ifgen = None
+ self.explain = NO_EXPLANATION
+ self.gen_requires = False
+ if module:
+            self.module = module
+ else:
+ self.module = refpolicy.Module()
+
+ self.dontaudit = False
+
+        self.domains = None
+
+    def set_gen_refpol(self, if_set=None, perm_maps=None):
+ """Set whether reference policy interfaces are generated.
+
+ To turn on interface generation pass in an interface set
+ to use for interface generation. To turn off interface
+ generation pass in None.
+
+ If interface generation is enabled requires generation
+ will also be enabled.
+ """
+ if if_set:
+ self.ifgen = InterfaceGenerator(if_set, perm_maps)
+ self.gen_requires = True
+ else:
+ self.ifgen = None
+ self.__set_module_style()
+
+
+ def set_gen_requires(self, status=True):
+ """Set whether module requires are generated.
+
+ Passing in true will turn on requires generation and
+ False will disable generation. If requires generation is
+ disabled interface generation will also be disabled and
+ can only be re-enabled via .set_gen_refpol.
+ """
+ self.gen_requires = status
+
+ def set_gen_explain(self, explain=SHORT_EXPLANATION):
+ """Set whether access is explained.
+ """
+ self.explain = explain
+
+ def set_gen_dontaudit(self, dontaudit):
+ self.dontaudit = dontaudit
+
+ def __set_module_style(self):
+ if self.ifgen:
+ refpolicy = True
+ else:
+ refpolicy = False
+ for mod in self.module.module_declarations():
+ mod.refpolicy = refpolicy
+
+ def set_module_name(self, name, version="1.0"):
+ """Set the name of the module and optionally the version.
+ """
+ # find an existing module declaration
+ m = None
+ for mod in self.module.module_declarations():
+ m = mod
+ if not m:
+ m = refpolicy.ModuleDeclaration()
+ self.module.children.insert(0, m)
+ m.name = name
+ m.version = version
+ if self.ifgen:
+ m.refpolicy = True
+ else:
+ m.refpolicy = False
+
+    def get_module(self):
+        """Return the generated module, generating the require
+        statements first if requires generation is enabled."""
+        if self.gen_requires:
+            gen_requires(self.module)
+
+        return self.module
+
+ def __add_allow_rules(self, avs):
+ for av in avs:
+ rule = refpolicy.AVRule(av)
+ if self.dontaudit:
+ rule.rule_type = rule.DONTAUDIT
+ rule.comment = ""
+ if self.explain:
+ rule.comment = str(refpolicy.Comment(explain_access(av, verbosity=self.explain)))
+ if av.type == audit2why.ALLOW:
+ rule.comment += "\n#!!!! This avc is allowed in the current policy"
+ if av.type == audit2why.DONTAUDIT:
+ rule.comment += "\n#!!!! This avc has a dontaudit rule in the current policy"
+
+ if av.type == audit2why.BOOLEAN:
+ if len(av.data) > 1:
+                    rule.comment += "\n#!!!! This avc can be allowed using one of these booleans:\n# %s" % ", ".join(map(lambda x: x[0], av.data))
+ else:
+ rule.comment += "\n#!!!! This avc can be allowed using the boolean '%s'" % av.data[0][0]
+
+ if av.type == audit2why.CONSTRAINT:
+ rule.comment += "\n#!!!! This avc is a constraint violation. You would need to modify the attributes of either the source or target types to allow this access."
+ rule.comment += "\n#Constraint rule: "
+ rule.comment += "\n\t" + av.data[0]
+ for reason in av.data[1:]:
+ rule.comment += "\n#\tPossible cause is the source %s and target %s are different." % reason
+
+ try:
+ if ( av.type == audit2why.TERULE and
+ "write" in av.perms and
+ ( "dir" in av.obj_class or "open" in av.perms )):
+ if not self.domains:
+ self.domains = seinfo(ATTRIBUTE, name="domain")[0]["types"]
+ types=[]
+
+ for i in map(lambda x: x[TCONTEXT], sesearch([ALLOW], {SCONTEXT: av.src_type, CLASS: av.obj_class, PERMS: av.perms})):
+ if i not in self.domains:
+ types.append(i)
+ if len(types) == 1:
+ rule.comment += "\n#!!!! The source type '%s' can write to a '%s' of the following type:\n# %s\n" % ( av.src_type, av.obj_class, ", ".join(types))
+                        elif len(types) > 1:
+ rule.comment += "\n#!!!! The source type '%s' can write to a '%s' of the following types:\n# %s\n" % ( av.src_type, av.obj_class, ", ".join(types))
+ except:
+ pass
+ self.module.children.append(rule)
+
+
+ def add_access(self, av_set):
+ """Add the access from the access vector set to this
+ module.
+ """
+ # Use the interface generator to split the access
+ # into raw allow rules and interfaces. After this
+        # raw_allow will contain the list of access that should be
+ # used as raw allow rules and the interfaces will
+ # be added to the module.
+ if self.ifgen:
+ raw_allow, ifcalls = self.ifgen.gen(av_set, self.explain)
+ self.module.children.extend(ifcalls)
+ else:
+ raw_allow = av_set
+
+ # Generate the raw allow rules from the filtered list
+ self.__add_allow_rules(raw_allow)
+
+ def add_role_types(self, role_type_set):
+ for role_type in role_type_set:
+ self.module.children.append(role_type)
+
+def explain_access(av, ml=None, verbosity=SHORT_EXPLANATION):
+ """Explain why a policy statement was generated.
+
+ Return a string containing a text explanation of
+ why a policy statement was generated. The string is
+ commented and wrapped and can be directly inserted
+ into a policy.
+
+ Params:
+ av - access vector representing the access. Should
+ have .audit_msgs set appropriately.
+ verbosity - the amount of explanation provided. Should
+ be set to NO_EXPLANATION, SHORT_EXPLANATION, or
+ LONG_EXPLANATION.
+ Returns:
+       list of strings - strings explaining the access, or an empty
+           list if verbosity=NO_EXPLANATION or there is not sufficient
+           information to provide an explanation.
+ """
+ s = []
+
+ def explain_interfaces():
+ if not ml:
+ return
+ s.append(" Interface options:")
+ for match in ml.all():
+ ifcall = call_interface(match.interface, ml.av)
+ s.append(' %s # [%d]' % (ifcall.to_string(), match.dist))
+
+
+ # Format the raw audit data to explain why the
+ # access was requested - either long or short.
+ if verbosity == LONG_EXPLANATION:
+ for msg in av.audit_msgs:
+ s.append(' %s' % msg.header)
+ s.append(' scontext="%s" tcontext="%s"' %
+ (str(msg.scontext), str(msg.tcontext)))
+ s.append(' class="%s" perms="%s"' %
+ (msg.tclass, refpolicy.list_to_space_str(msg.accesses)))
+ s.append(' comm="%s" exe="%s" path="%s"' % (msg.comm, msg.exe, msg.path))
+ s.extend(textwrap.wrap('message="' + msg.message + '"', 80, initial_indent=" ",
+ subsequent_indent=" "))
+ explain_interfaces()
+ elif verbosity:
+ s.append(' src="%s" tgt="%s" class="%s", perms="%s"' %
+ (av.src_type, av.tgt_type, av.obj_class, av.perms.to_space_str()))
+ # For the short display we are only going to use the additional information
+ # from the first audit message. For the vast majority of cases this info
+ # will always be the same anyway.
+ if len(av.audit_msgs) > 0:
+ msg = av.audit_msgs[0]
+ s.append(' comm="%s" exe="%s" path="%s"' % (msg.comm, msg.exe, msg.path))
+ explain_interfaces()
+ return s
+
+def param_comp(a, b):
+ return cmp(b.num, a.num)
+
+def call_interface(interface, av):
+ params = []
+ args = []
+
+ params.extend(interface.params.values())
+ params.sort(param_comp)
+
+ ifcall = refpolicy.InterfaceCall()
+ ifcall.ifname = interface.name
+
+ for i in range(len(params)):
+ if params[i].type == refpolicy.SRC_TYPE:
+ ifcall.args.append(av.src_type)
+ elif params[i].type == refpolicy.TGT_TYPE:
+ ifcall.args.append(av.tgt_type)
+ elif params[i].type == refpolicy.OBJ_CLASS:
+ ifcall.args.append(av.obj_class)
+ else:
+ print params[i].type
+ assert(0)
+
+ assert(len(ifcall.args) > 0)
+
+ return ifcall
+
+class InterfaceGenerator:
+ def __init__(self, ifs, perm_maps=None):
+ self.ifs = ifs
+ self.hack_check_ifs(ifs)
+ self.matcher = matching.AccessMatcher(perm_maps)
+ self.calls = []
+
+ def hack_check_ifs(self, ifs):
+ # FIXME: Disable interfaces we can't call - this is a hack.
+        # Because we don't handle roles, multiple parameters, etc.,
+ # etc., we must make certain we can actually use a returned
+ # interface.
+ for x in ifs.interfaces.values():
+ params = []
+ params.extend(x.params.values())
+ params.sort(param_comp)
+ for i in range(len(params)):
+            # Check that the parameter position matches
+ # the number (e.g., $1 is the first arg). This
+ # will fail if the parser missed something.
+ if (i + 1) != params[i].num:
+ x.enabled = False
+ break
+            # Check that we can handle the param type (currently excludes
+            # roles).
+ if params[i].type not in [refpolicy.SRC_TYPE, refpolicy.TGT_TYPE,
+ refpolicy.OBJ_CLASS]:
+ x.enabled = False
+ break
+
+ def gen(self, avs, verbosity):
+ raw_av = self.match(avs)
+ ifcalls = []
+ for ml in self.calls:
+ ifcall = call_interface(ml.best().interface, ml.av)
+ if verbosity:
+ ifcall.comment = refpolicy.Comment(explain_access(ml.av, ml, verbosity))
+ ifcalls.append((ifcall, ml))
+
+ d = []
+ for ifcall, ifs in ifcalls:
+ found = False
+ for o_ifcall in d:
+ if o_ifcall.matches(ifcall):
+ if o_ifcall.comment and ifcall.comment:
+ o_ifcall.comment.merge(ifcall.comment)
+ found = True
+ if not found:
+ d.append(ifcall)
+
+ return (raw_av, d)
+
+
+ def match(self, avs):
+ raw_av = []
+ for av in avs:
+ ans = matching.MatchList()
+ self.matcher.search_ifs(self.ifs, av, ans)
+ if len(ans):
+ self.calls.append(ans)
+ else:
+ raw_av.append(av)
+
+ return raw_av
+
+
+def gen_requires(module):
+ """Add require statements to the module.
+ """
+ def collect_requires(node):
+ r = refpolicy.Require()
+ for avrule in node.avrules():
+ r.types.update(avrule.src_types)
+ r.types.update(avrule.tgt_types)
+ for obj in avrule.obj_classes:
+ r.add_obj_class(obj, avrule.perms)
+
+ for ifcall in node.interface_calls():
+ for arg in ifcall.args:
+ # FIXME - handle non-type arguments when we
+ # can actually figure those out.
+ r.types.add(arg)
+
+ for role_type in node.role_types():
+ r.roles.add(role_type.role)
+ r.types.update(role_type.types)
+
+ r.types.discard("self")
+
+ node.children.insert(0, r)
+
+ # FUTURE - this is untested on modules with any sort of
+ # nesting
+ for node in module.nodes():
+ collect_requires(node)
+
+
diff --git a/lib/python2.7/site-packages/sepolgen/refparser.py b/lib/python2.7/site-packages/sepolgen/refparser.py
new file mode 100644
index 0000000..83542d3
--- /dev/null
+++ b/lib/python2.7/site-packages/sepolgen/refparser.py
@@ -0,0 +1,1128 @@
+# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
+#
+# Copyright (C) 2006-2007 Red Hat
+# see file 'COPYING' for use and warranty information
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2 only
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+
+# OVERVIEW
+#
+#
+# This is a parser for the refpolicy policy "language" - i.e., the
+# normal SELinux policy language plus the refpolicy style M4 macro
+# constructs on top of that base language. This parser is primarily
+# aimed at parsing the policy headers in order to create an abstract
+# policy representation suitable for generating policy.
+#
+# Both the lexer and parser are included in this file. They are implemented
+# using the Ply library (included with sepolgen).
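+#
+# As an illustrative example, the grammar below can parse interface
+# definitions of the following shape (hypothetical interface name):
+#
+#   interface(`myapp_read_conf',`
+#       gen_require(`
+#           type myapp_conf_t;
+#       ')
+#       allow $1 myapp_conf_t : file { read getattr open };
+#   ')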
+
+import sys
+import os
+import re
+import traceback
+
+import refpolicy
+import access
+import defaults
+
+import lex
+import yacc
+
+# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+#
+# lexer
+#
+# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+
+tokens = (
+ # basic tokens, punctuation
+ 'TICK',
+ 'SQUOTE',
+ 'OBRACE',
+ 'CBRACE',
+ 'SEMI',
+ 'COLON',
+ 'OPAREN',
+ 'CPAREN',
+ 'COMMA',
+ 'MINUS',
+ 'TILDE',
+ 'ASTERISK',
+ 'AMP',
+ 'BAR',
+ 'EXPL',
+ 'EQUAL',
+ 'FILENAME',
+ 'IDENTIFIER',
+ 'NUMBER',
+ 'PATH',
+ 'IPV6_ADDR',
+ # reserved words
+ # module
+ 'MODULE',
+ 'POLICY_MODULE',
+ 'REQUIRE',
+ # flask
+ 'SID',
+ 'GENFSCON',
+ 'FS_USE_XATTR',
+ 'FS_USE_TRANS',
+ 'FS_USE_TASK',
+ 'PORTCON',
+ 'NODECON',
+ 'NETIFCON',
+ 'PIRQCON',
+ 'IOMEMCON',
+ 'IOPORTCON',
+ 'PCIDEVICECON',
+ 'DEVICETREECON',
+ # object classes
+ 'CLASS',
+ # types and attributes
+ 'TYPEATTRIBUTE',
+ 'ROLEATTRIBUTE',
+ 'TYPE',
+ 'ATTRIBUTE',
+ 'ATTRIBUTE_ROLE',
+ 'ALIAS',
+ 'TYPEALIAS',
+ # conditional policy
+ 'BOOL',
+ 'TRUE',
+ 'FALSE',
+ 'IF',
+ 'ELSE',
+ # users and roles
+ 'ROLE',
+ 'TYPES',
+ # rules
+ 'ALLOW',
+ 'DONTAUDIT',
+ 'AUDITALLOW',
+ 'NEVERALLOW',
+ 'PERMISSIVE',
+ 'TYPE_TRANSITION',
+ 'TYPE_CHANGE',
+ 'TYPE_MEMBER',
+ 'RANGE_TRANSITION',
+ 'ROLE_TRANSITION',
+ # refpolicy keywords
+ 'OPT_POLICY',
+ 'INTERFACE',
+ 'TUNABLE_POLICY',
+ 'GEN_REQ',
+ 'TEMPLATE',
+ 'GEN_CONTEXT',
+ # m4
+ 'IFELSE',
+ 'IFDEF',
+ 'IFNDEF',
+ 'DEFINE'
+ )
+
+# All reserved keywords - see t_IDENTIFIER for how these are matched in
+# the lexer.
+reserved = {
+ # module
+ 'module' : 'MODULE',
+ 'policy_module' : 'POLICY_MODULE',
+ 'require' : 'REQUIRE',
+ # flask
+ 'sid' : 'SID',
+ 'genfscon' : 'GENFSCON',
+ 'fs_use_xattr' : 'FS_USE_XATTR',
+ 'fs_use_trans' : 'FS_USE_TRANS',
+ 'fs_use_task' : 'FS_USE_TASK',
+ 'portcon' : 'PORTCON',
+ 'nodecon' : 'NODECON',
+ 'netifcon' : 'NETIFCON',
+ 'pirqcon' : 'PIRQCON',
+ 'iomemcon' : 'IOMEMCON',
+ 'ioportcon' : 'IOPORTCON',
+ 'pcidevicecon' : 'PCIDEVICECON',
+ 'devicetreecon' : 'DEVICETREECON',
+ # object classes
+ 'class' : 'CLASS',
+ # types and attributes
+ 'typeattribute' : 'TYPEATTRIBUTE',
+ 'roleattribute' : 'ROLEATTRIBUTE',
+ 'type' : 'TYPE',
+ 'attribute' : 'ATTRIBUTE',
+ 'attribute_role' : 'ATTRIBUTE_ROLE',
+ 'alias' : 'ALIAS',
+ 'typealias' : 'TYPEALIAS',
+ # conditional policy
+ 'bool' : 'BOOL',
+ 'true' : 'TRUE',
+ 'false' : 'FALSE',
+ 'if' : 'IF',
+ 'else' : 'ELSE',
+ # users and roles
+ 'role' : 'ROLE',
+ 'types' : 'TYPES',
+ # rules
+ 'allow' : 'ALLOW',
+ 'dontaudit' : 'DONTAUDIT',
+ 'auditallow' : 'AUDITALLOW',
+ 'neverallow' : 'NEVERALLOW',
+ 'permissive' : 'PERMISSIVE',
+ 'type_transition' : 'TYPE_TRANSITION',
+ 'type_change' : 'TYPE_CHANGE',
+ 'type_member' : 'TYPE_MEMBER',
+ 'range_transition' : 'RANGE_TRANSITION',
+ 'role_transition' : 'ROLE_TRANSITION',
+ # refpolicy keywords
+ 'optional_policy' : 'OPT_POLICY',
+ 'interface' : 'INTERFACE',
+ 'tunable_policy' : 'TUNABLE_POLICY',
+ 'gen_require' : 'GEN_REQ',
+ 'template' : 'TEMPLATE',
+ 'gen_context' : 'GEN_CONTEXT',
+ # M4
+ 'ifelse' : 'IFELSE',
+ 'ifndef' : 'IFNDEF',
+ 'ifdef' : 'IFDEF',
+ 'define' : 'DEFINE'
+ }
+
+# The ply lexer allows definition of tokens in 2 ways: regular expressions
+# or functions.
+
+# Simple regex tokens
+t_TICK = r'\`'
+t_SQUOTE = r'\''
+t_OBRACE = r'\{'
+t_CBRACE = r'\}'
+# This will handle spurious extra ';' via the +
+t_SEMI = r'\;+'
+t_COLON = r'\:'
+t_OPAREN = r'\('
+t_CPAREN = r'\)'
+t_COMMA = r'\,'
+t_MINUS = r'\-'
+t_TILDE = r'\~'
+t_ASTERISK = r'\*'
+t_AMP = r'\&'
+t_BAR = r'\|'
+t_EXPL = r'\!'
+t_EQUAL = r'\='
+t_NUMBER = r'[0-9\.]+'
+t_PATH = r'/[a-zA-Z0-9)_\.\*/]*'
+#t_IPV6_ADDR = r'[a-fA-F0-9]{0,4}:[a-fA-F0-9]{0,4}:([a-fA-F0-9]{0,4}:)*'
+
+# Ignore whitespace - this is a special token for ply that more efficiently
+# ignores uninteresting tokens.
+t_ignore = " \t"
+
+# More complex tokens
+def t_IPV6_ADDR(t):
+ r'[a-fA-F0-9]{0,4}:[a-fA-F0-9]{0,4}:([a-fA-F0-9]|:)*'
+ # This is a function simply to force it sooner into
+ # the regex list
+ return t
+
+def t_m4comment(t):
+ r'dnl.*\n'
+ # Ignore all comments
+ t.lexer.lineno += 1
+
+def t_refpolicywarn1(t):
+ r'define.*refpolicywarn\(.*\n'
+ # Ignore refpolicywarn statements - they sometimes
+ # contain text that we can't parse.
+ t.skip(1)
+
+def t_refpolicywarn(t):
+ r'refpolicywarn\(.*\n'
+ # Ignore refpolicywarn statements - they sometimes
+ # contain text that we can't parse.
+ t.lexer.lineno += 1
+
+def t_IDENTIFIER(t):
+ r'[a-zA-Z_\$][a-zA-Z0-9_\-\+\.\$\*~]*'
+ # Handle any keywords
+ t.type = reserved.get(t.value,'IDENTIFIER')
+ return t
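+
+# Note: with the lookup above an input such as "allow" becomes the reserved
+# token ALLOW, while "myapp_t" falls through as a plain IDENTIFIER.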
+
+def t_FILENAME(t):
+ r'\"[a-zA-Z0-9_\-\+\.\$\*~ :]+\"'
+ # Handle any keywords
+ t.type = reserved.get(t.value,'FILENAME')
+ return t
+
+def t_comment(t):
+ r'\#.*\n'
+ # Ignore all comments
+ t.lexer.lineno += 1
+
+def t_error(t):
+ print "Illegal character '%s'" % t.value[0]
+ t.skip(1)
+
+def t_newline(t):
+ r'\n+'
+ t.lexer.lineno += len(t.value)
+
+# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+#
+# Parser
+#
+# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+
+# Global data used during parsing - making it global is easier than
+# passing the state through the parsing functions.
+
+# m is the top-level data structure (stands for modules).
+m = None
+# error is either None (indicating no error) or a string error message.
+error = None
+parse_file = ""
+# spt is the support macros (e.g., obj/perm sets) - it is an instance of
+# refpolicy.SupportMacros and should always be present during parsing
+# though it may not contain any macros.
+spt = None
+success = True
+
+# utilities
+def collect(stmts, parent, val=None):
+ if stmts is None:
+ return
+ for s in stmts:
+ if s is None:
+ continue
+ s.parent = parent
+ if val is not None:
+ parent.children.insert(0, (val, s))
+ else:
+ parent.children.insert(0, s)
+
+def expand(ids, s):
+ for id in ids:
+ if spt.has_key(id):
+ s.update(spt.by_name(id))
+ else:
+ s.add(id)
+
+# Top-level non-terminal
+def p_statements(p):
+ '''statements : statement
+ | statements statement
+ | empty
+ '''
+ if len(p) == 2 and p[1]:
+ m.children.append(p[1])
+ elif len(p) > 2 and p[2]:
+ m.children.append(p[2])
+
+def p_statement(p):
+ '''statement : interface
+ | template
+ | obj_perm_set
+ | policy
+ | policy_module_stmt
+ | module_stmt
+ '''
+ p[0] = p[1]
+
+def p_empty(p):
+ 'empty :'
+ pass
+
+#
+# Reference policy language constructs
+#
+
+# This is for the policy module statement (e.g., policy_module(foo,1.2.0)).
+# We have separate terminals for the basic language module statement
+# and interface calls to make them easier to identify.
+def p_policy_module_stmt(p):
+ 'policy_module_stmt : POLICY_MODULE OPAREN IDENTIFIER COMMA NUMBER CPAREN'
+ m = refpolicy.ModuleDeclaration()
+ m.name = p[3]
+ m.version = p[5]
+ m.refpolicy = True
+ p[0] = m
+
+def p_interface(p):
+ '''interface : INTERFACE OPAREN TICK IDENTIFIER SQUOTE COMMA TICK interface_stmts SQUOTE CPAREN
+ '''
+ x = refpolicy.Interface(p[4])
+ collect(p[8], x)
+ p[0] = x
+
+def p_template(p):
+ '''template : TEMPLATE OPAREN TICK IDENTIFIER SQUOTE COMMA TICK interface_stmts SQUOTE CPAREN
+ | DEFINE OPAREN TICK IDENTIFIER SQUOTE COMMA TICK interface_stmts SQUOTE CPAREN
+ '''
+ x = refpolicy.Template(p[4])
+ collect(p[8], x)
+ p[0] = x
+
+def p_define(p):
+ '''define : DEFINE OPAREN TICK IDENTIFIER SQUOTE CPAREN'''
+ # This is for defining single M4 values (to be used later in ifdef statements).
+ # Example: define(`sulogin_no_pam'). We don't currently do anything with these
+ # but we should in the future when we correctly resolve ifdef statements.
+ p[0] = None
+
+def p_interface_stmts(p):
+ '''interface_stmts : policy
+ | interface_stmts policy
+ | empty
+ '''
+ if len(p) == 2 and p[1]:
+ p[0] = p[1]
+ elif len(p) > 2:
+ if not p[1]:
+ if p[2]:
+ p[0] = p[2]
+ elif not p[2]:
+ p[0] = p[1]
+ else:
+ p[0] = p[1] + p[2]
+
+def p_optional_policy(p):
+ '''optional_policy : OPT_POLICY OPAREN TICK interface_stmts SQUOTE CPAREN
+ | OPT_POLICY OPAREN TICK interface_stmts SQUOTE COMMA TICK interface_stmts SQUOTE CPAREN
+ '''
+ o = refpolicy.OptionalPolicy()
+ collect(p[4], o, val=True)
+ if len(p) > 7:
+ collect(p[8], o, val=False)
+ p[0] = [o]
+
+def p_tunable_policy(p):
+ '''tunable_policy : TUNABLE_POLICY OPAREN TICK cond_expr SQUOTE COMMA TICK interface_stmts SQUOTE CPAREN
+ | TUNABLE_POLICY OPAREN TICK cond_expr SQUOTE COMMA TICK interface_stmts SQUOTE COMMA TICK interface_stmts SQUOTE CPAREN
+ '''
+ x = refpolicy.TunablePolicy()
+ x.cond_expr = p[4]
+ collect(p[8], x, val=True)
+ if len(p) > 11:
+ collect(p[12], x, val=False)
+ p[0] = [x]
+
+def p_ifelse(p):
+ '''ifelse : IFELSE OPAREN TICK IDENTIFIER SQUOTE COMMA COMMA TICK IDENTIFIER SQUOTE COMMA TICK interface_stmts SQUOTE CPAREN optional_semi
+ | IFELSE OPAREN TICK IDENTIFIER SQUOTE COMMA TICK IDENTIFIER SQUOTE COMMA TICK interface_stmts SQUOTE COMMA TICK interface_stmts SQUOTE CPAREN optional_semi
+ '''
+# x = refpolicy.IfDef(p[4])
+# v = True
+# collect(p[8], x, val=v)
+# if len(p) > 12:
+# collect(p[12], x, val=False)
+# p[0] = [x]
+ pass
+
+
+def p_ifdef(p):
+ '''ifdef : IFDEF OPAREN TICK IDENTIFIER SQUOTE COMMA TICK interface_stmts SQUOTE CPAREN optional_semi
+ | IFNDEF OPAREN TICK IDENTIFIER SQUOTE COMMA TICK interface_stmts SQUOTE CPAREN optional_semi
+ | IFDEF OPAREN TICK IDENTIFIER SQUOTE COMMA TICK interface_stmts SQUOTE COMMA TICK interface_stmts SQUOTE CPAREN optional_semi
+ '''
+ x = refpolicy.IfDef(p[4])
+ if p[1] == 'ifdef':
+ v = True
+ else:
+ v = False
+ collect(p[8], x, val=v)
+ if len(p) > 12:
+ collect(p[12], x, val=False)
+ p[0] = [x]
+
+def p_interface_call(p):
+ '''interface_call : IDENTIFIER OPAREN interface_call_param_list CPAREN
+ | IDENTIFIER OPAREN CPAREN
+ | IDENTIFIER OPAREN interface_call_param_list CPAREN SEMI'''
+ # Allow spurious semi-colons at the end of interface calls
+ i = refpolicy.InterfaceCall(ifname=p[1])
+ if len(p) > 4:
+ i.args.extend(p[3])
+ p[0] = i
+
+def p_interface_call_param(p):
+ '''interface_call_param : IDENTIFIER
+ | IDENTIFIER MINUS IDENTIFIER
+ | nested_id_set
+ | TRUE
+ | FALSE
+ | FILENAME
+ '''
+    # Intentionally let single identifiers pass through.
+    # A list means a set; a non-list is a single identifier.
+ if len(p) == 2:
+ p[0] = p[1]
+ else:
+ p[0] = [p[1], "-" + p[3]]
+
+def p_interface_call_param_list(p):
+ '''interface_call_param_list : interface_call_param
+ | interface_call_param_list COMMA interface_call_param
+ '''
+ if len(p) == 2:
+ p[0] = [p[1]]
+ else:
+ p[0] = p[1] + [p[3]]
+
+
+def p_obj_perm_set(p):
+ 'obj_perm_set : DEFINE OPAREN TICK IDENTIFIER SQUOTE COMMA TICK names SQUOTE CPAREN'
+ s = refpolicy.ObjPermSet(p[4])
+ s.perms = p[8]
+ p[0] = s
+
+#
+# Basic SELinux policy language
+#
+
+def p_policy(p):
+ '''policy : policy_stmt
+ | optional_policy
+ | tunable_policy
+ | ifdef
+ | ifelse
+ | conditional
+ '''
+ p[0] = p[1]
+
+def p_policy_stmt(p):
+ '''policy_stmt : gen_require
+ | avrule_def
+ | typerule_def
+ | typeattribute_def
+ | roleattribute_def
+ | interface_call
+ | role_def
+ | role_allow
+ | permissive
+ | type_def
+ | typealias_def
+ | attribute_def
+ | attribute_role_def
+ | range_transition_def
+ | role_transition_def
+ | bool
+ | define
+ | initial_sid
+ | genfscon
+ | fs_use
+ | portcon
+ | nodecon
+ | netifcon
+ | pirqcon
+ | iomemcon
+ | ioportcon
+ | pcidevicecon
+ | devicetreecon
+ '''
+ if p[1]:
+ p[0] = [p[1]]
+
+def p_module_stmt(p):
+ 'module_stmt : MODULE IDENTIFIER NUMBER SEMI'
+ m = refpolicy.ModuleDeclaration()
+ m.name = p[2]
+ m.version = p[3]
+ m.refpolicy = False
+ p[0] = m
+
+def p_gen_require(p):
+ '''gen_require : GEN_REQ OPAREN TICK requires SQUOTE CPAREN
+ | REQUIRE OBRACE requires CBRACE'''
+ # We ignore the require statements - they are redundant data from our point-of-view.
+ # Checkmodule will verify them later anyway so we just assume that they match what
+ # is in the rest of the interface.
+ pass
+
+def p_requires(p):
+ '''requires : require
+ | requires require
+ | ifdef
+ | requires ifdef
+ '''
+ pass
+
+def p_require(p):
+ '''require : TYPE comma_list SEMI
+ | ROLE comma_list SEMI
+ | ATTRIBUTE comma_list SEMI
+ | ATTRIBUTE_ROLE comma_list SEMI
+ | CLASS comma_list SEMI
+ | BOOL comma_list SEMI
+ '''
+ pass
+
+def p_security_context(p):
+ '''security_context : IDENTIFIER COLON IDENTIFIER COLON IDENTIFIER
+ | IDENTIFIER COLON IDENTIFIER COLON IDENTIFIER COLON mls_range_def'''
+ # This will likely need some updates to handle complex levels
+ s = refpolicy.SecurityContext()
+ s.user = p[1]
+ s.role = p[3]
+ s.type = p[5]
+ if len(p) > 6:
+ s.level = p[7]
+
+ p[0] = s
+
+def p_gen_context(p):
+ '''gen_context : GEN_CONTEXT OPAREN security_context COMMA mls_range_def CPAREN
+ '''
+ # We actually store gen_context statements in a SecurityContext
+ # object - it knows how to output either a bare context or a
+ # gen_context statement.
+ s = p[3]
+ s.level = p[5]
+
+ p[0] = s
+
+def p_context(p):
+ '''context : security_context
+ | gen_context
+ '''
+ p[0] = p[1]
+
+def p_initial_sid(p):
+ '''initial_sid : SID IDENTIFIER context'''
+ s = refpolicy.InitialSid()
+ s.name = p[2]
+ s.context = p[3]
+ p[0] = s
+
+def p_genfscon(p):
+ '''genfscon : GENFSCON IDENTIFIER PATH context'''
+
+ g = refpolicy.GenfsCon()
+ g.filesystem = p[2]
+ g.path = p[3]
+ g.context = p[4]
+
+ p[0] = g
+
+def p_fs_use(p):
+ '''fs_use : FS_USE_XATTR IDENTIFIER context SEMI
+ | FS_USE_TASK IDENTIFIER context SEMI
+ | FS_USE_TRANS IDENTIFIER context SEMI
+ '''
+ f = refpolicy.FilesystemUse()
+ if p[1] == "fs_use_xattr":
+ f.type = refpolicy.FilesystemUse.XATTR
+ elif p[1] == "fs_use_task":
+ f.type = refpolicy.FilesystemUse.TASK
+ elif p[1] == "fs_use_trans":
+ f.type = refpolicy.FilesystemUse.TRANS
+
+ f.filesystem = p[2]
+ f.context = p[3]
+
+ p[0] = f
+
+def p_portcon(p):
+ '''portcon : PORTCON IDENTIFIER NUMBER context
+ | PORTCON IDENTIFIER NUMBER MINUS NUMBER context'''
+ c = refpolicy.PortCon()
+ c.port_type = p[2]
+ if len(p) == 5:
+ c.port_number = p[3]
+ c.context = p[4]
+ else:
+ c.port_number = p[3] + "-" + p[5]
+ c.context = p[6]
+
+ p[0] = c
+
+def p_nodecon(p):
+ '''nodecon : NODECON NUMBER NUMBER context
+ | NODECON IPV6_ADDR IPV6_ADDR context
+ '''
+ n = refpolicy.NodeCon()
+ n.start = p[2]
+ n.end = p[3]
+ n.context = p[4]
+
+ p[0] = n
+
+def p_netifcon(p):
+ 'netifcon : NETIFCON IDENTIFIER context context'
+ n = refpolicy.NetifCon()
+ n.interface = p[2]
+ n.interface_context = p[3]
+ n.packet_context = p[4]
+
+ p[0] = n
+
+def p_pirqcon(p):
+ 'pirqcon : PIRQCON NUMBER context'
+ c = refpolicy.PirqCon()
+ c.pirq_number = p[2]
+ c.context = p[3]
+
+ p[0] = c
+
+def p_iomemcon(p):
+ '''iomemcon : IOMEMCON NUMBER context
+ | IOMEMCON NUMBER MINUS NUMBER context'''
+ c = refpolicy.IomemCon()
+ if len(p) == 4:
+ c.device_mem = p[2]
+ c.context = p[3]
+ else:
+ c.device_mem = p[2] + "-" + p[4]
+ c.context = p[5]
+
+ p[0] = c
+
+def p_ioportcon(p):
+ '''ioportcon : IOPORTCON NUMBER context
+ | IOPORTCON NUMBER MINUS NUMBER context'''
+ c = refpolicy.IoportCon()
+ if len(p) == 4:
+ c.ioport = p[2]
+ c.context = p[3]
+ else:
+ c.ioport = p[2] + "-" + p[4]
+ c.context = p[5]
+
+ p[0] = c
+
+def p_pcidevicecon(p):
+ 'pcidevicecon : PCIDEVICECON NUMBER context'
+ c = refpolicy.PciDeviceCon()
+ c.device = p[2]
+ c.context = p[3]
+
+ p[0] = c
+
+def p_devicetreecon(p):
+ 'devicetreecon : DEVICETREECON NUMBER context'
+ c = refpolicy.DeviceTreeCon()
+ c.path = p[2]
+ c.context = p[3]
+
+ p[0] = c
+
+def p_mls_range_def(p):
+ '''mls_range_def : mls_level_def MINUS mls_level_def
+ | mls_level_def
+ '''
+ p[0] = p[1]
+ if len(p) > 2:
+ p[0] = p[0] + "-" + p[3]
+
+def p_mls_level_def(p):
+ '''mls_level_def : IDENTIFIER COLON comma_list
+ | IDENTIFIER
+ '''
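+ # Illustrative example: 's0:c0,c3' parses to the string "s0:c0,c3".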
+ p[0] = p[1]
+ if len(p) > 2:
+ p[0] = p[0] + ":" + ",".join(p[3])
+
+def p_type_def(p):
+ '''type_def : TYPE IDENTIFIER COMMA comma_list SEMI
+ | TYPE IDENTIFIER SEMI
+ | TYPE IDENTIFIER ALIAS names SEMI
+ | TYPE IDENTIFIER ALIAS names COMMA comma_list SEMI
+ '''
+ t = refpolicy.Type(p[2])
+ if len(p) == 6:
+ if p[3] == ',':
+ t.attributes.update(p[4])
+ else:
+ t.aliases = p[4]
+ elif len(p) > 4:
+ t.aliases = p[4]
+ if len(p) == 8:
+ t.attributes.update(p[6])
+ p[0] = t
+
+def p_attribute_def(p):
+ 'attribute_def : ATTRIBUTE IDENTIFIER SEMI'
+ a = refpolicy.Attribute(p[2])
+ p[0] = a
+
+def p_attribute_role_def(p):
+ 'attribute_role_def : ATTRIBUTE_ROLE IDENTIFIER SEMI'
+ a = refpolicy.Attribute_Role(p[2])
+ p[0] = a
+
+def p_typealias_def(p):
+ 'typealias_def : TYPEALIAS IDENTIFIER ALIAS names SEMI'
+ t = refpolicy.TypeAlias()
+ t.type = p[2]
+ t.aliases = p[4]
+ p[0] = t
+
+def p_role_def(p):
+ '''role_def : ROLE IDENTIFIER TYPES comma_list SEMI
+ | ROLE IDENTIFIER SEMI'''
+ r = refpolicy.Role()
+ r.role = p[2]
+ if len(p) > 4:
+ r.types.update(p[4])
+ p[0] = r
+
+def p_role_allow(p):
+ 'role_allow : ALLOW names names SEMI'
+ r = refpolicy.RoleAllow()
+ r.src_roles = p[2]
+ r.tgt_roles = p[3]
+ p[0] = r
+
+def p_permissive(p):
+ 'permissive : PERMISSIVE names SEMI'
+ # Permissive declarations carry no information that this parser needs.
+ pass
+
+def p_avrule_def(p):
+ '''avrule_def : ALLOW names names COLON names names SEMI
+ | DONTAUDIT names names COLON names names SEMI
+ | AUDITALLOW names names COLON names names SEMI
+ | NEVERALLOW names names COLON names names SEMI
+ '''
+ a = refpolicy.AVRule()
+ if p[1] == 'dontaudit':
+ a.rule_type = refpolicy.AVRule.DONTAUDIT
+ elif p[1] == 'auditallow':
+ a.rule_type = refpolicy.AVRule.AUDITALLOW
+ elif p[1] == 'neverallow':
+ a.rule_type = refpolicy.AVRule.NEVERALLOW
+ a.src_types = p[2]
+ a.tgt_types = p[3]
+ a.obj_classes = p[5]
+ a.perms = p[6]
+ p[0] = a
+
+def p_typerule_def(p):
+ '''typerule_def : TYPE_TRANSITION names names COLON names IDENTIFIER SEMI
+ | TYPE_TRANSITION names names COLON names IDENTIFIER FILENAME SEMI
+ | TYPE_TRANSITION names names COLON names IDENTIFIER IDENTIFIER SEMI
+ | TYPE_CHANGE names names COLON names IDENTIFIER SEMI
+ | TYPE_MEMBER names names COLON names IDENTIFIER SEMI
+ '''
+ t = refpolicy.TypeRule()
+ if p[1] == 'type_change':
+ t.rule_type = refpolicy.TypeRule.TYPE_CHANGE
+ elif p[1] == 'type_member':
+ t.rule_type = refpolicy.TypeRule.TYPE_MEMBER
+ t.src_types = p[2]
+ t.tgt_types = p[3]
+ t.obj_classes = p[5]
+ t.dest_type = p[6]
+ if len(p) == 9:
+ t.file_name = p[7]
+ p[0] = t
+
+def p_bool(p):
+ '''bool : BOOL IDENTIFIER TRUE SEMI
+ | BOOL IDENTIFIER FALSE SEMI'''
+ b = refpolicy.Bool()
+ b.name = p[2]
+ if p[3] == "true":
+ b.state = True
+ else:
+ b.state = False
+ p[0] = b
+
+def p_conditional(p):
+ ''' conditional : IF OPAREN cond_expr CPAREN OBRACE interface_stmts CBRACE
+ | IF OPAREN cond_expr CPAREN OBRACE interface_stmts CBRACE ELSE OBRACE interface_stmts CBRACE
+ '''
+ c = refpolicy.Conditional()
+ c.cond_expr = p[3]
+ collect(p[6], c, val=True)
+ if len(p) > 8:
+ collect(p[10], c, val=False)
+ p[0] = [c]
+
+def p_typeattribute_def(p):
+ '''typeattribute_def : TYPEATTRIBUTE IDENTIFIER comma_list SEMI'''
+ t = refpolicy.TypeAttribute()
+ t.type = p[2]
+ t.attributes.update(p[3])
+ p[0] = t
+
+def p_roleattribute_def(p):
+ '''roleattribute_def : ROLEATTRIBUTE IDENTIFIER comma_list SEMI'''
+ t = refpolicy.RoleAttribute()
+ t.role = p[2]
+ t.roleattributes.update(p[3])
+ p[0] = t
+
+def p_range_transition_def(p):
+ '''range_transition_def : RANGE_TRANSITION names names COLON names mls_range_def SEMI
+ | RANGE_TRANSITION names names names SEMI'''
+ pass
+
+def p_role_transition_def(p):
+ '''role_transition_def : ROLE_TRANSITION names names names SEMI'''
+ pass
+
+def p_cond_expr(p):
+ '''cond_expr : IDENTIFIER
+ | EXPL cond_expr
+ | cond_expr AMP AMP cond_expr
+ | cond_expr BAR BAR cond_expr
+ | cond_expr EQUAL EQUAL cond_expr
+ | cond_expr EXPL EQUAL cond_expr
+ '''
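+ # Note (illustrative): the expression is flattened into a token list
+ # rather than a tree; e.g. '!foo && bar' becomes ['!', 'foo', '&&', 'bar'].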
+ l = len(p)
+ if l == 2:
+ p[0] = [p[1]]
+ elif l == 3:
+ p[0] = [p[1]] + p[2]
+ else:
+ p[0] = p[1] + [p[2] + p[3]] + p[4]
+
+
+#
+# Basic terminals
+#
+
+# Identifiers and lists of identifiers. These must
+# be handled somewhat gracefully. Names returns an IdSet and care must
+# be taken that this is _assigned_ to an object to correctly update
+# all of the flags (as opposed to using update). The other terminals
+# return list - this is to preserve ordering if it is important for
+# parsing (for example, interface_call must retain the ordering). Other
+# times the list should be used to update an IdSet.
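+#
+# For example (illustrative), an IdSet built by 'names' carries a
+# 'compliment' flag for '~' expressions; assigning the returned set
+# preserves that flag, while IdSet.update() copies members only.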
+
+def p_names(p):
+ '''names : identifier
+ | nested_id_set
+ | asterisk
+ | TILDE identifier
+ | TILDE nested_id_set
+ | IDENTIFIER MINUS IDENTIFIER
+ '''
+ s = refpolicy.IdSet()
+ if len(p) < 3:
+ expand(p[1], s)
+ elif len(p) == 3:
+ expand(p[2], s)
+ s.compliment = True
+ else:
+ expand([p[1]], s)
+ s.add("-" + p[3])
+ p[0] = s
+
+def p_identifier(p):
+ 'identifier : IDENTIFIER'
+ p[0] = [p[1]]
+
+def p_asterisk(p):
+ 'asterisk : ASTERISK'
+ p[0] = [p[1]]
+
+def p_nested_id_set(p):
+ '''nested_id_set : OBRACE nested_id_list CBRACE
+ '''
+ p[0] = p[2]
+
+def p_nested_id_list(p):
+ '''nested_id_list : nested_id_element
+ | nested_id_list nested_id_element
+ '''
+ if len(p) == 2:
+ p[0] = p[1]
+ else:
+ p[0] = p[1] + p[2]
+
+def p_nested_id_element(p):
+ '''nested_id_element : identifier
+ | MINUS IDENTIFIER
+ | nested_id_set
+ '''
+ if len(p) == 2:
+ p[0] = p[1]
+ else:
+ # For now just leave the '-'
+ str = "-" + p[2]
+ p[0] = [str]
+
+def p_comma_list(p):
+ '''comma_list : nested_id_list
+ | comma_list COMMA nested_id_list
+ '''
+ if len(p) > 2:
+ p[1] = p[1] + p[3]
+ p[0] = p[1]
+
+def p_optional_semi(p):
+ '''optional_semi : SEMI
+ | empty'''
+ pass
+
+
+#
+# Interface to the parser
+#
+
+def p_error(tok):
+ global error, parse_file, success, parser
+ error = "%s: Syntax error on line %d %s [type=%s]" % (parse_file, tok.lineno, tok.value, tok.type)
+ print error
+ success = False
+
+def prep_spt(spt):
+ if not spt:
+ return { }
+ map = {}
+ for x in spt:
+ map[x.name] = x
+ return map
+
+parser = None
+lexer = None
+def create_globals(module, support, debug):
+ global parser, lexer, m, spt
+
+ if not parser:
+ lexer = lex.lex()
+ parser = yacc.yacc(method="LALR", debug=debug, write_tables=0)
+
+ if module is not None:
+ m = module
+ else:
+ m = refpolicy.Module()
+
+ if not support:
+ spt = refpolicy.SupportMacros()
+ else:
+ spt = support
+
+def parse(text, module=None, support=None, debug=False):
+ create_globals(module, support, debug)
+ global error, parser, lexer, success
+
+ success = True
+
+ try:
+ parser.parse(text, debug=debug, lexer=lexer)
+ except Exception, e:
+ parser = None
+ lexer = None
+ error = "internal parser error: %s" % str(e) + "\n" + traceback.format_exc()
+
+ if not success:
+ # force the parser and lexer to be rebuilt - we have some problems otherwise
+ parser = None
+ msg = 'could not parse text: "%s"' % error
+ raise ValueError(msg)
+ return m
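+
+# Example usage (a sketch; assumes 'text' holds reference policy source
+# such as an interface file):
+#
+#   m = parse(text)
+#   for i in m.interfaces():
+#       print i.name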
+
+def list_headers(root):
+ modules = []
+ support_macros = None
+
+ for dirpath, dirnames, filenames in os.walk(root):
+ for name in filenames:
+ modname = os.path.splitext(name)
+ filename = os.path.join(dirpath, name)
+
+ if modname[1] == '.spt':
+ if name == "obj_perm_sets.spt":
+ support_macros = filename
+ elif "patterns" in modname[0]:
+ modules.append((modname[0], filename))
+ elif modname[1] == '.if':
+ modules.append((modname[0], filename))
+
+ return (modules, support_macros)
+
+
+def parse_headers(root, output=None, expand=True, debug=False):
+ import util
+
+ headers = refpolicy.Headers()
+
+ modules = []
+ support_macros = None
+
+ if os.path.isfile(root):
+ name = os.path.split(root)[1]
+ if name == '':
+ raise ValueError("Invalid file name %s" % root)
+ modname = os.path.splitext(name)
+ modules.append((modname[0], root))
+ all_modules, support_macros = list_headers(defaults.headers())
+ else:
+ modules, support_macros = list_headers(root)
+
+ if expand and not support_macros:
+ raise ValueError("could not find support macros (obj_perm_sets.spt)")
+
+ def o(msg):
+ if output:
+ output.write(msg)
+
+ def parse_file(f, module, spt=None):
+ global parse_file
+ if debug:
+ o("parsing file %s\n" % f)
+ try:
+ fd = open(f)
+ txt = fd.read()
+ fd.close()
+ parse_file = f
+ parse(txt, module, spt, debug)
+ except IOError, e:
+ return
+ except ValueError, e:
+ raise ValueError("error parsing file %s: %s" % (f, str(e)))
+
+ spt = None
+ if support_macros:
+ o("Parsing support macros (%s): " % support_macros)
+ spt = refpolicy.SupportMacros()
+ parse_file(support_macros, spt)
+
+ headers.children.append(spt)
+
+ # FIXME: Total hack - add in can_exec rather than parse the insanity
+ # of misc_macros. We are just going to pretend that this is an interface
+ # to make the expansion work correctly.
+ can_exec = refpolicy.Interface("can_exec")
+ av = access.AccessVector(["$1","$2","file","execute_no_trans","open", "read",
+ "getattr","lock","execute","ioctl"])
+
+ can_exec.children.append(refpolicy.AVRule(av))
+ headers.children.append(can_exec)
+
+ o("done.\n")
+
+ if output and not debug:
+ status = util.ConsoleProgressBar(sys.stdout, steps=len(modules))
+ status.start("Parsing interface files")
+
+ failures = []
+ for x in modules:
+ m = refpolicy.Module()
+ m.name = x[0]
+ try:
+ if expand:
+ parse_file(x[1], m, spt)
+ else:
+ parse_file(x[1], m)
+ except ValueError, e:
+ o(str(e) + "\n")
+ failures.append(x[1])
+ continue
+
+ headers.children.append(m)
+ if output and not debug:
+ status.step()
+
+ if len(failures):
+ o("failed to parse some headers: %s" % ", ".join(failures))
+
+ return headers
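+
+# Example (a sketch; the path below is hypothetical): parse a reference
+# policy header tree, writing progress to stdout:
+#
+#   hdrs = parse_headers("/usr/share/selinux/devel/include",
+#                        output=sys.stdout)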
diff --git a/lib/python2.7/site-packages/sepolgen/refpolicy.py b/lib/python2.7/site-packages/sepolgen/refpolicy.py
new file mode 100644
index 0000000..b8ed5c1
--- /dev/null
+++ b/lib/python2.7/site-packages/sepolgen/refpolicy.py
@@ -0,0 +1,917 @@
+# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
+#
+# Copyright (C) 2006 Red Hat
+# see file 'COPYING' for use and warranty information
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2 only
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+
+import string
+import itertools
+import selinux
+
+# OVERVIEW
+#
+# This file contains objects and functions used to represent the reference
+# policy (including the headers, M4 macros, and policy language statements).
+#
+# This representation is very different from the semantic representation
+# used in libsepol. Instead, it is a more typical abstract representation
+# used by the first stage of compilers. It is basically a parse tree.
+#
+# This choice is intentional as it allows us to handle the unprocessed
+# M4 statements - including the $1 style arguments - and to more easily generate
+# the data structures that we need for policy generation.
+#
+
+# Constants for referring to fields
+SRC_TYPE = 0
+TGT_TYPE = 1
+OBJ_CLASS = 2
+PERMS = 3
+ROLE = 4
+DEST_TYPE = 5
+
+# String representations of the above constants
+field_to_str = ["source", "target", "object", "permission", "role", "destination" ]
+str_to_field = { "source" : SRC_TYPE, "target" : TGT_TYPE, "object" : OBJ_CLASS,
+ "permission" : PERMS, "role" : ROLE, "destination" : DEST_TYPE }
+
+# Base Classes
+
+class PolicyBase:
+ def __init__(self, parent=None):
+ self.parent = None
+ self.comment = None
+
+class Node(PolicyBase):
+ """Base class objects produced from parsing the reference policy.
+
+ The Node class is used as the base class for any non-leaf
+ object produced by parsing the reference policy. This object
+ should contain a reference to its parent (or None for a top-level
+ object) and 0 or more children.
+
+ The general idea here is to have a very simple tree structure. Children
+ are not separated out by type. Instead the tree structure represents
+ fairly closely the real structure of the policy statements.
+
+ The object should be iterable - by default over all children but
+ subclasses are free to provide additional iterators over a subset
+ of their children (see Interface for an example).
+ """
+
+ def __init__(self, parent=None):
+ PolicyBase.__init__(self, parent)
+ self.children = []
+
+ def __iter__(self):
+ return iter(self.children)
+
+ # Not all of the iterators will return something on all Nodes, but
+ # they won't explode either. Putting them here is just easier.
+
+ # Top level nodes
+
+ def nodes(self):
+ return itertools.ifilter(lambda x: isinstance(x, Node), walktree(self))
+
+ def modules(self):
+ return itertools.ifilter(lambda x: isinstance(x, Module), walktree(self))
+
+ def interfaces(self):
+ return itertools.ifilter(lambda x: isinstance(x, Interface), walktree(self))
+
+ def templates(self):
+ return itertools.ifilter(lambda x: isinstance(x, Template), walktree(self))
+
+ def support_macros(self):
+ return itertools.ifilter(lambda x: isinstance(x, SupportMacros), walktree(self))
+
+ # Common policy statements
+
+ def module_declarations(self):
+ return itertools.ifilter(lambda x: isinstance(x, ModuleDeclaration), walktree(self))
+
+ def interface_calls(self):
+ return itertools.ifilter(lambda x: isinstance(x, InterfaceCall), walktree(self))
+
+ def avrules(self):
+ return itertools.ifilter(lambda x: isinstance(x, AVRule), walktree(self))
+
+ def typerules(self):
+ return itertools.ifilter(lambda x: isinstance(x, TypeRule), walktree(self))
+
+ def typeattributes(self):
+ """Iterate over all of the TypeAttribute children of this Interface."""
+ return itertools.ifilter(lambda x: isinstance(x, TypeAttribute), walktree(self))
+
+ def roleattributes(self):
+ """Iterate over all of the RoleAttribute children of this Interface."""
+ return itertools.ifilter(lambda x: isinstance(x, RoleAttribute), walktree(self))
+
+ def requires(self):
+ return itertools.ifilter(lambda x: isinstance(x, Require), walktree(self))
+
+ def roles(self):
+ return itertools.ifilter(lambda x: isinstance(x, Role), walktree(self))
+
+ def role_allows(self):
+ return itertools.ifilter(lambda x: isinstance(x, RoleAllow), walktree(self))
+
+ def role_types(self):
+ return itertools.ifilter(lambda x: isinstance(x, RoleType), walktree(self))
+
+ def __str__(self):
+ if self.comment:
+ return str(self.comment) + "\n" + self.to_string()
+ else:
+ return self.to_string()
+
+ def __repr__(self):
+ return "<%s(%s)>" % (self.__class__.__name__, self.to_string())
+
+ def to_string(self):
+ return ""
+
+
+class Leaf(PolicyBase):
+ def __init__(self, parent=None):
+ PolicyBase.__init__(self, parent)
+
+ def __str__(self):
+ if self.comment:
+ return str(self.comment) + "\n" + self.to_string()
+ else:
+ return self.to_string()
+
+ def __repr__(self):
+ return "<%s(%s)>" % (self.__class__.__name__, self.to_string())
+
+ def to_string(self):
+ return ""
+
+
+
+# Utility functions
+
+def walktree(node, depthfirst=True, showdepth=False, type=None):
+ """Iterate over a Node and its Children.
+
+ The walktree function iterates over a tree containing Nodes and
+ leaf objects. The iteration can perform a depth first or a breadth
+ first traversal of the tree (controlled by the depthfirst
+ parameter). The passed-in node will be returned.
+
+ This function will only work correctly for trees - arbitrary graphs
+ will likely cause infinite looping.
+ """
+ # We control depth-first versus breadth-first traversal by
+ # how we pop items off of the node stack.
+ if depthfirst:
+ index = -1
+ else:
+ index = 0
+
+ stack = [(node, 0)]
+ while len(stack) > 0:
+ cur, depth = stack.pop(index)
+ if showdepth:
+ yield cur, depth
+ else:
+ yield cur
+
+ # If the node is not a Node instance it must
+ # be a leaf - so no need to add it to the stack
+ if isinstance(cur, Node):
+ items = []
+ i = len(cur.children) - 1
+ while i >= 0:
+ if type is None or isinstance(cur.children[i], type):
+ items.append((cur.children[i], depth + 1))
+ i -= 1
+
+ stack.extend(items)
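+
+# Example (illustrative, for a parsed Module 'm'): the Node helpers
+# above build on walktree, filtering a full walk by type:
+#
+#   for rule in m.avrules():
+#       print rule.to_string()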
+
+def walknode(node, type=None):
+ """Iterate over the direct children of a Node.
+
+ The walknode function iterates over the direct children of a Node.
+ Unlike walktree it does not return the passed in node or
+ the children of any Node objects (that is, it does not go
+ beyond the current level in the tree).
+ """
+ for x in node:
+ if type is None or isinstance(x, type):
+ yield x
+
+
+def list_to_space_str(s, cont=('{', '}')):
+ """Convert a set (or any sequence type) into a string representation
+ formatted to match SELinux space separated list conventions.
+
+ For example the list ['read', 'write'] would be converted into:
+ '{ read write }'
+ """
+ l = len(s)
+ str = ""
+ if l < 1:
+ raise ValueError("cannot convert 0 len set to string")
+ str = " ".join(s)
+ if l == 1:
+ return str
+ else:
+ return cont[0] + " " + str + " " + cont[1]
+
+def list_to_comma_str(s):
+ l = len(s)
+ if l < 1:
+ raise ValueError("cannot conver 0 len set to comma string")
+
+ return ", ".join(s)
+
+# Basic SELinux types
+
+class IdSet(set):
+ def __init__(self, list=None):
+ if list:
+ set.__init__(self, list)
+ else:
+ set.__init__(self)
+ self.compliment = False
+
+ def to_space_str(self):
+ return list_to_space_str(self)
+
+ def to_comma_str(self):
+ return list_to_comma_str(self)
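+
+# Example (illustrative): IdSet(["read", "write"]).to_space_str()
+# returns '{ read write }' (member order may vary; sets are unordered).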
+
+class SecurityContext(Leaf):
+ """An SELinux security context with optional MCS / MLS fields."""
+ def __init__(self, context=None, parent=None):
+ """Create a SecurityContext object, optionally from a string.
+
+ Parameters:
+ [context] - string representing a security context. Same format
+ as a string passed to the from_string method.
+ """
+ Leaf.__init__(self, parent)
+ self.user = ""
+ self.role = ""
+ self.type = ""
+ self.level = None
+ if context is not None:
+ self.from_string(context)
+
+ def from_string(self, context):
+ """Parse a string representing a context into a SecurityContext.
+
+ The string should be in the standard format - e.g.,
+ 'user:role:type:level'.
+
+ Raises ValueError if the string is not parsable as a security context.
+ """
+ fields = context.split(":")
+ if len(fields) < 3:
+ raise ValueError("context string [%s] not in a valid format" % context)
+
+ self.user = fields[0]
+ self.role = fields[1]
+ self.type = fields[2]
+ if len(fields) > 3:
+ # FUTURE - normalize level fields to allow more comparisons to succeed.
+ self.level = string.join(fields[3:], ':')
+ else:
+ self.level = None
+
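+ # Example (illustrative):
+ #   SecurityContext("user_u:object_r:etc_t:s0") yields user="user_u",
+ #   role="object_r", type="etc_t", level="s0".
+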
+ def __eq__(self, other):
+ """Compare two SecurityContext objects - all fields must be exactly the
+ the same for the comparison to work. It is possible for the level fields
+ to be semantically the same yet syntactically different - in this case
+ this function will return false.
+ """
+ return self.user == other.user and \
+ self.role == other.role and \
+ self.type == other.type and \
+ self.level == other.level
+
+ def to_string(self, default_level=None):
+ """Return a string representing this security context.
+
+ By default, the string will contain an MCS / MLS level, falling
+ back to the default level passed in if none was set.
+
+ Arguments:
+ default_level - the default level to use if self.level is an
+ empty string.
+
+ Returns:
+ A string representing the security context in the form
+ 'user:role:type:level'.
+ """
+ fields = [self.user, self.role, self.type]
+ if self.level is None:
+ if default_level is None:
+ if selinux.is_selinux_mls_enabled() == 1:
+ fields.append("s0")
+ else:
+ fields.append(default_level)
+ else:
+ fields.append(self.level)
+ return ":".join(fields)
+
+class ObjectClass(Leaf):
+ """SELinux object class and permissions.
+
+ This class is a basic representation of an SELinux object
+ class - it does not represent separate common permissions -
+ just the union of the common and class specific permissions.
+ It is meant to be convenient for policy generation.
+ """
+ def __init__(self, name="", parent=None):
+ Leaf.__init__(self, parent)
+ self.name = name
+ self.perms = IdSet()
+
+# Basic statements
+
+class TypeAttribute(Leaf):
+ """SElinux typeattribute statement.
+
+ This class represents a typeattribute statement.
+ """
+ def __init__(self, parent=None):
+ Leaf.__init__(self, parent)
+ self.type = ""
+ self.attributes = IdSet()
+
+ def to_string(self):
+ return "typeattribute %s %s;" % (self.type, self.attributes.to_comma_str())
+
+class RoleAttribute(Leaf):
+ """SElinux roleattribute statement.
+
+ This class represents a roleattribute statement.
+ """
+ def __init__(self, parent=None):
+ Leaf.__init__(self, parent)
+ self.role = ""
+ self.roleattributes = IdSet()
+
+ def to_string(self):
+ return "roleattribute %s %s;" % (self.role, self.roleattributes.to_comma_str())
+
+
+class Role(Leaf):
+ def __init__(self, parent=None):
+ Leaf.__init__(self, parent)
+ self.role = ""
+ self.types = IdSet()
+
+ def to_string(self):
+ s = ""
+ for t in self.types:
+ s += "role %s types %s;\n" % (self.role, t)
+ return s
+
+class Type(Leaf):
+ def __init__(self, name="", parent=None):
+ Leaf.__init__(self, parent)
+ self.name = name
+ self.attributes = IdSet()
+ self.aliases = IdSet()
+
+ def to_string(self):
+ s = "type %s" % self.name
+ if len(self.aliases) > 0:
+ s = s + "alias %s" % self.aliases.to_space_str()
+ if len(self.attributes) > 0:
+ s = s + ", %s" % self.attributes.to_comma_str()
+ return s + ";"
+
+class TypeAlias(Leaf):
+ def __init__(self, parent=None):
+ Leaf.__init__(self, parent)
+ self.type = ""
+ self.aliases = IdSet()
+
+ def to_string(self):
+ return "typealias %s alias %s;" % (self.type, self.aliases.to_space_str())
+
+class Attribute(Leaf):
+ def __init__(self, name="", parent=None):
+ Leaf.__init__(self, parent)
+ self.name = name
+
+ def to_string(self):
+ return "attribute %s;" % self.name
+
+class Attribute_Role(Leaf):
+ def __init__(self, name="", parent=None):
+ Leaf.__init__(self, parent)
+ self.name = name
+
+ def to_string(self):
+ return "attribute_role %s;" % self.name
+
+
+# Classes representing rules
+
+class AVRule(Leaf):
+ """SELinux access vector (AV) rule.
+
+ The AVRule class represents all varieties of AV rules including
+ allow, dontaudit, auditallow, and neverallow (indicated by the flags
+ self.ALLOW, self.DONTAUDIT, self.AUDITALLOW, and self.NEVERALLOW
+ respectively).
+
+ The source and target types, object classes, and perms are all represented
+ by sets containing strings. Sets are used to make it simple to add
+ strings repeatedly while avoiding duplicates.
+
+ No checking is done to make certain that the symbols are valid or
+ consistent (e.g., perms that don't match the object classes). It is
+ even possible to put invalid types like '$1' into the rules to allow
+ storage of the reference policy interfaces.
+ """
+ ALLOW = 0
+ DONTAUDIT = 1
+ AUDITALLOW = 2
+ NEVERALLOW = 3
+
+ def __init__(self, av=None, parent=None):
+ Leaf.__init__(self, parent)
+ self.src_types = IdSet()
+ self.tgt_types = IdSet()
+ self.obj_classes = IdSet()
+ self.perms = IdSet()
+ self.rule_type = self.ALLOW
+ if av:
+ self.from_av(av)
+
+ def __rule_type_str(self):
+ if self.rule_type == self.ALLOW:
+ return "allow"
+ elif self.rule_type == self.DONTAUDIT:
+ return "dontaudit"
+ elif self.rule_type == self.NEVERALLOW:
+ return "neverallow"
+ else:
+ return "auditallow"
+
+ def from_av(self, av):
+ """Add the access from an access vector to this allow
+ rule.
+ """
+ self.src_types.add(av.src_type)
+ if av.src_type == av.tgt_type:
+ self.tgt_types.add("self")
+ else:
+ self.tgt_types.add(av.tgt_type)
+ self.obj_classes.add(av.obj_class)
+ self.perms.update(av.perms)
+
+ def to_string(self):
+ """Return a string representation of the rule
+ that is a valid policy language representation (assuming
+ that the types, object class, etc. are valid).
+ """
+ return "%s %s %s:%s %s;" % (self.__rule_type_str(),
+ self.src_types.to_space_str(),
+ self.tgt_types.to_space_str(),
+ self.obj_classes.to_space_str(),
+ self.perms.to_space_str())
+
+class TypeRule(Leaf):
+ """SELinux type rules.
+
+ This class is very similar to the AVRule class, but is for representing
+ the type rules (type_transition, type_change, and type_member). The major
+ differences are the lack of perms and the single destination type.
+ """
+ TYPE_TRANSITION = 0
+ TYPE_CHANGE = 1
+ TYPE_MEMBER = 2
+
+ def __init__(self, parent=None):
+ Leaf.__init__(self, parent)
+ self.src_types = IdSet()
+ self.tgt_types = IdSet()
+ self.obj_classes = IdSet()
+ self.dest_type = ""
+ self.rule_type = self.TYPE_TRANSITION
+
+ def __rule_type_str(self):
+ if self.rule_type == self.TYPE_TRANSITION:
+ return "type_transition"
+ elif self.rule_type == self.TYPE_CHANGE:
+ return "type_change"
+ else:
+ return "type_member"
+
+ def to_string(self):
+ return "%s %s %s:%s %s;" % (self.__rule_type_str(),
+ self.src_types.to_space_str(),
+ self.tgt_types.to_space_str(),
+ self.obj_classes.to_space_str(),
+ self.dest_type)
+
+class RoleAllow(Leaf):
+ def __init__(self, parent=None):
+ Leaf.__init__(self, parent)
+ self.src_roles = IdSet()
+ self.tgt_roles = IdSet()
+
+ def to_string(self):
+ return "allow %s %s;" % (self.src_roles.to_comma_str(),
+ self.tgt_roles.to_comma_str())
+
+class RoleType(Leaf):
+ def __init__(self, parent=None):
+ Leaf.__init__(self, parent)
+ self.role = ""
+ self.types = IdSet()
+
+ def to_string(self):
+ s = ""
+ for t in self.types:
+ s += "role %s types %s;\n" % (self.role, t)
+ return s
+
+class ModuleDeclaration(Leaf):
+ def __init__(self, parent=None):
+ Leaf.__init__(self, parent)
+ self.name = ""
+ self.version = ""
+ self.refpolicy = False
+
+ def to_string(self):
+ if self.refpolicy:
+ return "policy_module(%s, %s)" % (self.name, self.version)
+ else:
+ return "module %s %s;" % (self.name, self.version)
+
+class Conditional(Node):
+ def __init__(self, parent=None):
+ Node.__init__(self, parent)
+ self.cond_expr = []
+
+ def to_string(self):
+ return "[If %s]" % list_to_space_str(self.cond_expr, cont=("", ""))
+
+class Bool(Leaf):
+ def __init__(self, parent=None):
+ Leaf.__init__(self, parent)
+ self.name = ""
+ self.state = False
+
+ def to_string(self):
+ s = "bool %s " % self.name
+ if self.state:
+ return s + "true"
+ else:
+ return s + "false"
+
+class InitialSid(Leaf):
+ def __init__(self, parent=None):
+ Leaf.__init__(self, parent)
+ self.name = ""
+ self.context = None
+
+ def to_string(self):
+ return "sid %s %s" % (self.name, str(self.context))
+
+class GenfsCon(Leaf):
+ def __init__(self, parent=None):
+ Leaf.__init__(self, parent)
+ self.filesystem = ""
+ self.path = ""
+ self.context = None
+
+ def to_string(self):
+ return "genfscon %s %s %s" % (self.filesystem, self.path, str(self.context))
+
+class FilesystemUse(Leaf):
+ XATTR = 1
+ TRANS = 2
+ TASK = 3
+
+ def __init__(self, parent=None):
+ Leaf.__init__(self, parent)
+ self.type = self.XATTR
+ self.filesystem = ""
+ self.context = None
+
+ def to_string(self):
+ s = ""
+ if self.type == self.XATTR:
+ s = "fs_use_xattr"
+ elif self.type == self.TRANS:
+ s = "fs_use_trans"
+ elif self.type == self.TASK:
+ s = "fs_use_task"
+
+ return "%s %s %s;" % (s, self.filesystem, str(self.context))
+
+class PortCon(Leaf):
+ def __init__(self, parent=None):
+ Leaf.__init__(self, parent)
+ self.port_type = ""
+ self.port_number = ""
+ self.context = None
+
+ def to_string(self):
+ return "portcon %s %s %s" % (self.port_type, self.port_number, str(self.context))
+
+class NodeCon(Leaf):
+ def __init__(self, parent=None):
+ Leaf.__init__(self, parent)
+ self.start = ""
+ self.end = ""
+ self.context = None
+
+ def to_string(self):
+ return "nodecon %s %s %s" % (self.start, self.end, str(self.context))
+
+class NetifCon(Leaf):
+ def __init__(self, parent=None):
+ Leaf.__init__(self, parent)
+ self.interface = ""
+ self.interface_context = None
+ self.packet_context = None
+
+ def to_string(self):
+ return "netifcon %s %s %s" % (self.interface, str(self.interface_context),
+ str(self.packet_context))
+
+class PirqCon(Leaf):
+ def __init__(self, parent=None):
+ Leaf.__init__(self, parent)
+ self.pirq_number = ""
+ self.context = None
+
+ def to_string(self):
+ return "pirqcon %s %s" % (self.pirq_number, str(self.context))
+
+class IomemCon(Leaf):
+ def __init__(self, parent=None):
+ Leaf.__init__(self, parent)
+ self.device_mem = ""
+ self.context = None
+
+ def to_string(self):
+ return "iomemcon %s %s" % (self.device_mem, str(self.context))
+
+class IoportCon(Leaf):
+ def __init__(self, parent=None):
+ Leaf.__init__(self, parent)
+ self.ioport = ""
+ self.context = None
+
+ def to_string(self):
+ return "ioportcon %s %s" % (self.ioport, str(self.context))
+
+class PciDeviceCon(Leaf):
+ def __init__(self, parent=None):
+ Leaf.__init__(self, parent)
+ self.device = ""
+ self.context = None
+
+ def to_string(self):
+ return "pcidevicecon %s %s" % (self.device, str(self.context))
+
+class DeviceTreeCon(Leaf):
+ def __init__(self, parent=None):
+ Leaf.__init__(self, parent)
+ self.path = ""
+ self.context = None
+
+ def to_string(self):
+ return "devicetreecon %s %s" % (self.path, str(self.context))
+
+# Reference policy specific types
+
+def print_tree(head):
+ for node, depth in walktree(head, showdepth=True):
+ s = ""
+ for i in range(depth):
+ s = s + "\t"
+ print s + str(node)
+
+
+class Headers(Node):
+ def __init__(self, parent=None):
+ Node.__init__(self, parent)
+
+ def to_string(self):
+ return "[Headers]"
+
+
+class Module(Node):
+ def __init__(self, parent=None):
+ Node.__init__(self, parent)
+
+ def to_string(self):
+ return ""
+
+class Interface(Node):
+ """A reference policy interface definition.
+
+ This class represents a reference policy interface definition.
+ """
+ def __init__(self, name="", parent=None):
+ Node.__init__(self, parent)
+ self.name = name
+
+ def to_string(self):
+ return "[Interface name: %s]" % self.name
+
+class TunablePolicy(Node):
+ def __init__(self, parent=None):
+ Node.__init__(self, parent)
+ self.cond_expr = []
+
+ def to_string(self):
+ return "[Tunable Policy %s]" % list_to_space_str(self.cond_expr, cont=("", ""))
+
+class Template(Node):
+ def __init__(self, name="", parent=None):
+ Node.__init__(self, parent)
+ self.name = name
+
+ def to_string(self):
+ return "[Template name: %s]" % self.name
+
+class IfDef(Node):
+ def __init__(self, name="", parent=None):
+ Node.__init__(self, parent)
+ self.name = name
+
+ def to_string(self):
+ return "[Ifdef name: %s]" % self.name
+
+class InterfaceCall(Leaf):
+ def __init__(self, ifname="", parent=None):
+ Leaf.__init__(self, parent)
+ self.ifname = ifname
+ self.args = []
+ self.comments = []
+
+ def matches(self, other):
+ if self.ifname != other.ifname:
+ return False
+ if len(self.args) != len(other.args):
+ return False
+ for a,b in zip(self.args, other.args):
+ if a != b:
+ return False
+ return True
+
+ def to_string(self):
+ s = "%s(" % self.ifname
+ i = 0
+ for a in self.args:
+ if isinstance(a, list):
+ str = list_to_space_str(a)
+ else:
+ str = a
+
+ if i != 0:
+ s = s + ", %s" % str
+ else:
+ s = s + str
+ i += 1
+ return s + ")"
+
+class OptionalPolicy(Node):
+ def __init__(self, parent=None):
+ Node.__init__(self, parent)
+
+ def to_string(self):
+ return "[Optional Policy]"
+
+class SupportMacros(Node):
+ def __init__(self, parent=None):
+ Node.__init__(self, parent)
+ self.map = None
+
+ def to_string(self):
+ return "[Support Macros]"
+
+ def __expand_perm(self, perm):
+ # Recursive expansion - the assumption is that these
+ # are ordered correctly so that no macro is used before
+ # it is defined
+ s = set()
+ if self.map.has_key(perm):
+ for p in self.by_name(perm):
+ s.update(self.__expand_perm(p))
+ else:
+ s.add(perm)
+ return s
+
+ def __gen_map(self):
+ self.map = {}
+ for x in self:
+ exp_perms = set()
+ for perm in x.perms:
+ exp_perms.update(self.__expand_perm(perm))
+ self.map[x.name] = exp_perms
+
+ def by_name(self, name):
+ if not self.map:
+ self.__gen_map()
+ return self.map[name]
+
+ def has_key(self, name):
+ if not self.map:
+ self.__gen_map()
+ return self.map.has_key(name)
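+
+ # Example (illustrative): given a support macro definition such as
+ #   define(`rw_file_perms', `{ read write getattr }')
+ # by_name("rw_file_perms") returns the fully expanded permission set.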
+
+class Require(Leaf):
+ def __init__(self, parent=None):
+ Leaf.__init__(self, parent)
+ self.types = IdSet()
+ self.obj_classes = { }
+ self.roles = IdSet()
+ self.data = IdSet()
+ self.users = IdSet()
+
+ def add_obj_class(self, obj_class, perms):
+ p = self.obj_classes.setdefault(obj_class, IdSet())
+ p.update(perms)
+
+
+ def to_string(self):
+ s = []
+ s.append("require {")
+ for type in self.types:
+ s.append("\ttype %s;" % type)
+ for obj_class, perms in self.obj_classes.items():
+ s.append("\tclass %s %s;" % (obj_class, perms.to_space_str()))
+ for role in self.roles:
+ s.append("\trole %s;" % role)
+ for bool in self.data:
+ s.append("\tbool %s;" % bool)
+ for user in self.users:
+ s.append("\tuser %s;" % user)
+ s.append("}")
+
+ # Handle empty requires
+ if len(s) == 2:
+ return ""
+
+ return "\n".join(s)
+
+
+class ObjPermSet:
+ def __init__(self, name):
+ self.name = name
+ self.perms = set()
+
+ def to_string(self):
+ return "define(`%s', `%s')" % (self.name, self.perms.to_space_str())
+
+class ClassMap:
+ def __init__(self, obj_class, perms):
+ self.obj_class = obj_class
+ self.perms = perms
+
+ def to_string(self):
+ return self.obj_class + ": " + self.perms
+
+class Comment:
+ def __init__(self, l=None):
+ if l:
+ self.lines = l
+ else:
+ self.lines = []
+
+ def to_string(self):
+ # If there are no lines, treat this as a spacer between
+ # policy statements and return a new line.
+ if len(self.lines) == 0:
+ return ""
+ else:
+ out = []
+ for line in self.lines:
+ out.append("#" + line)
+ return "\n".join(out)
+
+ def merge(self, other):
+ if len(other.lines):
+ for line in other.lines:
+ if line != "":
+ self.lines.append(line)
+
+ def __str__(self):
+ return self.to_string()
+
+
diff --git a/lib/python2.7/site-packages/sepolgen/sepolgeni18n.py b/lib/python2.7/site-packages/sepolgen/sepolgeni18n.py
new file mode 100644
index 0000000..998c435
--- /dev/null
+++ b/lib/python2.7/site-packages/sepolgen/sepolgeni18n.py
@@ -0,0 +1,26 @@
+# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
+#
+# Copyright (C) 2006 Red Hat
+# see file 'COPYING' for use and warranty information
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2 only
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+
+try:
+ import gettext
+ t = gettext.translation( 'yumex' )
+ _ = t.gettext
+except:
+ def _(str):
+ return str
diff --git a/lib/python2.7/site-packages/sepolgen/util.py b/lib/python2.7/site-packages/sepolgen/util.py
new file mode 100644
index 0000000..74a11f5
--- /dev/null
+++ b/lib/python2.7/site-packages/sepolgen/util.py
@@ -0,0 +1,87 @@
+# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
+#
+# Copyright (C) 2006 Red Hat
+# see file 'COPYING' for use and warranty information
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2 only
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+
+class ConsoleProgressBar:
+ def __init__(self, out, steps=100, indicator='#'):
+ self.blocks = 0
+ self.current = 0
+ self.steps = steps
+ self.indicator = indicator
+ self.out = out
+ self.done = False
+
+ def start(self, message=None):
+ self.done = False
+ if message:
+ self.out.write('\n%s:\n' % message)
+ self.out.write('%--10---20---30---40---50---60---70---80---90--100\n')
+
+ def step(self, n=1):
+ self.current += n
+
+ old = self.blocks
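+ # Map progress onto a 50-character bar: percent complete divided
+ # by two, so each indicator character represents 2% of the steps.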
+ self.blocks = int(round(self.current / float(self.steps) * 100) / 2)
+
+ if self.blocks > 50:
+ self.blocks = 50
+
+ new = self.blocks - old
+
+ self.out.write(self.indicator * new)
+ self.out.flush()
+
+ if self.blocks == 50 and not self.done:
+ self.done = True
+ self.out.write("\n")
+
+def set_to_list(s):
+ l = []
+ l.extend(s)
+ return l
+
+def first(s, sorted=False):
+ """
+ Return the first element of a set.
+
+ It is sometimes useful to return the first element from a set but,
+ because sets are not indexable, this is rather hard. This function
+ will return the first element from a set. If sorted is True, then
+ the set will first be sorted (making this an expensive operation).
+ Otherwise a random element will be returned (as sets are not ordered).
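+
+ For example, first(set([3, 1, 2]), sorted=True) returns 1.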
+ """
+ if not len(s):
+ raise IndexError("empty containter")
+
+ if sorted:
+ l = set_to_list(s)
+ l.sort()
+ return l[0]
+ else:
+ for x in s:
+ return x
+
+if __name__ == "__main__":
+ import sys
+ import time
+ p = ConsoleProgressBar(sys.stdout, steps=999)
+ p.start("computing pi")
+ for i in range(999):
+ p.step()
+ time.sleep(0.001)
+
diff --git a/lib/python2.7/site-packages/sepolgen/yacc.py b/lib/python2.7/site-packages/sepolgen/yacc.py
new file mode 100644
index 0000000..bc4536d
--- /dev/null
+++ b/lib/python2.7/site-packages/sepolgen/yacc.py
@@ -0,0 +1,2209 @@
+#-----------------------------------------------------------------------------
+# ply: yacc.py
+#
+# Author(s): David M. Beazley (dave@dabeaz.com)
+#
+# Copyright (C) 2001-2006, David M. Beazley
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+# See the file COPYING for a complete copy of the LGPL.
+#
+#
+# This implements an LR parser that is constructed from grammar rules defined
+# as Python functions. The grammar is specified by supplying the BNF inside
+# Python documentation strings. The inspiration for this technique was borrowed
+# from John Aycock's Spark parsing system. PLY might be viewed as a cross between
+# Spark and the GNU bison utility.
+#
+# The current implementation is only somewhat object-oriented. The
+# LR parser itself is defined in terms of an object (which allows multiple
+# parsers to co-exist). However, most of the variables used during table
+# construction are defined in terms of global variables. Users shouldn't
+# notice unless they are trying to define multiple parsers at the same
+# time using threads (in which case they should have their head examined).
+#
+# This implementation supports both SLR and LALR(1) parsing. LALR(1)
+# support was originally implemented by Elias Ioup (ezioup@alumni.uchicago.edu),
+# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles,
+# Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced
+# by the more efficient DeRemer and Pennello algorithm.
+#
+# :::::::: WARNING :::::::
+#
+# Construction of LR parsing tables is fairly complicated and expensive.
+# To make this module run fast, a *LOT* of work has been put into
+# optimization---often at the expense of readability and what might be
+# considered good Python "coding style." Modify the code at your
+# own risk!
+# ----------------------------------------------------------------------------
+
+__version__ = "2.2"
+
+#-----------------------------------------------------------------------------
+# === User configurable parameters ===
+#
+# Change these to modify the default behavior of yacc (if you wish)
+#-----------------------------------------------------------------------------
+
+yaccdebug = 1 # Debugging mode. If set, yacc generates
+ # a 'parser.out' file in the current directory
+
+debug_file = 'parser.out' # Default name of the debugging file
+tab_module = 'parsetab' # Default name of the table module
+default_lr = 'LALR' # Default LR table generation method
+
+error_count = 3 # Number of symbols that must be shifted to leave recovery mode
+
+import re, types, sys, cStringIO, hashlib, os.path
+
+# Exception raised for yacc-related errors
+class YaccError(Exception): pass
+
+#-----------------------------------------------------------------------------
+# === LR Parsing Engine ===
+#
+# The following classes are used for the LR parser itself. These are not
+# used during table construction and are independent of the actual LR
+# table generation algorithm
+#-----------------------------------------------------------------------------
+
+# This class is used to hold non-terminal grammar symbols during parsing.
+# It normally has the following attributes set:
+# .type = Grammar symbol type
+# .value = Symbol value
+# .lineno = Starting line number
+# .endlineno = Ending line number (optional, set automatically)
+# .lexpos = Starting lex position
+# .endlexpos = Ending lex position (optional, set automatically)
+
+class YaccSymbol:
+ def __str__(self): return self.type
+ def __repr__(self): return str(self)
+
+# This class is a wrapper around the objects actually passed to each
+# grammar rule. Index lookup and assignment actually assign the
+# .value attribute of the underlying YaccSymbol object.
+# The lineno() method returns the line number of a given
+# item (or 0 if not defined). The linespan() method returns
+# a tuple of (startline,endline) representing the range of lines
+# for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos)
+# representing the range of positional information for a symbol.
+
+class YaccProduction:
+ def __init__(self,s,stack=None):
+ self.slice = s
+ self.pbstack = []
+ self.stack = stack
+
+ def __getitem__(self,n):
+ if type(n) == types.IntType:
+ if n >= 0: return self.slice[n].value
+ else: return self.stack[n].value
+ else:
+ return [s.value for s in self.slice[n.start:n.stop:n.step]]
+
+ def __setitem__(self,n,v):
+ self.slice[n].value = v
+
+ def __len__(self):
+ return len(self.slice)
+
+ def lineno(self,n):
+ return getattr(self.slice[n],"lineno",0)
+
+ def linespan(self,n):
+ startline = getattr(self.slice[n],"lineno",0)
+ endline = getattr(self.slice[n],"endlineno",startline)
+ return startline,endline
+
+ def lexpos(self,n):
+ return getattr(self.slice[n],"lexpos",0)
+
+ def lexspan(self,n):
+ startpos = getattr(self.slice[n],"lexpos",0)
+ endpos = getattr(self.slice[n],"endlexpos",startpos)
+ return startpos,endpos
+
+ def pushback(self,n):
+ if n <= 0:
+ raise ValueError, "Expected a positive value"
+ if n > (len(self.slice)-1):
+ raise ValueError, "Can't push %d tokens. Only %d are available." % (n,len(self.slice)-1)
+ for i in range(0,n):
+ self.pbstack.append(self.slice[-i-1])
+
+# The LR Parsing engine. This is defined as a class so that multiple parsers
+# can exist in the same process. A user never instantiates this directly.
+# Instead, the global yacc() function should be used to create a suitable Parser
+# object.
+
+class Parser:
+ def __init__(self,magic=None):
+
+ # This is a hack to keep users from trying to instantiate a Parser
+ # object directly.
+
+ if magic != "xyzzy":
+ raise YaccError, "Can't instantiate Parser. Use yacc() instead."
+
+ # Reset internal state
+ self.productions = None # List of productions
+ self.errorfunc = None # Error handling function
+ self.action = { } # LR Action table
+ self.goto = { } # LR goto table
+ self.require = { } # Attribute require table
+ self.method = "Unknown LR" # Table construction method used
+
+ def errok(self):
+ self.errorcount = 0
+
+ def restart(self):
+ del self.statestack[:]
+ del self.symstack[:]
+ sym = YaccSymbol()
+ sym.type = '$end'
+ self.symstack.append(sym)
+ self.statestack.append(0)
+
+ def parse(self,input=None,lexer=None,debug=0):
+ lookahead = None # Current lookahead symbol
+ lookaheadstack = [ ] # Stack of lookahead symbols
+ actions = self.action # Local reference to action table
+ goto = self.goto # Local reference to goto table
+ prod = self.productions # Local reference to production list
+ pslice = YaccProduction(None) # Production object passed to grammar rules
+ pslice.parser = self # Parser object
+ self.errorcount = 0 # Used during error recovery
+
+ # If no lexer was given, we will try to use the lex module
+ if not lexer:
+ import lex
+ lexer = lex.lexer
+
+ pslice.lexer = lexer
+
+ # If input was supplied, pass to lexer
+ if input:
+ lexer.input(input)
+
+ # Tokenize function
+ get_token = lexer.token
+
+ statestack = [ ] # Stack of parsing states
+ self.statestack = statestack
+ symstack = [ ] # Stack of grammar symbols
+ self.symstack = symstack
+
+ pslice.stack = symstack # Put in the production
+ errtoken = None # Err token
+
+ # The start state is assumed to be (0,$end)
+ statestack.append(0)
+ sym = YaccSymbol()
+ sym.type = '$end'
+ symstack.append(sym)
+
+ while 1:
+ # Get the next symbol on the input. If a lookahead symbol
+ # is already set, we just use that. Otherwise, we'll pull
+ # the next token off of the lookaheadstack or from the lexer
+ if debug > 1:
+ print 'state', statestack[-1]
+ if not lookahead:
+ if not lookaheadstack:
+ lookahead = get_token() # Get the next token
+ else:
+ lookahead = lookaheadstack.pop()
+ if not lookahead:
+ lookahead = YaccSymbol()
+ lookahead.type = '$end'
+ if debug:
+ errorlead = ("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip()
+
+ # Check the action table
+ s = statestack[-1]
+ ltype = lookahead.type
+ t = actions.get((s,ltype),None)
+
+ if debug > 1:
+ print 'action', t
+ if t is not None:
+ if t > 0:
+ # shift a symbol on the stack
+ if ltype == '$end':
+ # Error, end of input
+ sys.stderr.write("yacc: Parse error. EOF\n")
+ return
+ statestack.append(t)
+ if debug > 1:
+ sys.stderr.write("%-60s shift state %s\n" % (errorlead, t))
+ symstack.append(lookahead)
+ lookahead = None
+
+ # Decrease error count on successful shift
+ if self.errorcount > 0:
+ self.errorcount -= 1
+
+ continue
+
+ if t < 0:
+ # reduce a symbol on the stack, emit a production
+ p = prod[-t]
+ pname = p.name
+ plen = p.len
+
+ # Get production function
+ sym = YaccSymbol()
+ sym.type = pname # Production name
+ sym.value = None
+ if debug > 1:
+ sys.stderr.write("%-60s reduce %d\n" % (errorlead, -t))
+
+ if plen:
+ targ = symstack[-plen-1:]
+ targ[0] = sym
+ try:
+ sym.lineno = targ[1].lineno
+ sym.endlineno = getattr(targ[-1],"endlineno",targ[-1].lineno)
+ sym.lexpos = targ[1].lexpos
+ sym.endlexpos = getattr(targ[-1],"endlexpos",targ[-1].lexpos)
+ except AttributeError:
+ sym.lineno = 0
+ del symstack[-plen:]
+ del statestack[-plen:]
+ else:
+ sym.lineno = 0
+ targ = [ sym ]
+ pslice.slice = targ
+ pslice.pbstack = []
+ # Call the grammar rule with our special slice object
+ p.func(pslice)
+
+ # If there was a pushback, put that on the stack
+ if pslice.pbstack:
+ lookaheadstack.append(lookahead)
+ for _t in pslice.pbstack:
+ lookaheadstack.append(_t)
+ lookahead = None
+
+ symstack.append(sym)
+ statestack.append(goto[statestack[-1],pname])
+ continue
+
+ if t == 0:
+ n = symstack[-1]
+ return getattr(n,"value",None)
+ sys.stderr.write(errorlead, "\n")
+
+ if t is None:
+ if debug:
+ sys.stderr.write(errorlead + "\n")
+ # We have some kind of parsing error here. To handle
+ # this, we are going to push the current token onto
+ # the tokenstack and replace it with an 'error' token.
+ # If there are any synchronization rules, they may
+ # catch it.
+ #
+ # In addition to pushing the error token, we call
+ # the user defined p_error() function if this is the
+ # first syntax error. This function is only called if
+ # errorcount == 0.
+ if not self.errorcount:
+ self.errorcount = error_count
+ errtoken = lookahead
+ if errtoken.type == '$end':
+ errtoken = None # End of file!
+ if self.errorfunc:
+ global errok,token,restart
+ errok = self.errok # Set some special functions available in error recovery
+ token = get_token
+ restart = self.restart
+ tok = self.errorfunc(errtoken)
+ del errok, token, restart # Delete special functions
+
+ if not self.errorcount:
+ # User must have done some kind of panic
+ # mode recovery on their own. The
+ # returned token is the next lookahead
+ lookahead = tok
+ errtoken = None
+ continue
+ else:
+ if errtoken:
+ if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
+ else: lineno = 0
+ if lineno:
+ sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
+ else:
+ sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
+ else:
+ sys.stderr.write("yacc: Parse error in input. EOF\n")
+ return
+
+ else:
+ self.errorcount = error_count
+
+ # case 1: the statestack only has 1 entry on it. If we're in this state, the
+ # entire parse has been rolled back and we're completely hosed. The token is
+ # discarded and we just keep going.
+
+ if len(statestack) <= 1 and lookahead.type != '$end':
+ lookahead = None
+ errtoken = None
+ # Nuke the pushback stack
+ del lookaheadstack[:]
+ continue
+
+ # case 2: the statestack has a couple of entries on it, but we're
+ # at the end of the file. nuke the top entry and generate an error token
+
+ # Start nuking entries on the stack
+ if lookahead.type == '$end':
+ # Whoa. We're really hosed here. Bail out
+ return
+
+ if lookahead.type != 'error':
+ sym = symstack[-1]
+ if sym.type == 'error':
+ # Hmmm. Error is on top of stack, we'll just nuke input
+ # symbol and continue
+ lookahead = None
+ continue
+ t = YaccSymbol()
+ t.type = 'error'
+ if hasattr(lookahead,"lineno"):
+ t.lineno = lookahead.lineno
+ t.value = lookahead
+ lookaheadstack.append(lookahead)
+ lookahead = t
+ else:
+ symstack.pop()
+ statestack.pop()
+
+ continue
+
+ # Call an error function here
+ raise RuntimeError, "yacc: internal parser error!!!\n"
+
+# -----------------------------------------------------------------------------
+# === Parser Construction ===
+#
+# The following functions and variables are used to implement the yacc() function
+# itself. This is pretty hairy stuff involving lots of error checking,
+# construction of LR items, kernels, and so forth. Although a lot of
+# this work is done using global variables, the resulting Parser object
+# is completely self contained--meaning that it is safe to repeatedly
+# call yacc() with different grammars in the same application.
+# -----------------------------------------------------------------------------
+
+# -----------------------------------------------------------------------------
+# validate_file()
+#
+# This function checks to see if there are duplicated p_rulename() functions
+# in the parser module file. Without this function, it is really easy for
+# users to make mistakes by cutting and pasting code fragments (and it's a real
+# bugger to try and figure out why the resulting parser doesn't work). Therefore,
+# we just do a little regular expression pattern matching of def statements
+# to try and detect duplicates.
+# -----------------------------------------------------------------------------
+
+def validate_file(filename):
+ base,ext = os.path.splitext(filename)
+ if ext != '.py': return 1 # No idea. Assume it's okay.
+
+ try:
+ f = open(filename)
+ lines = f.readlines()
+ f.close()
+ except IOError:
+ return 1 # Oh well
+
+ # Match def p_funcname(
+ fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')
+ counthash = { }
+ linen = 1
+ noerror = 1
+ for l in lines:
+ m = fre.match(l)
+ if m:
+ name = m.group(1)
+ prev = counthash.get(name)
+ if not prev:
+ counthash[name] = linen
+ else:
+ sys.stderr.write("%s:%d: Function %s redefined. Previously defined on line %d\n" % (filename,linen,name,prev))
+ noerror = 0
+ linen += 1
+ return noerror
+
+# This function looks for functions that might be grammar rules, but which don't have the proper p_suffix.
+def validate_dict(d):
+ for n,v in d.items():
+ if n[0:2] == 'p_' and type(v) in (types.FunctionType, types.MethodType): continue
+ if n[0:2] == 't_': continue
+
+ if n[0:2] == 'p_':
+ sys.stderr.write("yacc: Warning. '%s' not defined as a function\n" % n)
+ if 1 and isinstance(v,types.FunctionType) and v.func_code.co_argcount == 1:
+ try:
+ doc = v.__doc__.split(" ")
+ if doc[1] == ':':
+ sys.stderr.write("%s:%d: Warning. Possible grammar rule '%s' defined without p_ prefix.\n" % (v.func_code.co_filename, v.func_code.co_firstlineno,n))
+ except StandardError:
+ pass
+
+# -----------------------------------------------------------------------------
+# === GRAMMAR FUNCTIONS ===
+#
+# The following global variables and functions are used to store, manipulate,
+# and verify the grammar rules specified by the user.
+# -----------------------------------------------------------------------------
+
+# Initialize all of the global variables used during grammar construction
+def initialize_vars():
+ global Productions, Prodnames, Prodmap, Terminals
+ global Nonterminals, First, Follow, Precedence, LRitems
+ global Errorfunc, Signature, Requires
+
+ Productions = [None] # A list of all of the productions. The first
+ # entry is always reserved for the purpose of
+ # building an augmented grammar
+
+ Prodnames = { } # A dictionary mapping the names of nonterminals to a list of all
+ # productions of that nonterminal.
+
+ Prodmap = { } # A dictionary that is only used to detect duplicate
+ # productions.
+
+ Terminals = { } # A dictionary mapping the names of terminal symbols to a
+ # list of the rules where they are used.
+
+ Nonterminals = { } # A dictionary mapping names of nonterminals to a list
+ # of rule numbers where they are used.
+
+ First = { } # A dictionary of precomputed FIRST(x) symbols
+
+ Follow = { } # A dictionary of precomputed FOLLOW(x) symbols
+
+ Precedence = { } # Precedence rules for each terminal. Contains tuples of the
+ # form ('right',level) or ('nonassoc', level) or ('left',level)
+
+ LRitems = [ ] # A list of all LR items for the grammar. These are the
+ # productions with the "dot" like E -> E . PLUS E
+
+ Errorfunc = None # User defined error handler
+
+    Signature = hashlib.sha256() # Digital signature of the grammar rules, precedence
+                                 # and other information.  Used to determine when a
+                                 # parsing table needs to be regenerated.
+
+ Requires = { } # Requires list
+
+ # File objects used when creating the parser.out debugging file
+ global _vf, _vfc
+ _vf = cStringIO.StringIO()
+ _vfc = cStringIO.StringIO()
+
+# -----------------------------------------------------------------------------
+# class Production:
+#
+# This class stores the raw information about a single production or grammar rule.
+# It has a few required attributes:
+#
+# name - Name of the production (nonterminal)
+# prod - A list of symbols making up its production
+# number - Production number.
+#
+# In addition, a few additional attributes are used to help with debugging or
+# optimization of table generation.
+#
+# file - File where production action is defined.
+# lineno - Line number where action is defined
+# func - Action function
+# prec - Precedence level
+#       lr_next  - Next LR item. For example, if this item is 'E -> E . PLUS E',
+#                  then lr_next refers to 'E -> E PLUS . E'
+# lr_index - LR item index (location of the ".") in the prod list.
+# lookaheads - LALR lookahead symbols for this item
+# len - Length of the production (number of symbols on right hand side)
+# -----------------------------------------------------------------------------
+
+class Production:
+ def __init__(self,**kw):
+ for k,v in kw.items():
+ setattr(self,k,v)
+ self.lr_index = -1
+ self.lr0_added = 0 # Flag indicating whether or not added to LR0 closure
+ self.lr1_added = 0 # Flag indicating whether or not added to LR1
+ self.usyms = [ ]
+ self.lookaheads = { }
+ self.lk_added = { }
+ self.setnumbers = [ ]
+
+ def __str__(self):
+ if self.prod:
+ s = "%s -> %s" % (self.name," ".join(self.prod))
+ else:
+ s = "%s -> <empty>" % self.name
+ return s
+
+ def __repr__(self):
+ return str(self)
+
+ # Compute lr_items from the production
+ def lr_item(self,n):
+ if n > len(self.prod): return None
+ p = Production()
+ p.name = self.name
+ p.prod = list(self.prod)
+ p.number = self.number
+ p.lr_index = n
+ p.lookaheads = { }
+ p.setnumbers = self.setnumbers
+ p.prod.insert(n,".")
+ p.prod = tuple(p.prod)
+ p.len = len(p.prod)
+ p.usyms = self.usyms
+
+ # Precompute list of productions immediately following
+ try:
+ p.lrafter = Prodnames[p.prod[n+1]]
+ except (IndexError,KeyError),e:
+ p.lrafter = []
+ try:
+ p.lrbefore = p.prod[n-1]
+ except IndexError:
+ p.lrbefore = None
+
+ return p
+
+class MiniProduction:
+ pass
+
+# Regex matching valid identifiers (rule and symbol names)
+_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')
+
+# -----------------------------------------------------------------------------
+# add_production()
+#
+# Given an action function, this function assembles a production rule.
+# The production rule is assumed to be found in the function's docstring.
+# This rule has the general syntax:
+#
+# name1 ::= production1
+# | production2
+# | production3
+# ...
+# | productionn
+# name2 ::= production1
+# | production2
+# ...
+# -----------------------------------------------------------------------------
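+
+# Illustrative example (not from the original module): a rule function whose
+# docstring uses this syntax.  add_production() is called once for the head
+# rule and once for each '|' continuation line:
+#
+#     def p_expression(p):
+#         '''expression : expression PLUS term
+#                       | term'''
+#         # ... semantic action here ...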
+
+def add_production(f,file,line,prodname,syms):
+
+ if Terminals.has_key(prodname):
+ sys.stderr.write("%s:%d: Illegal rule name '%s'. Already defined as a token.\n" % (file,line,prodname))
+ return -1
+ if prodname == 'error':
+ sys.stderr.write("%s:%d: Illegal rule name '%s'. error is a reserved word.\n" % (file,line,prodname))
+ return -1
+
+ if not _is_identifier.match(prodname):
+ sys.stderr.write("%s:%d: Illegal rule name '%s'\n" % (file,line,prodname))
+ return -1
+
+ for x in range(len(syms)):
+ s = syms[x]
+ if s[0] in "'\"":
+ try:
+ c = eval(s)
+ if (len(c) > 1):
+ sys.stderr.write("%s:%d: Literal token %s in rule '%s' may only be a single character\n" % (file,line,s, prodname))
+ return -1
+ if not Terminals.has_key(c):
+ Terminals[c] = []
+ syms[x] = c
+ continue
+ except SyntaxError:
+ pass
+ if not _is_identifier.match(s) and s != '%prec':
+ sys.stderr.write("%s:%d: Illegal name '%s' in rule '%s'\n" % (file,line,s, prodname))
+ return -1
+
+ # See if the rule is already in the rulemap
+ map = "%s -> %s" % (prodname,syms)
+ if Prodmap.has_key(map):
+ m = Prodmap[map]
+ sys.stderr.write("%s:%d: Duplicate rule %s.\n" % (file,line, m))
+ sys.stderr.write("%s:%d: Previous definition at %s:%d\n" % (file,line, m.file, m.line))
+ return -1
+
+ p = Production()
+ p.name = prodname
+ p.prod = syms
+ p.file = file
+ p.line = line
+ p.func = f
+ p.number = len(Productions)
+
+
+ Productions.append(p)
+ Prodmap[map] = p
+ if not Nonterminals.has_key(prodname):
+ Nonterminals[prodname] = [ ]
+
+ # Add all terminals to Terminals
+ i = 0
+ while i < len(p.prod):
+ t = p.prod[i]
+ if t == '%prec':
+ try:
+ precname = p.prod[i+1]
+ except IndexError:
+ sys.stderr.write("%s:%d: Syntax error. Nothing follows %%prec.\n" % (p.file,p.line))
+ return -1
+
+ prec = Precedence.get(precname,None)
+ if not prec:
+ sys.stderr.write("%s:%d: Nothing known about the precedence of '%s'\n" % (p.file,p.line,precname))
+ return -1
+ else:
+ p.prec = prec
+ del p.prod[i]
+ del p.prod[i]
+ continue
+
+ if Terminals.has_key(t):
+ Terminals[t].append(p.number)
+ # Is a terminal. We'll assign a precedence to p based on this
+ if not hasattr(p,"prec"):
+ p.prec = Precedence.get(t,('right',0))
+ else:
+ if not Nonterminals.has_key(t):
+ Nonterminals[t] = [ ]
+ Nonterminals[t].append(p.number)
+ i += 1
+
+ if not hasattr(p,"prec"):
+ p.prec = ('right',0)
+
+ # Set final length of productions
+ p.len = len(p.prod)
+ p.prod = tuple(p.prod)
+
+ # Calculate unique syms in the production
+ p.usyms = [ ]
+ for s in p.prod:
+ if s not in p.usyms:
+ p.usyms.append(s)
+
+ # Add to the global productions list
+ try:
+ Prodnames[p.name].append(p)
+ except KeyError:
+ Prodnames[p.name] = [ p ]
+ return 0
+
+# Given a raw rule function, this function rips out its doc string
+# and adds rules to the grammar
+
+def add_function(f):
+ line = f.func_code.co_firstlineno
+ file = f.func_code.co_filename
+ error = 0
+
+ if isinstance(f,types.MethodType):
+ reqdargs = 2
+ else:
+ reqdargs = 1
+
+ if f.func_code.co_argcount > reqdargs:
+ sys.stderr.write("%s:%d: Rule '%s' has too many arguments.\n" % (file,line,f.__name__))
+ return -1
+
+ if f.func_code.co_argcount < reqdargs:
+ sys.stderr.write("%s:%d: Rule '%s' requires an argument.\n" % (file,line,f.__name__))
+ return -1
+
+ if f.__doc__:
+ # Split the doc string into lines
+ pstrings = f.__doc__.splitlines()
+ lastp = None
+ dline = line
+ for ps in pstrings:
+ dline += 1
+ p = ps.split()
+ if not p: continue
+ try:
+ if p[0] == '|':
+ # This is a continuation of a previous rule
+ if not lastp:
+ sys.stderr.write("%s:%d: Misplaced '|'.\n" % (file,dline))
+ return -1
+ prodname = lastp
+ if len(p) > 1:
+ syms = p[1:]
+ else:
+ syms = [ ]
+ else:
+ prodname = p[0]
+ lastp = prodname
+ assign = p[1]
+ if len(p) > 2:
+ syms = p[2:]
+ else:
+ syms = [ ]
+ if assign != ':' and assign != '::=':
+                        sys.stderr.write("%s:%d: Syntax error. Expected ':' or '::='\n" % (file,dline))
+ return -1
+
+
+ e = add_production(f,file,dline,prodname,syms)
+ error += e
+
+
+ except StandardError:
+ sys.stderr.write("%s:%d: Syntax error in rule '%s'\n" % (file,dline,ps))
+ error -= 1
+ else:
+ sys.stderr.write("%s:%d: No documentation string specified in function '%s'\n" % (file,line,f.__name__))
+ return error
+
+
+# Cycle checking code (Michael Dyck)
+
+def compute_reachable():
+ '''
+ Find each symbol that can be reached from the start symbol.
+ Print a warning for any nonterminals that can't be reached.
+ (Unused terminals have already had their warning.)
+ '''
+ Reachable = { }
+ for s in Terminals.keys() + Nonterminals.keys():
+ Reachable[s] = 0
+
+ mark_reachable_from( Productions[0].prod[0], Reachable )
+
+ for s in Nonterminals.keys():
+ if not Reachable[s]:
+ sys.stderr.write("yacc: Symbol '%s' is unreachable.\n" % s)
+
+def mark_reachable_from(s, Reachable):
+ '''
+ Mark all symbols that are reachable from symbol s.
+ '''
+ if Reachable[s]:
+ # We've already reached symbol s.
+ return
+ Reachable[s] = 1
+ for p in Prodnames.get(s,[]):
+ for r in p.prod:
+ mark_reachable_from(r, Reachable)
+
+# -----------------------------------------------------------------------------
+# compute_terminates()
+#
+# This function looks at the various parsing rules and tries to detect
+# infinite recursion cycles (grammar rules where there is no possible way
+# to derive a string of only terminals).
+# -----------------------------------------------------------------------------
+def compute_terminates():
+ '''
+ Raise an error for any symbols that don't terminate.
+ '''
+ Terminates = {}
+
+ # Terminals:
+ for t in Terminals.keys():
+ Terminates[t] = 1
+
+ Terminates['$end'] = 1
+
+ # Nonterminals:
+
+ # Initialize to false:
+ for n in Nonterminals.keys():
+ Terminates[n] = 0
+
+ # Then propagate termination until no change:
+ while 1:
+ some_change = 0
+ for (n,pl) in Prodnames.items():
+ # Nonterminal n terminates iff any of its productions terminates.
+ for p in pl:
+ # Production p terminates iff all of its rhs symbols terminate.
+ for s in p.prod:
+ if not Terminates[s]:
+ # The symbol s does not terminate,
+ # so production p does not terminate.
+ p_terminates = 0
+ break
+ else:
+ # didn't break from the loop,
+ # so every symbol s terminates
+ # so production p terminates.
+ p_terminates = 1
+
+ if p_terminates:
+ # symbol n terminates!
+ if not Terminates[n]:
+ Terminates[n] = 1
+ some_change = 1
+ # Don't need to consider any more productions for this n.
+ break
+
+ if not some_change:
+ break
+
+ some_error = 0
+ for (s,terminates) in Terminates.items():
+ if not terminates:
+ if not Prodnames.has_key(s) and not Terminals.has_key(s) and s != 'error':
+ # s is used-but-not-defined, and we've already warned of that,
+ # so it would be overkill to say that it's also non-terminating.
+ pass
+ else:
+ sys.stderr.write("yacc: Infinite recursion detected for symbol '%s'.\n" % s)
+ some_error = 1
+
+ return some_error
+
+# -----------------------------------------------------------------------------
+# verify_productions()
+#
+# This function examines all of the supplied rules to see if they seem valid.
+# -----------------------------------------------------------------------------
+def verify_productions(cycle_check=1):
+ error = 0
+ for p in Productions:
+ if not p: continue
+
+ for s in p.prod:
+ if not Prodnames.has_key(s) and not Terminals.has_key(s) and s != 'error':
+ sys.stderr.write("%s:%d: Symbol '%s' used, but not defined as a token or a rule.\n" % (p.file,p.line,s))
+ error = 1
+ continue
+
+ unused_tok = 0
+ # Now verify all of the tokens
+ if yaccdebug:
+ _vf.write("Unused terminals:\n\n")
+ for s,v in Terminals.items():
+ if s != 'error' and not v:
+ sys.stderr.write("yacc: Warning. Token '%s' defined, but not used.\n" % s)
+ if yaccdebug: _vf.write(" %s\n"% s)
+ unused_tok += 1
+
+ # Print out all of the productions
+ if yaccdebug:
+ _vf.write("\nGrammar\n\n")
+ for i in range(1,len(Productions)):
+ _vf.write("Rule %-5d %s\n" % (i, Productions[i]))
+
+ unused_prod = 0
+ # Verify the use of all productions
+ for s,v in Nonterminals.items():
+ if not v:
+ p = Prodnames[s][0]
+ sys.stderr.write("%s:%d: Warning. Rule '%s' defined, but not used.\n" % (p.file,p.line, s))
+ unused_prod += 1
+
+
+ if unused_tok == 1:
+ sys.stderr.write("yacc: Warning. There is 1 unused token.\n")
+ if unused_tok > 1:
+ sys.stderr.write("yacc: Warning. There are %d unused tokens.\n" % unused_tok)
+
+ if unused_prod == 1:
+ sys.stderr.write("yacc: Warning. There is 1 unused rule.\n")
+ if unused_prod > 1:
+ sys.stderr.write("yacc: Warning. There are %d unused rules.\n" % unused_prod)
+
+ if yaccdebug:
+ _vf.write("\nTerminals, with rules where they appear\n\n")
+ ks = Terminals.keys()
+ ks.sort()
+ for k in ks:
+ _vf.write("%-20s : %s\n" % (k, " ".join([str(s) for s in Terminals[k]])))
+ _vf.write("\nNonterminals, with rules where they appear\n\n")
+ ks = Nonterminals.keys()
+ ks.sort()
+ for k in ks:
+ _vf.write("%-20s : %s\n" % (k, " ".join([str(s) for s in Nonterminals[k]])))
+
+ if (cycle_check):
+ compute_reachable()
+ error += compute_terminates()
+# error += check_cycles()
+ return error
+
+# -----------------------------------------------------------------------------
+# build_lritems()
+#
+# This function walks the list of productions and builds a complete set of the
+# LR items. The LR items are stored in two ways: First, they are uniquely
+# numbered and placed in the list _lritems. Second, a linked list of LR items
+# is built for each production. For example:
+#
+# E -> E PLUS E
+#
+# Creates the list
+#
+# [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
+# -----------------------------------------------------------------------------
+
+def build_lritems():
+ for p in Productions:
+ lastlri = p
+ i = 0
+ while 1:
+ lri = p.lr_item(i)
+ lastlri.lr_next = lri
+ if not lri: break
+ lri.lr_num = len(LRitems)
+ LRitems.append(lri)
+ lastlri = lri
+ i += 1
+
+ # In order for the rest of the parser generator to work, we need to
+ # guarantee that no more lritems are generated. Therefore, we nuke
+ # the p.lr_item method. (Only used in debugging)
+ # Production.lr_item = None
+
+# -----------------------------------------------------------------------------
+# add_precedence()
+#
+# Given a list of precedence rules, add to the precedence table.
+# -----------------------------------------------------------------------------
+
+def add_precedence(plist):
+ plevel = 0
+ error = 0
+ for p in plist:
+ plevel += 1
+ try:
+ prec = p[0]
+ terms = p[1:]
+ if prec != 'left' and prec != 'right' and prec != 'nonassoc':
+ sys.stderr.write("yacc: Invalid precedence '%s'\n" % prec)
+ return -1
+ for t in terms:
+ if Precedence.has_key(t):
+ sys.stderr.write("yacc: Precedence already specified for terminal '%s'\n" % t)
+ error += 1
+ continue
+ Precedence[t] = (prec,plevel)
+ except:
+ sys.stderr.write("yacc: Invalid precedence table.\n")
+ error += 1
+
+ return error
+
+# -----------------------------------------------------------------------------
+# augment_grammar()
+#
+# Compute the augmented grammar. This is just a rule S' -> start where start
+# is the starting symbol.
+# -----------------------------------------------------------------------------
+
+def augment_grammar(start=None):
+ if not start:
+ start = Productions[1].name
+ Productions[0] = Production(name="S'",prod=[start],number=0,len=1,prec=('right',0),func=None)
+ Productions[0].usyms = [ start ]
+ Nonterminals[start].append(0)
+
+
+# -------------------------------------------------------------------------
+# first()
+#
+# Compute the value of FIRST1(beta) where beta is a tuple of symbols.
+#
+# During execution of compute_first1, the result may be incomplete.
+# Afterward (e.g., when called from compute_follow()), it will be complete.
+# -------------------------------------------------------------------------
+def first(beta):
+
+ # We are computing First(x1,x2,x3,...,xn)
+ result = [ ]
+ for x in beta:
+ x_produces_empty = 0
+
+ # Add all the non-<empty> symbols of First[x] to the result.
+ for f in First[x]:
+ if f == '<empty>':
+ x_produces_empty = 1
+ else:
+ if f not in result: result.append(f)
+
+ if x_produces_empty:
+ # We have to consider the next x in beta,
+ # i.e. stay in the loop.
+ pass
+ else:
+ # We don't have to consider any further symbols in beta.
+ break
+ else:
+ # There was no 'break' from the loop,
+ # so x_produces_empty was true for all x in beta,
+ # so beta produces empty as well.
+ result.append('<empty>')
+
+ return result
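+
+# Worked example (illustrative): with productions
+#
+#     opt  -> <empty>
+#     pair -> opt NUM
+#
+# First['opt'] is ['<empty>'] and First['NUM'] is ['NUM'], so
+# first(('opt','NUM')) skips past 'opt' (it can produce empty) and
+# returns ['NUM'].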
+
+
+# FOLLOW(x)
+# Given a non-terminal. This function computes the set of all symbols
+# that might follow it. Dragon book, p. 189.
+
+def compute_follow(start=None):
+ # Add '$end' to the follow list of the start symbol
+ for k in Nonterminals.keys():
+ Follow[k] = [ ]
+
+ if not start:
+ start = Productions[1].name
+
+ Follow[start] = [ '$end' ]
+
+ while 1:
+ didadd = 0
+ for p in Productions[1:]:
+ # Here is the production set
+ for i in range(len(p.prod)):
+ B = p.prod[i]
+ if Nonterminals.has_key(B):
+ # Okay. We got a non-terminal in a production
+ fst = first(p.prod[i+1:])
+ hasempty = 0
+ for f in fst:
+ if f != '<empty>' and f not in Follow[B]:
+ Follow[B].append(f)
+ didadd = 1
+ if f == '<empty>':
+ hasempty = 1
+ if hasempty or i == (len(p.prod)-1):
+ # Add elements of follow(a) to follow(b)
+ for f in Follow[p.name]:
+ if f not in Follow[B]:
+ Follow[B].append(f)
+ didadd = 1
+ if not didadd: break
+
+ if 0 and yaccdebug:
+ _vf.write('\nFollow:\n')
+ for k in Nonterminals.keys():
+ _vf.write("%-20s : %s\n" % (k, " ".join([str(s) for s in Follow[k]])))
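+
+# Worked example (illustrative): for the grammar
+#
+#     S -> E        E -> E PLUS T | T
+#
+# '$end' seeds Follow[S] and propagates to Follow[E].  Scanning 'E PLUS T'
+# adds PLUS to Follow[E] (the first of what follows E), and since T ends
+# that production, Follow[E] flows into Follow[T] as well.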
+
+# -------------------------------------------------------------------------
+# compute_first1()
+#
+# Compute the value of FIRST1(X) for all symbols
+# -------------------------------------------------------------------------
+def compute_first1():
+
+ # Terminals:
+ for t in Terminals.keys():
+ First[t] = [t]
+
+ First['$end'] = ['$end']
+ First['#'] = ['#'] # what's this for?
+
+ # Nonterminals:
+
+ # Initialize to the empty set:
+ for n in Nonterminals.keys():
+ First[n] = []
+
+ # Then propagate symbols until no change:
+ while 1:
+ some_change = 0
+ for n in Nonterminals.keys():
+ for p in Prodnames[n]:
+ for f in first(p.prod):
+ if f not in First[n]:
+ First[n].append( f )
+ some_change = 1
+ if not some_change:
+ break
+
+ if 0 and yaccdebug:
+ _vf.write('\nFirst:\n')
+ for k in Nonterminals.keys():
+ _vf.write("%-20s : %s\n" %
+ (k, " ".join([str(s) for s in First[k]])))
+
+# -----------------------------------------------------------------------------
+# === SLR Generation ===
+#
+# The following functions are used to construct SLR (Simple LR) parsing tables
+# as described on p.221-229 of the dragon book.
+# -----------------------------------------------------------------------------
+
+# Global variables for the LR parsing engine
+def lr_init_vars():
+ global _lr_action, _lr_goto, _lr_method
+ global _lr_goto_cache, _lr0_cidhash
+
+ _lr_action = { } # Action table
+ _lr_goto = { } # Goto table
+ _lr_method = "Unknown" # LR method used
+ _lr_goto_cache = { }
+ _lr0_cidhash = { }
+
+
+# Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
+# prodlist is a list of productions.
+
+_add_count = 0       # Counter used to mark items already added (also prevents looping on cyclic grammars)
+
+def lr0_closure(I):
+ global _add_count
+
+ _add_count += 1
+ prodlist = Productions
+
+ # Add everything in I to J
+ J = I[:]
+ didadd = 1
+ while didadd:
+ didadd = 0
+ for j in J:
+ for x in j.lrafter:
+ if x.lr0_added == _add_count: continue
+ # Add B --> .G to J
+ J.append(x.lr_next)
+ x.lr0_added = _add_count
+ didadd = 1
+
+ return J
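+
+# Illustrative sketch: for an augmented grammar
+#
+#     S' -> E        E -> E PLUS E | NUM
+#
+# lr0_closure([S' -> . E]) repeatedly adds the items with the dot at the
+# start of an E production, yielding
+#
+#     [S' -> . E,  E -> . E PLUS E,  E -> . NUM]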
+
+# Compute the LR(0) goto function goto(I,X) where I is a set
+# of LR(0) items and X is a grammar symbol. This function is written
+# in a way that guarantees uniqueness of the generated goto sets
+# (i.e. the same goto set will never be returned as two different Python
+# objects). With uniqueness, we can later do fast set comparisons using
+# id(obj) instead of element-wise comparison.
+
+def lr0_goto(I,x):
+ # First we look for a previously cached entry
+ g = _lr_goto_cache.get((id(I),x),None)
+ if g: return g
+
+ # Now we generate the goto set in a way that guarantees uniqueness
+ # of the result
+
+ s = _lr_goto_cache.get(x,None)
+ if not s:
+ s = { }
+ _lr_goto_cache[x] = s
+
+ gs = [ ]
+ for p in I:
+ n = p.lr_next
+ if n and n.lrbefore == x:
+ s1 = s.get(id(n),None)
+ if not s1:
+ s1 = { }
+ s[id(n)] = s1
+ gs.append(n)
+ s = s1
+ g = s.get('$end',None)
+ if not g:
+ if gs:
+ g = lr0_closure(gs)
+ s['$end'] = g
+ else:
+ s['$end'] = gs
+ _lr_goto_cache[(id(I),x)] = g
+ return g
+
+_lr0_cidhash = { }
+
+# Compute the LR(0) sets of item function
+def lr0_items():
+
+ C = [ lr0_closure([Productions[0].lr_next]) ]
+ i = 0
+ for I in C:
+ _lr0_cidhash[id(I)] = i
+ i += 1
+
+    # Loop over the items in C and each grammar symbol
+ i = 0
+ while i < len(C):
+ I = C[i]
+ i += 1
+
+ # Collect all of the symbols that could possibly be in the goto(I,X) sets
+ asyms = { }
+ for ii in I:
+ for s in ii.usyms:
+ asyms[s] = None
+
+ for x in asyms.keys():
+ g = lr0_goto(I,x)
+ if not g: continue
+ if _lr0_cidhash.has_key(id(g)): continue
+ _lr0_cidhash[id(g)] = len(C)
+ C.append(g)
+
+ return C
+
+# -----------------------------------------------------------------------------
+# ==== LALR(1) Parsing ====
+#
+# LALR(1) parsing is almost exactly the same as SLR except that instead of
+# relying upon Follow() sets when performing reductions, a more selective
+# lookahead set that incorporates the state of the LR(0) machine is utilized.
+# Thus, we mainly just have to focus on calculating the lookahead sets.
+#
+# The method used here is due to DeRemer and Pennello (1982).
+#
+# DeRemer, F. L., and T. J. Pennello: "Efficient Computation of LALR(1)
+# Look-Ahead Sets", ACM Transactions on Programming Languages and Systems,
+# Vol. 4, No. 4, Oct. 1982, pp. 615-649
+#
+# Further details can also be found in:
+#
+# J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
+# McGraw-Hill Book Company, (1985).
+#
+# Note: This implementation is a complete replacement of the LALR(1)
+# implementation in PLY-1.x releases. That version was based on
+# a less efficient algorithm and it had bugs in its implementation.
+# -----------------------------------------------------------------------------
+
+# -----------------------------------------------------------------------------
+# compute_nullable_nonterminals()
+#
+# Creates a dictionary containing all of the non-terminals that might produce
+# an empty production.
+# -----------------------------------------------------------------------------
+
+def compute_nullable_nonterminals():
+ nullable = {}
+ num_nullable = 0
+ while 1:
+ for p in Productions[1:]:
+ if p.len == 0:
+ nullable[p.name] = 1
+ continue
+ for t in p.prod:
+ if not nullable.has_key(t): break
+ else:
+ nullable[p.name] = 1
+ if len(nullable) == num_nullable: break
+ num_nullable = len(nullable)
+ return nullable
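+
+# Worked example (illustrative): for the productions
+#
+#     A -> <empty>        B -> A A        C -> B X
+#
+# (X a terminal), the fixed point marks A (empty production) and then B
+# (every rhs symbol nullable); C is never marked because X cannot derive
+# empty.  The result is {'A': 1, 'B': 1}.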
+
+# -----------------------------------------------------------------------------
+# find_nonterminal_transitions(C)
+#
+# Given a set of LR(0) items, this function finds all of the non-terminal
+# transitions. These are transitions in which a dot appears immediately before
+# a non-terminal. Returns a list of tuples of the form (state,N) where state
+# is the state number and N is the nonterminal symbol.
+#
+# The input C is the set of LR(0) items.
+# -----------------------------------------------------------------------------
+
+def find_nonterminal_transitions(C):
+ trans = []
+ for state in range(len(C)):
+ for p in C[state]:
+ if p.lr_index < p.len - 1:
+ t = (state,p.prod[p.lr_index+1])
+ if Nonterminals.has_key(t[1]):
+ if t not in trans: trans.append(t)
+ return trans
+
+# -----------------------------------------------------------------------------
+# dr_relation()
+#
+# Computes the DR(p,A) relationships for non-terminal transitions. The input
+# is a tuple (state,N) where state is a number and N is a nonterminal symbol.
+#
+# Returns a list of terminals.
+# -----------------------------------------------------------------------------
+
+def dr_relation(C,trans,nullable):
+ state,N = trans
+ terms = []
+
+ g = lr0_goto(C[state],N)
+ for p in g:
+ if p.lr_index < p.len - 1:
+ a = p.prod[p.lr_index+1]
+ if Terminals.has_key(a):
+ if a not in terms: terms.append(a)
+
+ # This extra bit is to handle the start state
+ if state == 0 and N == Productions[0].prod[0]:
+ terms.append('$end')
+
+ return terms
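+
+# Continuing the sketch used for lr0_closure() above: DR((0, 'E')) asks which
+# terminals can be read immediately after the transition on E out of state 0.
+# goto(I0, E) contains 'E -> E . PLUS E', so PLUS is directly readable, and
+# '$end' is added because (0, E) is the transition on the start symbol.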
+
+# -----------------------------------------------------------------------------
+# reads_relation()
+#
+# Computes the READS() relation (p,A) READS (t,C).
+# -----------------------------------------------------------------------------
+
+def reads_relation(C, trans, empty):
+ # Look for empty transitions
+ rel = []
+ state, N = trans
+
+ g = lr0_goto(C[state],N)
+ j = _lr0_cidhash.get(id(g),-1)
+ for p in g:
+ if p.lr_index < p.len - 1:
+ a = p.prod[p.lr_index + 1]
+ if empty.has_key(a):
+ rel.append((j,a))
+
+ return rel
+
+# -----------------------------------------------------------------------------
+# compute_lookback_includes()
+#
+# Determines the lookback and includes relations
+#
+# LOOKBACK:
+#
+# This relation is determined by running the LR(0) state machine forward.
+# For example, starting with a production "N : . A B C", we run it forward
+# to obtain "N : A B C ." We then build a relationship between this final
+# state and the starting state. These relationships are stored in a dictionary
+# lookdict.
+#
+# INCLUDES:
+#
+# Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
+#
+# This relation is used to determine non-terminal transitions that occur
+# inside of other non-terminal transition states. (p,A) INCLUDES (p', B)
+# if the following holds:
+#
+# B -> LAT, where T -> epsilon and p' -L-> p
+#
+# L is essentially a prefix (which may be empty), T is a suffix that must be
+# able to derive an empty string. State p' must lead to state p with the string L.
+#
+# -----------------------------------------------------------------------------
+
+def compute_lookback_includes(C,trans,nullable):
+
+ lookdict = {} # Dictionary of lookback relations
+ includedict = {} # Dictionary of include relations
+
+ # Make a dictionary of non-terminal transitions
+ dtrans = {}
+ for t in trans:
+ dtrans[t] = 1
+
+ # Loop over all transitions and compute lookbacks and includes
+ for state,N in trans:
+ lookb = []
+ includes = []
+ for p in C[state]:
+ if p.name != N: continue
+
+ # Okay, we have a name match. We now follow the production all the way
+ # through the state machine until we get the . on the right hand side
+
+ lr_index = p.lr_index
+ j = state
+ while lr_index < p.len - 1:
+ lr_index = lr_index + 1
+ t = p.prod[lr_index]
+
+ # Check to see if this symbol and state are a non-terminal transition
+ if dtrans.has_key((j,t)):
+                    # Yes.  Okay, there is some chance that this is an includes
+                    # relation; the only way to know for certain is whether the
+                    # rest of the production derives empty.
+
+ li = lr_index + 1
+ while li < p.len:
+                        if Terminals.has_key(p.prod[li]): break      # No, forget it
+ if not nullable.has_key(p.prod[li]): break
+ li = li + 1
+ else:
+ # Appears to be a relation between (j,t) and (state,N)
+ includes.append((j,t))
+
+ g = lr0_goto(C[j],t) # Go to next set
+ j = _lr0_cidhash.get(id(g),-1) # Go to next state
+
+ # When we get here, j is the final state, now we have to locate the production
+ for r in C[j]:
+ if r.name != p.name: continue
+ if r.len != p.len: continue
+ i = 0
+                # This loop is comparing a production ". A B C" with "A B C ."
+ while i < r.lr_index:
+ if r.prod[i] != p.prod[i+1]: break
+ i = i + 1
+ else:
+ lookb.append((j,r))
+ for i in includes:
+ if not includedict.has_key(i): includedict[i] = []
+ includedict[i].append((state,N))
+ lookdict[(state,N)] = lookb
+
+ return lookdict,includedict
+
+# -----------------------------------------------------------------------------
+# digraph()
+# traverse()
+#
+# The following two functions are used to compute set valued functions
+# of the form:
+#
+# F(x) = F'(x) U U{F(y) | x R y}
+#
+# This is used to compute the values of Read() sets as well as FOLLOW sets
+# in LALR(1) generation.
+#
+# Inputs: X - An input set
+# R - A relation
+# FP - Set-valued function
+# ------------------------------------------------------------------------------
+
+def digraph(X,R,FP):
+ N = { }
+ for x in X:
+ N[x] = 0
+ stack = []
+ F = { }
+ for x in X:
+ if N[x] == 0: traverse(x,N,stack,F,X,R,FP)
+ return F
+
+def traverse(x,N,stack,F,X,R,FP):
+ stack.append(x)
+ d = len(stack)
+ N[x] = d
+ F[x] = FP(x) # F(X) <- F'(x)
+
+ rel = R(x) # Get y's related to x
+ for y in rel:
+ if N[y] == 0:
+ traverse(y,N,stack,F,X,R,FP)
+ N[x] = min(N[x],N[y])
+ for a in F.get(y,[]):
+ if a not in F[x]: F[x].append(a)
+ if N[x] == d:
+ N[stack[-1]] = sys.maxint
+ F[stack[-1]] = F[x]
+ element = stack.pop()
+ while element != x:
+ N[stack[-1]] = sys.maxint
+ F[stack[-1]] = F[x]
+ element = stack.pop()
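+
+# Illustrative sketch (not part of the module): a two-node relation where
+# 'a' R 'b'.  FP supplies the per-node seed sets and digraph() merges them
+# along R:
+#
+#     R  = lambda x: ['b'] if x == 'a' else []
+#     FP = lambda x: [x.upper()]
+#     digraph(['a', 'b'], R, FP)   # -> {'a': ['A', 'B'], 'b': ['B']}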
+
+# -----------------------------------------------------------------------------
+# compute_read_sets()
+#
+# Given a set of LR(0) items, this function computes the read sets.
+#
+# Inputs: C = Set of LR(0) items
+# ntrans = Set of nonterminal transitions
+# nullable = Set of empty transitions
+#
+# Returns a set containing the read sets
+# -----------------------------------------------------------------------------
+
+def compute_read_sets(C, ntrans, nullable):
+ FP = lambda x: dr_relation(C,x,nullable)
+ R = lambda x: reads_relation(C,x,nullable)
+ F = digraph(ntrans,R,FP)
+ return F
+
+# -----------------------------------------------------------------------------
+# compute_follow_sets()
+#
+# Given a set of LR(0) items, a set of non-terminal transitions, a readset,
+# and an include set, this function computes the follow sets
+#
+# Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
+#
+# Inputs:
+# ntrans = Set of nonterminal transitions
+# readsets = Readset (previously computed)
+# inclsets = Include sets (previously computed)
+#
+# Returns a set containing the follow sets
+# -----------------------------------------------------------------------------
+
+def compute_follow_sets(ntrans,readsets,inclsets):
+ FP = lambda x: readsets[x]
+ R = lambda x: inclsets.get(x,[])
+ F = digraph(ntrans,R,FP)
+ return F
+
+# -----------------------------------------------------------------------------
+# add_lookaheads()
+#
+# Attaches the lookahead symbols to grammar rules.
+#
+# Inputs: lookbacks - Set of lookback relations
+# followset - Computed follow set
+#
+# This function directly attaches the lookaheads to productions contained
+# in the lookbacks set
+# -----------------------------------------------------------------------------
+
+def add_lookaheads(lookbacks,followset):
+ for trans,lb in lookbacks.items():
+ # Loop over productions in lookback
+ for state,p in lb:
+ if not p.lookaheads.has_key(state):
+ p.lookaheads[state] = []
+ f = followset.get(trans,[])
+ for a in f:
+ if a not in p.lookaheads[state]: p.lookaheads[state].append(a)
+
+# -----------------------------------------------------------------------------
+# add_lalr_lookaheads()
+#
+# This function does all of the work of adding lookahead information for use
+# with LALR parsing
+# -----------------------------------------------------------------------------
+
+def add_lalr_lookaheads(C):
+ # Determine all of the nullable nonterminals
+ nullable = compute_nullable_nonterminals()
+
+ # Find all non-terminal transitions
+ trans = find_nonterminal_transitions(C)
+
+ # Compute read sets
+ readsets = compute_read_sets(C,trans,nullable)
+
+ # Compute lookback/includes relations
+ lookd, included = compute_lookback_includes(C,trans,nullable)
+
+ # Compute LALR FOLLOW sets
+ followsets = compute_follow_sets(trans,readsets,included)
+
+ # Add all of the lookaheads
+ add_lookaheads(lookd,followsets)
+
+# -----------------------------------------------------------------------------
+# lr_parse_table()
+#
+# This function constructs the parse tables for SLR or LALR
+# -----------------------------------------------------------------------------
+def lr_parse_table(method):
+ global _lr_method
+ goto = _lr_goto # Goto array
+ action = _lr_action # Action array
+ actionp = { } # Action production array (temporary)
+
+ _lr_method = method
+
+ n_srconflict = 0
+ n_rrconflict = 0
+
+ if yaccdebug:
+ sys.stderr.write("yacc: Generating %s parsing table...\n" % method)
+ _vf.write("\n\nParsing method: %s\n\n" % method)
+
+ # Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
+ # This determines the number of states
+
+ C = lr0_items()
+
+ if method == 'LALR':
+ add_lalr_lookaheads(C)
+
+ # Build the parser table, state by state
+ st = 0
+ for I in C:
+ # Loop over each production in I
+ actlist = [ ] # List of actions
+
+ if yaccdebug:
+ _vf.write("\nstate %d\n\n" % st)
+ for p in I:
+ _vf.write(" (%d) %s\n" % (p.number, str(p)))
+ _vf.write("\n")
+
+ for p in I:
+ try:
+ if p.prod[-1] == ".":
+ if p.name == "S'":
+ # Start symbol. Accept!
+ action[st,"$end"] = 0
+ actionp[st,"$end"] = p
+ else:
+ # We are at the end of a production. Reduce!
+ if method == 'LALR':
+ laheads = p.lookaheads[st]
+ else:
+ laheads = Follow[p.name]
+ for a in laheads:
+ actlist.append((a,p,"reduce using rule %d (%s)" % (p.number,p)))
+ r = action.get((st,a),None)
+ if r is not None:
+ # Whoa. Have a shift/reduce or reduce/reduce conflict
+ if r > 0:
+ # Need to decide on shift or reduce here
+ # By default we favor shifting. Need to add
+ # some precedence rules here.
+ sprec,slevel = Productions[actionp[st,a].number].prec
+ rprec,rlevel = Precedence.get(a,('right',0))
+ if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
+ # We really need to reduce here.
+ action[st,a] = -p.number
+ actionp[st,a] = p
+ if not slevel and not rlevel:
+ _vfc.write("shift/reduce conflict in state %d resolved as reduce.\n" % st)
+ _vf.write(" ! shift/reduce conflict for %s resolved as reduce.\n" % a)
+ n_srconflict += 1
+ elif (slevel == rlevel) and (rprec == 'nonassoc'):
+ action[st,a] = None
+ else:
+ # Hmmm. Guess we'll keep the shift
+ if not rlevel:
+ _vfc.write("shift/reduce conflict in state %d resolved as shift.\n" % st)
+ _vf.write(" ! shift/reduce conflict for %s resolved as shift.\n" % a)
+ n_srconflict +=1
+ elif r < 0:
+ # Reduce/reduce conflict. In this case, we favor the rule
+ # that was defined first in the grammar file
+ oldp = Productions[-r]
+ pp = Productions[p.number]
+ if oldp.line > pp.line:
+ action[st,a] = -p.number
+ actionp[st,a] = p
+ # sys.stderr.write("Reduce/reduce conflict in state %d\n" % st)
+ n_rrconflict += 1
+ _vfc.write("reduce/reduce conflict in state %d resolved using rule %d (%s).\n" % (st, actionp[st,a].number, actionp[st,a]))
+ _vf.write(" ! reduce/reduce conflict for %s resolved using rule %d (%s).\n" % (a,actionp[st,a].number, actionp[st,a]))
+ else:
+ sys.stderr.write("Unknown conflict in state %d\n" % st)
+ else:
+ action[st,a] = -p.number
+ actionp[st,a] = p
+ else:
+ i = p.lr_index
+ a = p.prod[i+1] # Get symbol right after the "."
+ if Terminals.has_key(a):
+ g = lr0_goto(I,a)
+ j = _lr0_cidhash.get(id(g),-1)
+ if j >= 0:
+ # We are in a shift state
+ actlist.append((a,p,"shift and go to state %d" % j))
+ r = action.get((st,a),None)
+ if r is not None:
+ # Whoa have a shift/reduce or shift/shift conflict
+ if r > 0:
+ if r != j:
+ sys.stderr.write("Shift/shift conflict in state %d\n" % st)
+ elif r < 0:
+ # Do a precedence check.
+ # - if precedence of reduce rule is higher, we reduce.
+ # - if precedence of reduce is same and left assoc, we reduce.
+ # - otherwise we shift
+ rprec,rlevel = Productions[actionp[st,a].number].prec
+ sprec,slevel = Precedence.get(a,('right',0))
+ if (slevel > rlevel) or ((slevel == rlevel) and (rprec != 'left')):
+ # We decide to shift here... highest precedence to shift
+ action[st,a] = j
+ actionp[st,a] = p
+ if not rlevel:
+ n_srconflict += 1
+ _vfc.write("shift/reduce conflict in state %d resolved as shift.\n" % st)
+ _vf.write(" ! shift/reduce conflict for %s resolved as shift.\n" % a)
+ elif (slevel == rlevel) and (rprec == 'nonassoc'):
+ action[st,a] = None
+ else:
+ # Hmmm. Guess we'll keep the reduce
+ if not slevel and not rlevel:
+ n_srconflict +=1
+ _vfc.write("shift/reduce conflict in state %d resolved as reduce.\n" % st)
+ _vf.write(" ! shift/reduce conflict for %s resolved as reduce.\n" % a)
+
+ else:
+ sys.stderr.write("Unknown conflict in state %d\n" % st)
+ else:
+ action[st,a] = j
+ actionp[st,a] = p
+
+ except StandardError,e:
+                raise YaccError, "Hosed in lr_parse_table: %s" % e
+
+ # Print the actions associated with each terminal
+ if yaccdebug:
+ _actprint = { }
+ for a,p,m in actlist:
+ if action.has_key((st,a)):
+ if p is actionp[st,a]:
+ _vf.write(" %-15s %s\n" % (a,m))
+ _actprint[(a,m)] = 1
+ _vf.write("\n")
+ for a,p,m in actlist:
+ if action.has_key((st,a)):
+ if p is not actionp[st,a]:
+ if not _actprint.has_key((a,m)):
+ _vf.write(" ! %-15s [ %s ]\n" % (a,m))
+ _actprint[(a,m)] = 1
+
+ # Construct the goto table for this state
+ if yaccdebug:
+ _vf.write("\n")
+ nkeys = { }
+ for ii in I:
+ for s in ii.usyms:
+ if Nonterminals.has_key(s):
+ nkeys[s] = None
+ for n in nkeys.keys():
+ g = lr0_goto(I,n)
+ j = _lr0_cidhash.get(id(g),-1)
+ if j >= 0:
+ goto[st,n] = j
+ if yaccdebug:
+ _vf.write(" %-30s shift and go to state %d\n" % (n,j))
+
+ st += 1
+
+ if yaccdebug:
+ if n_srconflict == 1:
+ sys.stderr.write("yacc: %d shift/reduce conflict\n" % n_srconflict)
+ if n_srconflict > 1:
+ sys.stderr.write("yacc: %d shift/reduce conflicts\n" % n_srconflict)
+ if n_rrconflict == 1:
+ sys.stderr.write("yacc: %d reduce/reduce conflict\n" % n_rrconflict)
+ if n_rrconflict > 1:
+ sys.stderr.write("yacc: %d reduce/reduce conflicts\n" % n_rrconflict)
+
+# -----------------------------------------------------------------------------
+# ==== LR Utility functions ====
+# -----------------------------------------------------------------------------
+
+# -----------------------------------------------------------------------------
+# lr_write_tables()
+#
+# This function writes the LR parsing tables to a file
+# -----------------------------------------------------------------------------
+
+def lr_write_tables(modulename=tab_module,outputdir=''):
+ filename = os.path.join(outputdir,modulename) + ".py"
+ try:
+ f = open(filename,"w")
+
+ f.write("""
+# %s
+# This file is automatically generated. Do not edit.
+
+_lr_method = %s
+
+_lr_signature = %s
+""" % (filename, repr(_lr_method), repr(Signature.digest())))
+
+ # Change smaller to 0 to go back to original tables
+ smaller = 1
+
+ # Factor out names to try and make smaller
+ if smaller:
+ items = { }
+
+ for k,v in _lr_action.items():
+ i = items.get(k[1])
+ if not i:
+ i = ([],[])
+ items[k[1]] = i
+ i[0].append(k[0])
+ i[1].append(v)
+
+ f.write("\n_lr_action_items = {")
+ for k,v in items.items():
+ f.write("%r:([" % k)
+ for i in v[0]:
+ f.write("%r," % i)
+ f.write("],[")
+ for i in v[1]:
+ f.write("%r," % i)
+
+ f.write("]),")
+ f.write("}\n")
+
+ f.write("""
+_lr_action = { }
+for _k, _v in _lr_action_items.items():
+ for _x,_y in zip(_v[0],_v[1]):
+ _lr_action[(_x,_k)] = _y
+del _lr_action_items
+""")
+
+ else:
+            f.write("\n_lr_action = { ")
+            for k,v in _lr_action.items():
+                f.write("(%r,%r):%r," % (k[0],k[1],v))
+            f.write("}\n")
+
+ if smaller:
+ # Factor out names to try and make smaller
+ items = { }
+
+ for k,v in _lr_goto.items():
+ i = items.get(k[1])
+ if not i:
+ i = ([],[])
+ items[k[1]] = i
+ i[0].append(k[0])
+ i[1].append(v)
+
+ f.write("\n_lr_goto_items = {")
+ for k,v in items.items():
+ f.write("%r:([" % k)
+ for i in v[0]:
+ f.write("%r," % i)
+ f.write("],[")
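+
+# For example (illustrative), a grammar whose only production for 'list' is
+#
+#     list : list ITEM
+#
+# has no base case: 'list' can never derive a string of pure terminals, so
+# the loop above never marks it and it is reported as infinite recursion.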
+ for i in v[1]:
+ f.write("%r," % i)
+
+ f.write("]),")
+ f.write("}\n")
+
+ f.write("""
+_lr_goto = { }
+for _k, _v in _lr_goto_items.items():
+ for _x,_y in zip(_v[0],_v[1]):
+ _lr_goto[(_x,_k)] = _y
+del _lr_goto_items
+""")
+ else:
+            f.write("\n_lr_goto = { ")
+            for k,v in _lr_goto.items():
+                f.write("(%r,%r):%r," % (k[0],k[1],v))
+            f.write("}\n")
+
+ # Write production table
+ f.write("_lr_productions = [\n")
+ for p in Productions:
+ if p:
+ if (p.func):
+ f.write(" (%r,%d,%r,%r,%d),\n" % (p.name, p.len, p.func.__name__,p.file,p.line))
+ else:
+ f.write(" (%r,%d,None,None,None),\n" % (p.name, p.len))
+ else:
+ f.write(" None,\n")
+ f.write("]\n")
+
+ f.close()
+
+ except IOError,e:
+ print "Unable to create '%s'" % filename
+ print e
+ return
+
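+# The generated table module looks roughly like this (a sketch; the literal
+# values depend entirely on the grammar):
+#
+#     _lr_method = 'LALR'
+#     _lr_signature = '...'
+#     _lr_action_items = {'NUM':([0,3,],[2,2,]),}
+#     _lr_action = { }          # rebuilt from _lr_action_items on import
+#     _lr_goto_items = {'expr':([0,],[1,]),}
+#     _lr_productions = [ ... ]
+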
+def lr_read_tables(module=tab_module,optimize=0):
+ global _lr_action, _lr_goto, _lr_productions, _lr_method
+ try:
+ exec "import %s as parsetab" % module
+
+ if (optimize) or (Signature.digest() == parsetab._lr_signature):
+ _lr_action = parsetab._lr_action
+ _lr_goto = parsetab._lr_goto
+ _lr_productions = parsetab._lr_productions
+ _lr_method = parsetab._lr_method
+ return 1
+ else:
+ return 0
+
+ except (ImportError,AttributeError):
+ return 0
+
+
+# Available instance types. This is used when parsers are defined by a class.
+# It's a little funky because I want to preserve backwards compatibility
+# with Python 2.0 where types.ObjectType is undefined.
+
+try:
+ _INSTANCETYPE = (types.InstanceType, types.ObjectType)
+except AttributeError:
+ _INSTANCETYPE = types.InstanceType
+
+# -----------------------------------------------------------------------------
+# yacc(module)
+#
+# Build the parser module
+# -----------------------------------------------------------------------------
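+
+# Typical usage (an illustrative sketch; the token names, rule bodies,
+# 'data' and 'mylexer' are assumptions, not part of this module):
+#
+#     tokens = ('NUM', 'PLUS')
+#
+#     def p_expr_plus(p):
+#         'expr : expr PLUS NUM'
+#
+#     def p_expr_num(p):
+#         'expr : NUM'
+#
+#     def p_error(p):
+#         pass
+#
+#     parser = yacc()                        # build (or reload) the tables
+#     result = parser.parse(data, lexer=mylexer)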
+
+def yacc(method=default_lr, debug=yaccdebug, module=None, tabmodule=tab_module, start=None, check_recursion=1, optimize=0,write_tables=1,debugfile=debug_file,outputdir=''):
+ global yaccdebug
+ yaccdebug = debug
+
+ initialize_vars()
+ files = { }
+ error = 0
+
+
+ # Add parsing method to signature
+ Signature.update(method)
+
+ # If a "module" parameter was supplied, extract its dictionary.
+ # Note: a module may in fact be an instance as well.
+
+ if module:
+ # User supplied a module object.
+ if isinstance(module, types.ModuleType):
+ ldict = module.__dict__
+ elif isinstance(module, _INSTANCETYPE):
+ _items = [(k,getattr(module,k)) for k in dir(module)]
+ ldict = { }
+ for i in _items:
+ ldict[i[0]] = i[1]
+ else:
+ raise ValueError,"Expected a module"
+
+ else:
+ # No module given. We might be able to get information from the caller.
+ # Throw an exception and unwind the traceback to get the globals
+
+ try:
+ raise RuntimeError
+ except RuntimeError:
+ e,b,t = sys.exc_info()
+ f = t.tb_frame
+ f = f.f_back # Walk out to our calling function
+ ldict = f.f_globals # Grab its globals dictionary
+
+ # Add starting symbol to signature
+ if not start:
+ start = ldict.get("start",None)
+ if start:
+ Signature.update(start)
+
+    # If running in optimized mode, we're going to read the pre-built
+    # parsing tables instead of re-verifying the grammar.
+
+ if (optimize and lr_read_tables(tabmodule,1)):
+ # Read parse table
+ del Productions[:]
+ for p in _lr_productions:
+ if not p:
+ Productions.append(None)
+ else:
+ m = MiniProduction()
+ m.name = p[0]
+ m.len = p[1]
+ m.file = p[3]
+ m.line = p[4]
+ if p[2]:
+ m.func = ldict[p[2]]
+ Productions.append(m)
+
+ else:
+ # Get the tokens map
+ if (module and isinstance(module,_INSTANCETYPE)):
+ tokens = getattr(module,"tokens",None)
+ else:
+ tokens = ldict.get("tokens",None)
+
+ if not tokens:
+ raise YaccError,"module does not define a list 'tokens'"
+ if not (isinstance(tokens,types.ListType) or isinstance(tokens,types.TupleType)):
+ raise YaccError,"tokens must be a list or tuple."
+
+ # Check to see if a requires dictionary is defined.
+ requires = ldict.get("require",None)
+ if requires:
+ if not (isinstance(requires,types.DictType)):
+ raise YaccError,"require must be a dictionary."
+
+ for r,v in requires.items():
+ try:
+ if not (isinstance(v,types.ListType)):
+ raise TypeError
+ v1 = [x.split(".") for x in v]
+ Requires[r] = v1
+ except StandardError:
+ print "Invalid specification for rule '%s' in require. Expected a list of strings" % r
+
+
+        # Build the dictionary of terminals.  We record an empty list in the
+        # dictionary to track whether or not a terminal is actually used in
+        # the grammar (the list collects the rules where each token appears).
+
+ if 'error' in tokens:
+ print "yacc: Illegal token 'error'. Is a reserved word."
+ raise YaccError,"Illegal token name"
+
+ for n in tokens:
+ if Terminals.has_key(n):
+ print "yacc: Warning. Token '%s' multiply defined." % n
+ Terminals[n] = [ ]
+
+ Terminals['error'] = [ ]
+
+ # Get the precedence map (if any)
+ prec = ldict.get("precedence",None)
+ if prec:
+ if not (isinstance(prec,types.ListType) or isinstance(prec,types.TupleType)):
+ raise YaccError,"precedence must be a list or tuple."
+ add_precedence(prec)
+ Signature.update(repr(prec))
+
+ for n in tokens:
+ if not Precedence.has_key(n):
+ Precedence[n] = ('right',0) # Default, right associative, 0 precedence
+
+ # Look for error handler
+ ef = ldict.get('p_error',None)
+ if ef:
+ if isinstance(ef,types.FunctionType):
+ ismethod = 0
+ elif isinstance(ef, types.MethodType):
+ ismethod = 1
+ else:
+ raise YaccError,"'p_error' defined, but is not a function or method."
+ eline = ef.func_code.co_firstlineno
+ efile = ef.func_code.co_filename
+ files[efile] = None
+
+ if (ef.func_code.co_argcount != 1+ismethod):
+ raise YaccError,"%s:%d: p_error() requires 1 argument." % (efile,eline)
+ global Errorfunc
+ Errorfunc = ef
+ else:
+            print "yacc: Warning. No p_error() function is defined."
+
+ # Get the list of built-in functions with p_ prefix
+ symbols = [ldict[f] for f in ldict.keys()
+ if (type(ldict[f]) in (types.FunctionType, types.MethodType) and ldict[f].__name__[:2] == 'p_'
+ and ldict[f].__name__ != 'p_error')]
+
+ # Check for non-empty symbols
+ if len(symbols) == 0:
+ raise YaccError,"no rules of the form p_rulename are defined."
+
+ # Sort the symbols by line number
+ symbols.sort(lambda x,y: cmp(x.func_code.co_firstlineno,y.func_code.co_firstlineno))
+
+ # Add all of the symbols to the grammar
+ for f in symbols:
+ if (add_function(f)) < 0:
+ error += 1
+ else:
+ files[f.func_code.co_filename] = None
+
+ # Make a signature of the docstrings
+ for f in symbols:
+ if f.__doc__:
+ Signature.update(f.__doc__)
+
+ lr_init_vars()
+
+ if error:
+ raise YaccError,"Unable to construct parser."
+
+ if not lr_read_tables(tabmodule):
+
+ # Validate files
+ for filename in files.keys():
+ if not validate_file(filename):
+ error = 1
+
+ # Validate dictionary
+ validate_dict(ldict)
+
+ if start and not Prodnames.has_key(start):
+ raise YaccError,"Bad starting symbol '%s'" % start
+
+ augment_grammar(start)
+ error = verify_productions(cycle_check=check_recursion)
+        otherfunc = [ldict[f] for f in ldict.keys()
+                     if (type(ldict[f]) in (types.FunctionType,types.MethodType) and ldict[f].__name__[:2] != 'p_')]
+
+ if error:
+ raise YaccError,"Unable to construct parser."
+
+ build_lritems()
+ compute_first1()
+ compute_follow(start)
+
+ if method in ['SLR','LALR']:
+ lr_parse_table(method)
+ else:
+ raise YaccError, "Unknown parsing method '%s'" % method
+
+ if write_tables:
+ lr_write_tables(tabmodule,outputdir)
+
+ if yaccdebug:
+ try:
+ f = open(os.path.join(outputdir,debugfile),"w")
+ f.write(_vfc.getvalue())
+ f.write("\n\n")
+ f.write(_vf.getvalue())
+ f.close()
+ except IOError,e:
+ print "yacc: can't create '%s'" % debugfile,e
+
+ # Made it here. Create a parser object and set up its internal state.
+ # Set global parse() method to bound method of parser object.
+
+ p = Parser("xyzzy")
+ p.productions = Productions
+ p.errorfunc = Errorfunc
+ p.action = _lr_action
+ p.goto = _lr_goto
+ p.method = _lr_method
+ p.require = Requires
+
+ global parse
+ parse = p.parse
+
+ global parser
+ parser = p
+
+ # Clean up all of the globals we created
+ if (not optimize):
+ yacc_cleanup()
+ return p
+
+# yacc_cleanup function. Delete all of the global variables
+# used during table construction
+
+def yacc_cleanup():
+ global _lr_action, _lr_goto, _lr_method, _lr_goto_cache
+ del _lr_action, _lr_goto, _lr_method, _lr_goto_cache
+
+ global Productions, Prodnames, Prodmap, Terminals
+ global Nonterminals, First, Follow, Precedence, LRitems
+ global Errorfunc, Signature, Requires
+
+ del Productions, Prodnames, Prodmap, Terminals
+ del Nonterminals, First, Follow, Precedence, LRitems
+ del Errorfunc, Signature, Requires
+
+ global _vf, _vfc
+ del _vf, _vfc
+
+
+# Stub that raises an error if parsing is attempted without first calling yacc()
+def parse(*args,**kwargs):
+ raise YaccError, "yacc: No parser built with yacc()"
+
diff --git a/lib/python2.7/site-packages/setools/__init__.py b/lib/python2.7/site-packages/setools/__init__.py
new file mode 100644
index 0000000..4d03553
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/__init__.py
@@ -0,0 +1,68 @@
+"""The SETools SELinux policy analysis library."""
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+#try:
+# import pkg_resources
+# # pylint: disable=no-member
+# __version__ = pkg_resources.get_distribution("setools").version
+#except ImportError: # pragma: no cover
+# __version__ = "unknown"
+__version__ = "3.3.8"
+
+# Python classes for policy representation
+from . import policyrep
+from .policyrep import SELinuxPolicy
+
+# Exceptions
+from . import exception
+
+# Component Queries
+from .boolquery import BoolQuery
+from .categoryquery import CategoryQuery
+from .commonquery import CommonQuery
+from .objclassquery import ObjClassQuery
+from .polcapquery import PolCapQuery
+from .rolequery import RoleQuery
+from .sensitivityquery import SensitivityQuery
+from .typequery import TypeQuery
+from .typeattrquery import TypeAttributeQuery
+from .userquery import UserQuery
+
+# Rule Queries
+from .mlsrulequery import MLSRuleQuery
+from .rbacrulequery import RBACRuleQuery
+from .terulequery import TERuleQuery
+
+# Constraint queries
+from .constraintquery import ConstraintQuery
+
+# In-policy Context Queries
+from .fsusequery import FSUseQuery
+from .genfsconquery import GenfsconQuery
+from .initsidquery import InitialSIDQuery
+from .netifconquery import NetifconQuery
+from .nodeconquery import NodeconQuery
+from .portconquery import PortconQuery
+
+# Information Flow Analysis
+from .infoflow import InfoFlowAnalysis
+from .permmap import PermissionMap
+
+# Domain Transition Analysis
+from .dta import DomainTransitionAnalysis
diff --git a/lib/python2.7/site-packages/setools/boolquery.py b/lib/python2.7/site-packages/setools/boolquery.py
new file mode 100644
index 0000000..b70b7d5
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/boolquery.py
@@ -0,0 +1,66 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import logging
+
+from . import compquery
+from .descriptors import CriteriaDescriptor
+
+
+class BoolQuery(compquery.ComponentQuery):
+
+ """Query SELinux policy Booleans.
+
+ Parameter:
+ policy The policy to query.
+
+ Keyword Parameters/Class attributes:
+ name The Boolean name to match.
+ name_regex If true, regular expression matching
+ will be used on the Boolean name.
+ default The default state to match. If this
+                    is None, the default state will not be matched.
+ """
+
+ _default = None
+
+ @property
+ def default(self):
+ return self._default
+
+ @default.setter
+ def default(self, value):
+ if value is None:
+ self._default = None
+ else:
+ self._default = bool(value)
+
+ def results(self):
+ """Generator which yields all Booleans matching the criteria."""
+ self.log.info("Generating results from {0.policy}".format(self))
+ self.log.debug("Name: {0.name!r}, regex: {0.name_regex}".format(self))
+ self.log.debug("Default: {0.default}".format(self))
+
+ for boolean in self.policy.bools():
+ if not self._match_name(boolean):
+ continue
+
+ if self.default is not None and boolean.state != self.default:
+ continue
+
+ yield boolean
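+
+# Illustrative usage (a sketch, not from the original file; the policy path
+# is an assumption, and keyword construction relies on the base PolicyQuery
+# accepting the parameters listed in the docstring above):
+#
+#   from setools import SELinuxPolicy, BoolQuery
+#
+#   p = SELinuxPolicy("policy.29")
+#   q = BoolQuery(p, name="allow_.*", name_regex=True, default=True)
+#   for boolean in q.results():
+#       print boolean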
diff --git a/lib/python2.7/site-packages/setools/categoryquery.py b/lib/python2.7/site-packages/setools/categoryquery.py
new file mode 100644
index 0000000..d4d7c4c
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/categoryquery.py
@@ -0,0 +1,55 @@
+# Copyright 2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import logging
+
+from . import compquery
+from . import mixins
+
+
+class CategoryQuery(mixins.MatchAlias, compquery.ComponentQuery):
+
+ """
+    Query MLS categories.
+
+ Parameter:
+ policy The policy to query.
+
+ Keyword Parameters/Class attributes:
+ name The name of the category to match.
+ name_regex If true, regular expression matching will
+ be used for matching the name.
+ alias The alias name to match.
+ alias_regex If true, regular expression matching
+ will be used on the alias names.
+ """
+
+ def results(self):
+ """Generator which yields all matching categories."""
+ self.log.info("Generating results from {0.policy}".format(self))
+ self.log.debug("Name: {0.name!r}, regex: {0.name_regex}".format(self))
+ self.log.debug("Alias: {0.alias}, regex: {0.alias_regex}".format(self))
+
+ for cat in self.policy.categories():
+ if not self._match_name(cat):
+ continue
+
+ if not self._match_alias(cat):
+ continue
+
+ yield cat
diff --git a/lib/python2.7/site-packages/setools/commonquery.py b/lib/python2.7/site-packages/setools/commonquery.py
new file mode 100644
index 0000000..e105ccb
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/commonquery.py
@@ -0,0 +1,60 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import logging
+import re
+
+from . import compquery, mixins
+
+
+class CommonQuery(mixins.MatchPermission, compquery.ComponentQuery):
+
+ """
+ Query common permission sets.
+
+ Parameter:
+ policy The policy to query.
+
+ Keyword Parameters/Class attributes:
+ name The name of the common to match.
+ name_regex If true, regular expression matching will
+ be used for matching the name.
+ perms The permissions to match.
+ perms_equal If true, only commons with permission sets
+ that are equal to the criteria will
+ match. Otherwise, any intersection
+ will match.
+ perms_regex If true, regular expression matching will be used
+ on the permission names instead of set logic.
+ """
+
+ def results(self):
+ """Generator which yields all matching commons."""
+ self.log.info("Generating results from {0.policy}".format(self))
+ self.log.debug("Name: {0.name!r}, regex: {0.name_regex}".format(self))
+ self.log.debug("Perms: {0.perms!r}, regex: {0.perms_regex}, eq: {0.perms_equal}".
+ format(self))
+
+ for com in self.policy.commons():
+ if not self._match_name(com):
+ continue
+
+ if not self._match_perms(com):
+ continue
+
+ yield com
diff --git a/lib/python2.7/site-packages/setools/compquery.py b/lib/python2.7/site-packages/setools/compquery.py
new file mode 100644
index 0000000..3d8851a
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/compquery.py
@@ -0,0 +1,39 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+# pylint: disable=no-member,attribute-defined-outside-init,abstract-method
+import re
+
+from . import query
+from .descriptors import CriteriaDescriptor
+
+
+class ComponentQuery(query.PolicyQuery):
+
+ """Base class for SETools component queries."""
+
+ name = CriteriaDescriptor("name_regex")
+ name_regex = False
+
+ def _match_name(self, obj):
+ """Match the object to the name criteria."""
+ if not self.name:
+ # if there is no criteria, everything matches.
+ return True
+
+ return self._match_regex(obj, self.name, self.name_regex)
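Subclasses inherit the name/name_regex handling above and only supply their own results() generator. A hypothetical minimal subclass, assuming the SELinuxPolicy bools() generator; a real query class would layer its own criteria on top:

    class BoolQuerySketch(ComponentQuery):
        # Illustrative only: match Booleans by name.
        def results(self):
            for b in self.policy.bools():
                if not self._match_name(b):
                    continue
                yield b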
diff --git a/lib/python2.7/site-packages/setools/constraintquery.py b/lib/python2.7/site-packages/setools/constraintquery.py
new file mode 100644
index 0000000..82a6fc2
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/constraintquery.py
@@ -0,0 +1,142 @@
+# Copyright 2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import logging
+import re
+
+from . import mixins, query
+from .descriptors import CriteriaDescriptor, CriteriaSetDescriptor, RuletypeDescriptor
+from .policyrep.exception import ConstraintUseError
+
+
+class ConstraintQuery(mixins.MatchObjClass, mixins.MatchPermission, query.PolicyQuery):
+
+ """
+ Query constraint rules, (mls)constrain/(mls)validatetrans.
+
+ Parameter:
+ policy The policy to query.
+
+ Keyword Parameters/Class attributes:
+ ruletype The list of rule type(s) to match.
+ tclass The object class(es) to match.
+ tclass_regex If true, use a regular expression for
+ matching the rule's object class.
+ perms The permission(s) to match.
+ perms_equal If true, the permission set of the rule
+ must exactly match the permissions
+ criteria. If false, any set intersection
+ will match.
+ perms_regex If true, regular expression matching will be used
+ on the permission names instead of set logic.
+ role The name of the role to match in the
+ constraint expression.
+ role_indirect If true, members of an attribute will be
+ matched rather than the attribute itself.
+ role_regex If true, regular expression matching will
+ be used on the role.
+ type_ The name of the type/attribute to match in the
+ constraint expression.
+ type_indirect If true, members of an attribute will be
+ matched rather than the attribute itself.
+ type_regex If true, regular expression matching will
+ be used on the type/attribute.
+ user The name of the user to match in the
+ constraint expression.
+ user_regex If true, regular expression matching will
+ be used on the user.
+ """
+
+ ruletype = RuletypeDescriptor("validate_constraint_ruletype")
+ user = CriteriaDescriptor("user_regex", "lookup_user")
+ user_regex = False
+ role = CriteriaDescriptor("role_regex", "lookup_role")
+ role_regex = False
+ role_indirect = True
+ type_ = CriteriaDescriptor("type_regex", "lookup_type_or_attr")
+ type_regex = False
+ type_indirect = True
+
+ def _match_expr(self, expr, criteria, indirect, regex):
+ """
+ Match roles/types/users in a constraint expression,
+ optionally by expanding the contents of attributes.
+
+ Parameters:
+ expr The expression to match.
+ criteria The criteria to match.
+ indirect If attributes in the expression should be expanded.
+ regex If regular expression matching should be used.
+ """
+
+ if indirect:
+ obj = set()
+ for item in expr:
+ obj.update(item.expand())
+ else:
+ obj = expr
+
+ return self._match_in_set(obj, criteria, regex)
+
+ def results(self):
+ """Generator which yields all matching constraints rules."""
+ self.log.info("Generating results from {0.policy}".format(self))
+ self.log.debug("Ruletypes: {0.ruletype}".format(self))
+ self.log.debug("Class: {0.tclass!r}, regex: {0.tclass_regex}".format(self))
+ self.log.debug("Perms: {0.perms!r}, regex: {0.perms_regex}, eq: {0.perms_equal}".
+ format(self))
+ self.log.debug("User: {0.user!r}, regex: {0.user_regex}".format(self))
+ self.log.debug("Role: {0.role!r}, regex: {0.role_regex}".format(self))
+ self.log.debug("Type: {0.type_!r}, regex: {0.type_regex}".format(self))
+
+ for c in self.policy.constraints():
+ if self.ruletype:
+ if c.ruletype not in self.ruletype:
+ continue
+
+ if not self._match_object_class(c):
+ continue
+
+ try:
+ if not self._match_perms(c):
+ continue
+ except ConstraintUseError:
+ continue
+
+ if self.role and not self._match_expr(
+ c.roles,
+ self.role,
+ self.role_indirect,
+ self.role_regex):
+ continue
+
+ if self.type_ and not self._match_expr(
+ c.types,
+ self.type_,
+ self.type_indirect,
+ self.type_regex):
+ continue
+
+ if self.user and not self._match_expr(
+ c.users,
+ self.user,
+ False,
+ self.user_regex):
+ continue
+
+ yield c
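Example (a sketch continuing from above; the ruletype and permission names are assumptions about the target policy):

    q = setools.ConstraintQuery(p, ruletype=["mlsconstrain"],
                                perms=set(["write", "setattr"]))
    for c in q.results():
        print(c)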
diff --git a/lib/python2.7/site-packages/setools/contextquery.py b/lib/python2.7/site-packages/setools/contextquery.py
new file mode 100644
index 0000000..5ce1632
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/contextquery.py
@@ -0,0 +1,98 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+# pylint: disable=attribute-defined-outside-init,no-member
+import re
+
+from . import query
+from .descriptors import CriteriaDescriptor
+
+
+class ContextQuery(query.PolicyQuery):
+
+ """
+ Base class for SETools in-policy labeling/context queries.
+
+ Parameter:
+ policy The policy to query.
+
+ Keyword Parameters/Class attributes:
+ context The object to match.
+ user The user to match in the context.
+ user_regex If true, regular expression matching
+ will be used on the user.
+ role The role to match in the context.
+ role_regex If true, regular expression matching
+ will be used on the role.
+ type_ The type to match in the context.
+ type_regex If true, regular expression matching
+ will be used on the type.
+ range_ The range to match in the context.
+ range_subset If true, the criteria will match if it
+ is a subset of the context's range.
+ range_overlap If true, the criteria will match if it
+ overlaps any of the context's range.
+ range_superset If true, the criteria will match if it
+ is a superset of the context's range.
+ range_proper If true, use proper superset/subset
+ on range matching operations.
+ No effect if not using set operations.
+ """
+
+ user = CriteriaDescriptor("user_regex", "lookup_user")
+ user_regex = False
+ role = CriteriaDescriptor("role_regex", "lookup_role")
+ role_regex = False
+ type_ = CriteriaDescriptor("type_regex", "lookup_type")
+ type_regex = False
+ range_ = CriteriaDescriptor(lookup_function="lookup_range")
+ range_overlap = False
+ range_subset = False
+ range_superset = False
+ range_proper = False
+
+ def _match_context(self, context):
+
+ if self.user and not query.PolicyQuery._match_regex(
+ context.user,
+ self.user,
+ self.user_regex):
+ return False
+
+ if self.role and not query.PolicyQuery._match_regex(
+ context.role,
+ self.role,
+ self.role_regex):
+ return False
+
+ if self.type_ and not query.PolicyQuery._match_regex(
+ context.type_,
+ self.type_,
+ self.type_regex):
+ return False
+
+ if self.range_ and not query.PolicyQuery._match_range(
+ context.range_,
+ self.range_,
+ self.range_subset,
+ self.range_overlap,
+ self.range_superset,
+ self.range_proper):
+ return False
+
+ return True
diff --git a/lib/python2.7/site-packages/setools/descriptors.py b/lib/python2.7/site-packages/setools/descriptors.py
new file mode 100644
index 0000000..eab9210
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/descriptors.py
@@ -0,0 +1,230 @@
+# Copyright 2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+"""
+SETools descriptors.
+
+These classes override how a class's attributes are get/set/deleted.
+This is how the @property decorator works.
+
+See https://docs.python.org/3/howto/descriptor.html
+for more details.
+"""
+
+import re
+from collections import defaultdict
+from weakref import WeakKeyDictionary
+
+#
+# Query criteria descriptors
+#
+# Implementation note: if the name_regex attribute value
+# is changed the criteria must be reset.
+#
+
+
+class CriteriaDescriptor(object):
+
+ """
+ Single item criteria descriptor.
+
+ Parameters:
+ name_regex The name of the instance's regex-setting attribute;
+ used as name_regex below. If unset,
+ regular expressions will never be used.
+ lookup_function The name of the SELinuxPolicy lookup function,
+ e.g. lookup_type or lookup_boolean.
+ default_value The default value of the criteria. The default
+ is None.
+
+ Read-only instance attribute use (obj parameter):
+ policy The instance of SELinuxPolicy
+ name_regex This attribute is read to determine if
+ the criteria should be looked up or
+ compiled into a regex. If the attribute
+ does not exist, False is assumed.
+ """
+
+ def __init__(self, name_regex=None, lookup_function=None, default_value=None):
+ assert name_regex or lookup_function, "A simple attribute should be used if there is " \
+ "neither a regex nor a lookup function."
+ self.regex = name_regex
+ self.default_value = default_value
+ self.lookup_function = lookup_function
+
+ # use weak references so instances can be
+ # garbage collected, rather than unnecessarily
+ # kept around due to this descriptor.
+ self.instances = WeakKeyDictionary()
+
+ def __get__(self, obj, objtype=None):
+ if obj is None:
+ return self
+
+ return self.instances.setdefault(obj, self.default_value)
+
+ def __set__(self, obj, value):
+ if not value:
+ self.instances[obj] = None
+ elif self.regex and getattr(obj, self.regex, False):
+ self.instances[obj] = re.compile(value)
+ elif self.lookup_function:
+ lookup = getattr(obj.policy, self.lookup_function)
+ self.instances[obj] = lookup(value)
+ else:
+ self.instances[obj] = value
+
+
+class CriteriaSetDescriptor(CriteriaDescriptor):
+
+ """Descriptor for a set of criteria."""
+
+ def __set__(self, obj, value):
+ if not value:
+ self.instances[obj] = None
+ elif self.regex and getattr(obj, self.regex, False):
+ self.instances[obj] = re.compile(value)
+ elif self.lookup_function:
+ lookup = getattr(obj.policy, self.lookup_function)
+ self.instances[obj] = set(lookup(v) for v in value)
+ else:
+ self.instances[obj] = set(value)
+
+
+class RuletypeDescriptor(object):
+
+ """
+ Descriptor for a list of rule types.
+
+ Parameters:
+ validator The name of the SELinuxPolicy ruletype
+ validator function, e.g. validate_te_ruletype
+
+ Read-only instance attribute use (obj parameter):
+ policy The instance of SELinuxPolicy
+ """
+
+ def __init__(self, validator):
+ self.validator = validator
+
+ # use weak references so instances can be
+ # garbage collected, rather than unnecessarily
+ # kept around due to this descriptor.
+ self.instances = WeakKeyDictionary()
+
+ def __get__(self, obj, objtype=None):
+ if obj is None:
+ return self
+
+ return self.instances.setdefault(obj, None)
+
+ def __set__(self, obj, value):
+ if value:
+ validate = getattr(obj.policy, self.validator)
+ validate(value)
+ self.instances[obj] = value
+ else:
+ self.instances[obj] = None
+
+
+#
+# NetworkX Graph Descriptors
+#
+# These descriptors are used to simplify all
+# of the dictionary use in the NetworkX graph.
+#
+
+
+class NetworkXGraphEdgeDescriptor(object):
+
+ """
+ Descriptor base class for NetworkX graph edge attributes.
+
+ Parameter:
+ name The edge property name
+
+ Instance class attribute use (obj parameter):
+ G The NetworkX graph
+ source The edge's source node
+ target The edge's target node
+ """
+
+ def __init__(self, propname):
+ self.name = propname
+
+ def __get__(self, obj, objtype=None):
+ if obj is None:
+ return self
+
+ return obj.G[obj.source][obj.target][self.name]
+
+ def __set__(self, obj, value):
+ raise NotImplementedError
+
+ def __delete__(self, obj):
+ raise NotImplementedError
+
+
+class EdgeAttrDict(NetworkXGraphEdgeDescriptor):
+
+ """A descriptor for edge attributes that are dictionaries."""
+
+ def __set__(self, obj, value):
+ # None is a special value to initialize the attribute
+ if value is None:
+ obj.G[obj.source][obj.target][self.name] = defaultdict(list)
+ else:
+ raise ValueError("{0} dictionaries should not be assigned directly".format(self.name))
+
+ def __delete__(self, obj):
+ obj.G[obj.source][obj.target][self.name].clear()
+
+
+class EdgeAttrIntMax(NetworkXGraphEdgeDescriptor):
+
+ """
+ A descriptor for edge attributes that are non-negative integers that always
+ keep the max assigned value until re-initialized.
+ """
+
+ def __set__(self, obj, value):
+ # None is a special value to initialize
+ if value is None:
+ obj.G[obj.source][obj.target][self.name] = 0
+ else:
+ current_value = obj.G[obj.source][obj.target][self.name]
+ obj.G[obj.source][obj.target][self.name] = max(current_value, value)
+
+
+class EdgeAttrList(NetworkXGraphEdgeDescriptor):
+
+ """A descriptor for edge attributes that are lists."""
+
+ def __set__(self, obj, value):
+ # None is a special value to initialize
+ if value is None:
+ obj.G[obj.source][obj.target][self.name] = []
+ else:
+ raise ValueError("{0} lists should not be assigned directly".format(self.name))
+
+ def __delete__(self, obj):
+ # in Python3 a .clear() function was added for lists
+ # keep this implementation for Python 2 compat
+ del obj.G[obj.source][obj.target][self.name][:]
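To make the descriptor behavior concrete, a small hypothetical sketch (no lookup function is given, so only the regex branch and the plain-value branch are exercised):

    from setools.descriptors import CriteriaDescriptor

    class Example(object):
        name = CriteriaDescriptor("name_regex")   # class-level descriptor
        name_regex = False                        # plain regex flag attribute

    e = Example()
    e.name = "foo"        # name_regex is False: stored as-is
    e.name_regex = True
    e.name = "foo.*"      # name_regex is True: stored as re.compile("foo.*")
    e.name = ""           # falsy values reset the criteria to None

Per the implementation note above, flipping name_regex alone does not recompile anything; the criteria must be re-assigned afterward.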
diff --git a/lib/python2.7/site-packages/setools/dta.py b/lib/python2.7/site-packages/setools/dta.py
new file mode 100644
index 0000000..271efc4
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/dta.py
@@ -0,0 +1,603 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import itertools
+import logging
+from collections import defaultdict, namedtuple
+
+import networkx as nx
+from networkx.exception import NetworkXError, NetworkXNoPath
+
+from .descriptors import EdgeAttrDict, EdgeAttrList
+
+__all__ = ['DomainTransitionAnalysis']
+
+# Return values for the analysis
+# are in the following tuple formats:
+step_output = namedtuple("step", ["source",
+ "target",
+ "transition",
+ "entrypoints",
+ "setexec",
+ "dyntransition",
+ "setcurrent"])
+
+entrypoint_output = namedtuple("entrypoints", ["name",
+ "entrypoint",
+ "execute",
+ "type_transition"])
+
+
+class DomainTransitionAnalysis(object):
+
+ """Domain transition analysis."""
+
+ def __init__(self, policy, reverse=False, exclude=None):
+ """
+ Parameters:
+ policy The policy to analyze.
+ reverse If true, the analysis is reversed:
+ transitions into the source type are
+ found instead of out of it.
+ (default is False)
+ exclude The types excluded from the analysis.
+ (default is none)
+ """
+ self.log = logging.getLogger(self.__class__.__name__)
+
+ self.policy = policy
+ self.exclude = exclude
+ self.reverse = reverse
+ self.rebuildgraph = True
+ self.rebuildsubgraph = True
+ self.G = nx.DiGraph()
+ self.subG = None
+
+ @property
+ def reverse(self):
+ return self._reverse
+
+ @reverse.setter
+ def reverse(self, direction):
+ self._reverse = bool(direction)
+ self.rebuildsubgraph = True
+
+ @property
+ def exclude(self):
+ return self._exclude
+
+ @exclude.setter
+ def exclude(self, types):
+ if types:
+ self._exclude = [self.policy.lookup_type(t) for t in types]
+ else:
+ self._exclude = None
+
+ self.rebuildsubgraph = True
+
+ def shortest_path(self, source, target):
+ """
+ Generator which yields one shortest domain transition path
+ between the source and target types (there may be more).
+
+ Parameters:
+ source The source type.
+ target The target type.
+
+ Yield: generator(steps)
+
+ steps A generator that returns the tuple of
+ source, target, and rules for each
+ domain transition.
+ """
+ s = self.policy.lookup_type(source)
+ t = self.policy.lookup_type(target)
+
+ if self.rebuildsubgraph:
+ self._build_subgraph()
+
+ self.log.info("Generating one shortest path from {0} to {1}...".format(s, t))
+
+ try:
+ yield self.__generate_steps(nx.shortest_path(self.subG, s, t))
+ except (NetworkXNoPath, NetworkXError):
+ # NetworkXError: the type is valid but not in graph, e.g. excluded
+ # NetworkXNoPath: no paths or the target type is
+ # not in the graph
+ pass
+
+ def all_paths(self, source, target, maxlen=2):
+ """
+ Generator which yields all domain transition paths between
+ the source and target up to the specified maximum path
+ length.
+
+ Parameters:
+ source The source type.
+ target The target type.
+ maxlen Maximum length of paths.
+
+ Yield: generator(steps)
+
+ steps A generator that returns the tuple of
+ source, target, and rules for each
+ domain transition.
+ """
+ if maxlen < 1:
+ raise ValueError("Maximum path length must be positive.")
+
+ s = self.policy.lookup_type(source)
+ t = self.policy.lookup_type(target)
+
+ if self.rebuildsubgraph:
+ self._build_subgraph()
+
+ self.log.info("Generating all paths from {0} to {1}, max len {2}...".format(s, t, maxlen))
+
+ try:
+ for path in nx.all_simple_paths(self.subG, s, t, maxlen):
+ yield self.__generate_steps(path)
+ except (NetworkXNoPath, NetworkXError):
+ # NetworkXError: the type is valid but not in graph, e.g. excluded
+ # NetworkXNoPath: no paths or the target type is
+ # not in the graph
+ pass
+
+ def all_shortest_paths(self, source, target):
+ """
+ Generator which yields all shortest domain transition paths
+ between the source and target types.
+
+ Parameters:
+ source The source type.
+ target The target type.
+
+ Yield: generator(steps)
+
+ steps A generator that returns the tuple of
+ source, target, and rules for each
+ domain transition.
+ """
+ s = self.policy.lookup_type(source)
+ t = self.policy.lookup_type(target)
+
+ if self.rebuildsubgraph:
+ self._build_subgraph()
+
+ self.log.info("Generating all shortest paths from {0} to {1}...".format(s, t))
+
+ try:
+ for path in nx.all_shortest_paths(self.subG, s, t):
+ yield self.__generate_steps(path)
+ except (NetworkXNoPath, NetworkXError, KeyError):
+ # NetworkXError: the type is valid but not in graph, e.g. excluded
+ # NetworkXNoPath: no paths or the target type is
+ # not in the graph
+ # KeyError: work around NetworkX bug
+ # when the source node is not in the graph
+ pass
+
+ def transitions(self, type_):
+ """
+ Generator which yields all domain transitions out of a
+ specified source type.
+
+ Parameters:
+ type_ The starting type.
+
+ Yield: generator(steps)
+
+ steps A generator that returns the tuple of
+ source, target, and rules for each
+ domain transition.
+ """
+ s = self.policy.lookup_type(type_)
+
+ if self.rebuildsubgraph:
+ self._build_subgraph()
+
+ self.log.info("Generating all transitions {1} {0}".
+ format(s, "in to" if self.reverse else "out from"))
+
+ try:
+ for source, target in self.subG.out_edges_iter(s):
+ edge = Edge(self.subG, source, target)
+
+ if self.reverse:
+ real_source, real_target = target, source
+ else:
+ real_source, real_target = source, target
+
+ yield step_output(real_source, real_target,
+ edge.transition,
+ self.__generate_entrypoints(edge),
+ edge.setexec,
+ edge.dyntransition,
+ edge.setcurrent)
+
+ except NetworkXError:
+ # NetworkXError: the type is valid but not in graph, e.g. excluded
+ pass
+
+ def get_stats(self): # pragma: no cover
+ """
+ Get the domain transition graph statistics.
+
+ Return: tuple(nodes, edges)
+
+ nodes The number of nodes (types) in the graph.
+ edges The number of edges (domain transitions) in the graph.
+ """
+ return (self.G.number_of_nodes(), self.G.number_of_edges())
+
+ #
+ # Internal functions follow
+ #
+ @staticmethod
+ def __generate_entrypoints(edge):
+ """
+ Generator which yields the entrypoint, execute, and
+ type_transition rules for each entrypoint.
+
+ Parameter:
+ edge The graph edge containing the entrypoint,
+ execute, and type_transition dictionaries.
+
+ Yield: tuple(type, entry, exec, trans)
+
+ type The entrypoint type.
+ entry The list of entrypoint rules.
+ exec The list of execute rules.
+ trans The list of type_transition rules.
+ """
+ for e in edge.entrypoint:
+ yield entrypoint_output(e, edge.entrypoint[e], edge.execute[e], edge.type_transition[e])
+
+ def __generate_steps(self, path):
+ """
+ Generator which yields the source, target, and associated rules
+ for each domain transition.
+
+ Parameter:
+ path A list of graph node names representing a domain transition path.
+
+ Yield: tuple(source, target, transition, entrypoints,
+ setexec, dyntransition, setcurrent)
+
+ source The source type for this step of the domain transition.
+ target The target type for this step of the domain transition.
+ transition The list of transition rules.
+ entrypoints Generator which yields entrypoint-related rules.
+ setexec The list of setexec rules.
+ dyntransition The list of dynamic transition rules.
+ setcurrent The list of setcurrent rules.
+ """
+
+ for s in range(1, len(path)):
+ source = path[s - 1]
+ target = path[s]
+ edge = Edge(self.subG, source, target)
+
+ # Yield the actual source and target.
+ # The above perspective is reversed
+ # if the graph has been reversed.
+ if self.reverse:
+ real_source, real_target = target, source
+ else:
+ real_source, real_target = source, target
+
+ yield step_output(real_source, real_target,
+ edge.transition,
+ self.__generate_entrypoints(edge),
+ edge.setexec,
+ edge.dyntransition,
+ edge.setcurrent)
+
+ #
+ # Graph building functions
+ #
+
+ # Domain transition requirements:
+ #
+ # Standard transitions a->b:
+ # allow a b:process transition;
+ # allow a b_exec:file execute;
+ # allow b b_exec:file entrypoint;
+ #
+ # and at least one of:
+ # allow a self:process setexec;
+ # type_transition a b_exec:process b;
+ #
+ # Dynamic transition x->y:
+ # allow x y:process dyntransition;
+ # allow x self:process setcurrent;
+ #
+ # Algorithm summary:
+ # 1. iterate over all rules
+ # 1. skip non allow/type_transition rules
+ # 2. if process transition or dyntransition, create edge,
+ # initialize rule lists, add the (dyn)transition rule
+ # 3. if process setexec or setcurrent, add to appropriate dict
+ # keyed on the subject
+ # 4. if file exec, entrypoint, or type_transition:process,
+ # add to appropriate dict keyed on subject,object.
+ # 2. Iterate over all graph edges:
+ # 1. if there is a transition rule (else add to invalid
+ # transition list):
+ # 1. use set intersection to find matching exec
+ # and entrypoint rules. If none, add to invalid
+ # transition list.
+ # 2. for each valid entrypoint, add rules to the
+ # edge's lists if there is either a
+ # type_transition for it or the source process
+ # has setexec permissions.
+ # 3. If there are neither type_transitions nor
+ # setexec permissions, add to the invalid
+ # transition list
+ # 2. if there is a dyntransition rule (else add to invalid
+ # dyntrans list):
+ # 1. If the source has a setcurrent rule, add it
+ # to the edge's list, else add to invalid
+ # dyntransition list.
+ # 3. Iterate over all graph edges:
+ # 1. if the edge has an invalid trans and dyntrans, delete
+ # the edge.
+ # 2. if the edge has an invalid trans, clear the related
+ # lists on the edge.
+ # 3. if the edge has an invalid dyntrans, clear the related
+ # lists on the edge.
+ #
+ def _build_graph(self):
+ self.G.clear()
+
+ self.log.info("Building graph from {0}...".format(self.policy))
+
+ # hash tables keyed on domain type
+ setexec = defaultdict(list)
+ setcurrent = defaultdict(list)
+
+ # hash tables keyed on (domain, entrypoint file type)
+ # the parameter for defaultdict has to be callable
+ # hence the lambda for the nested defaultdict
+ execute = defaultdict(lambda: defaultdict(list))
+ entrypoint = defaultdict(lambda: defaultdict(list))
+
+ # hash table keyed on (domain, entrypoint, target domain)
+ type_trans = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
+
+ for rule in self.policy.terules():
+ if rule.ruletype == "allow":
+ if rule.tclass not in ["process", "file"]:
+ continue
+
+ perms = rule.perms
+
+ if rule.tclass == "process":
+ if "transition" in perms:
+ for s, t in itertools.product(rule.source.expand(), rule.target.expand()):
+ # only add edges if they actually
+ # transition to a new type
+ if s != t:
+ edge = Edge(self.G, s, t, create=True)
+ edge.transition.append(rule)
+
+ if "dyntransition" in perms:
+ for s, t in itertools.product(rule.source.expand(), rule.target.expand()):
+ # only add edges if they actually
+ # transition to a new type
+ if s != t:
+ e = Edge(self.G, s, t, create=True)
+ e.dyntransition.append(rule)
+
+ if "setexec" in perms:
+ for s in rule.source.expand():
+ setexec[s].append(rule)
+
+ if "setcurrent" in perms:
+ for s in rule.source.expand():
+ setcurrent[s].append(rule)
+
+ else:
+ if "execute" in perms:
+ for s, t in itertools.product(
+ rule.source.expand(),
+ rule.target.expand()):
+ execute[s][t].append(rule)
+
+ if "entrypoint" in perms:
+ for s, t in itertools.product(rule.source.expand(), rule.target.expand()):
+ entrypoint[s][t].append(rule)
+
+ elif rule.ruletype == "type_transition":
+ if rule.tclass != "process":
+ continue
+
+ d = rule.default
+ for s, t in itertools.product(rule.source.expand(), rule.target.expand()):
+ type_trans[s][t][d].append(rule)
+
+ invalid_edge = []
+ clear_transition = []
+ clear_dyntransition = []
+
+ for s, t in self.G.edges_iter():
+ edge = Edge(self.G, s, t)
+ invalid_trans = False
+ invalid_dyntrans = False
+
+ if edge.transition:
+ # get matching domain exec w/entrypoint type
+ entry = set(entrypoint[t].keys())
+ exe = set(execute[s].keys())
+ match = entry.intersection(exe)
+
+ if not match:
+ # there are no valid entrypoints
+ invalid_trans = True
+ else:
+ # TODO try to improve the
+ # efficiency in this loop
+ for m in match:
+ if s in setexec or type_trans[s][m]:
+ # add key for each entrypoint
+ edge.entrypoint[m] += entrypoint[t][m]
+ edge.execute[m] += execute[s][m]
+
+ if type_trans[s][m][t]:
+ edge.type_transition[m] += type_trans[s][m][t]
+
+ if s in setexec:
+ edge.setexec.extend(setexec[s])
+
+ if not edge.setexec and not edge.type_transition:
+ invalid_trans = True
+ else:
+ invalid_trans = True
+
+ if edge.dyntransition:
+ if s in setcurrent:
+ edge.setcurrent.extend(setcurrent[s])
+ else:
+ invalid_dyntrans = True
+ else:
+ invalid_dyntrans = True
+
+ # cannot change the edges while iterating over them,
+ # so keep appropriate lists
+ if invalid_trans and invalid_dyntrans:
+ invalid_edge.append(edge)
+ elif invalid_trans:
+ clear_transition.append(edge)
+ elif invalid_dyntrans:
+ clear_dyntransition.append(edge)
+
+ # Remove invalid transitions
+ self.G.remove_edges_from(invalid_edge)
+ for edge in clear_transition:
+ # if only the regular transition is invalid,
+ # clear the relevant lists
+ del edge.transition
+ del edge.execute
+ del edge.entrypoint
+ del edge.type_transition
+ del edge.setexec
+ for edge in clear_dyntransition:
+ # if only the dynamic transition is invalid,
+ # clear the relevant lists
+ del edge.dyntransition
+ del edge.setcurrent
+
+ self.rebuildgraph = False
+ self.rebuildsubgraph = True
+ self.log.info("Completed building graph.")
+
+ def __remove_excluded_entrypoints(self):
+ invalid_edges = []
+ for source, target in self.subG.edges_iter():
+ edge = Edge(self.subG, source, target)
+ entrypoints = set(edge.entrypoint)
+ entrypoints.intersection_update(self.exclude)
+
+ if not entrypoints:
+ # short circuit if there are no
+ # excluded entrypoint types on
+ # this edge.
+ continue
+
+ for e in entrypoints:
+ # clear the entrypoint data
+ del edge.entrypoint[e]
+ del edge.execute[e]
+
+ try:
+ del edge.type_transition[e]
+ except KeyError: # setexec
+ pass
+
+ # cannot delete the edges while iterating over them
+ if not edge.entrypoint and not edge.dyntransition:
+ invalid_edges.append(edge)
+
+ self.subG.remove_edges_from(invalid_edges)
+
+ def _build_subgraph(self):
+ if self.rebuildgraph:
+ self._build_graph()
+
+ self.log.info("Building subgraph.")
+ self.log.debug("Excluding {0}".format(self.exclude))
+ self.log.debug("Reverse {0}".format(self.reverse))
+
+ # reverse graph for reverse DTA
+ if self.reverse:
+ self.subG = self.G.reverse(copy=True)
+ else:
+ self.subG = self.G.copy()
+
+ if self.exclude:
+ # delete excluded domains from subgraph
+ self.subG.remove_nodes_from(self.exclude)
+
+ # delete excluded entrypoints from subgraph
+ self.__remove_excluded_entrypoints()
+
+ self.rebuildsubgraph = False
+ self.log.info("Completed building subgraph.")
+
+
+class Edge(object):
+
+ """
+ A graph edge. Also used for returning domain transition steps.
+
+ Parameters:
+ source The source type of the edge.
+ target The target type of the edge.
+
+ Keyword Parameters:
+ create (T/F) create the edge if it does not exist.
+ The default is False.
+ """
+
+ transition = EdgeAttrList('transition')
+ setexec = EdgeAttrList('setexec')
+ dyntransition = EdgeAttrList('dyntransition')
+ setcurrent = EdgeAttrList('setcurrent')
+ entrypoint = EdgeAttrDict('entrypoint')
+ execute = EdgeAttrDict('execute')
+ type_transition = EdgeAttrDict('type_transition')
+
+ def __init__(self, graph, source, target, create=False):
+ self.G = graph
+ self.source = source
+ self.target = target
+
+ # a bit of a hack to make Edges work
+ # in NetworkX functions that work on
+ # 2-tuples of (source, target)
+ # (see __getitem__ below)
+ self.st_tuple = (source, target)
+
+ if not self.G.has_edge(source, target):
+ if not create:
+ raise ValueError("Edge does not exist in graph")
+ else:
+ self.G.add_edge(source, target)
+ self.transition = None
+ self.entrypoint = None
+ self.execute = None
+ self.type_transition = None
+ self.setexec = None
+ self.dyntransition = None
+ self.setcurrent = None
+
+ def __getitem__(self, key):
+ return self.st_tuple[key]
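Example usage (a sketch; the domain names are assumptions about the target policy):

    dta = setools.DomainTransitionAnalysis(p)
    for path in dta.shortest_path("init_t", "sshd_t"):
        for step in path:
            print("{0} -> {1}".format(step.source, step.target))
            for rule in step.transition:
                print("   {0}".format(rule))

Each step also carries the entrypoints generator and the setexec, dyntransition, and setcurrent rule lists from the step_output tuple above.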
diff --git a/lib/python2.7/site-packages/setools/exception.py b/lib/python2.7/site-packages/setools/exception.py
new file mode 100644
index 0000000..c3505cd
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/exception.py
@@ -0,0 +1,62 @@
+# Copyright 2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+
+#
+# Base class for exceptions
+#
+
+
+class SEToolsException(Exception):
+
+ """Base class for all SETools exceptions."""
+ pass
+
+#
+# Permission map exceptions
+#
+
+
+class PermissionMapException(SEToolsException):
+
+ """Base class for all permission map exceptions."""
+ pass
+
+
+class PermissionMapParseError(PermissionMapException):
+
+ """Exception for parse errors while reading permission map files."""
+ pass
+
+
+class RuleTypeError(PermissionMapException):
+
+ """Exception for using rules with incorrect rule type."""
+ pass
+
+
+class UnmappedClass(PermissionMapException):
+
+ """Exception for classes that are unmapped"""
+ pass
+
+
+class UnmappedPermission(PermissionMapException):
+
+ """Exception for permissions that are unmapped"""
+ pass
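A brief sketch of why the hierarchy matters: callers can handle one leaf exception specifically while catching the rest of the family through its base class (rule_weight refers to the permission map API used by the information flow analysis below; the surrounding variables are assumed for illustration):

    try:
        weights = perm_map.rule_weight(rule)
    except UnmappedClass:
        weights = None             # e.g. skip rules for classes missing from the map
    except PermissionMapException:
        raise                      # any other permission map problem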
diff --git a/lib/python2.7/site-packages/setools/fsusequery.py b/lib/python2.7/site-packages/setools/fsusequery.py
new file mode 100644
index 0000000..6825a45
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/fsusequery.py
@@ -0,0 +1,87 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import logging
+import re
+
+from . import contextquery
+from .descriptors import CriteriaDescriptor, CriteriaSetDescriptor
+
+
+class FSUseQuery(contextquery.ContextQuery):
+
+ """
+ Query fs_use_* statements.
+
+ Parameter:
+ policy The policy to query.
+
+ Keyword Parameters/Class attributes:
+ ruletype The rule type(s) to match.
+ fs The criteria to match the file system type.
+ fs_regex If true, regular expression matching
+ will be used on the file system type.
+ user The criteria to match the context's user.
+ user_regex If true, regular expression matching
+ will be used on the user.
+ role The criteria to match the context's role.
+ role_regex If true, regular expression matching
+ will be used on the role.
+ type_ The criteria to match the context's type.
+ type_regex If true, regular expression matching
+ will be used on the type.
+ range_ The criteria to match the context's range.
+ range_subset If true, the criteria will match if it is a subset
+ of the context's range.
+ range_overlap If true, the criteria will match if it overlaps
+ any of the context's range.
+ range_superset If true, the criteria will match if it is a superset
+ of the context's range.
+ range_proper If true, use proper superset/subset operations.
+ No effect if not using set operations.
+ """
+
+ ruletype = None
+ fs = CriteriaDescriptor("fs_regex")
+ fs_regex = False
+
+ def results(self):
+ """Generator which yields all matching fs_use_* statements."""
+ self.log.info("Generating results from {0.policy}".format(self))
+ self.log.debug("Ruletypes: {0.ruletype}".format(self))
+ self.log.debug("FS: {0.fs!r}, regex: {0.fs_regex}".format(self))
+ self.log.debug("User: {0.user!r}, regex: {0.user_regex}".format(self))
+ self.log.debug("Role: {0.role!r}, regex: {0.role_regex}".format(self))
+ self.log.debug("Type: {0.type_!r}, regex: {0.type_regex}".format(self))
+ self.log.debug("Range: {0.range_!r}, subset: {0.range_subset}, overlap: {0.range_overlap}, "
+ "superset: {0.range_superset}, proper: {0.range_proper}".format(self))
+
+ for fsu in self.policy.fs_uses():
+ if self.ruletype and fsu.ruletype not in self.ruletype:
+ continue
+
+ if self.fs and not self._match_regex(
+ fsu.fs,
+ self.fs,
+ self.fs_regex):
+ continue
+
+ if not self._match_context(fsu.context):
+ continue
+
+ yield fsu
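Example (a sketch continuing from above; the filesystem name and MLS level are assumptions about the target policy):

    q = setools.FSUseQuery(p, ruletype=["fs_use_trans"],
                           fs="tmpfs", range_="s0", range_overlap=True)
    for fsu in q.results():
        print(fsu)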
diff --git a/lib/python2.7/site-packages/setools/genfsconquery.py b/lib/python2.7/site-packages/setools/genfsconquery.py
new file mode 100644
index 0000000..c67dfd6
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/genfsconquery.py
@@ -0,0 +1,98 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import logging
+import re
+
+from . import contextquery
+from .descriptors import CriteriaDescriptor
+
+
+class GenfsconQuery(contextquery.ContextQuery):
+
+ """
+ Query genfscon statements.
+
+ Parameter:
+ policy The policy to query.
+
+ Keyword Parameters/Class attributes:
+ fs The criteria to match the file system type.
+ fs_regex If true, regular expression matching
+ will be used on the file system type.
+ path The criteria to match the path.
+ path_regex If true, regular expression matching
+ will be used on the path.
+ filetype The file type of the genfscon statement
+ to match.
+ user The criteria to match the context's user.
+ user_regex If true, regular expression matching
+ will be used on the user.
+ role The criteria to match the context's role.
+ role_regex If true, regular expression matching
+ will be used on the role.
+ type_ The criteria to match the context's type.
+ type_regex If true, regular expression matching
+ will be used on the type.
+ range_ The criteria to match the context's range.
+ range_subset If true, the criteria will match if it is a subset
+ of the context's range.
+ range_overlap If true, the criteria will match if it overlaps
+ any of the context's range.
+ range_superset If true, the criteria will match if it is a superset
+ of the context's range.
+ range_proper If true, use proper superset/subset operations.
+ No effect if not using set operations.
+ """
+
+ filetype = None
+ fs = CriteriaDescriptor("fs_regex")
+ fs_regex = False
+ path = CriteriaDescriptor("path_regex")
+ path_regex = False
+
+ def results(self):
+ """Generator which yields all matching genfscons."""
+ self.log.info("Generating results from {0.policy}".format(self))
+ self.log.debug("FS: {0.fs!r}, regex: {0.fs_regex}".format(self))
+ self.log.debug("Path: {0.path!r}, regex: {0.path_regex}".format(self))
+ self.log.debug("Filetype: {0.filetype!r}".format(self))
+ self.log.debug("User: {0.user!r}, regex: {0.user_regex}".format(self))
+ self.log.debug("Role: {0.role!r}, regex: {0.role_regex}".format(self))
+ self.log.debug("Type: {0.type_!r}, regex: {0.type_regex}".format(self))
+ self.log.debug("Range: {0.range_!r}, subset: {0.range_subset}, overlap: {0.range_overlap}, "
+ "superset: {0.range_superset}, proper: {0.range_proper}".format(self))
+
+ for genfs in self.policy.genfscons():
+ if self.fs and not self._match_regex(
+ genfs.fs,
+ self.fs,
+ self.fs_regex):
+ continue
+
+ if self.path and not self._match_regex(
+ genfs.path,
+ self.path,
+ self.path_regex):
+ continue
+
+ if self.filetype and self.filetype != genfs.filetype:
+ continue
+
+ if not self._match_context(genfs.context):
+ continue
+
+ yield genfs
diff --git a/lib/python2.7/site-packages/setools/infoflow.py b/lib/python2.7/site-packages/setools/infoflow.py
new file mode 100644
index 0000000..ea3ec32
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/infoflow.py
@@ -0,0 +1,403 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import itertools
+import logging
+from collections import namedtuple
+
+import networkx as nx
+from networkx.exception import NetworkXError, NetworkXNoPath
+
+from .descriptors import EdgeAttrIntMax, EdgeAttrList
+
+__all__ = ['InfoFlowAnalysis']
+
+# Return values for the analysis
+# are in the following tuple format:
+step_output = namedtuple("step", ["source",
+ "target",
+ "rules"])
+
+
+class InfoFlowAnalysis(object):
+
+ """Information flow analysis."""
+
+ def __init__(self, policy, perm_map, min_weight=1, exclude=None):
+ """
+ Parameters:
+ policy The policy to analyze.
+ perm_map The permission map or path to the permission map file.
+ min_weight The minimum permission weight to include in the analysis.
+ (default is 1)
+ exclude The types excluded from the information flow analysis.
+ (default is none)
+ """
+ self.log = logging.getLogger(self.__class__.__name__)
+
+ self.policy = policy
+
+ self.min_weight = min_weight
+ self.perm_map = perm_map
+ self.exclude = exclude
+ self.rebuildgraph = True
+ self.rebuildsubgraph = True
+
+ self.G = nx.DiGraph()
+ self.subG = None
+
+ @property
+ def min_weight(self):
+ return self._min_weight
+
+ @min_weight.setter
+ def min_weight(self, weight):
+ if not 1 <= weight <= 10:
+ raise ValueError(
+ "Min information flow weight must be an integer 1-10.")
+
+ self._min_weight = weight
+ self.rebuildsubgraph = True
+
+ @property
+ def perm_map(self):
+ return self._perm_map
+
+ @perm_map.setter
+ def perm_map(self, perm_map):
+ self._perm_map = perm_map
+ self.rebuildgraph = True
+ self.rebuildsubgraph = True
+
+ @property
+ def exclude(self):
+ return self._exclude
+
+ @exclude.setter
+ def exclude(self, types):
+ if types:
+ self._exclude = [self.policy.lookup_type(t) for t in types]
+ else:
+ self._exclude = []
+
+ self.rebuildsubgraph = True
+
+ def shortest_path(self, source, target):
+ """
+ Generator which yields one shortest path between the source
+ and target types (there may be more).
+
+ Parameters:
+ source The source type.
+ target The target type.
+
+ Yield: generator(steps)
+
+ steps Yield: tuple(source, target, rules)
+
+ source The source type for this step of the information flow.
+ target The target type for this step of the information flow.
+ rules The list of rules creating this information flow step.
+ """
+ s = self.policy.lookup_type(source)
+ t = self.policy.lookup_type(target)
+
+ if self.rebuildsubgraph:
+ self._build_subgraph()
+
+ self.log.info("Generating one shortest path from {0} to {1}...".format(s, t))
+
+ try:
+ yield self.__generate_steps(nx.shortest_path(self.subG, s, t))
+ except (NetworkXNoPath, NetworkXError):
+ # NetworkXError: the type is valid but not in graph, e.g.
+ # excluded or disconnected due to min weight
+ # NetworkXNoPath: no paths or the target type is
+ # not in the graph
+ pass
+
+ def all_paths(self, source, target, maxlen=2):
+ """
+ Generator which yields all paths between the source and target
+ up to the specified maximum path length. This algorithm
+ tends to get very expensive above 3-5 steps, depending
+ on the policy complexity.
+
+ Parameters:
+ source The source type.
+ target The target type.
+ maxlen Maximum length of paths.
+
+ Yield: generator(steps)
+
+ steps Yield: tuple(source, target, rules)
+
+ source The source type for this step of the information flow.
+ target The target type for this step of the information flow.
+ rules The list of rules creating this information flow step.
+ """
+ if maxlen < 1:
+ raise ValueError("Maximum path length must be positive.")
+
+ s = self.policy.lookup_type(source)
+ t = self.policy.lookup_type(target)
+
+ if self.rebuildsubgraph:
+ self._build_subgraph()
+
+ self.log.info("Generating all paths from {0} to {1}, max len {2}...".format(s, t, maxlen))
+
+ try:
+ for path in nx.all_simple_paths(self.subG, s, t, maxlen):
+ yield self.__generate_steps(path)
+ except (NetworkXNoPath, NetworkXError):
+ # NetworkXError: the type is valid but not in graph, e.g.
+ # excluded or disconnected due to min weight
+ # NetworkXNoPath: no paths or the target type is
+ # not in the graph
+ pass
+
+ def all_shortest_paths(self, source, target):
+ """
+ Generator which yields all shortest paths between the source
+ and target types.
+
+ Parameters:
+ source The source type.
+ target The target type.
+
+ Yield: generator(steps)
+
+ steps Yield: tuple(source, target, rules)
+
+ source The source type for this step of the information flow.
+ target The target type for this step of the information flow.
+ rules The list of rules creating this information flow step.
+ """
+ s = self.policy.lookup_type(source)
+ t = self.policy.lookup_type(target)
+
+ if self.rebuildsubgraph:
+ self._build_subgraph()
+
+ self.log.info("Generating all shortest paths from {0} to {1}...".format(s, t))
+
+ try:
+ for path in nx.all_shortest_paths(self.subG, s, t):
+ yield self.__generate_steps(path)
+ except (NetworkXNoPath, NetworkXError, KeyError):
+ # NetworkXError: the type is valid but not in graph, e.g.
+ # excluded or disconnected due to min weight
+ # NetworkXNoPath: no paths or the target type is
+ # not in the graph
+ # KeyError: work around NetworkX bug
+ # when the source node is not in the graph
+ pass
+
+ def infoflows(self, type_, out=True):
+ """
+ Generator which yields all information flows in/out of a
+ specified source type.
+
+ Parameters:
+ type_ The starting type.
+
+ Keyword Parameters:
+ out If true, information flows out of the type will
+ be returned. If false, information flows in to the
+ type will be returned. Default is true.
+
+ Yield: generator(steps)
+
+ steps A generator that returns the tuple of
+ source, target, and rules for each
+ information flow.
+ """
+ s = self.policy.lookup_type(type_)
+
+ if self.rebuildsubgraph:
+ self._build_subgraph()
+
+ self.log.info("Generating all infoflows out of {0}...".format(s))
+
+ if out:
+ flows = self.subG.out_edges_iter(s)
+ else:
+ flows = self.subG.in_edges_iter(s)
+
+ try:
+ for source, target in flows:
+ edge = Edge(self.subG, source, target)
+ yield step_output(source, target, edge.rules)
+ except NetworkXError:
+ # NetworkXError: the type is valid but not in graph, e.g.
+ # excluded or disconnected due to min weight
+ pass
+
+ def get_stats(self): # pragma: no cover
+ """
+ Get the information flow graph statistics.
+
+ Return: tuple(nodes, edges)
+
+ nodes The number of nodes (types) in the graph.
+ edges The number of edges (information flows between types)
+ in the graph.
+ """
+ return (self.G.number_of_nodes(), self.G.number_of_edges())
+
+ #
+ # Internal functions follow
+ #
+
+ def __generate_steps(self, path):
+ """
+ Generator which returns the source, target, and associated rules
+ for each information flow step.
+
+ Parameter:
+ path A list of graph node names representing an information flow path.
+
+ Yield: tuple(source, target, rules)
+
+ source The source type for this step of the information flow.
+ target The target type for this step of the information flow.
+ rules The list of rules creating this information flow step.
+ """
+ for s in range(1, len(path)):
+ edge = Edge(self.subG, path[s - 1], path[s])
+ yield step_output(edge.source, edge.target, edge.rules)
+
+ #
+ #
+ # Graph building functions
+ #
+ #
+ # 1. _build_graph determines the flow in each direction for each TE
+ # rule and then expands the rule. All information flows are
+ # included in this main graph: memory is traded off for efficiency
+ # as the main graph should only need to be rebuilt if permission
+ # weights change.
+ # 2. _build_subgraph derives a subgraph which removes all excluded
+ # types (nodes) and edges (information flows) which are below the
+ # minimum weight. This subgraph is rebuilt only if the main graph
+ # is rebuilt or the minimum weight or excluded types change.
+
+ def _build_graph(self):
+ self.G.clear()
+
+ self.perm_map.map_policy(self.policy)
+
+ self.log.info("Building graph from {0}...".format(self.policy))
+
+ for rule in self.policy.terules():
+ if rule.ruletype != "allow":
+ continue
+
+ (rweight, wweight) = self.perm_map.rule_weight(rule)
+
+ for s, t in itertools.product(rule.source.expand(), rule.target.expand()):
+ # only add flows if they actually flow
+ # in or out of the source type
+ if s != t:
+ if wweight:
+ edge = Edge(self.G, s, t, create=True)
+ edge.rules.append(rule)
+ edge.weight = wweight
+
+ if rweight:
+ edge = Edge(self.G, t, s, create=True)
+ edge.rules.append(rule)
+ edge.weight = rweight
+
+ self.rebuildgraph = False
+ self.rebuildsubgraph = True
+ self.log.info("Completed building graph.")
+
+ def _build_subgraph(self):
+ if self.rebuildgraph:
+ self._build_graph()
+
+ self.log.info("Building subgraph...")
+ self.log.debug("Excluding {0!r}".format(self.exclude))
+ self.log.debug("Min weight {0}".format(self.min_weight))
+
+ # delete excluded types from subgraph
+ nodes = [n for n in self.G.nodes() if n not in self.exclude]
+ self.subG = self.G.subgraph(nodes)
+
+ # delete edges below minimum weight.
+ # no need if weight is 1, since that
+ # does not exclude any edges.
+ if self.min_weight > 1:
+ delete_list = []
+ for s, t in self.subG.edges_iter():
+ edge = Edge(self.subG, s, t)
+ if edge.weight < self.min_weight:
+ delete_list.append(edge)
+
+ self.subG.remove_edges_from(delete_list)
+
+ self.rebuildsubgraph = False
+ self.log.info("Completed building subgraph.")
+
+
+class Edge(object):
+
+ """
+ A graph edge. Also used for returning information flow steps.
+
+ Parameters:
+ source The source type of the edge.
+ target The target type of the edge.
+
+ Keyword Parameters:
+ create (T/F) create the edge if it does not exist.
+ The default is False.
+ """
+
+ rules = EdgeAttrList('rules')
+
+ # use capacity to store the info flow weight so
+ # we can use network flow algorithms naturally.
+ # The weight for each edge is 1 since each info
+ # flow step is no more costly than another
+ # (see the add_edge() call below)
+ weight = EdgeAttrIntMax('capacity')
+
+ def __init__(self, graph, source, target, create=False):
+ self.G = graph
+ self.source = source
+ self.target = target
+
+ # a bit of a hack to make edges work
+ # in NetworkX functions that work on
+ # 2-tuples of (source, target)
+ # (see __getitem__ below)
+ self.st_tuple = (source, target)
+
+ if not self.G.has_edge(source, target):
+ if create:
+ self.G.add_edge(source, target, weight=1)
+ self.rules = None
+ self.weight = None
+ else:
+ raise ValueError("Edge does not exist in graph")
+
+ def __getitem__(self, key):
+ return self.st_tuple[key]
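Example usage (a sketch; the permission map path, weight, and type names are assumptions):

    m = setools.PermissionMap("perm_map")      # assumed path to a permission map file
    ifa = setools.InfoFlowAnalysis(p, m, min_weight=3, exclude=["unconfined_t"])
    for flow in ifa.infoflows("shadow_t", out=False):
        print("{0} -> {1} ({2} rules)".format(flow.source, flow.target,
                                              len(flow.rules)))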
diff --git a/lib/python2.7/site-packages/setools/initsidquery.py b/lib/python2.7/site-packages/setools/initsidquery.py
new file mode 100644
index 0000000..1eb3790
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/initsidquery.py
@@ -0,0 +1,74 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import logging
+
+from . import compquery
+from . import contextquery
+
+
+class InitialSIDQuery(compquery.ComponentQuery, contextquery.ContextQuery):
+
+ """
+ Initial SID (Initial context) query.
+
+ Parameter:
+ policy The policy to query.
+
+ Keyword Parameters/Class attributes:
+ name The Initial SID name to match.
+ name_regex If true, regular expression matching
+ will be used on the Initial SID name.
+ user The criteria to match the context's user.
+ user_regex If true, regular expression matching
+ will be used on the user.
+ role The criteria to match the context's role.
+ role_regex If true, regular expression matching
+ will be used on the role.
+ type_ The criteria to match the context's type.
+ type_regex If true, regular expression matching
+ will be used on the type.
+ range_ The criteria to match the context's range.
+ range_subset If true, the criteria will match if it is a subset
+ of the context's range.
+ range_overlap If true, the criteria will match if it overlaps
+ any of the context's range.
+ range_superset If true, the criteria will match if it is a superset
+ of the context's range.
+ range_proper If true, use proper superset/subset operations.
+ No effect if not using set operations.
+ """
+
+ def results(self):
+ """Generator which yields all matching initial SIDs."""
+ self.log.info("Generating results from {0.policy}".format(self))
+ self.log.debug("Name: {0.name!r}, regex: {0.name_regex}".format(self))
+ self.log.debug("User: {0.user!r}, regex: {0.user_regex}".format(self))
+ self.log.debug("Role: {0.role!r}, regex: {0.role_regex}".format(self))
+ self.log.debug("Type: {0.type_!r}, regex: {0.type_regex}".format(self))
+ self.log.debug("Range: {0.range_!r}, subset: {0.range_subset}, overlap: {0.range_overlap}, "
+ "superset: {0.range_superset}, proper: {0.range_proper}".format(self))
+
+ for i in self.policy.initialsids():
+ if not self._match_name(i):
+ continue
+
+ if not self._match_context(i.context):
+ continue
+
+ yield i
diff --git a/lib/python2.7/site-packages/setools/mixins.py b/lib/python2.7/site-packages/setools/mixins.py
new file mode 100644
index 0000000..a31d420
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/mixins.py
@@ -0,0 +1,91 @@
+# Copyright 2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+# pylint: disable=attribute-defined-outside-init,no-member
+import re
+
+from .descriptors import CriteriaDescriptor, CriteriaSetDescriptor
+
+
+class MatchAlias(object):
+
+ """Mixin for matching an object's aliases."""
+
+ alias = CriteriaDescriptor("alias_regex")
+ alias_regex = False
+
+ def _match_alias(self, obj):
+ """
+ Match the alias criteria
+
+ Parameter:
+ obj An object with an alias generator method named "aliases"
+ """
+
+ if not self.alias:
+ # if there are no criteria, everything matches.
+ return True
+
+ return self._match_in_set(obj.aliases(), self.alias, self.alias_regex)
+
+
+class MatchObjClass(object):
+
+ """Mixin for matching an object's class."""
+
+ tclass = CriteriaSetDescriptor("tclass_regex", "lookup_class")
+ tclass_regex = False
+
+ def _match_object_class(self, obj):
+ """
+ Match the object class criteria
+
+ Parameter:
+ obj An object with an object class attribute named "tclass"
+ """
+
+ if not self.tclass:
+ # if there are no criteria, everything matches.
+ return True
+ elif self.tclass_regex:
+ return bool(self.tclass.search(str(obj.tclass)))
+ else:
+ return obj.tclass in self.tclass
+
+
+class MatchPermission(object):
+
+ """Mixin for matching an object's permissions."""
+
+ perms = CriteriaSetDescriptor("perms_regex")
+ perms_equal = False
+ perms_regex = False
+
+ def _match_perms(self, obj):
+ """
+ Match the permission criteria
+
+ Parameter:
+ obj An object with a permission set class attribute named "perms"
+ """
+
+ if not self.perms:
+ # if there are no criteria, everything matches.
+ return True
+
+ return self._match_regex_or_set(obj.perms, self.perms, self.perms_equal, self.perms_regex)
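
A schematic sketch of how these mixins compose with the PolicyQuery base (MLSRuleQuery below is a real example of the pattern; MyAllowQuery here is hypothetical):

    from setools import mixins, query

    class MyAllowQuery(mixins.MatchObjClass, mixins.MatchPermission,
                       query.PolicyQuery):

        """Hypothetical query: allow rules by object class and permissions."""

        def results(self):
            for rule in self.policy.terules():
                if rule.ruletype != "allow":
                    continue  # sketch: only av rules carry permission sets
                if not self._match_object_class(rule):
                    continue
                if not self._match_perms(rule):
                    continue
                yield rule
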
diff --git a/lib/python2.7/site-packages/setools/mlsrulequery.py b/lib/python2.7/site-packages/setools/mlsrulequery.py
new file mode 100644
index 0000000..3a9e1bf
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/mlsrulequery.py
@@ -0,0 +1,115 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import logging
+
+from . import mixins, query
+from .descriptors import CriteriaDescriptor, CriteriaSetDescriptor, RuletypeDescriptor
+
+
+class MLSRuleQuery(mixins.MatchObjClass, query.PolicyQuery):
+
+ """
+ Query MLS rules.
+
+ Parameter:
+ policy The policy to query.
+
+ Keyword Parameters/Class attributes:
+ ruletype The list of rule type(s) to match.
+ source The name of the source type/attribute to match.
+ source_regex If true, regular expression matching will
+ be used on the source type/attribute.
+ target The name of the target type/attribute to match.
+ target_regex If true, regular expression matching will
+ be used on the target type/attribute.
+ tclass The object class(es) to match.
+ tclass_regex If true, use a regular expression for
+ matching the rule's object class.
+ default The criteria to match the rule's default range.
+ default_overlap If true, the criteria will match if it overlaps
+ any of the default range.
+ default_subset If true, the criteria will match if it is a subset
+ of the default range.
+ default_superset If true, the criteria will match if it is a superset
+ of the default range.
+ default_proper If true, use proper superset/subset operations.
+ No effect if not using set operations.
+ """
+
+ ruletype = RuletypeDescriptor("validate_mls_ruletype")
+ source = CriteriaDescriptor("source_regex", "lookup_type_or_attr")
+ source_regex = False
+ target = CriteriaDescriptor("target_regex", "lookup_type_or_attr")
+ target_regex = False
+ tclass = CriteriaSetDescriptor("tclass_regex", "lookup_class")
+ tclass_regex = False
+ default = CriteriaDescriptor(lookup_function="lookup_range")
+ default_overlap = False
+ default_subset = False
+ default_superset = False
+ default_proper = False
+
+ def results(self):
+ """Generator which yields all matching MLS rules."""
+ self.log.info("Generating results from {0.policy}".format(self))
+ self.log.debug("Ruletypes: {0.ruletype}".format(self))
+ self.log.debug("Source: {0.source!r}, regex: {0.source_regex}".format(self))
+ self.log.debug("Target: {0.target!r}, regex: {0.target_regex}".format(self))
+ self.log.debug("Class: {0.tclass!r}, regex: {0.tclass_regex}".format(self))
+ self.log.debug("Default: {0.default!r}, overlap: {0.default_overlap}, "
+ "subset: {0.default_subset}, superset: {0.default_superset}, "
+ "proper: {0.default_proper}".format(self))
+
+ for rule in self.policy.mlsrules():
+ #
+ # Matching on rule type
+ #
+ if self.ruletype:
+ if rule.ruletype not in self.ruletype:
+ continue
+
+ #
+ # Matching on source type
+ #
+ if self.source and not self._match_regex(
+ rule.source,
+ self.source,
+ self.source_regex):
+ continue
+
+ #
+ # Matching on target type
+ #
+ if self.target and not self._match_regex(
+ rule.target,
+ self.target,
+ self.target_regex):
+ continue
+
+ #
+ # Matching on object class
+ #
+ if not self._match_object_class(rule):
+ continue
+
+ #
+ # Matching on range
+ #
+ if self.default and not self._match_range(
+ rule.default,
+ self.default,
+ self.default_subset,
+ self.default_overlap,
+ self.default_superset,
+ self.default_proper):
+ continue
+
+ # if we get here, we have matched all available criteria
+ yield rule
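
A usage sketch (the policy path, source type, and range criteria are illustrative):

    from setools.policyrep import SELinuxPolicy
    from setools.mlsrulequery import MLSRuleQuery

    p = SELinuxPolicy("/path/to/policy.29")

    # range_transition is the MLS rule type; match rules from init_t
    # whose default range is a subset of s0.
    q = MLSRuleQuery(p, ruletype=["range_transition"], source="init_t",
                     default="s0", default_subset=True)
    for rule in q.results():
        print(rule)
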
diff --git a/lib/python2.7/site-packages/setools/netifconquery.py b/lib/python2.7/site-packages/setools/netifconquery.py
new file mode 100644
index 0000000..30db977
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/netifconquery.py
@@ -0,0 +1,77 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import logging
+
+from . import compquery
+from . import contextquery
+
+
+class NetifconQuery(compquery.ComponentQuery, contextquery.ContextQuery):
+
+ """
+ Network interface context query.
+
+ Parameter:
+ policy The policy to query.
+
+ Keyword Parameters/Class attributes:
+ name The name of the network interface to match.
+ name_regex If true, regular expression matching will
+ be used for matching the name.
+ user The criteria to match the context's user.
+ user_regex If true, regular expression matching
+ will be used on the user.
+ role The criteria to match the context's role.
+ role_regex If true, regular expression matching
+ will be used on the role.
+ type_ The criteria to match the context's type.
+ type_regex If true, regular expression matching
+ will be used on the type.
+ range_ The criteria to match the context's range.
+ range_subset If true, the criteria will match if it is a subset
+ of the context's range.
+ range_overlap If true, the criteria will match if it overlaps
+ any of the context's range.
+ range_superset If true, the criteria will match if it is a superset
+ of the context's range.
+ range_proper If true, use proper superset/subset operations.
+ No effect if not using set operations.
+ """
+
+ def results(self):
+ """Generator which yields all matching netifcons."""
+ self.log.info("Generating results from {0.policy}".format(self))
+ self.log.debug("Name: {0.name!r}, regex: {0.name_regex}".format(self))
+ self.log.debug("User: {0.user!r}, regex: {0.user_regex}".format(self))
+ self.log.debug("Role: {0.role!r}, regex: {0.role_regex}".format(self))
+ self.log.debug("Type: {0.type_!r}, regex: {0.type_regex}".format(self))
+ self.log.debug("Range: {0.range_!r}, subset: {0.range_subset}, overlap: {0.range_overlap}, "
+ "superset: {0.range_superset}, proper: {0.range_proper}".format(self))
+
+ for netif in self.policy.netifcons():
+ if self.name and not self._match_regex(
+ netif.netif,
+ self.name,
+ self.name_regex):
+ continue
+
+ if not self._match_context(netif.context):
+ continue
+
+ yield netif
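
A usage sketch (the policy path and interface pattern are illustrative):

    from setools.policyrep import SELinuxPolicy
    from setools.netifconquery import NetifconQuery

    p = SELinuxPolicy("/path/to/policy.29")
    q = NetifconQuery(p, name="eth[0-9]+", name_regex=True)
    for netif in q.results():
        print("{0}: {1}".format(netif.netif, netif.context))
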
diff --git a/lib/python2.7/site-packages/setools/nodeconquery.py b/lib/python2.7/site-packages/setools/nodeconquery.py
new file mode 100644
index 0000000..eb21d81
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/nodeconquery.py
@@ -0,0 +1,148 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+try:
+ import ipaddress
+except ImportError: # pragma: no cover
+ pass
+
+import logging
+from socket import AF_INET, AF_INET6
+
+from . import contextquery
+
+
+class NodeconQuery(contextquery.ContextQuery):
+
+ """
+ Query nodecon statements.
+
+ Parameter:
+ policy The policy to query.
+
+ Keyword Parameters/Class attributes:
+ network The IPv4/IPv6 address or IPv4/IPv6 network address
+ with netmask, e.g. 192.168.1.0/255.255.255.0 or
+ "192.168.1.0/24".
+ network_overlap If true, the net will match if it overlaps with
+ the nodecon's network instead of equality.
+ ip_version The IP version of the nodecon to match. (socket.AF_INET
+ for IPv4 or socket.AF_INET6 for IPv6)
+ user The criteria to match the context's user.
+ user_regex If true, regular expression matching
+ will be used on the user.
+ role The criteria to match the context's role.
+ role_regex If true, regular expression matching
+ will be used on the role.
+ type_ The criteria to match the context's type.
+ type_regex If true, regular expression matching
+ will be used on the type.
+ range_ The criteria to match the context's range.
+ range_subset If true, the criteria will match if it is a subset
+ of the context's range.
+ range_overlap If true, the criteria will match if it overlaps
+ any of the context's range.
+ range_superset If true, the criteria will match if it is a superset
+ of the context's range.
+ range_proper If true, use proper superset/subset operations.
+ No effect if not using set operations.
+ """
+
+ _network = None
+ network_overlap = False
+ _ip_version = None
+
+ @property
+ def ip_version(self):
+ return self._ip_version
+
+ @ip_version.setter
+ def ip_version(self, value):
+ if value:
+ if value not in (AF_INET, AF_INET6):
+ raise ValueError(
+ "The address family must be {0} for IPv4 or {1} for IPv6.".
+ format(AF_INET, AF_INET6))
+
+ self._ip_version = value
+ else:
+ self._ip_version = None
+
+ @property
+ def network(self):
+ return self._network
+
+ @network.setter
+ def network(self, value):
+ if value:
+ try:
+ self._network = ipaddress.ip_network(value)
+ except NameError: # pragma: no cover
+ raise RuntimeError("Nodecon IP address/network functions require Python 3.3+.")
+ else:
+ self._network = None
+
+ def results(self):
+ """Generator which yields all matching nodecons."""
+ self.log.info("Generating results from {0.policy}".format(self))
+ self.log.debug("Network: {0.network!r}, overlap: {0.network_overlap}".format(self))
+ self.log.debug("IP Version: {0.ip_version}".format(self))
+ self.log.debug("User: {0.user!r}, regex: {0.user_regex}".format(self))
+ self.log.debug("Role: {0.role!r}, regex: {0.role_regex}".format(self))
+ self.log.debug("Type: {0.type_!r}, regex: {0.type_regex}".format(self))
+ self.log.debug("Range: {0.range_!r}, subset: {0.range_subset}, overlap: {0.range_overlap}, "
+ "superset: {0.range_superset}, proper: {0.range_proper}".format(self))
+
+ for nodecon in self.policy.nodecons():
+
+ if self.network:
+ try:
+ netmask = ipaddress.ip_address(nodecon.netmask)
+ except NameError: # pragma: no cover
+ # Should never actually hit this since the self.network
+ # setter raises the same exception.
+ raise RuntimeError("Nodecon IP address/network functions require Python 3.3+.")
+
+ # Python 3.3's IPv6Network constructor does not support
+ # expanded netmasks, only CIDR numbers. Convert netmask
+ # into CIDR.
+ # This is Brian Kernighan's method for counting set bits.
+ # If the netmask happens to be invalid, this will
+ # not detect it.
+ CIDR = 0
+ int_netmask = int(netmask)
+ while int_netmask:
+ int_netmask &= int_netmask - 1
+ CIDR += 1
+
+ net = ipaddress.ip_network('{0}/{1}'.format(nodecon.address, CIDR))
+
+ if self.network_overlap:
+ if not self.network.overlaps(net):
+ continue
+ else:
+ if not net == self.network:
+ continue
+
+ if self.ip_version and self.ip_version != nodecon.ip_version:
+ continue
+
+ if not self._match_context(nodecon.context):
+ continue
+
+ yield nodecon
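
A usage sketch (Python 3.3+ is required for ipaddress, per the comments above; the network values are illustrative), followed by a standalone check of the set-bit count used for the netmask-to-CIDR conversion:

    import ipaddress
    from socket import AF_INET
    from setools.policyrep import SELinuxPolicy
    from setools.nodeconquery import NodeconQuery

    p = SELinuxPolicy("/path/to/policy.29")
    q = NodeconQuery(p, network="192.168.1.0/24", network_overlap=True,
                     ip_version=AF_INET)
    for nodecon in q.results():
        print(nodecon)

    # Kernighan's method clears the lowest set bit each pass, so the
    # pass count equals the number of set bits (the CIDR prefix length).
    mask = int(ipaddress.ip_address(u"255.255.255.0"))
    cidr = 0
    while mask:
        mask &= mask - 1
        cidr += 1
    assert cidr == 24
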
diff --git a/lib/python2.7/site-packages/setools/objclassquery.py b/lib/python2.7/site-packages/setools/objclassquery.py
new file mode 100644
index 0000000..8f40df8
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/objclassquery.py
@@ -0,0 +1,101 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import logging
+import re
+
+from . import compquery
+from .descriptors import CriteriaDescriptor, CriteriaSetDescriptor
+from .policyrep.exception import NoCommon
+
+
+class ObjClassQuery(compquery.ComponentQuery):
+
+ """
+ Query object classes.
+
+ Parameter:
+ policy The policy to query.
+
+ Keyword Parameters/Class attributes:
+ name The name of the object class to match.
+ name_regex If true, regular expression matching will
+ be used for matching the name.
+ common The name of the inherited common to match.
+ common_regex If true, regular expression matching will
+ be used for matching the common name.
+ perms The permissions to match.
+ perms_equal If true, only object classes with permission sets
+ that are equal to the criteria will
+ match. Otherwise, any intersection
+ will match.
+ perms_regex If true, regular expression matching
+ will be used on the permission names instead
+ of set logic.
+ perms_indirect If false, permissions inherited from a common
+ permission set will not be evaluated. Default
+ is true.
+ """
+
+ common = CriteriaDescriptor("common_regex", "lookup_common")
+ common_regex = False
+ perms = CriteriaSetDescriptor("perms_regex")
+ perms_equal = False
+ perms_indirect = True
+ perms_regex = False
+
+ def results(self):
+ """Generator which yields all matching object classes."""
+ self.log.info("Generating results from {0.policy}".format(self))
+ self.log.debug("Name: {0.name!r}, regex: {0.name_regex}".format(self))
+ self.log.debug("Common: {0.common!r}, regex: {0.common_regex}".format(self))
+ self.log.debug("Perms: {0.perms}, regex: {0.perms_regex}, "
+ "eq: {0.perms_equal}, indirect: {0.perms_indirect}".format(self))
+
+ for class_ in self.policy.classes():
+ if not self._match_name(class_):
+ continue
+
+ if self.common:
+ try:
+ if not self._match_regex(
+ class_.common,
+ self.common,
+ self.common_regex):
+ continue
+ except NoCommon:
+ continue
+
+ if self.perms:
+ perms = class_.perms
+
+ if self.perms_indirect:
+ try:
+ perms |= class_.common.perms
+ except NoCommon:
+ pass
+
+ if not self._match_regex_or_set(
+ perms,
+ self.perms,
+ self.perms_equal,
+ self.perms_regex):
+ continue
+
+ yield class_
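
A usage sketch (the policy path and permission name are illustrative):

    from setools.policyrep import SELinuxPolicy
    from setools.objclassquery import ObjClassQuery

    p = SELinuxPolicy("/path/to/policy.29")

    # Classes that declare an "entrypoint" permission directly,
    # ignoring permissions inherited from a common.
    q = ObjClassQuery(p, perms=["entrypoint"], perms_indirect=False)
    for class_ in q.results():
        print(class_)
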
diff --git a/lib/python2.7/site-packages/setools/permmap.py b/lib/python2.7/site-packages/setools/permmap.py
new file mode 100644
index 0000000..54cd9f9
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/permmap.py
@@ -0,0 +1,363 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import sys
+import logging
+from errno import ENOENT
+
+from . import exception
+from . import policyrep
+
+
+class PermissionMap(object):
+
+ """Permission Map for information flow analysis."""
+
+ valid_infoflow_directions = ["r", "w", "b", "n", "u"]
+ min_weight = 1
+ max_weight = 10
+
+ def __init__(self, permmapfile=None):
+ """
+ Parameter:
+ permmapfile The path to the permission map to load.
+ """
+ self.log = logging.getLogger(self.__class__.__name__)
+
+ if permmapfile:
+ self.load(permmapfile)
+ else:
+ for path in ["data/", sys.prefix + "/share/setools/"]:
+ try:
+ self.load(path + "perm_map")
+ break
+ except (IOError, OSError) as err:
+ if err.errno != ENOENT:
+ raise
+ else:
+ raise RuntimeError("Unable to load default permission map.")
+
+ def load(self, permmapfile):
+ """
+ Parameter:
+ permmapfile The path to the permission map to load.
+ """
+ self.log.info("Opening permission map \"{0}\"".format(permmapfile))
+
+ # state machine
+ # 1 = read number of classes
+ # 2 = read class name and number of perms
+ # 3 = read perms
+ with open(permmapfile, "r") as mapfile:
+ class_count = 0
+ num_classes = 0
+ state = 1
+
+ self.permmap = dict()
+
+ for line_num, line in enumerate(mapfile, start=1):
+ entry = line.split()
+
+ if len(entry) == 0 or entry[0][0] == '#':
+ continue
+
+ if state == 1:
+ try:
+ num_classes = int(entry[0])
+ except ValueError:
+ raise exception.PermissionMapParseError(
+ "{0}:{1}:Invalid number of classes: {2}".
+ format(permmapfile, line_num, entry[0]))
+
+ if num_classes < 1:
+ raise exception.PermissionMapParseError(
+ "{0}:{1}:Number of classes must be positive: {2}".
+ format(permmapfile, line_num, entry[0]))
+
+ state = 2
+
+ elif state == 2:
+ if len(entry) != 3 or entry[0] != "class":
+ raise exception.PermissionMapParseError(
+ "{0}:{1}:Invalid class declaration: {2}".
+ format(permmapfile, line_num, entry))
+
+ class_name = str(entry[1])
+
+ try:
+ num_perms = int(entry[2])
+ except ValueError:
+ raise exception.PermissionMapParseError(
+ "{0}:{1}:Invalid number of permissions: {2}".
+ format(permmapfile, line_num, entry[2]))
+
+ if num_perms < 1:
+ raise exception.PermissionMapParseError(
+ "{0}:{1}:Number of permissions must be positive: {2}".
+ format(permmapfile, line_num, entry[2]))
+
+ class_count += 1
+ if class_count > num_classes:
+ raise exception.PermissionMapParseError(
+ "{0}:{1}:Extra class found: {2}".
+ format(permmapfile, line_num, class_name))
+
+ self.permmap[class_name] = dict()
+ perm_count = 0
+ state = 3
+
+ elif state == 3:
+ perm_name = str(entry[0])
+
+ flow_direction = str(entry[1])
+ if flow_direction not in self.valid_infoflow_directions:
+ raise exception.PermissionMapParseError(
+ "{0}:{1}:Invalid information flow direction: {2}".
+ format(permmapfile, line_num, entry[1]))
+
+ try:
+ weight = int(entry[2])
+ except ValueError:
+ raise exception.PermissionMapParseError(
+ "{0}:{1}:Invalid permission weight: {2}".
+ format(permmapfile, line_num, entry[2]))
+
+ if not self.min_weight <= weight <= self.max_weight:
+ raise exception.PermissionMapParseError(
+ "{0}:{1}:Permission weight must be {3}-{4}: {2}".
+ format(permmapfile, line_num, entry[2],
+ self.min_weight, self.max_weight))
+
+ self.permmap[class_name][perm_name] = {'direction': flow_direction,
+ 'weight': weight,
+ 'enabled': True}
+
+ perm_count += 1
+ if perm_count >= num_perms:
+ state = 2
+
+ def exclude_class(self, class_):
+ """
+ Exclude all permissions in an object class for calculating rule weights.
+
+ Parameter:
+ class_ The object class to exclude.
+
+ Exceptions:
+ UnmappedClass The specified object class is not mapped.
+ """
+
+ classname = str(class_)
+
+ try:
+ for perm in self.permmap[classname]:
+ self.permmap[classname][perm]['enabled'] = False
+ except KeyError:
+ raise exception.UnmappedClass("{0} is not mapped.".format(classname))
+
+ def exclude_permission(self, class_, permission):
+ """
+ Exclude a permission for calculating rule weights.
+
+ Parameter:
+ class_ The object class of the permission.
+ permission The permission name to exclude.
+
+ Exceptions:
+ UnmappedClass The specified object class is not mapped.
+ UnmappedPermission The specified permission is not mapped for the object class.
+ """
+ classname = str(class_)
+
+ if classname not in self.permmap:
+ raise exception.UnmappedClass("{0} is not mapped.".format(classname))
+
+ try:
+ self.permmap[classname][permission]['enabled'] = False
+ except KeyError:
+ raise exception.UnmappedPermission("{0}:{1} is not mapped.".
+ format(classname, permission))
+
+ def include_class(self, class_):
+ """
+ Include all permissions in an object class for calculating rule weights.
+
+ Parameter:
+ class_ The object class to include.
+
+ Exceptions:
+ UnmappedClass The specified object class is not mapped.
+ """
+
+ classname = str(class_)
+
+ try:
+ for perm in self.permmap[classname]:
+ self.permmap[classname][perm]['enabled'] = True
+ except KeyError:
+ raise exception.UnmappedClass("{0} is not mapped.".format(classname))
+
+ def include_permission(self, class_, permission):
+ """
+ Include a permission for calculating rule weights.
+
+ Parameter:
+ class_ The object class of the permission.
+ permission The permission name to include.
+
+ Exceptions:
+ UnmappedClass The specified object class is not mapped.
+ UnmappedPermission The specified permission is not mapped for the object class.
+ """
+
+ classname = str(class_)
+
+ if classname not in self.permmap:
+ raise exception.UnmappedClass("{0} is not mapped.".format(classname))
+
+ try:
+ self.permmap[classname][permission]['enabled'] = True
+ except KeyError:
+ raise exception.UnmappedPermission("{0}:{1} is not mapped.".
+ format(classname, permission))
+
+ def map_policy(self, policy):
+ """Create mappings for all classes and permissions in the specified policy."""
+ for class_ in policy.classes():
+ class_name = str(class_)
+
+ if class_name not in self.permmap:
+ self.log.info("Adding unmapped class {0} from {1}".format(class_name, policy))
+ self.permmap[class_name] = dict()
+
+ perms = class_.perms
+
+ try:
+ perms |= class_.common.perms
+ except policyrep.exception.NoCommon:
+ pass
+
+ for perm_name in perms:
+ if perm_name not in self.permmap[class_name]:
+ self.log.info("Adding unmapped permission {0} in {1} from {2}".
+ format(perm_name, class_name, policy))
+ self.permmap[class_name][perm_name] = {'direction': 'u',
+ 'weight': 1,
+ 'enabled': True}
+
+ def rule_weight(self, rule):
+ """
+ Get the type enforcement rule's information flow read and write weights.
+
+ Parameter:
+ rule A type enforcement rule.
+
+ Return: Tuple(read_weight, write_weight)
+ read_weight The type enforcement rule's read weight.
+ write_weight The type enforcement rule's write weight.
+ """
+
+ write_weight = 0
+ read_weight = 0
+ class_name = str(rule.tclass)
+
+ if rule.ruletype != 'allow':
+ raise exception.RuleTypeError("{0} rules cannot be used for calculating a weight".
+ format(rule.ruletype))
+
+ if class_name not in self.permmap:
+ raise exception.UnmappedClass("{0} is not mapped.".format(class_name))
+
+ # iterate over the permissions and determine the
+ # weight of the rule in each direction. The result
+ # is the largest-weight permission in each direction
+ for perm_name in rule.perms:
+ try:
+ mapping = self.permmap[class_name][perm_name]
+ except KeyError:
+ raise exception.UnmappedPermission("{0}:{1} is not mapped.".
+ format(class_name, perm_name))
+
+ if not mapping['enabled']:
+ continue
+
+ if mapping['direction'] == "r":
+ read_weight = max(read_weight, mapping['weight'])
+ elif mapping['direction'] == "w":
+ write_weight = max(write_weight, mapping['weight'])
+ elif mapping['direction'] == "b":
+ read_weight = max(read_weight, mapping['weight'])
+ write_weight = max(write_weight, mapping['weight'])
+
+ return (read_weight, write_weight)
+
+ def set_direction(self, class_, permission, direction):
+ """
+ Set the information flow direction of a permission.
+
+ Parameter:
+ class_ The object class of the permission.
+ permission The permission name.
+ direction The information flow direction of the permission (r/w/b/n/u).
+
+ Exceptions:
+ UnmappedClass The specified object class is not mapped.
+ UnmappedPermission The specified permission is not mapped for the object class.
+ """
+
+ if direction not in self.valid_infoflow_directions:
+ raise ValueError("Invalid information flow direction: {0}".format(direction))
+
+ classname = str(class_)
+
+ if classname not in self.permmap:
+ raise exception.UnmappedClass("{0} is not mapped.".format(classname))
+
+ try:
+ self.permmap[classname][permission]['direction'] = direction
+ except KeyError:
+ raise exception.UnmappedPermission("{0}:{1} is not mapped.".
+ format(classname, permission))
+
+ def set_weight(self, class_, permission, weight):
+ """
+ Set the weight of a permission.
+
+ Parameter:
+ class_ The object class of the permission.
+ permission The permission name.
+ weight The weight of the permission (1-10).
+
+ Exceptions:
+ UnmappedClass The specified object class is not mapped.
+ UnmappedPermission The specified permission is not mapped for the object class.
+ """
+
+ if not self.min_weight <= weight <= self.max_weight:
+ raise ValueError("Permission weights must be 1-10: {0}".format(weight))
+
+ classname = str(class_)
+
+ if classname not in self.permmap:
+ raise exception.UnmappedClass("{0} is not mapped.".format(classname))
+
+ try:
+ self.permmap[classname][permission]['weight'] = weight
+ except KeyError:
+ raise exception.UnmappedPermission("{0}:{1} is not mapped.".
+ format(classname, permission))
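
To make the three-state loader concrete, an illustrative perm_map excerpt and a usage sketch; the classes, directions, weights, and path are examples, not the shipped map:

    # perm_map format: class count, then a "class <name> <perm count>"
    # header, then one "<perm> <direction> <weight>" line per permission:
    #
    #   2
    #   class file 2
    #   read    r   10
    #   write   w   10
    #   class process 1
    #   signal  w    1

    from setools.permmap import PermissionMap

    pmap = PermissionMap("/path/to/perm_map")
    pmap.exclude_permission("file", "read")   # drop from weight calculations
    pmap.set_weight("process", "signal", 3)
    # For an allow rule r: read_w, write_w = pmap.rule_weight(r)
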
diff --git a/lib/python2.7/site-packages/setools/polcapquery.py b/lib/python2.7/site-packages/setools/polcapquery.py
new file mode 100644
index 0000000..e024b05
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/polcapquery.py
@@ -0,0 +1,47 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import logging
+
+from . import compquery
+
+
+class PolCapQuery(compquery.ComponentQuery):
+
+ """
+ Query SELinux policy capabilities.
+
+ Parameter:
+ policy The policy to query.
+
+ Keyword Parameters/Class attributes:
+ name The name of the policy capability to match.
+ name_regex If true, regular expression matching will
+ be used for matching the name.
+ """
+
+ def results(self):
+ """Generator which yields all matching policy capabilities."""
+ self.log.info("Generating results from {0.policy}".format(self))
+ self.log.debug("Name: {0.name!r}, regex: {0.name_regex}".format(self))
+
+ for cap in self.policy.polcaps():
+ if not self._match_name(cap):
+ continue
+
+ yield cap
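
A usage sketch (the policy path and capability name are illustrative):

    from setools.policyrep import SELinuxPolicy
    from setools.polcapquery import PolCapQuery

    p = SELinuxPolicy("/path/to/policy.29")
    for cap in PolCapQuery(p, name="network_peer_controls").results():
        print(cap)
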
diff --git a/lib/python2.7/site-packages/setools/policyrep/__init__.py b/lib/python2.7/site-packages/setools/policyrep/__init__.py
new file mode 100644
index 0000000..b03e524
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/policyrep/__init__.py
@@ -0,0 +1,568 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+# pylint: disable=too-many-public-methods
+#
+# Create a Python representation of the policy.
+# The idea is that this module provides convenient
+# abstractions and methods for accessing the policy
+# structures.
+import logging
+from itertools import chain
+from errno import ENOENT
+
+try:
+ import selinux
+except ImportError:
+ pass
+
+from . import qpol
+
+# The libqpol SWIG class is not quite natural for
+# Python: the policy is repeatedly referenced in the
+# function calls, which makes sense for C code
+# but not for Python code, so each object keeps
+# a reference to the policy for internal use.
+# This also makes sense since an object would only
+# be valid for the policy it comes from.
+
+# Exceptions
+from . import exception
+
+# Components
+from . import boolcond
+from . import default
+from . import mls
+from . import objclass
+from . import polcap
+from . import role
+from . import typeattr
+from . import user
+
+# Rules
+from . import mlsrule
+from . import rbacrule
+from . import terule
+
+# Constraints
+from . import constraint
+
+# In-policy Labeling
+from . import fscontext
+from . import initsid
+from . import netcontext
+
+
+class SELinuxPolicy(object):
+
+ """The complete SELinux policy."""
+
+ def __init__(self, policyfile=None):
+ """
+ Parameter:
+ policyfile Path to a policy to open.
+ """
+
+ self.log = logging.getLogger(self.__class__.__name__)
+ self.policy = None
+ self.filename = None
+
+ if policyfile:
+ self._load_policy(policyfile)
+ else:
+ try:
+ self._load_running_policy()
+ except NameError:
+ raise RuntimeError("Loading the running policy requires libselinux Python bindings")
+
+ def __repr__(self):
+ return "<SELinuxPolicy(\"{0}\")>".format(self.filename)
+
+ def __str__(self):
+ return self.filename
+
+ def __deepcopy__(self, memo):
+ # shallow copy as all of the members are immutable
+ newobj = SELinuxPolicy.__new__(SELinuxPolicy)
+ newobj.policy = self.policy
+ newobj.filename = self.filename
+ memo[id(self)] = newobj
+ return newobj
+
+ #
+ # Policy loading functions
+ #
+
+ def _load_policy(self, filename):
+ """Load the specified policy."""
+ self.log.info("Opening SELinux policy \"{0}\"".format(filename))
+
+ try:
+ self.policy = qpol.qpol_policy_factory(str(filename))
+ except SyntaxError as err:
+ raise exception.InvalidPolicy("Error opening policy file \"{0}\": {1}".
+ format(filename, err))
+
+ self.log.info("Successfully opened SELinux policy \"{0}\"".format(filename))
+ self.filename = filename
+
+ @staticmethod
+ def _potential_policies():
+ """Generate a list of potential policies to use."""
+ # Start with binary policies in the standard location
+ base_policy_path = selinux.selinux_binary_policy_path()
+ for version in range(qpol.QPOL_POLICY_MAX_VERSION, qpol.QPOL_POLICY_MIN_VERSION-1, -1):
+ yield "{0}.{1}".format(base_policy_path, version)
+
+ # Last chance, try selinuxfs. This is not first, to avoid
+ # holding kernel memory for a long time
+ if selinux.selinuxfs_exists():
+ yield selinux.selinux_current_policy_path()
+
+ def _load_running_policy(self):
+ """Try to load the current running policy."""
+ self.log.info("Attempting to locate current running policy.")
+
+ for filename in self._potential_policies():
+ try:
+ self._load_policy(filename)
+ except OSError as err:
+ if err.errno != ENOENT:
+ raise
+ else:
+ break
+ else:
+ raise RuntimeError("Unable to locate an SELinux policy to load.")
+
+ #
+ # Policy properties
+ #
+ @property
+ def handle_unknown(self):
+ """The handle unknown permissions setting (allow,deny,reject)"""
+ return self.policy.handle_unknown()
+
+ @property
+ def mls(self):
+ """(T/F) The policy has MLS enabled."""
+ return mls.enabled(self.policy)
+
+ @property
+ def version(self):
+ """The policy database version (e.g. v29)"""
+ return self.policy.version()
+
+ #
+ # Policy statistics
+ #
+
+ @property
+ def allow_count(self):
+ """The number of (type) allow rules."""
+ return self.policy.avrule_allow_count()
+
+ @property
+ def auditallow_count(self):
+ """The number of auditallow rules."""
+ return self.policy.avrule_auditallow_count()
+
+ @property
+ def boolean_count(self):
+ """The number of Booleans."""
+ return self.policy.bool_count()
+
+ @property
+ def category_count(self):
+ """The number of categories."""
+ return sum(1 for _ in self.categories())
+
+ @property
+ def class_count(self):
+ """The number of object classes."""
+ return self.policy.class_count()
+
+ @property
+ def common_count(self):
+ """The number of common permission sets."""
+ return self.policy.common_count()
+
+ @property
+ def conditional_count(self):
+ """The number of conditionals."""
+ return self.policy.cond_count()
+
+ @property
+ def constraint_count(self):
+ """The number of standard constraints."""
+ return sum(1 for c in self.constraints() if c.ruletype == "constrain")
+
+ @property
+ def dontaudit_count(self):
+ """The number of dontaudit rules."""
+ return self.policy.avrule_dontaudit_count()
+
+ @property
+ def fs_use_count(self):
+ """fs_use_* statements."""
+ return self.policy.fs_use_count()
+
+ @property
+ def genfscon_count(self):
+ """The number of genfscon statements."""
+ return self.policy.genfscon_count()
+
+ @property
+ def initialsids_count(self):
+ """The number of initial sid statements."""
+ return self.policy.isid_count()
+
+ @property
+ def level_count(self):
+ """The number of levels."""
+ return sum(1 for _ in self.levels())
+
+ @property
+ def mlsconstraint_count(self):
+ """The number of MLS constraints."""
+ return sum(1 for c in self.constraints() if c.ruletype == "mlsconstrain")
+
+ @property
+ def mlsvalidatetrans_count(self):
+ """The number of MLS validatetrans."""
+ return sum(1 for v in self.constraints() if v.ruletype == "mlsvalidatetrans")
+
+ @property
+ def netifcon_count(self):
+ """The number of netifcon statements."""
+ return self.policy.netifcon_count()
+
+ @property
+ def neverallow_count(self):
+ """The number of neverallow rules."""
+ return self.policy.avrule_neverallow_count()
+
+ @property
+ def nodecon_count(self):
+ """The number of nodecon statements."""
+ return self.policy.nodecon_count()
+
+ @property
+ def permission_count(self):
+ """The number of permissions."""
+ return sum(len(c.perms) for c in chain(self.commons(), self.classes()))
+
+ @property
+ def permissives_count(self):
+ """The number of permissive types."""
+ return self.policy.permissive_count()
+
+ @property
+ def polcap_count(self):
+ """The number of policy capabilities."""
+ return self.policy.polcap_count()
+
+ @property
+ def portcon_count(self):
+ """The number of portcon statements."""
+ return self.policy.portcon_count()
+
+ @property
+ def range_transition_count(self):
+ """The number of range_transition rules."""
+ return self.policy.range_trans_count()
+
+ @property
+ def role_count(self):
+ """The number of roles."""
+ return self.policy.role_count()
+
+ @property
+ def role_allow_count(self):
+ """The number of (role) allow rules."""
+ return self.policy.role_allow_count()
+
+ @property
+ def role_transition_count(self):
+ """The number of role_transition rules."""
+ return self.policy.role_trans_count()
+
+ @property
+ def type_attribute_count(self):
+ """The number of (type) attributes."""
+ return sum(1 for _ in self.typeattributes())
+
+ @property
+ def type_count(self):
+ """The number of types."""
+ return sum(1 for _ in self.types())
+
+ @property
+ def type_change_count(self):
+ """The number of type_change rules."""
+ return self.policy.terule_change_count()
+
+ @property
+ def type_member_count(self):
+ """The number of type_member rules."""
+ return self.policy.terule_member_count()
+
+ @property
+ def type_transition_count(self):
+ """The number of type_transition rules."""
+ return self.policy.terule_trans_count() + self.policy.filename_trans_count()
+
+ @property
+ def user_count(self):
+ """The number of users."""
+ return self.policy.user_count()
+
+ @property
+ def validatetrans_count(self):
+ """The number of validatetrans."""
+ return sum(1 for v in self.constraints() if v.ruletype == "validatetrans")
+
+ #
+ # Policy components lookup functions
+ #
+ def lookup_boolean(self, name):
+ """Look up a Boolean."""
+ return boolcond.boolean_factory(self.policy, name)
+
+ def lookup_class(self, name):
+ """Look up an object class."""
+ return objclass.class_factory(self.policy, name)
+
+ def lookup_common(self, name):
+ """Look up a common permission set."""
+ return objclass.common_factory(self.policy, name)
+
+ def lookup_initialsid(self, name):
+ """Look up an initial sid."""
+ return initsid.initialsid_factory(self.policy, name)
+
+ def lookup_level(self, level):
+ """Look up a MLS level."""
+ return mls.level_factory(self.policy, level)
+
+ def lookup_sensitivity(self, name):
+ """Look up a MLS sensitivity by name."""
+ return mls.sensitivity_factory(self.policy, name)
+
+ def lookup_range(self, range_):
+ """Look up a MLS range."""
+ return mls.range_factory(self.policy, range_)
+
+ def lookup_role(self, name):
+ """Look up a role by name."""
+ return role.role_factory(self.policy, name)
+
+ def lookup_type(self, name):
+ """Look up a type by name."""
+ return typeattr.type_factory(self.policy, name, deref=True)
+
+ def lookup_type_or_attr(self, name):
+ """Look up a type or type attribute by name."""
+ return typeattr.type_or_attr_factory(self.policy, name, deref=True)
+
+ def lookup_typeattr(self, name):
+ """Look up a type attribute by name."""
+ return typeattr.attribute_factory(self.policy, name)
+
+ def lookup_user(self, name):
+ """Look up a user by name."""
+ return user.user_factory(self.policy, name)
+
+ #
+ # Policy components generators
+ #
+
+ def bools(self):
+ """Generator which yields all Booleans."""
+ for bool_ in self.policy.bool_iter():
+ yield boolcond.boolean_factory(self.policy, bool_)
+
+ def categories(self):
+ """Generator which yields all MLS categories."""
+ for cat in self.policy.cat_iter():
+ try:
+ yield mls.category_factory(self.policy, cat)
+ except TypeError:
+ # libqpol unfortunately iterates over aliases too
+ pass
+
+ def classes(self):
+ """Generator which yields all object classes."""
+ for class_ in self.policy.class_iter():
+ yield objclass.class_factory(self.policy, class_)
+
+ def commons(self):
+ """Generator which yields all commons."""
+ for common in self.policy.common_iter():
+ yield objclass.common_factory(self.policy, common)
+
+ def defaults(self):
+ """Generator which yields all default_* statements."""
+ for default_ in self.policy.default_iter():
+ try:
+ for default_obj in default.default_factory(self.policy, default_):
+ yield default_obj
+ except exception.NoDefaults:
+ # qpol iterates over all classes. Handle case
+ # where a class has no default_* settings.
+ pass
+
+ def levels(self):
+ """Generator which yields all level declarations."""
+ for level in self.policy.level_iter():
+
+ try:
+ yield mls.level_decl_factory(self.policy, level)
+ except TypeError:
+ # libqpol unfortunately iterates over levels and sens aliases
+ pass
+
+ def polcaps(self):
+ """Generator which yields all policy capabilities."""
+ for cap in self.policy.polcap_iter():
+ yield polcap.polcap_factory(self.policy, cap)
+
+ def roles(self):
+ """Generator which yields all roles."""
+ for role_ in self.policy.role_iter():
+ yield role.role_factory(self.policy, role_)
+
+ def sensitivities(self):
+ """Generator which yields all sensitivities."""
+ # see mls.py for more info on why level_iter is used here.
+ for sens in self.policy.level_iter():
+ try:
+ yield mls.sensitivity_factory(self.policy, sens)
+ except TypeError:
+ # libqpol unfortunately iterates over sens and aliases
+ pass
+
+ def types(self):
+ """Generator which yields all types."""
+ for type_ in self.policy.type_iter():
+ try:
+ yield typeattr.type_factory(self.policy, type_)
+ except TypeError:
+ # libqpol unfortunately iterates over attributes and aliases
+ pass
+
+ def typeattributes(self):
+ """Generator which yields all (type) attributes."""
+ for type_ in self.policy.type_iter():
+ try:
+ yield typeattr.attribute_factory(self.policy, type_)
+ except TypeError:
+ # libqpol unfortunately iterates over attributes and aliases
+ pass
+
+ def users(self):
+ """Generator which yields all users."""
+ for user_ in self.policy.user_iter():
+ yield user.user_factory(self.policy, user_)
+
+ #
+ # Policy rules generators
+ #
+ def mlsrules(self):
+ """Generator which yields all MLS rules."""
+ for rule in self.policy.range_trans_iter():
+ yield mlsrule.mls_rule_factory(self.policy, rule)
+
+ def rbacrules(self):
+ """Generator which yields all RBAC rules."""
+ for rule in chain(self.policy.role_allow_iter(),
+ self.policy.role_trans_iter()):
+ yield rbacrule.rbac_rule_factory(self.policy, rule)
+
+ def terules(self):
+ """Generator which yields all type enforcement rules."""
+ for rule in chain(self.policy.avrule_iter(),
+ self.policy.terule_iter(),
+ self.policy.filename_trans_iter()):
+ yield terule.te_rule_factory(self.policy, rule)
+
+ #
+ # Policy rule type validators
+ #
+ @staticmethod
+ def validate_constraint_ruletype(types):
+ """Validate constraint types."""
+ constraint.validate_ruletype(types)
+
+ @staticmethod
+ def validate_mls_ruletype(types):
+ """Validate MLS rule types."""
+ mlsrule.validate_ruletype(types)
+
+ @staticmethod
+ def validate_rbac_ruletype(types):
+ """Validate RBAC rule types."""
+ rbacrule.validate_ruletype(types)
+
+ @staticmethod
+ def validate_te_ruletype(types):
+ """Validate type enforcement rule types."""
+ terule.validate_ruletype(types)
+
+ #
+ # Constraints generators
+ #
+
+ def constraints(self):
+ """Generator which yields all constraints (regular and MLS)."""
+ for constraint_ in chain(self.policy.constraint_iter(),
+ self.policy.validatetrans_iter()):
+
+ yield constraint.constraint_factory(self.policy, constraint_)
+
+ #
+ # In-policy Labeling statement generators
+ #
+ def fs_uses(self):
+ """Generator which yields all fs_use_* statements."""
+ for fs_use in self.policy.fs_use_iter():
+ yield fscontext.fs_use_factory(self.policy, fs_use)
+
+ def genfscons(self):
+ """Generator which yields all genfscon statements."""
+ for fscon in self.policy.genfscon_iter():
+ yield fscontext.genfscon_factory(self.policy, fscon)
+
+ def initialsids(self):
+ """Generator which yields all initial SID statements."""
+ for sid in self.policy.isid_iter():
+ yield initsid.initialsid_factory(self.policy, sid)
+
+ def netifcons(self):
+ """Generator which yields all netifcon statements."""
+ for ifcon in self.policy.netifcon_iter():
+ yield netcontext.netifcon_factory(self.policy, ifcon)
+
+ def nodecons(self):
+ """Generator which yields all nodecon statements."""
+ for node in self.policy.nodecon_iter():
+ yield netcontext.nodecon_factory(self.policy, node)
+
+ def portcons(self):
+ """Generator which yields all portcon statements."""
+ for port in self.policy.portcon_iter():
+ yield netcontext.portcon_factory(self.policy, port)
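
A usage sketch of the policy object (the path is illustrative; omitting it falls back to the running policy, which requires the libselinux Python bindings):

    from setools.policyrep import SELinuxPolicy

    p = SELinuxPolicy("/path/to/policy.29")

    print("version: {0}, MLS: {1}".format(p.version, p.mls))
    print("{0} allow rules".format(p.allow_count))

    for bool_ in p.bools():
        print(bool_.statement())   # e.g. "bool secure_mode false;"
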
diff --git a/lib/python2.7/site-packages/setools/policyrep/_qpol.so b/lib/python2.7/site-packages/setools/policyrep/_qpol.so
new file mode 100755
index 0000000..aaccf28
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/policyrep/_qpol.so
Binary files differ
diff --git a/lib/python2.7/site-packages/setools/policyrep/boolcond.py b/lib/python2.7/site-packages/setools/policyrep/boolcond.py
new file mode 100644
index 0000000..c3c0608
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/policyrep/boolcond.py
@@ -0,0 +1,167 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+from . import exception
+from . import qpol
+from . import symbol
+
+
+def boolean_factory(policy, name):
+ """Factory function for creating Boolean statement objects."""
+
+ if isinstance(name, Boolean):
+ assert name.policy == policy
+ return name
+ elif isinstance(name, qpol.qpol_bool_t):
+ return Boolean(policy, name)
+
+ try:
+ return Boolean(policy, qpol.qpol_bool_t(policy, str(name)))
+ except ValueError:
+ raise exception.InvalidBoolean("{0} is not a valid Boolean".format(name))
+
+
+def condexpr_factory(policy, name):
+ """Factory function for creating conditional expression objects."""
+
+ if not isinstance(name, qpol.qpol_cond_t):
+ raise TypeError("Conditional expressions cannot be looked up.")
+
+ return ConditionalExpr(policy, name)
+
+
+class Boolean(symbol.PolicySymbol):
+
+ """A Boolean."""
+
+ @property
+ def state(self):
+ """The default state of the Boolean."""
+ return bool(self.qpol_symbol.state(self.policy))
+
+ def statement(self):
+ """The policy statement."""
+ return "bool {0} {1};".format(self, str(self.state).lower())
+
+
+class ConditionalExpr(symbol.PolicySymbol):
+
+ """A conditional policy expression."""
+
+ _cond_expr_val_to_text = {
+ qpol.QPOL_COND_EXPR_NOT: "!",
+ qpol.QPOL_COND_EXPR_OR: "||",
+ qpol.QPOL_COND_EXPR_AND: "&&",
+ qpol.QPOL_COND_EXPR_XOR: "^",
+ qpol.QPOL_COND_EXPR_EQ: "==",
+ qpol.QPOL_COND_EXPR_NEQ: "!="}
+
+ _cond_expr_val_to_precedence = {
+ qpol.QPOL_COND_EXPR_NOT: 5,
+ qpol.QPOL_COND_EXPR_OR: 1,
+ qpol.QPOL_COND_EXPR_AND: 3,
+ qpol.QPOL_COND_EXPR_XOR: 2,
+ qpol.QPOL_COND_EXPR_EQ: 4,
+ qpol.QPOL_COND_EXPR_NEQ: 4}
+
+ def __contains__(self, other):
+ for expr_node in self.qpol_symbol.expr_node_iter(self.policy):
+ expr_node_type = expr_node.expr_type(self.policy)
+
+ if expr_node_type == qpol.QPOL_COND_EXPR_BOOL and other == \
+ boolean_factory(self.policy, expr_node.get_boolean(self.policy)):
+ return True
+
+ return False
+
+ def __str__(self):
+ # qpol representation is in postfix notation. This code
+ # converts it to infix notation. Parentheses are added
+ # to ensure correct expressions, though they may end up
+ # being overused. Set previous operator at start to the
+ # highest precedence (NOT) so if there is a single binary
+ # operator, no parentheses are output
+ stack = []
+ prev_op_precedence = self._cond_expr_val_to_precedence[qpol.QPOL_COND_EXPR_NOT]
+ for expr_node in self.qpol_symbol.expr_node_iter(self.policy):
+ expr_node_type = expr_node.expr_type(self.policy)
+
+ if expr_node_type == qpol.QPOL_COND_EXPR_BOOL:
+ # append the boolean name
+ nodebool = boolean_factory(
+ self.policy, expr_node.get_boolean(self.policy))
+ stack.append(str(nodebool))
+ elif expr_node_type == qpol.QPOL_COND_EXPR_NOT: # unary operator
+ operand = stack.pop()
+ operator = self._cond_expr_val_to_text[expr_node_type]
+ op_precedence = self._cond_expr_val_to_precedence[expr_node_type]
+
+ # NOT is the highest precedence, so only need
+ # parentheses if the operand is a subexpression
+ if isinstance(operand, list):
+ subexpr = [operator, "(", operand, ")"]
+ else:
+ subexpr = [operator, operand]
+
+ stack.append(subexpr)
+ prev_op_precedence = op_precedence
+ else:
+ operand1 = stack.pop()
+ operand2 = stack.pop()
+ operator = self._cond_expr_val_to_text[expr_node_type]
+ op_precedence = self._cond_expr_val_to_precedence[expr_node_type]
+
+ if prev_op_precedence > op_precedence:
+ # if previous operator is of higher precedence
+ # no parentheses are needed.
+ subexpr = [operand1, operator, operand2]
+ else:
+ subexpr = ["(", operand1, operator, operand2, ")"]
+
+ stack.append(subexpr)
+ prev_op_precedence = op_precedence
+
+ return self.__unwind_subexpression(stack)
+
+ def __unwind_subexpression(self, expr):
+ ret = []
+
+ # do a string.join on sublists (subexpressions)
+ for i in expr:
+ if isinstance(i, list):
+ ret.append(self.__unwind_subexpression(i))
+ else:
+ ret.append(i)
+
+ return ' '.join(ret)
+
+ @property
+ def booleans(self):
+ """The set of Booleans in the expression."""
+ bools = set()
+
+ for expr_node in self.qpol_symbol.expr_node_iter(self.policy):
+ expr_node_type = expr_node.expr_type(self.policy)
+
+ if expr_node_type == qpol.QPOL_COND_EXPR_BOOL:
+ bools.add(boolean_factory(self.policy, expr_node.get_boolean(self.policy)))
+
+ return bools
+
+ def statement(self):
+ raise exception.NoStatement
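
The postfix-to-infix conversion above can be illustrated standalone. A simplified sketch of the same idea, using plain strings instead of nested lists (it always parenthesizes the operand of NOT, where the original checks for a subexpression first):

    PRECEDENCE = {"!": 5, "==": 4, "!=": 4, "&&": 3, "^": 2, "||": 1}

    def postfix_to_infix(tokens):
        """Precedence-aware postfix -> infix conversion (simplified)."""
        stack = []
        prev = PRECEDENCE["!"]              # start at highest precedence
        for tok in tokens:
            if tok == "!":                  # unary operator
                stack.append("! ( {0} )".format(stack.pop()))
                prev = PRECEDENCE[tok]
            elif tok in PRECEDENCE:         # binary operator
                op2, op1 = stack.pop(), stack.pop()
                if prev > PRECEDENCE[tok]:
                    # operands came from tighter-binding operators:
                    # no parentheses needed
                    stack.append("{0} {1} {2}".format(op1, tok, op2))
                else:
                    stack.append("( {0} {1} {2} )".format(op1, tok, op2))
                prev = PRECEDENCE[tok]
            else:                           # operand: a Boolean name
                stack.append(tok)
        return stack.pop()

    print(postfix_to_infix(["a", "b", "&&", "c", "||"]))   # a && b || c
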
diff --git a/lib/python2.7/site-packages/setools/policyrep/constraint.py b/lib/python2.7/site-packages/setools/policyrep/constraint.py
new file mode 100644
index 0000000..9994c5b
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/policyrep/constraint.py
@@ -0,0 +1,297 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+from . import exception
+from . import qpol
+from . import role
+from . import symbol
+from . import objclass
+from . import typeattr
+from . import user
+
+
+def _is_mls(policy, sym):
+ """Determine if this is a regular or MLS constraint/validatetrans."""
+ # this can only be determined by inspecting the expression.
+ for expr_node in sym.expr_iter(policy):
+ sym_type = expr_node.sym_type(policy)
+ expr_type = expr_node.expr_type(policy)
+
+ if expr_type == qpol.QPOL_CEXPR_TYPE_ATTR and sym_type >= qpol.QPOL_CEXPR_SYM_L1L2:
+ return True
+
+ return False
+
+
+def validate_ruletype(types):
+ """Validate constraint rule types."""
+ for t in types:
+ if t not in ["constrain", "mlsconstrain", "validatetrans", "mlsvalidatetrans"]:
+ raise exception.InvalidConstraintType("{0} is not a valid constraint type.".format(t))
+
+
+def constraint_factory(policy, sym):
+ """Factory function for creating constraint objects."""
+
+ try:
+ if _is_mls(policy, sym):
+ if isinstance(sym, qpol.qpol_constraint_t):
+ return Constraint(policy, sym, "mlsconstrain")
+ else:
+ return Validatetrans(policy, sym, "mlsvalidatetrans")
+ else:
+ if isinstance(sym, qpol.qpol_constraint_t):
+ return Constraint(policy, sym, "constrain")
+ else:
+ return Validatetrans(policy, sym, "validatetrans")
+
+ except AttributeError:
+ raise TypeError("Constraints cannot be looked-up.")
+
+
+class BaseConstraint(symbol.PolicySymbol):
+
+ """Base class for constraint rules."""
+
+ _expr_type_to_text = {
+ qpol.QPOL_CEXPR_TYPE_NOT: "not",
+ qpol.QPOL_CEXPR_TYPE_AND: "and",
+ qpol.QPOL_CEXPR_TYPE_OR: "\n\tor"}
+
+ _expr_op_to_text = {
+ qpol.QPOL_CEXPR_OP_EQ: "==",
+ qpol.QPOL_CEXPR_OP_NEQ: "!=",
+ qpol.QPOL_CEXPR_OP_DOM: "dom",
+ qpol.QPOL_CEXPR_OP_DOMBY: "domby",
+ qpol.QPOL_CEXPR_OP_INCOMP: "incomp"}
+
+ _sym_to_text = {
+ qpol.QPOL_CEXPR_SYM_USER: "u1",
+ qpol.QPOL_CEXPR_SYM_ROLE: "r1",
+ qpol.QPOL_CEXPR_SYM_TYPE: "t1",
+ qpol.QPOL_CEXPR_SYM_USER + qpol.QPOL_CEXPR_SYM_TARGET: "u2",
+ qpol.QPOL_CEXPR_SYM_ROLE + qpol.QPOL_CEXPR_SYM_TARGET: "r2",
+ qpol.QPOL_CEXPR_SYM_TYPE + qpol.QPOL_CEXPR_SYM_TARGET: "t2",
+ qpol.QPOL_CEXPR_SYM_USER + qpol.QPOL_CEXPR_SYM_XTARGET: "u3",
+ qpol.QPOL_CEXPR_SYM_ROLE + qpol.QPOL_CEXPR_SYM_XTARGET: "r3",
+ qpol.QPOL_CEXPR_SYM_TYPE + qpol.QPOL_CEXPR_SYM_XTARGET: "t3",
+ qpol.QPOL_CEXPR_SYM_L1L2: "l1",
+ qpol.QPOL_CEXPR_SYM_L1H2: "l1",
+ qpol.QPOL_CEXPR_SYM_H1L2: "h1",
+ qpol.QPOL_CEXPR_SYM_H1H2: "h1",
+ qpol.QPOL_CEXPR_SYM_L1H1: "l1",
+ qpol.QPOL_CEXPR_SYM_L2H2: "l2",
+ qpol.QPOL_CEXPR_SYM_L1L2 + qpol.QPOL_CEXPR_SYM_TARGET: "l2",
+ qpol.QPOL_CEXPR_SYM_L1H2 + qpol.QPOL_CEXPR_SYM_TARGET: "h2",
+ qpol.QPOL_CEXPR_SYM_H1L2 + qpol.QPOL_CEXPR_SYM_TARGET: "l2",
+ qpol.QPOL_CEXPR_SYM_H1H2 + qpol.QPOL_CEXPR_SYM_TARGET: "h2",
+ qpol.QPOL_CEXPR_SYM_L1H1 + qpol.QPOL_CEXPR_SYM_TARGET: "h1",
+ qpol.QPOL_CEXPR_SYM_L2H2 + qpol.QPOL_CEXPR_SYM_TARGET: "h2"}
+
+ # Boolean operators
+ _expr_type_to_precedence = {
+ qpol.QPOL_CEXPR_TYPE_NOT: 3,
+ qpol.QPOL_CEXPR_TYPE_AND: 2,
+ qpol.QPOL_CEXPR_TYPE_OR: 1}
+
+ # Logical operators have the same precedence
+ _logical_op_precedence = 4
+
+ def __init__(self, policy, qpol_symbol, ruletype):
+ symbol.PolicySymbol.__init__(self, policy, qpol_symbol)
+ self.ruletype = ruletype
+
+ def __str__(self):
+ raise NotImplementedError
+
+ def _build_expression(self):
+ # qpol representation is in postfix notation. This code
+ # converts it to infix notation. Parentheses are added
+ # to ensure correct expressions, though they may end up
+ # being overused. Set previous operator at start to the
+ # highest precedence (op) so if there is a single binary
+ # operator, no parentheses are output
+
+ stack = []
+ prev_op_precedence = self._logical_op_precedence
+ for expr_node in self.qpol_symbol.expr_iter(self.policy):
+ op = expr_node.op(self.policy)
+ sym_type = expr_node.sym_type(self.policy)
+ expr_type = expr_node.expr_type(self.policy)
+
+ if expr_type == qpol.QPOL_CEXPR_TYPE_ATTR:
+ # logical operator with symbol (e.g. u1 == u2)
+ operand1 = self._sym_to_text[sym_type]
+ operand2 = self._sym_to_text[sym_type + qpol.QPOL_CEXPR_SYM_TARGET]
+ operator = self._expr_op_to_text[op]
+
+ stack.append([operand1, operator, operand2])
+
+ prev_op_precedence = self._logical_op_precedence
+ elif expr_type == qpol.QPOL_CEXPR_TYPE_NAMES:
+ # logical operator with type or attribute list (e.g. t1 == { spam_t eggs_t })
+ operand1 = self._sym_to_text[sym_type]
+ operator = self._expr_op_to_text[op]
+
+ names = list(expr_node.names_iter(self.policy))
+
+ if not names:
+ operand2 = "<empty set>"
+ elif len(names) == 1:
+ operand2 = names[0]
+ else:
+ operand2 = "{{ {0} }}".format(' '.join(names))
+
+ stack.append([operand1, operator, operand2])
+
+ prev_op_precedence = self._logical_op_precedence
+ elif expr_type == qpol.QPOL_CEXPR_TYPE_NOT:
+ # unary operator (not)
+ operand = stack.pop()
+ operator = self._expr_type_to_text[expr_type]
+
+ stack.append([operator, "(", operand, ")"])
+
+ prev_op_precedence = self._expr_type_to_precedence[expr_type]
+ else:
+ # binary operator (and/or)
+ operand1 = stack.pop()
+ operand2 = stack.pop()
+ operator = self._expr_type_to_text[expr_type]
+ op_precedence = self._expr_type_to_precedence[expr_type]
+
+ # if previous operator is of higher precedence
+ # no parentheses are needed.
+ if op_precedence < prev_op_precedence:
+ stack.append([operand1, operator, operand2])
+ else:
+ stack.append(["(", operand1, operator, operand2, ")"])
+
+ prev_op_precedence = op_precedence
+
+ return self.__unwind_subexpression(stack)
+
+ def _get_symbols(self, syms, factory):
+ """
+ Internal generator for getting users/roles/types in a constraint
+ expression. Symbols will be yielded multiple times if they appear
+ in the expression multiple times.
+
+ Parameters:
+ syms List of qpol symbol types.
+ factory The factory function related to these symbols.
+ """
+ for expr_node in self.qpol_symbol.expr_iter(self.policy):
+ sym_type = expr_node.sym_type(self.policy)
+ expr_type = expr_node.expr_type(self.policy)
+
+ if expr_type == qpol.QPOL_CEXPR_TYPE_NAMES and sym_type in syms:
+ for s in expr_node.names_iter(self.policy):
+ yield factory(self.policy, s)
+
+ def __unwind_subexpression(self, expr):
+ ret = []
+
+ # do a string.join on sublists (subexpressions)
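+ # e.g. ["(", ["u1", "==", "u2"], ")"] flattens to "( u1 == u2 )"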
+ for i in expr:
+ if isinstance(i, list):
+ ret.append(self.__unwind_subexpression(i))
+ else:
+ ret.append(i)
+
+ return ' '.join(ret)
+
+ # There is no levels property because specific
+ # levels cannot be used in expressions; only
+ # the l1, h1, etc. symbols can.
+
+ @property
+ def roles(self):
+ """The roles used in the expression."""
+ role_syms = [qpol.QPOL_CEXPR_SYM_ROLE,
+ qpol.QPOL_CEXPR_SYM_ROLE + qpol.QPOL_CEXPR_SYM_TARGET,
+ qpol.QPOL_CEXPR_SYM_ROLE + qpol.QPOL_CEXPR_SYM_XTARGET]
+
+ return set(self._get_symbols(role_syms, role.role_factory))
+
+ @property
+ def perms(self):
+ raise NotImplementedError
+
+ def statement(self):
+ return str(self)
+
+ @property
+ def tclass(self):
+ """Object class for this constraint."""
+ return objclass.class_factory(self.policy, self.qpol_symbol.object_class(self.policy))
+
+ @property
+ def types(self):
+ """The types and type attributes used in the expression."""
+ type_syms = [qpol.QPOL_CEXPR_SYM_TYPE,
+ qpol.QPOL_CEXPR_SYM_TYPE + qpol.QPOL_CEXPR_SYM_TARGET,
+ qpol.QPOL_CEXPR_SYM_TYPE + qpol.QPOL_CEXPR_SYM_XTARGET]
+
+ return set(self._get_symbols(type_syms, typeattr.type_or_attr_factory))
+
+ @property
+ def users(self):
+ """The users used in the expression."""
+ user_syms = [qpol.QPOL_CEXPR_SYM_USER,
+ qpol.QPOL_CEXPR_SYM_USER + qpol.QPOL_CEXPR_SYM_TARGET,
+ qpol.QPOL_CEXPR_SYM_USER + qpol.QPOL_CEXPR_SYM_XTARGET]
+
+ return set(self._get_symbols(user_syms, user.user_factory))
+
+
+class Constraint(BaseConstraint):
+
+ """A constraint rule (constrain/mlsconstrain)."""
+
+ def __str__(self):
+ rule_string = "{0.ruletype} {0.tclass} ".format(self)
+
+ perms = self.perms
+ if len(perms) > 1:
+ rule_string += "{{ {0} }} (\n".format(' '.join(perms))
+ else:
+ # convert to list since sets cannot be indexed
+ rule_string += "{0} (\n".format(list(perms)[0])
+
+ rule_string += "\t{0}\n);".format(self._build_expression())
+
+ return rule_string
+
+ @property
+ def perms(self):
+ """The constraint's permission set."""
+ return set(self.qpol_symbol.perm_iter(self.policy))
+
+
+class Validatetrans(BaseConstraint):
+
+ """A validatetrans rule (validatetrans/mlsvalidatetrans)."""
+
+ def __str__(self):
+ return "{0.ruletype} {0.tclass}\n\t{1}\n);".format(self, self._build_expression())
+
+ @property
+ def perms(self):
+ raise exception.ConstraintUseError("{0} rules do not have permissions.".
+ format(self.ruletype))
diff --git a/lib/python2.7/site-packages/setools/policyrep/context.py b/lib/python2.7/site-packages/setools/policyrep/context.py
new file mode 100644
index 0000000..f2f3fc7
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/policyrep/context.py
@@ -0,0 +1,68 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+from . import exception
+from . import qpol
+from . import symbol
+from . import user
+from . import role
+from . import typeattr
+from . import mls
+
+
+def context_factory(policy, name):
+ """Factory function for creating context objects."""
+
+ if not isinstance(name, qpol.qpol_context_t):
+ raise TypeError("Contexts cannot be looked-up.")
+
+ return Context(policy, name)
+
+
+class Context(symbol.PolicySymbol):
+
+ """A SELinux security context/security attribute."""
+
+ def __str__(self):
+ try:
+ return "{0.user}:{0.role}:{0.type_}:{0.range_}".format(self)
+ except exception.MLSDisabled:
+ return "{0.user}:{0.role}:{0.type_}".format(self)
+
+ @property
+ def user(self):
+ """The user portion of the context."""
+ return user.user_factory(self.policy, self.qpol_symbol.user(self.policy))
+
+ @property
+ def role(self):
+ """The role portion of the context."""
+ return role.role_factory(self.policy, self.qpol_symbol.role(self.policy))
+
+ @property
+ def type_(self):
+ """The type portion of the context."""
+ return typeattr.type_factory(self.policy, self.qpol_symbol.type_(self.policy))
+
+ @property
+ def range_(self):
+ """The MLS range of the context."""
+ return mls.range_factory(self.policy, self.qpol_symbol.range(self.policy))
+
+ def statement(self):
+ raise exception.NoStatement
diff --git a/lib/python2.7/site-packages/setools/policyrep/default.py b/lib/python2.7/site-packages/setools/policyrep/default.py
new file mode 100644
index 0000000..175b709
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/policyrep/default.py
@@ -0,0 +1,128 @@
+# Copyright 2014, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+from . import exception
+from . import symbol
+from . import objclass
+from . import qpol
+
+
+def default_factory(policy, sym):
+ """Factory generator for creating default_* statement objects."""
+
+ # The low level policy groups default_* settings by object class.
+ # Since each class can have up to four default_* statements,
+ # this factory function is a generator which yields up to
+ # four Default objects.
+
+ if not isinstance(sym, qpol.qpol_default_object_t):
+ raise NotImplementedError
+
+ # qpol will essentially iterate over all classes
+ # and emit None for classes that don't set a default.
+ if not sym.object_class(policy):
+ raise exception.NoDefaults
+
+ if sym.user_default(policy):
+ yield UserDefault(policy, sym)
+
+ if sym.role_default(policy):
+ yield RoleDefault(policy, sym)
+
+ if sym.type_default(policy):
+ yield TypeDefault(policy, sym)
+
+ if sym.range_default(policy):
+ yield RangeDefault(policy, sym)
+
+
+class Default(symbol.PolicySymbol):
+
+ """Base class for default_* statements."""
+
+ def __str__(self):
+ raise NotImplementedError
+
+ @property
+ def object_class(self):
+ """The object class."""
+ return objclass.class_factory(self.policy, self.qpol_symbol.object_class(self.policy))
+
+ @property
+ def default(self):
+ raise NotImplementedError
+
+ def statement(self):
+ return str(self)
+
+
+class UserDefault(Default):
+
+ """A default_user statement."""
+
+ def __str__(self):
+ return "default_user {0.object_class} {0.default};".format(self)
+
+ @property
+ def default(self):
+ """The default user location (source/target)."""
+ return self.qpol_symbol.user_default(self.policy)
+
+
+class RoleDefault(Default):
+
+ """A default_role statement."""
+
+ def __str__(self):
+ return "default_role {0.object_class} {0.default};".format(self)
+
+ @property
+ def default(self):
+ """The default role location (source/target)."""
+ return self.qpol_symbol.role_default(self.policy)
+
+
+class TypeDefault(Default):
+
+ """A default_type statement."""
+
+ def __str__(self):
+ return "default_type {0.object_class} {0.default};".format(self)
+
+ @property
+ def default(self):
+ """The default type location (source/target)."""
+ return self.qpol_symbol.type_default(self.policy)
+
+
+class RangeDefault(Default):
+
+ """A default_range statement."""
+
+ def __str__(self):
+ return "default_range {0.object_class} {0.default} {0.default_range};".format(self)
+
+ @property
+ def default(self):
+ """The default range location (source/target)."""
+ return self.qpol_symbol.range_default(self.policy).split()[0]
+
+ @property
+ def default_range(self):
+ """The default range setting (low/high/low_high)."""
+ return self.qpol_symbol.range_default(self.policy).split()[1]
diff --git a/lib/python2.7/site-packages/setools/policyrep/exception.py b/lib/python2.7/site-packages/setools/policyrep/exception.py
new file mode 100644
index 0000000..ce367c0
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/policyrep/exception.py
@@ -0,0 +1,248 @@
+# Copyright 2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+from ..exception import SEToolsException
+
+#
+# Policyrep base exception
+#
+
+
+class PolicyrepException(SEToolsException):
+
+ """Base class for all policyrep exceptions."""
+ pass
+
+
+#
+# General Policyrep exceptions
+#
+
+
+class InvalidPolicy(SyntaxError, PolicyrepException):
+
+ """Exception for invalid policy."""
+ pass
+
+
+class MLSDisabled(PolicyrepException):
+
+ """
+ Exception when MLS is disabled.
+ """
+ pass
+
+
+#
+# Invalid component exceptions
+#
+class InvalidSymbol(ValueError, PolicyrepException):
+
+ """
+ Base class for invalid symbols. Typically raised when looking
+ up an object that does not exist in the policy.
+ """
+ pass
+
+
+class InvalidBoolean(InvalidSymbol):
+
+ """Exception for invalid Booleans."""
+ pass
+
+
+class InvalidCategory(InvalidSymbol):
+
+ """Exception for invalid MLS categories."""
+ pass
+
+
+class InvalidClass(InvalidSymbol):
+
+ """Exception for invalid object classes."""
+ pass
+
+
+class InvalidCommon(InvalidSymbol):
+
+ """Exception for invalid common permission sets."""
+ pass
+
+
+class InvalidInitialSid(InvalidSymbol):
+
+ """Exception for invalid initial sids."""
+ pass
+
+
+class InvalidLevel(InvalidSymbol):
+
+ """
+ Exception for an invalid level.
+ """
+ pass
+
+
+class InvalidLevelDecl(InvalidSymbol):
+
+ """
+ Exception for an invalid level declaration.
+ """
+ pass
+
+
+class InvalidRange(InvalidSymbol):
+
+ """
+ Exception for an invalid range.
+ """
+ pass
+
+
+class InvalidRole(InvalidSymbol):
+
+ """Exception for invalid roles."""
+ pass
+
+
+class InvalidSensitivity(InvalidSymbol):
+
+ """
+ Exception for an invalid sensitivity.
+ """
+ pass
+
+
+class InvalidType(InvalidSymbol):
+
+ """Exception for invalid types and attributes."""
+ pass
+
+
+class InvalidUser(InvalidSymbol):
+
+ """Exception for invalid users."""
+ pass
+
+#
+# Rule type exceptions
+#
+
+
+class InvalidRuleType(InvalidSymbol):
+
+ """Exception for invalid rule types."""
+ pass
+
+
+class InvalidConstraintType(InvalidSymbol):
+
+ """Exception for invalid constraint types."""
+ # This is not a rule but is similar.
+ pass
+
+
+class InvalidMLSRuleType(InvalidRuleType):
+
+ """Exception for invalid MLS rule types."""
+ pass
+
+
+class InvalidRBACRuleType(InvalidRuleType):
+
+ """Exception for invalid RBAC rule types."""
+ pass
+
+
+class InvalidTERuleType(InvalidRuleType):
+
+ """Exception for invalid TE rule types."""
+ pass
+
+
+#
+# Object use errors
+#
+class SymbolUseError(PolicyrepException):
+
+ """
+ Base class for incorrectly using an object. Typically this is
+ for classes with strong similarities but slight variations in
+ functionality, e.g. allow vs. type_transition rules.
+ """
+ pass
+
+
+class RuleUseError(SymbolUseError):
+
+ """
+ Base class for incorrect parameters for a rule. For
+ example, trying to get the permissions of a rule that has no
+ permissions.
+ """
+ pass
+
+
+class ConstraintUseError(SymbolUseError):
+
+ """Exception when getting permissions from a validatetrans."""
+ pass
+
+
+class NoStatement(SymbolUseError):
+
+ """
+ Exception for objects that have no inherent statement, such
+ as conditional expressions and MLS ranges.
+ """
+ pass
+
+
+#
+# Other exceptions
+#
+class NoCommon(PolicyrepException):
+
+ """
+ Exception when a class does not inherit a common permission set.
+ """
+ pass
+
+
+class NoDefaults(InvalidSymbol):
+
+ """Exception for classes that have no default_* statements."""
+ pass
+
+
+class RuleNotConditional(PolicyrepException):
+
+ """
+ Exception when getting the conditional expression for rules
+ that are unconditional (not conditional).
+ """
+ pass
+
+
+class TERuleNoFilename(PolicyrepException):
+
+ """
+ Exception when getting the file name of a
+ type_transition rule that has no file name.
+ """
+ pass
diff --git a/lib/python2.7/site-packages/setools/policyrep/fscontext.py b/lib/python2.7/site-packages/setools/policyrep/fscontext.py
new file mode 100644
index 0000000..a17b0bc
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/policyrep/fscontext.py
@@ -0,0 +1,123 @@
+# Copyright 2014, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import stat
+
+from . import qpol
+from . import symbol
+from . import context
+
+
+def fs_use_factory(policy, name):
+ """Factory function for creating fs_use_* objects."""
+
+ if not isinstance(name, qpol.qpol_fs_use_t):
+ raise TypeError("fs_use_* cannot be looked-up.")
+
+ return FSUse(policy, name)
+
+
+def genfscon_factory(policy, name):
+ """Factory function for creating genfscon objects."""
+
+ if not isinstance(name, qpol.qpol_genfscon_t):
+ raise TypeError("Genfscons cannot be looked-up.")
+
+ return Genfscon(policy, name)
+
+
+class FSContext(symbol.PolicySymbol):
+
+ """Base class for in-policy labeling rules."""
+
+ def __str__(self):
+ raise NotImplementedError
+
+ @property
+ def fs(self):
+ """The filesystem type for this statement."""
+ return self.qpol_symbol.name(self.policy)
+
+ @property
+ def context(self):
+ """The context for this statement."""
+ return context.context_factory(self.policy, self.qpol_symbol.context(self.policy))
+
+ def statement(self):
+ return str(self)
+
+
+class Genfscon(FSContext):
+
+ """A genfscon statement."""
+
+ _filetype_to_text = {
+ 0: "",
+ stat.S_IFBLK: "-b",
+ stat.S_IFCHR: "-c",
+ stat.S_IFDIR: "-d",
+ stat.S_IFIFO: "-p",
+ stat.S_IFREG: "--",
+ stat.S_IFLNK: "-l",
+ stat.S_IFSOCK: "-s"}
+
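+ # Illustrative rendering (hypothetical context): a directory
+ # entry prints as "genfscon proc / -d system_u:object_r:proc_t:s0".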
+ def __str__(self):
+ return "genfscon {0.fs} {0.path} {1} {0.context}".format(
+ self, self._filetype_to_text[self.filetype])
+
+ def __eq__(self, other):
+ # Libqpol allocates new C objects in the
+ # genfscons iterator, so pointer comparison
+ # in the PolicySymbol object doesn't work.
+ try:
+ return (self.fs == other.fs and
+ self.path == other.path and
+ self.filetype == other.filetype and
+ self.context == other.context)
+ except AttributeError:
+ return str(self) == str(other)
+
+ @property
+ def filetype(self):
+ """The file type (e.g. stat.S_IFBLK) for this genfscon statement."""
+ return self.qpol_symbol.object_class(self.policy)
+
+ @property
+ def path(self):
+ """The path for this genfscon statement."""
+ return self.qpol_symbol.path(self.policy)
+
+
+class FSUse(FSContext):
+
+ """A fs_use_* statement."""
+
+ # There are more rule types, but modern SELinux
+ # only supports these three.
+ _ruletype_to_text = {
+ qpol.QPOL_FS_USE_XATTR: 'fs_use_xattr',
+ qpol.QPOL_FS_USE_TRANS: 'fs_use_trans',
+ qpol.QPOL_FS_USE_TASK: 'fs_use_task'}
+
+ def __str__(self):
+ return "{0.ruletype} {0.fs} {0.context};".format(self)
+
+ @property
+ def ruletype(self):
+ """The rule type for this fs_use_* statement."""
+ return self._ruletype_to_text[self.qpol_symbol.behavior(self.policy)]
diff --git a/lib/python2.7/site-packages/setools/policyrep/initsid.py b/lib/python2.7/site-packages/setools/policyrep/initsid.py
new file mode 100644
index 0000000..0197c74
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/policyrep/initsid.py
@@ -0,0 +1,50 @@
+# Copyright 2014, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+from . import exception
+from . import qpol
+from . import symbol
+from . import context
+
+
+def initialsid_factory(policy, name):
+ """Factory function for creating initial sid objects."""
+
+ if isinstance(name, InitialSID):
+ assert name.policy == policy
+ return name
+ elif isinstance(name, qpol.qpol_isid_t):
+ return InitialSID(policy, name)
+
+ try:
+ return InitialSID(policy, qpol.qpol_isid_t(policy, name))
+ except ValueError:
+ raise exception.InvalidInitialSid("{0} is not a valid initial sid".format(name))
+
+
+class InitialSID(symbol.PolicySymbol):
+
+ """An initial SID statement."""
+
+ @property
+ def context(self):
+ """The context for this initial SID."""
+ return context.context_factory(self.policy, self.qpol_symbol.context(self.policy))
+
+ def statement(self):
+ return "sid {0} {0.context}".format(self)
diff --git a/lib/python2.7/site-packages/setools/policyrep/mls.py b/lib/python2.7/site-packages/setools/policyrep/mls.py
new file mode 100644
index 0000000..2541704
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/policyrep/mls.py
@@ -0,0 +1,463 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+# pylint: disable=protected-access
+import itertools
+
+from . import exception
+from . import qpol
+from . import symbol
+
+# qpol does not expose an equivalent of a sensitivity declaration.
+# qpol_level_t is equivalent to the level declaration:
+# level s0:c0.c1023;
+
+# qpol_mls_level_t represents a level as used in contexts,
+# such as range_transitions or labeling statements such as
+# portcon and nodecon.
+
+# Here qpol_level_t is also used for MLSSensitivity
+# since it has the sensitivity name, dominance, and there
+# is a 1:1 correspondence between the sensitivity declarations
+# and level declarations.
+
+# Hashing has to be handled below because the qpol references,
+# normally used for a hash key, are not the same for multiple
+# instances of the same object (except for level decl).
+
+
+def enabled(policy):
+ """Determine if MLS is enabled."""
+ return policy.capability(qpol.QPOL_CAP_MLS)
+
+
+def category_factory(policy, sym):
+ """Factory function for creating MLS category objects."""
+
+ if not enabled(policy):
+ raise exception.MLSDisabled
+
+ if isinstance(sym, Category):
+ assert sym.policy == policy
+ return sym
+ elif isinstance(sym, qpol.qpol_cat_t):
+ if sym.isalias(policy):
+ raise TypeError("{0} is an alias".format(sym.name(policy)))
+
+ return Category(policy, sym)
+
+ try:
+ return Category(policy, qpol.qpol_cat_t(policy, str(sym)))
+ except ValueError:
+ raise exception.InvalidCategory("{0} is not a valid category".format(sym))
+
+
+def sensitivity_factory(policy, sym):
+ """Factory function for creating MLS sensitivity objects."""
+
+ if not enabled(policy):
+ raise exception.MLSDisabled
+
+ if isinstance(sym, Sensitivity):
+ assert sym.policy == policy
+ return sym
+ elif isinstance(sym, qpol.qpol_level_t):
+ if sym.isalias(policy):
+ raise TypeError("{0} is an alias".format(sym.name(policy)))
+
+ return Sensitivity(policy, sym)
+
+ try:
+ return Sensitivity(policy, qpol.qpol_level_t(policy, str(sym)))
+ except ValueError:
+ raise exception.InvalidSensitivity("{0} is not a valid sensitivity".format(sym))
+
+
+def level_factory(policy, sym):
+ """
+ Factory function for creating MLS level objects, e.g. levels
+ used in the contexts of labeling statements.
+ """
+
+ if not enabled(policy):
+ raise exception.MLSDisabled
+
+ if isinstance(sym, Level):
+ assert sym.policy == policy
+ return sym
+ elif isinstance(sym, qpol.qpol_mls_level_t):
+ return Level(policy, sym)
+
+ sens_split = str(sym).split(":")
+
+ sens = sens_split[0]
+ try:
+ semantic_level = qpol.qpol_semantic_level_t(policy, sens)
+ except ValueError:
+ raise exception.InvalidLevel("{0} is invalid ({1} is not a valid sensitivity)".
+ format(sym, sens))
+
+ try:
+ cats = sens_split[1]
+ except IndexError:
+ pass
+ else:
+ for group in cats.split(","):
+ catrange = group.split(".")
+
+ if len(catrange) == 2:
+ try:
+ semantic_level.add_cats(policy, catrange[0], catrange[1])
+ except ValueError:
+ raise exception.InvalidLevel(
+ "{0} is invalid ({1} is not a valid category range)".format(sym, group))
+ elif len(catrange) == 1:
+ try:
+ semantic_level.add_cats(policy, catrange[0], catrange[0])
+ except ValueError:
+ raise exception.InvalidLevel("{0} is invalid ({1} is not a valid category)".
+ format(sym, group))
+ else:
+ raise exception.InvalidLevel("{0} is invalid (level parsing error)".format(sym))
+
+ # convert to level object
+ try:
+ policy_level = qpol.qpol_mls_level_t(policy, semantic_level)
+ except ValueError:
+ raise exception.InvalidLevel(
+ "{0} is invalid (one or more categories are not associated with the sensitivity)".
+ format(sym))
+
+ return Level(policy, policy_level)
+
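+ # A minimal usage sketch (hypothetical policy p):
+ # level_factory(p, "s0") -> Level for the bare sensitivity s0
+ # level_factory(p, "s0:c0.c3,c5") -> s0 with categories c0-c3 and c5
+ # Invalid sensitivities, categories, or associations raise InvalidLevel.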
+
+def level_decl_factory(policy, sym):
+ """
+ Factory function for creating MLS level declaration (level
+ statement) objects. Lookups are only by sensitivity name.
+ """
+
+ if not enabled(policy):
+ raise exception.MLSDisabled
+
+ if isinstance(sym, LevelDecl):
+ assert sym.policy == policy
+ return sym
+ elif isinstance(sym, qpol.qpol_level_t):
+ if sym.isalias(policy):
+ raise TypeError("{0} is an alias".format(sym.name(policy)))
+
+ return LevelDecl(policy, sym)
+
+ try:
+ return LevelDecl(policy, qpol.qpol_level_t(policy, str(sym)))
+ except ValueError:
+ raise exception.InvalidLevelDecl("{0} is not a valid sensitivity".format(sym))
+
+
+def range_factory(policy, sym):
+ """Factory function for creating MLS range objects."""
+
+ if not enabled(policy):
+ raise exception.MLSDisabled
+
+ if isinstance(sym, Range):
+ assert sym.policy == policy
+ return sym
+ elif isinstance(sym, qpol.qpol_mls_range_t):
+ return Range(policy, sym)
+
+ # build range:
+ levels = str(sym).split("-")
+
+ # strip() levels to handle ranges with spaces in them,
+ # e.g. s0:c1 - s0:c0.c255
+ try:
+ low = level_factory(policy, levels[0].strip())
+ except exception.InvalidLevel as ex:
+ raise exception.InvalidRange("{0} is not a valid range ({1}).".format(sym, ex))
+
+ try:
+ high = level_factory(policy, levels[1].strip())
+ except exception.InvalidLevel as ex:
+ raise exception.InvalidRange("{0} is not a valid range ({1}).".format(sym, ex))
+ except IndexError:
+ high = low
+
+ # convert to range object
+ try:
+ policy_range = qpol.qpol_mls_range_t(policy, low.qpol_symbol, high.qpol_symbol)
+ except ValueError:
+ raise exception.InvalidRange("{0} is not a valid range ({1} is not dominated by {2})".
+ format(sym, low, high))
+
+ return Range(policy, policy_range)
+
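+ # A minimal usage sketch (hypothetical policy p):
+ # range_factory(p, "s0") -> single-level range (low == high)
+ # range_factory(p, "s0 - s0:c0.c1023") -> low s0, high s0:c0.c1023
+ # A high level that does not dominate the low level raises InvalidRange.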
+
+class BaseMLSComponent(symbol.PolicySymbol):
+
+ """Base class for sensitivities and categories."""
+
+ @property
+ def _value(self):
+ """
+ The value of the component.
+
+ This is a low-level policy detail exposed for internal use only.
+ """
+ return self.qpol_symbol.value(self.policy)
+
+ def aliases(self):
+ """Generator that yields all aliases for this category."""
+
+ for alias in self.qpol_symbol.alias_iter(self.policy):
+ yield alias
+
+
+class Category(BaseMLSComponent):
+
+ """An MLS category."""
+
+ def statement(self):
+ aliases = list(self.aliases())
+ stmt = "category {0}".format(self)
+ if aliases:
+ if len(aliases) > 1:
+ stmt += " alias {{ {0} }}".format(' '.join(aliases))
+ else:
+ stmt += " alias {0}".format(aliases[0])
+ stmt += ";"
+ return stmt
+
+
+class Sensitivity(BaseMLSComponent):
+
+ """An MLS sensitivity"""
+
+ def __eq__(self, other):
+ try:
+ return self._value == other._value
+ except AttributeError:
+ return str(self) == str(other)
+
+ def __ge__(self, other):
+ return self._value >= other._value
+
+ def __gt__(self, other):
+ return self._value > other._value
+
+ def __le__(self, other):
+ return self._value <= other._value
+
+ def __lt__(self, other):
+ return self._value < other._value
+
+ def statement(self):
+ aliases = list(self.aliases())
+ stmt = "sensitivity {0}".format(self)
+ if aliases:
+ if len(aliases) > 1:
+ stmt += " alias {{ {0} }}".format(' '.join(aliases))
+ else:
+ stmt += " alias {0}".format(aliases[0])
+ stmt += ";"
+ return stmt
+
+
+class BaseMLSLevel(symbol.PolicySymbol):
+
+ """Base class for MLS levels."""
+
+ def __str__(self):
+ lvl = str(self.sensitivity)
+
+ # sort by policy declaration order
+ cats = sorted(self.categories(), key=lambda k: k._value)
+
+ if cats:
+ # generate short category notation
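+ # Runs of consecutive category values share a constant key
+ # (value - running count) under groupby, so e.g. the set
+ # c0,c1,c2,c5 renders as "c0.c2,c5".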
+ shortlist = []
+ for _, i in itertools.groupby(cats, key=lambda k,
+ c=itertools.count(): k._value - next(c)):
+ group = list(i)
+ if len(group) > 1:
+ shortlist.append("{0}.{1}".format(group[0], group[-1]))
+ else:
+ shortlist.append(str(group[0]))
+
+ lvl += ":" + ','.join(shortlist)
+
+ return lvl
+
+ @property
+ def sensitivity(self):
+ raise NotImplementedError
+
+ def categories(self):
+ """
+ Generator that yields all individual categories for this level.
+ All categories are yielded individually, not in a compact
+ notation such as c0.c255.
+ """
+
+ for cat in self.qpol_symbol.cat_iter(self.policy):
+ yield category_factory(self.policy, cat)
+
+
+class LevelDecl(BaseMLSLevel):
+
+ """
+ The declaration statement for MLS levels, e.g.:
+
+ level s7:c0.c1023;
+ """
+ # The comparisons below are based only on sensitivity
+ # dominance since, in this context, the allowable
+ # category set is being defined for the level.
+ # The object type is asserted because a level declaration
+ # cannot be compared to a Level instance.
+
+ def __eq__(self, other):
+ assert not isinstance(other, Level), "Levels cannot be compared to level declarations"
+
+ try:
+ return self.sensitivity == other.sensitivity
+ except AttributeError:
+ return str(self) == str(other)
+
+ def __ge__(self, other):
+ assert not isinstance(other, Level), "Levels cannot be compared to level declarations"
+ return self.sensitivity >= other.sensitivity
+
+ def __gt__(self, other):
+ assert not isinstance(other, Level), "Levels cannot be compared to level declarations"
+ return self.sensitivity > other.sensitivity
+
+ def __le__(self, other):
+ assert not isinstance(other, Level), "Levels cannot be compared to level declarations"
+ return self.sensitivity <= other.sensitivity
+
+ def __lt__(self, other):
+ assert not isinstance(other, Level), "Levels cannot be compared to level declarations"
+ return self.sensitivity < other.sensitivity
+
+ @property
+ def sensitivity(self):
+ """The sensitivity of the level."""
+ # since the qpol symbol for levels is also used for
+ # MLSSensitivity objects, use self's qpol symbol
+ return sensitivity_factory(self.policy, self.qpol_symbol)
+
+ def statement(self):
+ return "level {0};".format(self)
+
+
+class Level(BaseMLSLevel):
+
+ """An MLS level used in contexts."""
+
+ def __hash__(self):
+ return hash(str(self))
+
+ def __eq__(self, other):
+ try:
+ othercats = set(other.categories())
+ except AttributeError:
+ return str(self) == str(other)
+ else:
+ selfcats = set(self.categories())
+ return self.sensitivity == other.sensitivity and selfcats == othercats
+
+ def __ge__(self, other):
+ """Dom operator."""
+ selfcats = set(self.categories())
+ othercats = set(other.categories())
+ return self.sensitivity >= other.sensitivity and selfcats >= othercats
+
+ def __gt__(self, other):
+ selfcats = set(self.categories())
+ othercats = set(other.categories())
+ return ((self.sensitivity > other.sensitivity and selfcats >= othercats) or
+ (self.sensitivity >= other.sensitivity and selfcats > othercats))
+
+ def __le__(self, other):
+ """Domby operator."""
+ selfcats = set(self.categories())
+ othercats = set(other.categories())
+ return self.sensitivity <= other.sensitivity and selfcats <= othercats
+
+ def __lt__(self, other):
+ selfcats = set(self.categories())
+ othercats = set(other.categories())
+ return ((self.sensitivity < other.sensitivity and selfcats <= othercats) or
+ (self.sensitivity <= other.sensitivity and selfcats < othercats))
+
+ def __xor__(self, other):
+ """Incomp operator."""
+ return not (self >= other or self <= other)
+
+ @property
+ def sensitivity(self):
+ """The sensitivity of the level."""
+ return sensitivity_factory(self.policy, self.qpol_symbol.sens_name(self.policy))
+
+ def statement(self):
+ raise exception.NoStatement
+
+
+class Range(symbol.PolicySymbol):
+
+ """An MLS range"""
+
+ def __str__(self):
+ high = self.high
+ low = self.low
+ if high == low:
+ return str(low)
+
+ return "{0} - {1}".format(low, high)
+
+ def __hash__(self):
+ return hash(str(self))
+
+ def __eq__(self, other):
+ try:
+ return self.low == other.low and self.high == other.high
+ except AttributeError:
+ # remove all spaces in the string representations
+ # to handle cases where the other object does not
+ # have spaces around the '-'
+ other_str = str(other).replace(" ", "")
+ self_str = str(self).replace(" ", "")
+ return self_str == other_str
+
+ def __contains__(self, other):
+ return self.low <= other <= self.high
+
+ @property
+ def high(self):
+ """The high end/clearance level of this range."""
+ return level_factory(self.policy, self.qpol_symbol.high_level(self.policy))
+
+ @property
+ def low(self):
+ """The low end/current level of this range."""
+ return level_factory(self.policy, self.qpol_symbol.low_level(self.policy))
+
+ def statement(self):
+ raise exception.NoStatement
diff --git a/lib/python2.7/site-packages/setools/policyrep/mlsrule.py b/lib/python2.7/site-packages/setools/policyrep/mlsrule.py
new file mode 100644
index 0000000..5c91c59
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/policyrep/mlsrule.py
@@ -0,0 +1,62 @@
+# Copyright 2014, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+from . import exception
+from . import qpol
+from . import rule
+from . import typeattr
+from . import mls
+
+
+def mls_rule_factory(policy, symbol):
+ """Factory function for creating MLS rule objects."""
+ if not isinstance(symbol, qpol.qpol_range_trans_t):
+ raise TypeError("MLS rules cannot be looked-up.")
+
+ return MLSRule(policy, symbol)
+
+
+def validate_ruletype(types):
+ """Validate MLS rule types."""
+ for t in types:
+ if t not in ["range_transition"]:
+ raise exception.InvalidMLSRuleType("{0} is not a valid MLS rule type.".format(t))
+
+
+class MLSRule(rule.PolicyRule):
+
+ """An MLS rule."""
+
+ def __str__(self):
+ # TODO: If we ever get more MLS rules, fix this format.
+ return "range_transition {0.source} {0.target}:{0.tclass} {0.default};".format(self)
+
+ @property
+ def source(self):
+ """The rule's source type/attribute."""
+ return typeattr.type_or_attr_factory(self.policy, self.qpol_symbol.source_type(self.policy))
+
+ @property
+ def target(self):
+ """The rule's target type/attribute."""
+ return typeattr.type_or_attr_factory(self.policy, self.qpol_symbol.target_type(self.policy))
+
+ @property
+ def default(self):
+ """The rule's default range."""
+ return mls.range_factory(self.policy, self.qpol_symbol.range(self.policy))
diff --git a/lib/python2.7/site-packages/setools/policyrep/netcontext.py b/lib/python2.7/site-packages/setools/policyrep/netcontext.py
new file mode 100644
index 0000000..5aeed5c
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/policyrep/netcontext.py
@@ -0,0 +1,167 @@
+# Copyright 2014, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import socket
+from collections import namedtuple
+
+from . import qpol
+from . import symbol
+from . import context
+
+port_range = namedtuple("port_range", ["low", "high"])
+
+
+def netifcon_factory(policy, name):
+ """Factory function for creating netifcon objects."""
+
+ if not isinstance(name, qpol.qpol_netifcon_t):
+ raise NotImplementedError
+
+ return Netifcon(policy, name)
+
+
+def nodecon_factory(policy, name):
+ """Factory function for creating nodecon objects."""
+
+ if not isinstance(name, qpol.qpol_nodecon_t):
+ raise NotImplementedError
+
+ return Nodecon(policy, name)
+
+
+def portcon_factory(policy, name):
+ """Factory function for creating portcon objects."""
+
+ if not isinstance(name, qpol.qpol_portcon_t):
+ raise NotImplementedError
+
+ return Portcon(policy, name)
+
+
+class NetContext(symbol.PolicySymbol):
+
+ """Base class for in-policy network labeling rules."""
+
+ def __str__(self):
+ raise NotImplementedError
+
+ @property
+ def context(self):
+ """The context for this statement."""
+ return context.context_factory(self.policy, self.qpol_symbol.context(self.policy))
+
+ def statement(self):
+ return str(self)
+
+
+class Netifcon(NetContext):
+
+ """A netifcon statement."""
+
+ def __str__(self):
+ return "netifcon {0.netif} {0.context} {0.packet}".format(self)
+
+ @property
+ def netif(self):
+ """The network interface name."""
+ return self.qpol_symbol.name(self.policy)
+
+ @property
+ def context(self):
+ """The context for the interface."""
+ return context.context_factory(self.policy, self.qpol_symbol.if_con(self.policy))
+
+ @property
+ def packet(self):
+ """The context for the packets."""
+ return context.context_factory(self.policy, self.qpol_symbol.msg_con(self.policy))
+
+
+class Nodecon(NetContext):
+
+ """A nodecon statement."""
+
+ def __str__(self):
+ return "nodecon {0.address} {0.netmask} {0.context}".format(self)
+
+ def __eq__(self, other):
+ # Libqpol allocates new C objects in the
+ # nodecons iterator, so pointer comparison
+ # in the PolicySymbol object doesn't work.
+ try:
+ return (self.address == other.address and
+ self.netmask == other.netmask and
+ self.context == other.context)
+ except AttributeError:
+ return (str(self) == str(other))
+
+ @property
+ def ip_version(self):
+ """
+ The IP version for the nodecon (socket.AF_INET or
+ socket.AF_INET6).
+ """
+ return self.qpol_symbol.protocol(self.policy)
+
+ @property
+ def address(self):
+ """The network address for the nodecon."""
+ return self.qpol_symbol.addr(self.policy)
+
+ @property
+ def netmask(self):
+ """The network mask for the nodecon."""
+ return self.qpol_symbol.mask(self.policy)
+
+
+class Portcon(NetContext):
+
+ """A portcon statement."""
+
+ _proto_to_text = {socket.IPPROTO_TCP: 'tcp',
+ socket.IPPROTO_UDP: 'udp'}
+
+ def __str__(self):
+ low, high = self.ports
+ proto = self._proto_to_text[self.protocol]
+
+ if low == high:
+ return "portcon {0} {1} {2}".format(proto, low, self.context)
+ else:
+ return "portcon {0} {1}-{2} {3}".format(proto, low, high, self.context)
+
+ @property
+ def protocol(self):
+ """
+ The protocol number for the portcon (socket.IPPROTO_TCP
+ or socket.IPPROTO_UDP).
+ """
+ return self.qpol_symbol.protocol(self.policy)
+
+ @property
+ def ports(self):
+ """
+ The port range for this portcon.
+
+ Return: Tuple(low, high)
+ low The low port of the range.
+ high The high port of the range.
+ """
+ low = self.qpol_symbol.low_port(self.policy)
+ high = self.qpol_symbol.high_port(self.policy)
+ return port_range(low, high)
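+
+ # Since port_range is a namedtuple, callers can unpack it, e.g.
+ # low, high = portcon.ports; a statement like
+ # "portcon tcp 80 <context>" has low == high == 80 (illustrative).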
diff --git a/lib/python2.7/site-packages/setools/policyrep/objclass.py b/lib/python2.7/site-packages/setools/policyrep/objclass.py
new file mode 100644
index 0000000..bf9a553
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/policyrep/objclass.py
@@ -0,0 +1,110 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+from . import exception
+from . import symbol
+from . import qpol
+
+
+def common_factory(policy, name):
+ """Factory function for creating common permission set objects."""
+
+ if isinstance(name, Common):
+ assert name.policy == policy
+ return name
+ elif isinstance(name, qpol.qpol_common_t):
+ return Common(policy, name)
+
+ try:
+ return Common(policy, qpol.qpol_common_t(policy, str(name)))
+ except ValueError:
+ raise exception.InvalidCommon("{0} is not a valid common".format(name))
+
+
+def class_factory(policy, name):
+ """Factory function for creating object class objects."""
+
+ if isinstance(name, ObjClass):
+ assert name.policy == policy
+ return name
+ elif isinstance(name, qpol.qpol_class_t):
+ return ObjClass(policy, name)
+
+ try:
+ return ObjClass(policy, qpol.qpol_class_t(policy, str(name)))
+ except ValueError:
+ raise exception.InvalidClass("{0} is not a valid object class".format(name))
+
+
+class Common(symbol.PolicySymbol):
+
+ """A common permission set."""
+
+ def __contains__(self, other):
+ return other in self.perms
+
+ @property
+ def perms(self):
+ """The list of the common's permissions."""
+ return set(self.qpol_symbol.perm_iter(self.policy))
+
+ def statement(self):
+ return "common {0}\n{{\n\t{1}\n}}".format(self, '\n\t'.join(self.perms))
+
+
+class ObjClass(Common):
+
+ """An object class."""
+
+ def __contains__(self, other):
+ try:
+ if other in self.common.perms:
+ return True
+ except exception.NoCommon:
+ pass
+
+ return other in self.perms
+
+ @property
+ def common(self):
+ """
+ The common that the object class inherits.
+
+ Exceptions:
+ NoCommon The object class does not inherit a common.
+ """
+
+ try:
+ return common_factory(self.policy, self.qpol_symbol.common(self.policy))
+ except ValueError:
+ raise exception.NoCommon("{0} does not inherit a common.".format(self))
+
+ def statement(self):
+ stmt = "class {0}\n".format(self)
+
+ try:
+ stmt += "inherits {0}\n".format(self.common)
+ except exception.NoCommon:
+ pass
+
+ # a class that inherits may not have additional permissions
+ perms = self.perms
+ if len(perms) > 0:
+ stmt += "{{\n\t{0}\n}}".format('\n\t'.join(perms))
+
+ return stmt
diff --git a/lib/python2.7/site-packages/setools/policyrep/polcap.py b/lib/python2.7/site-packages/setools/policyrep/polcap.py
new file mode 100644
index 0000000..8ab164d
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/policyrep/polcap.py
@@ -0,0 +1,40 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+from . import qpol
+from . import symbol
+
+
+def polcap_factory(policy, name):
+ """Factory function for creating policy capability objects."""
+
+ if isinstance(name, PolicyCapability):
+ assert name.policy == policy
+ return name
+ elif isinstance(name, qpol.qpol_polcap_t):
+ return PolicyCapability(policy, name)
+ else:
+ raise TypeError("Policy capabilities cannot be looked up.")
+
+
+class PolicyCapability(symbol.PolicySymbol):
+
+ """A policy capability."""
+
+ def statement(self):
+ return "policycap {0};".format(self)
diff --git a/lib/python2.7/site-packages/setools/policyrep/qpol.py b/lib/python2.7/site-packages/setools/policyrep/qpol.py
new file mode 100644
index 0000000..97e602b
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/policyrep/qpol.py
@@ -0,0 +1,1114 @@
+# This file was automatically generated by SWIG (http://www.swig.org).
+# Version 2.0.11
+#
+# Do not make changes to this file unless you know what you are doing--modify
+# the SWIG interface file instead.
+
+
+
+
+
+from sys import version_info
+if version_info >= (2,6,0):
+ def swig_import_helper():
+ from os.path import dirname
+ import imp
+ fp = None
+ try:
+ fp, pathname, description = imp.find_module('_qpol', [dirname(__file__)])
+ except ImportError:
+ import _qpol
+ return _qpol
+ if fp is not None:
+ try:
+ _mod = imp.load_module('_qpol', fp, pathname, description)
+ finally:
+ fp.close()
+ return _mod
+ _qpol = swig_import_helper()
+ del swig_import_helper
+else:
+ import _qpol
+del version_info
+try:
+ _swig_property = property
+except NameError:
+ pass # Python < 2.2 doesn't have 'property'.
+def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
+ if (name == "thisown"): return self.this.own(value)
+ if (name == "this"):
+ if type(value).__name__ == 'SwigPyObject':
+ self.__dict__[name] = value
+ return
+ method = class_type.__swig_setmethods__.get(name,None)
+ if method: return method(self,value)
+ if (not static):
+ self.__dict__[name] = value
+ else:
+ raise AttributeError("You cannot add attributes to %s" % self)
+
+def _swig_setattr(self,class_type,name,value):
+ return _swig_setattr_nondynamic(self,class_type,name,value,0)
+
+def _swig_getattr(self,class_type,name):
+ if (name == "thisown"): return self.this.own()
+ method = class_type.__swig_getmethods__.get(name,None)
+ if method: return method(self)
+ raise AttributeError(name)
+
+def _swig_repr(self):
+ try: strthis = "proxy of " + self.this.__repr__()
+ except: strthis = ""
+ return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
+
+try:
+ _object = object
+ _newclass = 1
+except AttributeError:
+ class _object : pass
+ _newclass = 0
+
+
+
+def to_str(*args):
+ return _qpol.to_str(*args)
+to_str = _qpol.to_str
+import logging
+from functools import wraps
+
+def QpolGenerator(cast):
+ """
+ A decorator which converts qpol iterators into Python generators.
+
+ Qpol iterators use void* to be generic about their contents.
+ The purpose of the _from_void functions below is to wrap
+ the pointer casting, hence the "cast" variable name here.
+
+ Decorator parameter:
+ cast A wrapper function which casts the qpol iterator return pointer
+ to the proper C data type pointer. The Python function
+ reference to the C Python extension is used, for example:
+
+ @QpolGenerator(_qpol.qpol_type_from_void)
+ """
+
+ def decorate(func):
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ qpol_iter = func(*args)
+ while not qpol_iter.isend():
+ yield cast(qpol_iter.item())
+ qpol_iter.next_()
+
+ return wrapper
+ return decorate
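+
+ # A hedged usage sketch: a wrapped method such as
+ # qpol_policy_t.type_iter below can be consumed as a plain Python
+ # generator, e.g. names = [t.name(p) for t in p.type_iter()],
+ # instead of driving item()/next_()/isend() by hand.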
+
+def qpol_logger(level, msg):
+ """Log qpol messages via Python logging."""
+ logging.getLogger("libqpol").debug(msg)
+
+def qpol_policy_factory(path):
+ """Factory function for qpol policy objects."""
+ # The main purpose here is to hook in the
+ # above logger callback.
+ return qpol_policy_t(path, 0, qpol_logger)
+
+QPOL_POLICY_OPTION_NO_NEVERALLOWS = _qpol.QPOL_POLICY_OPTION_NO_NEVERALLOWS
+QPOL_POLICY_OPTION_NO_RULES = _qpol.QPOL_POLICY_OPTION_NO_RULES
+QPOL_POLICY_OPTION_MATCH_SYSTEM = _qpol.QPOL_POLICY_OPTION_MATCH_SYSTEM
+QPOL_POLICY_MAX_VERSION = _qpol.QPOL_POLICY_MAX_VERSION
+QPOL_POLICY_MIN_VERSION = _qpol.QPOL_POLICY_MIN_VERSION
+class qpol_policy_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_policy_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_policy_t, name)
+ __repr__ = _swig_repr
+ def __init__(self, *args):
+ this = _qpol.new_qpol_policy_t(*args)
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_policy_t
+ __del__ = lambda self : None;
+ def version(self): return _qpol.qpol_policy_t_version(self)
+ def handle_unknown(self): return _qpol.qpol_policy_t_handle_unknown(self)
+ def capability(self, *args): return _qpol.qpol_policy_t_capability(self, *args)
+ @QpolGenerator(_qpol.qpol_type_from_void)
+ def type_iter(self): return _qpol.qpol_policy_t_type_iter(self)
+ def type_count(self): return _qpol.qpol_policy_t_type_count(self)
+ @QpolGenerator(_qpol.qpol_role_from_void)
+ def role_iter(self): return _qpol.qpol_policy_t_role_iter(self)
+ def role_count(self): return _qpol.qpol_policy_t_role_count(self)
+ @QpolGenerator(_qpol.qpol_level_from_void)
+ def level_iter(self): return _qpol.qpol_policy_t_level_iter(self)
+ def level_count(self): return _qpol.qpol_policy_t_level_count(self)
+ @QpolGenerator(_qpol.qpol_cat_from_void)
+ def cat_iter(self): return _qpol.qpol_policy_t_cat_iter(self)
+ def cat_count(self): return _qpol.qpol_policy_t_cat_count(self)
+ @QpolGenerator(_qpol.qpol_user_from_void)
+ def user_iter(self): return _qpol.qpol_policy_t_user_iter(self)
+ def user_count(self): return _qpol.qpol_policy_t_user_count(self)
+ @QpolGenerator(_qpol.qpol_bool_from_void)
+ def bool_iter(self): return _qpol.qpol_policy_t_bool_iter(self)
+ def bool_count(self): return _qpol.qpol_policy_t_bool_count(self)
+ @QpolGenerator(_qpol.qpol_class_from_void)
+ def class_iter(self, perm=None): return _qpol.qpol_policy_t_class_iter(self, perm)
+ def class_count(self): return _qpol.qpol_policy_t_class_count(self)
+ @QpolGenerator(_qpol.qpol_common_from_void)
+ def common_iter(self, perm=None): return _qpol.qpol_policy_t_common_iter(self, perm)
+ def common_count(self): return _qpol.qpol_policy_t_common_count(self)
+ @QpolGenerator(_qpol.qpol_fs_use_from_void)
+ def fs_use_iter(self): return _qpol.qpol_policy_t_fs_use_iter(self)
+ def fs_use_count(self): return _qpol.qpol_policy_t_fs_use_count(self)
+ @QpolGenerator(_qpol.qpol_genfscon_from_void)
+ def genfscon_iter(self): return _qpol.qpol_policy_t_genfscon_iter(self)
+ def genfscon_count(self): return _qpol.qpol_policy_t_genfscon_count(self)
+ @QpolGenerator(_qpol.qpol_isid_from_void)
+ def isid_iter(self): return _qpol.qpol_policy_t_isid_iter(self)
+ def isid_count(self): return _qpol.qpol_policy_t_isid_count(self)
+ @QpolGenerator(_qpol.qpol_netifcon_from_void)
+ def netifcon_iter(self): return _qpol.qpol_policy_t_netifcon_iter(self)
+ def netifcon_count(self): return _qpol.qpol_policy_t_netifcon_count(self)
+ @QpolGenerator(_qpol.qpol_nodecon_from_void)
+ def nodecon_iter(self): return _qpol.qpol_policy_t_nodecon_iter(self)
+ def nodecon_count(self): return _qpol.qpol_policy_t_nodecon_count(self)
+ @QpolGenerator(_qpol.qpol_portcon_from_void)
+ def portcon_iter(self): return _qpol.qpol_policy_t_portcon_iter(self)
+ def portcon_count(self): return _qpol.qpol_policy_t_portcon_count(self)
+ @QpolGenerator(_qpol.qpol_constraint_from_void)
+ def constraint_iter(self): return _qpol.qpol_policy_t_constraint_iter(self)
+ def constraint_count(self): return _qpol.qpol_policy_t_constraint_count(self)
+ @QpolGenerator(_qpol.qpol_validatetrans_from_void)
+ def validatetrans_iter(self): return _qpol.qpol_policy_t_validatetrans_iter(self)
+ def validatetrans_count(self): return _qpol.qpol_policy_t_validatetrans_count(self)
+ @QpolGenerator(_qpol.qpol_role_allow_from_void)
+ def role_allow_iter(self): return _qpol.qpol_policy_t_role_allow_iter(self)
+ def role_allow_count(self): return _qpol.qpol_policy_t_role_allow_count(self)
+ @QpolGenerator(_qpol.qpol_role_trans_from_void)
+ def role_trans_iter(self): return _qpol.qpol_policy_t_role_trans_iter(self)
+ def role_trans_count(self): return _qpol.qpol_policy_t_role_trans_count(self)
+ @QpolGenerator(_qpol.qpol_range_trans_from_void)
+ def range_trans_iter(self): return _qpol.qpol_policy_t_range_trans_iter(self)
+ def range_trans_count(self): return _qpol.qpol_policy_t_range_trans_count(self)
+ @QpolGenerator(_qpol.qpol_avrule_from_void)
+ def avrule_iter(self): return _qpol.qpol_policy_t_avrule_iter(self)
+ def avrule_allow_count(self): return _qpol.qpol_policy_t_avrule_allow_count(self)
+ def avrule_auditallow_count(self): return _qpol.qpol_policy_t_avrule_auditallow_count(self)
+ def avrule_neverallow_count(self): return _qpol.qpol_policy_t_avrule_neverallow_count(self)
+ def avrule_dontaudit_count(self): return _qpol.qpol_policy_t_avrule_dontaudit_count(self)
+ @QpolGenerator(_qpol.qpol_terule_from_void)
+ def terule_iter(self): return _qpol.qpol_policy_t_terule_iter(self)
+ def terule_trans_count(self): return _qpol.qpol_policy_t_terule_trans_count(self)
+ def terule_change_count(self): return _qpol.qpol_policy_t_terule_change_count(self)
+ def terule_member_count(self): return _qpol.qpol_policy_t_terule_member_count(self)
+ def cond_iter(self): return _qpol.qpol_policy_t_cond_iter(self)
+ def cond_count(self): return _qpol.qpol_policy_t_cond_count(self)
+ @QpolGenerator(_qpol.qpol_filename_trans_from_void)
+ def filename_trans_iter(self): return _qpol.qpol_policy_t_filename_trans_iter(self)
+ def filename_trans_count(self): return _qpol.qpol_policy_t_filename_trans_count(self)
+ @QpolGenerator(_qpol.qpol_type_from_void)
+ def permissive_iter(self): return _qpol.qpol_policy_t_permissive_iter(self)
+ def permissive_count(self): return _qpol.qpol_policy_t_permissive_count(self)
+ def typebounds_iter(self): return _qpol.qpol_policy_t_typebounds_iter(self)
+ def typebounds_count(self): return _qpol.qpol_policy_t_typebounds_count(self)
+ @QpolGenerator(_qpol.qpol_polcap_from_void)
+ def polcap_iter(self): return _qpol.qpol_policy_t_polcap_iter(self)
+ def polcap_count(self): return _qpol.qpol_policy_t_polcap_count(self)
+ @QpolGenerator(_qpol.qpol_default_object_from_void)
+ def default_iter(self): return _qpol.qpol_policy_t_default_iter(self)
+qpol_policy_t_swigregister = _qpol.qpol_policy_t_swigregister
+qpol_policy_t_swigregister(qpol_policy_t)
+
+QPOL_CAP_ATTRIB_NAMES = _qpol.QPOL_CAP_ATTRIB_NAMES
+QPOL_CAP_SYN_RULES = _qpol.QPOL_CAP_SYN_RULES
+QPOL_CAP_LINE_NUMBERS = _qpol.QPOL_CAP_LINE_NUMBERS
+QPOL_CAP_CONDITIONALS = _qpol.QPOL_CAP_CONDITIONALS
+QPOL_CAP_MLS = _qpol.QPOL_CAP_MLS
+QPOL_CAP_MODULES = _qpol.QPOL_CAP_MODULES
+QPOL_CAP_RULES_LOADED = _qpol.QPOL_CAP_RULES_LOADED
+QPOL_CAP_SOURCE = _qpol.QPOL_CAP_SOURCE
+QPOL_CAP_NEVERALLOW = _qpol.QPOL_CAP_NEVERALLOW
+QPOL_CAP_POLCAPS = _qpol.QPOL_CAP_POLCAPS
+QPOL_CAP_BOUNDS = _qpol.QPOL_CAP_BOUNDS
+QPOL_CAP_DEFAULT_OBJECTS = _qpol.QPOL_CAP_DEFAULT_OBJECTS
+QPOL_CAP_DEFAULT_TYPE = _qpol.QPOL_CAP_DEFAULT_TYPE
+QPOL_CAP_PERMISSIVE = _qpol.QPOL_CAP_PERMISSIVE
+QPOL_CAP_FILENAME_TRANS = _qpol.QPOL_CAP_FILENAME_TRANS
+QPOL_CAP_ROLETRANS = _qpol.QPOL_CAP_ROLETRANS
+class qpol_iterator_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_iterator_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_iterator_t, name)
+ __repr__ = _swig_repr
+ def __init__(self):
+ this = _qpol.new_qpol_iterator_t()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_iterator_t
+ __del__ = lambda self : None;
+ def item(self): return _qpol.qpol_iterator_t_item(self)
+ def next_(self): return _qpol.qpol_iterator_t_next_(self)
+ def isend(self): return _qpol.qpol_iterator_t_isend(self)
+ def size(self): return _qpol.qpol_iterator_t_size(self)
+qpol_iterator_t_swigregister = _qpol.qpol_iterator_t_swigregister
+qpol_iterator_t_swigregister(qpol_iterator_t)
+
+class qpol_type_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_type_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_type_t, name)
+ __repr__ = _swig_repr
+ def __init__(self, *args):
+ this = _qpol.new_qpol_type_t(*args)
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_type_t
+ __del__ = lambda self : None;
+ def name(self, *args): return _qpol.qpol_type_t_name(self, *args)
+ def value(self, *args): return _qpol.qpol_type_t_value(self, *args)
+ def isalias(self, *args): return _qpol.qpol_type_t_isalias(self, *args)
+ def isattr(self, *args): return _qpol.qpol_type_t_isattr(self, *args)
+ def ispermissive(self, *args): return _qpol.qpol_type_t_ispermissive(self, *args)
+ @QpolGenerator(_qpol.qpol_type_from_void)
+ def type_iter(self, *args): return _qpol.qpol_type_t_type_iter(self, *args)
+ @QpolGenerator(_qpol.qpol_type_from_void)
+ def attr_iter(self, *args): return _qpol.qpol_type_t_attr_iter(self, *args)
+ @QpolGenerator(_qpol.to_str)
+ def alias_iter(self, *args): return _qpol.qpol_type_t_alias_iter(self, *args)
+qpol_type_t_swigregister = _qpol.qpol_type_t_swigregister
+qpol_type_t_swigregister(qpol_type_t)
+
+
+def qpol_type_from_void(*args):
+ return _qpol.qpol_type_from_void(*args)
+qpol_type_from_void = _qpol.qpol_type_from_void
+class qpol_role_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_role_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_role_t, name)
+ __repr__ = _swig_repr
+ def __init__(self, *args):
+ this = _qpol.new_qpol_role_t(*args)
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_role_t
+ __del__ = lambda self : None;
+ def value(self, *args): return _qpol.qpol_role_t_value(self, *args)
+ def name(self, *args): return _qpol.qpol_role_t_name(self, *args)
+ @QpolGenerator(_qpol.qpol_type_from_void)
+ def type_iter(self, *args): return _qpol.qpol_role_t_type_iter(self, *args)
+ def dominate_iter(self, *args): return _qpol.qpol_role_t_dominate_iter(self, *args)
+qpol_role_t_swigregister = _qpol.qpol_role_t_swigregister
+qpol_role_t_swigregister(qpol_role_t)
+
+
+def qpol_role_from_void(*args):
+ return _qpol.qpol_role_from_void(*args)
+qpol_role_from_void = _qpol.qpol_role_from_void
+class qpol_level_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_level_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_level_t, name)
+ __repr__ = _swig_repr
+ def __init__(self, *args):
+ this = _qpol.new_qpol_level_t(*args)
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_level_t
+ __del__ = lambda self : None;
+ def isalias(self, *args): return _qpol.qpol_level_t_isalias(self, *args)
+ def value(self, *args): return _qpol.qpol_level_t_value(self, *args)
+ def name(self, *args): return _qpol.qpol_level_t_name(self, *args)
+ @QpolGenerator(_qpol.qpol_cat_from_void)
+ def cat_iter(self, *args): return _qpol.qpol_level_t_cat_iter(self, *args)
+ @QpolGenerator(_qpol.to_str)
+ def alias_iter(self, *args): return _qpol.qpol_level_t_alias_iter(self, *args)
+qpol_level_t_swigregister = _qpol.qpol_level_t_swigregister
+qpol_level_t_swigregister(qpol_level_t)
+
+
+def qpol_level_from_void(*args):
+ return _qpol.qpol_level_from_void(*args)
+qpol_level_from_void = _qpol.qpol_level_from_void
+class qpol_cat_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_cat_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_cat_t, name)
+ __repr__ = _swig_repr
+ def __init__(self, *args):
+ this = _qpol.new_qpol_cat_t(*args)
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_cat_t
+ __del__ = lambda self : None;
+ def isalias(self, *args): return _qpol.qpol_cat_t_isalias(self, *args)
+ def value(self, *args): return _qpol.qpol_cat_t_value(self, *args)
+ def name(self, *args): return _qpol.qpol_cat_t_name(self, *args)
+ @QpolGenerator(_qpol.to_str)
+ def alias_iter(self, *args): return _qpol.qpol_cat_t_alias_iter(self, *args)
+qpol_cat_t_swigregister = _qpol.qpol_cat_t_swigregister
+qpol_cat_t_swigregister(qpol_cat_t)
+
+
+def qpol_cat_from_void(*args):
+ return _qpol.qpol_cat_from_void(*args)
+qpol_cat_from_void = _qpol.qpol_cat_from_void
+class qpol_mls_range_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_mls_range_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_mls_range_t, name)
+ __repr__ = _swig_repr
+ def __init__(self, *args):
+ this = _qpol.new_qpol_mls_range_t(*args)
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_mls_range_t
+ __del__ = lambda self : None;
+ def high_level(self, *args): return _qpol.qpol_mls_range_t_high_level(self, *args)
+ def low_level(self, *args): return _qpol.qpol_mls_range_t_low_level(self, *args)
+qpol_mls_range_t_swigregister = _qpol.qpol_mls_range_t_swigregister
+qpol_mls_range_t_swigregister(qpol_mls_range_t)
+
+
+def qpol_mls_range_from_void(*args):
+ return _qpol.qpol_mls_range_from_void(*args)
+qpol_mls_range_from_void = _qpol.qpol_mls_range_from_void
+class qpol_semantic_level_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_semantic_level_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_semantic_level_t, name)
+ __repr__ = _swig_repr
+ def __init__(self, *args):
+ this = _qpol.new_qpol_semantic_level_t(*args)
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_semantic_level_t
+ __del__ = lambda self : None;
+ def add_cats(self, *args): return _qpol.qpol_semantic_level_t_add_cats(self, *args)
+qpol_semantic_level_t_swigregister = _qpol.qpol_semantic_level_t_swigregister
+qpol_semantic_level_t_swigregister(qpol_semantic_level_t)
+
+class qpol_mls_level_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_mls_level_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_mls_level_t, name)
+ __repr__ = _swig_repr
+ def __init__(self, *args):
+ this = _qpol.new_qpol_mls_level_t(*args)
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_mls_level_t
+ __del__ = lambda self : None;
+ def sens_name(self, *args): return _qpol.qpol_mls_level_t_sens_name(self, *args)
+ @QpolGenerator(_qpol.qpol_cat_from_void)
+ def cat_iter(self, *args): return _qpol.qpol_mls_level_t_cat_iter(self, *args)
+qpol_mls_level_t_swigregister = _qpol.qpol_mls_level_t_swigregister
+qpol_mls_level_t_swigregister(qpol_mls_level_t)
+
+
+def qpol_mls_level_from_void(*args):
+ return _qpol.qpol_mls_level_from_void(*args)
+qpol_mls_level_from_void = _qpol.qpol_mls_level_from_void
+class qpol_user_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_user_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_user_t, name)
+ __repr__ = _swig_repr
+ def __init__(self, *args):
+ this = _qpol.new_qpol_user_t(*args)
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_user_t
+ __del__ = lambda self : None;
+ def value(self, *args): return _qpol.qpol_user_t_value(self, *args)
+ @QpolGenerator(_qpol.qpol_role_from_void)
+ def role_iter(self, *args): return _qpol.qpol_user_t_role_iter(self, *args)
+ def range(self, *args): return _qpol.qpol_user_t_range(self, *args)
+ def name(self, *args): return _qpol.qpol_user_t_name(self, *args)
+ def dfltlevel(self, *args): return _qpol.qpol_user_t_dfltlevel(self, *args)
+qpol_user_t_swigregister = _qpol.qpol_user_t_swigregister
+qpol_user_t_swigregister(qpol_user_t)
+
+
+def qpol_user_from_void(*args):
+ return _qpol.qpol_user_from_void(*args)
+qpol_user_from_void = _qpol.qpol_user_from_void
+class qpol_bool_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_bool_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_bool_t, name)
+ __repr__ = _swig_repr
+ def __init__(self, *args):
+ this = _qpol.new_qpol_bool_t(*args)
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_bool_t
+ __del__ = lambda self : None;
+ def value(self, *args): return _qpol.qpol_bool_t_value(self, *args)
+ def state(self, *args): return _qpol.qpol_bool_t_state(self, *args)
+ def name(self, *args): return _qpol.qpol_bool_t_name(self, *args)
+qpol_bool_t_swigregister = _qpol.qpol_bool_t_swigregister
+qpol_bool_t_swigregister(qpol_bool_t)
+
+
+def qpol_bool_from_void(*args):
+ return _qpol.qpol_bool_from_void(*args)
+qpol_bool_from_void = _qpol.qpol_bool_from_void
+class qpol_context_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_context_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_context_t, name)
+ __repr__ = _swig_repr
+ def __init__(self):
+ this = _qpol.new_qpol_context_t()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_context_t
+ __del__ = lambda self : None;
+ def user(self, *args): return _qpol.qpol_context_t_user(self, *args)
+ def role(self, *args): return _qpol.qpol_context_t_role(self, *args)
+ def type_(self, *args): return _qpol.qpol_context_t_type_(self, *args)
+ def range(self, *args): return _qpol.qpol_context_t_range(self, *args)
+qpol_context_t_swigregister = _qpol.qpol_context_t_swigregister
+qpol_context_t_swigregister(qpol_context_t)
+
+
+def qpol_context_from_void(*args):
+ return _qpol.qpol_context_from_void(*args)
+qpol_context_from_void = _qpol.qpol_context_from_void
+class qpol_class_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_class_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_class_t, name)
+ __repr__ = _swig_repr
+ def __init__(self, *args):
+ this = _qpol.new_qpol_class_t(*args)
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_class_t
+ __del__ = lambda self : None;
+ def value(self, *args): return _qpol.qpol_class_t_value(self, *args)
+ def common(self, *args): return _qpol.qpol_class_t_common(self, *args)
+ @QpolGenerator(_qpol.to_str)
+ def perm_iter(self, *args): return _qpol.qpol_class_t_perm_iter(self, *args)
+ @QpolGenerator(_qpol.qpol_constraint_from_void)
+ def constraint_iter(self, *args): return _qpol.qpol_class_t_constraint_iter(self, *args)
+ @QpolGenerator(_qpol.qpol_validatetrans_from_void)
+ def validatetrans_iter(self, *args): return _qpol.qpol_class_t_validatetrans_iter(self, *args)
+ def name(self, *args): return _qpol.qpol_class_t_name(self, *args)
+qpol_class_t_swigregister = _qpol.qpol_class_t_swigregister
+qpol_class_t_swigregister(qpol_class_t)
+
+
+def qpol_class_from_void(*args):
+ return _qpol.qpol_class_from_void(*args)
+qpol_class_from_void = _qpol.qpol_class_from_void
+class qpol_common_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_common_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_common_t, name)
+ __repr__ = _swig_repr
+ def __init__(self, *args):
+ this = _qpol.new_qpol_common_t(*args)
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_common_t
+ __del__ = lambda self : None;
+ def value(self, *args): return _qpol.qpol_common_t_value(self, *args)
+ @QpolGenerator(_qpol.to_str)
+ def perm_iter(self, *args): return _qpol.qpol_common_t_perm_iter(self, *args)
+ def name(self, *args): return _qpol.qpol_common_t_name(self, *args)
+qpol_common_t_swigregister = _qpol.qpol_common_t_swigregister
+qpol_common_t_swigregister(qpol_common_t)
+
+
+def qpol_common_from_void(*args):
+ return _qpol.qpol_common_from_void(*args)
+qpol_common_from_void = _qpol.qpol_common_from_void
+QPOL_FS_USE_XATTR = _qpol.QPOL_FS_USE_XATTR
+QPOL_FS_USE_TRANS = _qpol.QPOL_FS_USE_TRANS
+QPOL_FS_USE_TASK = _qpol.QPOL_FS_USE_TASK
+QPOL_FS_USE_GENFS = _qpol.QPOL_FS_USE_GENFS
+QPOL_FS_USE_NONE = _qpol.QPOL_FS_USE_NONE
+QPOL_FS_USE_PSID = _qpol.QPOL_FS_USE_PSID
+class qpol_fs_use_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_fs_use_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_fs_use_t, name)
+ __repr__ = _swig_repr
+ def __init__(self, *args):
+ this = _qpol.new_qpol_fs_use_t(*args)
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_fs_use_t
+ __del__ = lambda self : None;
+ def name(self, *args): return _qpol.qpol_fs_use_t_name(self, *args)
+ def behavior(self, *args): return _qpol.qpol_fs_use_t_behavior(self, *args)
+ def context(self, *args): return _qpol.qpol_fs_use_t_context(self, *args)
+qpol_fs_use_t_swigregister = _qpol.qpol_fs_use_t_swigregister
+qpol_fs_use_t_swigregister(qpol_fs_use_t)
+
+
+def qpol_fs_use_from_void(*args):
+ return _qpol.qpol_fs_use_from_void(*args)
+qpol_fs_use_from_void = _qpol.qpol_fs_use_from_void
+QPOL_CLASS_ALL = _qpol.QPOL_CLASS_ALL
+QPOL_CLASS_BLK_FILE = _qpol.QPOL_CLASS_BLK_FILE
+QPOL_CLASS_CHR_FILE = _qpol.QPOL_CLASS_CHR_FILE
+QPOL_CLASS_DIR = _qpol.QPOL_CLASS_DIR
+QPOL_CLASS_FIFO_FILE = _qpol.QPOL_CLASS_FIFO_FILE
+QPOL_CLASS_FILE = _qpol.QPOL_CLASS_FILE
+QPOL_CLASS_LNK_FILE = _qpol.QPOL_CLASS_LNK_FILE
+QPOL_CLASS_SOCK_FILE = _qpol.QPOL_CLASS_SOCK_FILE
+class qpol_genfscon_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_genfscon_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_genfscon_t, name)
+ __repr__ = _swig_repr
+ def __init__(self, *args):
+ this = _qpol.new_qpol_genfscon_t(*args)
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_genfscon_t
+ __del__ = lambda self : None;
+ def name(self, *args): return _qpol.qpol_genfscon_t_name(self, *args)
+ def path(self, *args): return _qpol.qpol_genfscon_t_path(self, *args)
+ def object_class(self, *args): return _qpol.qpol_genfscon_t_object_class(self, *args)
+ def context(self, *args): return _qpol.qpol_genfscon_t_context(self, *args)
+qpol_genfscon_t_swigregister = _qpol.qpol_genfscon_t_swigregister
+qpol_genfscon_t_swigregister(qpol_genfscon_t)
+
+
+def qpol_genfscon_from_void(*args):
+ return _qpol.qpol_genfscon_from_void(*args)
+qpol_genfscon_from_void = _qpol.qpol_genfscon_from_void
+class qpol_isid_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_isid_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_isid_t, name)
+ __repr__ = _swig_repr
+ def __init__(self, *args):
+ this = _qpol.new_qpol_isid_t(*args)
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_isid_t
+ __del__ = lambda self : None;
+ def name(self, *args): return _qpol.qpol_isid_t_name(self, *args)
+ def context(self, *args): return _qpol.qpol_isid_t_context(self, *args)
+qpol_isid_t_swigregister = _qpol.qpol_isid_t_swigregister
+qpol_isid_t_swigregister(qpol_isid_t)
+
+
+def qpol_isid_from_void(*args):
+ return _qpol.qpol_isid_from_void(*args)
+qpol_isid_from_void = _qpol.qpol_isid_from_void
+class qpol_netifcon_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_netifcon_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_netifcon_t, name)
+ __repr__ = _swig_repr
+ def __init__(self, *args):
+ this = _qpol.new_qpol_netifcon_t(*args)
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_netifcon_t
+ __del__ = lambda self : None;
+ def name(self, *args): return _qpol.qpol_netifcon_t_name(self, *args)
+ def msg_con(self, *args): return _qpol.qpol_netifcon_t_msg_con(self, *args)
+ def if_con(self, *args): return _qpol.qpol_netifcon_t_if_con(self, *args)
+qpol_netifcon_t_swigregister = _qpol.qpol_netifcon_t_swigregister
+qpol_netifcon_t_swigregister(qpol_netifcon_t)
+
+
+def qpol_netifcon_from_void(*args):
+ return _qpol.qpol_netifcon_from_void(*args)
+qpol_netifcon_from_void = _qpol.qpol_netifcon_from_void
+QPOL_IPV4 = _qpol.QPOL_IPV4
+QPOL_IPV6 = _qpol.QPOL_IPV6
+class qpol_nodecon_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_nodecon_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_nodecon_t, name)
+ __repr__ = _swig_repr
+ def __init__(self, *args):
+ this = _qpol.new_qpol_nodecon_t(*args)
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_nodecon_t
+ __del__ = lambda self : None;
+ def addr(self, *args): return _qpol.qpol_nodecon_t_addr(self, *args)
+ def mask(self, *args): return _qpol.qpol_nodecon_t_mask(self, *args)
+ def protocol(self, *args): return _qpol.qpol_nodecon_t_protocol(self, *args)
+ def context(self, *args): return _qpol.qpol_nodecon_t_context(self, *args)
+qpol_nodecon_t_swigregister = _qpol.qpol_nodecon_t_swigregister
+qpol_nodecon_t_swigregister(qpol_nodecon_t)
+
+
+def qpol_nodecon_from_void(*args):
+ return _qpol.qpol_nodecon_from_void(*args)
+qpol_nodecon_from_void = _qpol.qpol_nodecon_from_void
+IPPROTO_TCP = _qpol.IPPROTO_TCP
+IPPROTO_UDP = _qpol.IPPROTO_UDP
+class qpol_portcon_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_portcon_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_portcon_t, name)
+ __repr__ = _swig_repr
+ def __init__(self, *args):
+ this = _qpol.new_qpol_portcon_t(*args)
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_portcon_t
+ __del__ = lambda self : None;
+ def low_port(self, *args): return _qpol.qpol_portcon_t_low_port(self, *args)
+ def high_port(self, *args): return _qpol.qpol_portcon_t_high_port(self, *args)
+ def protocol(self, *args): return _qpol.qpol_portcon_t_protocol(self, *args)
+ def context(self, *args): return _qpol.qpol_portcon_t_context(self, *args)
+qpol_portcon_t_swigregister = _qpol.qpol_portcon_t_swigregister
+qpol_portcon_t_swigregister(qpol_portcon_t)
+
+
+def qpol_portcon_from_void(*args):
+ return _qpol.qpol_portcon_from_void(*args)
+qpol_portcon_from_void = _qpol.qpol_portcon_from_void
+class qpol_constraint_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_constraint_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_constraint_t, name)
+ __repr__ = _swig_repr
+ def __init__(self):
+ this = _qpol.new_qpol_constraint_t()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_constraint_t
+ __del__ = lambda self : None;
+ def object_class(self, *args): return _qpol.qpol_constraint_t_object_class(self, *args)
+ @QpolGenerator(_qpol.to_str)
+ def perm_iter(self, *args): return _qpol.qpol_constraint_t_perm_iter(self, *args)
+ @QpolGenerator(_qpol.qpol_constraint_expr_node_from_void)
+ def expr_iter(self, *args): return _qpol.qpol_constraint_t_expr_iter(self, *args)
+qpol_constraint_t_swigregister = _qpol.qpol_constraint_t_swigregister
+qpol_constraint_t_swigregister(qpol_constraint_t)
+
+
+def qpol_constraint_from_void(*args):
+ return _qpol.qpol_constraint_from_void(*args)
+qpol_constraint_from_void = _qpol.qpol_constraint_from_void
+class qpol_validatetrans_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_validatetrans_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_validatetrans_t, name)
+ __repr__ = _swig_repr
+ def __init__(self):
+ this = _qpol.new_qpol_validatetrans_t()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_validatetrans_t
+ __del__ = lambda self : None;
+ def object_class(self, *args): return _qpol.qpol_validatetrans_t_object_class(self, *args)
+ @QpolGenerator(_qpol.qpol_constraint_expr_node_from_void)
+ def expr_iter(self, *args): return _qpol.qpol_validatetrans_t_expr_iter(self, *args)
+qpol_validatetrans_t_swigregister = _qpol.qpol_validatetrans_t_swigregister
+qpol_validatetrans_t_swigregister(qpol_validatetrans_t)
+
+
+def qpol_validatetrans_from_void(*args):
+ return _qpol.qpol_validatetrans_from_void(*args)
+qpol_validatetrans_from_void = _qpol.qpol_validatetrans_from_void
+QPOL_CEXPR_TYPE_NOT = _qpol.QPOL_CEXPR_TYPE_NOT
+QPOL_CEXPR_TYPE_AND = _qpol.QPOL_CEXPR_TYPE_AND
+QPOL_CEXPR_TYPE_OR = _qpol.QPOL_CEXPR_TYPE_OR
+QPOL_CEXPR_TYPE_ATTR = _qpol.QPOL_CEXPR_TYPE_ATTR
+QPOL_CEXPR_TYPE_NAMES = _qpol.QPOL_CEXPR_TYPE_NAMES
+QPOL_CEXPR_SYM_USER = _qpol.QPOL_CEXPR_SYM_USER
+QPOL_CEXPR_SYM_ROLE = _qpol.QPOL_CEXPR_SYM_ROLE
+QPOL_CEXPR_SYM_TYPE = _qpol.QPOL_CEXPR_SYM_TYPE
+QPOL_CEXPR_SYM_TARGET = _qpol.QPOL_CEXPR_SYM_TARGET
+QPOL_CEXPR_SYM_XTARGET = _qpol.QPOL_CEXPR_SYM_XTARGET
+QPOL_CEXPR_SYM_L1L2 = _qpol.QPOL_CEXPR_SYM_L1L2
+QPOL_CEXPR_SYM_L1H2 = _qpol.QPOL_CEXPR_SYM_L1H2
+QPOL_CEXPR_SYM_H1L2 = _qpol.QPOL_CEXPR_SYM_H1L2
+QPOL_CEXPR_SYM_H1H2 = _qpol.QPOL_CEXPR_SYM_H1H2
+QPOL_CEXPR_SYM_L1H1 = _qpol.QPOL_CEXPR_SYM_L1H1
+QPOL_CEXPR_SYM_L2H2 = _qpol.QPOL_CEXPR_SYM_L2H2
+QPOL_CEXPR_OP_EQ = _qpol.QPOL_CEXPR_OP_EQ
+QPOL_CEXPR_OP_NEQ = _qpol.QPOL_CEXPR_OP_NEQ
+QPOL_CEXPR_OP_DOM = _qpol.QPOL_CEXPR_OP_DOM
+QPOL_CEXPR_OP_DOMBY = _qpol.QPOL_CEXPR_OP_DOMBY
+QPOL_CEXPR_OP_INCOMP = _qpol.QPOL_CEXPR_OP_INCOMP
+class qpol_constraint_expr_node_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_constraint_expr_node_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_constraint_expr_node_t, name)
+ __repr__ = _swig_repr
+ def __init__(self):
+ this = _qpol.new_qpol_constraint_expr_node_t()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_constraint_expr_node_t
+ __del__ = lambda self : None;
+ def expr_type(self, *args): return _qpol.qpol_constraint_expr_node_t_expr_type(self, *args)
+ def sym_type(self, *args): return _qpol.qpol_constraint_expr_node_t_sym_type(self, *args)
+ def op(self, *args): return _qpol.qpol_constraint_expr_node_t_op(self, *args)
+ @QpolGenerator(_qpol.to_str)
+ def names_iter(self, *args): return _qpol.qpol_constraint_expr_node_t_names_iter(self, *args)
+qpol_constraint_expr_node_t_swigregister = _qpol.qpol_constraint_expr_node_t_swigregister
+qpol_constraint_expr_node_t_swigregister(qpol_constraint_expr_node_t)
+
+
+def qpol_constraint_expr_node_from_void(*args):
+ return _qpol.qpol_constraint_expr_node_from_void(*args)
+qpol_constraint_expr_node_from_void = _qpol.qpol_constraint_expr_node_from_void
+class qpol_role_allow_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_role_allow_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_role_allow_t, name)
+ __repr__ = _swig_repr
+ def __init__(self):
+ this = _qpol.new_qpol_role_allow_t()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_role_allow_t
+ __del__ = lambda self : None;
+ def rule_type(self, policy):
+ return "allow"
+
+ def source_role(self, *args): return _qpol.qpol_role_allow_t_source_role(self, *args)
+ def target_role(self, *args): return _qpol.qpol_role_allow_t_target_role(self, *args)
+qpol_role_allow_t_swigregister = _qpol.qpol_role_allow_t_swigregister
+qpol_role_allow_t_swigregister(qpol_role_allow_t)
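+
+# Note: rule_type() above appears to accept a policy argument only to match
+# the generic rule interface; role allow rules have a single form, so the
+# wrapper hard-codes "allow". The same pattern recurs in qpol_role_trans_t,
+# qpol_range_trans_t and qpol_filename_trans_t below.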
+
+
+def qpol_role_allow_from_void(*args):
+ return _qpol.qpol_role_allow_from_void(*args)
+qpol_role_allow_from_void = _qpol.qpol_role_allow_from_void
+class qpol_role_trans_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_role_trans_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_role_trans_t, name)
+ __repr__ = _swig_repr
+ def __init__(self):
+ this = _qpol.new_qpol_role_trans_t()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_role_trans_t
+ __del__ = lambda self : None;
+ def rule_type(self, policy):
+ return "role_transition"
+
+ def source_role(self, *args): return _qpol.qpol_role_trans_t_source_role(self, *args)
+ def target_type(self, *args): return _qpol.qpol_role_trans_t_target_type(self, *args)
+ def object_class(self, *args): return _qpol.qpol_role_trans_t_object_class(self, *args)
+ def default_role(self, *args): return _qpol.qpol_role_trans_t_default_role(self, *args)
+qpol_role_trans_t_swigregister = _qpol.qpol_role_trans_t_swigregister
+qpol_role_trans_t_swigregister(qpol_role_trans_t)
+
+
+def qpol_role_trans_from_void(*args):
+ return _qpol.qpol_role_trans_from_void(*args)
+qpol_role_trans_from_void = _qpol.qpol_role_trans_from_void
+class qpol_range_trans_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_range_trans_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_range_trans_t, name)
+ __repr__ = _swig_repr
+ def __init__(self):
+ this = _qpol.new_qpol_range_trans_t()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_range_trans_t
+ __del__ = lambda self : None;
+ def rule_type(self, policy):
+ return "range_transition"
+
+ def source_type(self, *args): return _qpol.qpol_range_trans_t_source_type(self, *args)
+ def target_type(self, *args): return _qpol.qpol_range_trans_t_target_type(self, *args)
+ def object_class(self, *args): return _qpol.qpol_range_trans_t_object_class(self, *args)
+ def range(self, *args): return _qpol.qpol_range_trans_t_range(self, *args)
+qpol_range_trans_t_swigregister = _qpol.qpol_range_trans_t_swigregister
+qpol_range_trans_t_swigregister(qpol_range_trans_t)
+
+
+def qpol_range_trans_from_void(*args):
+ return _qpol.qpol_range_trans_from_void(*args)
+qpol_range_trans_from_void = _qpol.qpol_range_trans_from_void
+QPOL_RULE_ALLOW = _qpol.QPOL_RULE_ALLOW
+QPOL_RULE_NEVERALLOW = _qpol.QPOL_RULE_NEVERALLOW
+QPOL_RULE_AUDITALLOW = _qpol.QPOL_RULE_AUDITALLOW
+QPOL_RULE_DONTAUDIT = _qpol.QPOL_RULE_DONTAUDIT
+class qpol_avrule_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_avrule_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_avrule_t, name)
+ __repr__ = _swig_repr
+ def __init__(self):
+ this = _qpol.new_qpol_avrule_t()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_avrule_t
+ __del__ = lambda self : None;
+ def rule_type(self, *args): return _qpol.qpol_avrule_t_rule_type(self, *args)
+ def source_type(self, *args): return _qpol.qpol_avrule_t_source_type(self, *args)
+ def target_type(self, *args): return _qpol.qpol_avrule_t_target_type(self, *args)
+ def object_class(self, *args): return _qpol.qpol_avrule_t_object_class(self, *args)
+ @QpolGenerator(_qpol.to_str)
+ def perm_iter(self, *args): return _qpol.qpol_avrule_t_perm_iter(self, *args)
+ def cond(self, *args): return _qpol.qpol_avrule_t_cond(self, *args)
+ def is_enabled(self, *args): return _qpol.qpol_avrule_t_is_enabled(self, *args)
+ def which_list(self, *args): return _qpol.qpol_avrule_t_which_list(self, *args)
+ def syn_avrule_iter(self, *args): return _qpol.qpol_avrule_t_syn_avrule_iter(self, *args)
+qpol_avrule_t_swigregister = _qpol.qpol_avrule_t_swigregister
+qpol_avrule_t_swigregister(qpol_avrule_t)
+
+
+def qpol_avrule_from_void(*args):
+ return _qpol.qpol_avrule_from_void(*args)
+qpol_avrule_from_void = _qpol.qpol_avrule_from_void
+QPOL_RULE_TYPE_TRANS = _qpol.QPOL_RULE_TYPE_TRANS
+QPOL_RULE_TYPE_CHANGE = _qpol.QPOL_RULE_TYPE_CHANGE
+QPOL_RULE_TYPE_MEMBER = _qpol.QPOL_RULE_TYPE_MEMBER
+class qpol_terule_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_terule_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_terule_t, name)
+ __repr__ = _swig_repr
+ def __init__(self):
+ this = _qpol.new_qpol_terule_t()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_terule_t
+ __del__ = lambda self : None;
+ def rule_type(self, *args): return _qpol.qpol_terule_t_rule_type(self, *args)
+ def source_type(self, *args): return _qpol.qpol_terule_t_source_type(self, *args)
+ def target_type(self, *args): return _qpol.qpol_terule_t_target_type(self, *args)
+ def object_class(self, *args): return _qpol.qpol_terule_t_object_class(self, *args)
+ def default_type(self, *args): return _qpol.qpol_terule_t_default_type(self, *args)
+ def cond(self, *args): return _qpol.qpol_terule_t_cond(self, *args)
+ def is_enabled(self, *args): return _qpol.qpol_terule_t_is_enabled(self, *args)
+ def which_list(self, *args): return _qpol.qpol_terule_t_which_list(self, *args)
+ def syn_terule_iter(self, *args): return _qpol.qpol_terule_t_syn_terule_iter(self, *args)
+qpol_terule_t_swigregister = _qpol.qpol_terule_t_swigregister
+qpol_terule_t_swigregister(qpol_terule_t)
+
+
+def qpol_terule_from_void(*args):
+ return _qpol.qpol_terule_from_void(*args)
+qpol_terule_from_void = _qpol.qpol_terule_from_void
+class qpol_cond_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_cond_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_cond_t, name)
+ __repr__ = _swig_repr
+ def __init__(self):
+ this = _qpol.new_qpol_cond_t()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_cond_t
+ __del__ = lambda self : None;
+ @QpolGenerator(_qpol.qpol_cond_expr_node_from_void)
+ def expr_node_iter(self, *args): return _qpol.qpol_cond_t_expr_node_iter(self, *args)
+ def av_true_iter(self, *args): return _qpol.qpol_cond_t_av_true_iter(self, *args)
+ def av_false_iter(self, *args): return _qpol.qpol_cond_t_av_false_iter(self, *args)
+ def te_true_iter(self, *args): return _qpol.qpol_cond_t_te_true_iter(self, *args)
+ def te_false_iter(self, *args): return _qpol.qpol_cond_t_te_false_iter(self, *args)
+ def evaluate(self, *args): return _qpol.qpol_cond_t_evaluate(self, *args)
+qpol_cond_t_swigregister = _qpol.qpol_cond_t_swigregister
+qpol_cond_t_swigregister(qpol_cond_t)
+
+
+def qpol_cond_from_void(*args):
+ return _qpol.qpol_cond_from_void(*args)
+qpol_cond_from_void = _qpol.qpol_cond_from_void
+QPOL_COND_EXPR_BOOL = _qpol.QPOL_COND_EXPR_BOOL
+QPOL_COND_EXPR_NOT = _qpol.QPOL_COND_EXPR_NOT
+QPOL_COND_EXPR_OR = _qpol.QPOL_COND_EXPR_OR
+QPOL_COND_EXPR_AND = _qpol.QPOL_COND_EXPR_AND
+QPOL_COND_EXPR_XOR = _qpol.QPOL_COND_EXPR_XOR
+QPOL_COND_EXPR_EQ = _qpol.QPOL_COND_EXPR_EQ
+QPOL_COND_EXPR_NEQ = _qpol.QPOL_COND_EXPR_NEQ
+class qpol_cond_expr_node_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_cond_expr_node_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_cond_expr_node_t, name)
+ __repr__ = _swig_repr
+ def __init__(self):
+ this = _qpol.new_qpol_cond_expr_node_t()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_cond_expr_node_t
+ __del__ = lambda self : None;
+ def expr_type(self, *args): return _qpol.qpol_cond_expr_node_t_expr_type(self, *args)
+ def get_boolean(self, *args): return _qpol.qpol_cond_expr_node_t_get_boolean(self, *args)
+qpol_cond_expr_node_t_swigregister = _qpol.qpol_cond_expr_node_t_swigregister
+qpol_cond_expr_node_t_swigregister(qpol_cond_expr_node_t)
+
+
+def qpol_cond_expr_node_from_void(*args):
+ return _qpol.qpol_cond_expr_node_from_void(*args)
+qpol_cond_expr_node_from_void = _qpol.qpol_cond_expr_node_from_void
+class qpol_filename_trans_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_filename_trans_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_filename_trans_t, name)
+ __repr__ = _swig_repr
+ def __init__(self):
+ this = _qpol.new_qpol_filename_trans_t()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_filename_trans_t
+ __del__ = lambda self : None;
+ def rule_type(self, policy):
+ return "type_transition"
+
+ def source_type(self, *args): return _qpol.qpol_filename_trans_t_source_type(self, *args)
+ def target_type(self, *args): return _qpol.qpol_filename_trans_t_target_type(self, *args)
+ def object_class(self, *args): return _qpol.qpol_filename_trans_t_object_class(self, *args)
+ def default_type(self, *args): return _qpol.qpol_filename_trans_t_default_type(self, *args)
+ def filename(self, *args): return _qpol.qpol_filename_trans_t_filename(self, *args)
+qpol_filename_trans_t_swigregister = _qpol.qpol_filename_trans_t_swigregister
+qpol_filename_trans_t_swigregister(qpol_filename_trans_t)
+
+
+def qpol_filename_trans_from_void(*args):
+ return _qpol.qpol_filename_trans_from_void(*args)
+qpol_filename_trans_from_void = _qpol.qpol_filename_trans_from_void
+class qpol_polcap_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_polcap_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_polcap_t, name)
+ __repr__ = _swig_repr
+ def __init__(self):
+ this = _qpol.new_qpol_polcap_t()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_polcap_t
+ __del__ = lambda self : None;
+ def name(self, *args): return _qpol.qpol_polcap_t_name(self, *args)
+qpol_polcap_t_swigregister = _qpol.qpol_polcap_t_swigregister
+qpol_polcap_t_swigregister(qpol_polcap_t)
+
+
+def qpol_polcap_from_void(*args):
+ return _qpol.qpol_polcap_from_void(*args)
+qpol_polcap_from_void = _qpol.qpol_polcap_from_void
+class qpol_typebounds_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_typebounds_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_typebounds_t, name)
+ __repr__ = _swig_repr
+ def __init__(self):
+ this = _qpol.new_qpol_typebounds_t()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_typebounds_t
+ __del__ = lambda self : None;
+ def parent_name(self, *args): return _qpol.qpol_typebounds_t_parent_name(self, *args)
+ def child_name(self, *args): return _qpol.qpol_typebounds_t_child_name(self, *args)
+qpol_typebounds_t_swigregister = _qpol.qpol_typebounds_t_swigregister
+qpol_typebounds_t_swigregister(qpol_typebounds_t)
+
+
+def qpol_typebounds_from_void(*args):
+ return _qpol.qpol_typebounds_from_void(*args)
+qpol_typebounds_from_void = _qpol.qpol_typebounds_from_void
+class qpol_rolebounds_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_rolebounds_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_rolebounds_t, name)
+ __repr__ = _swig_repr
+ def __init__(self):
+ this = _qpol.new_qpol_rolebounds_t()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_rolebounds_t
+ __del__ = lambda self : None;
+ def parent_name(self, *args): return _qpol.qpol_rolebounds_t_parent_name(self, *args)
+ def child_name(self, *args): return _qpol.qpol_rolebounds_t_child_name(self, *args)
+qpol_rolebounds_t_swigregister = _qpol.qpol_rolebounds_t_swigregister
+qpol_rolebounds_t_swigregister(qpol_rolebounds_t)
+
+
+def qpol_rolebounds_from_void(*args):
+ return _qpol.qpol_rolebounds_from_void(*args)
+qpol_rolebounds_from_void = _qpol.qpol_rolebounds_from_void
+class qpol_userbounds_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_userbounds_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_userbounds_t, name)
+ __repr__ = _swig_repr
+ def __init__(self):
+ this = _qpol.new_qpol_userbounds_t()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_userbounds_t
+ __del__ = lambda self : None;
+ def parent_name(self, *args): return _qpol.qpol_userbounds_t_parent_name(self, *args)
+ def child_name(self, *args): return _qpol.qpol_userbounds_t_child_name(self, *args)
+qpol_userbounds_t_swigregister = _qpol.qpol_userbounds_t_swigregister
+qpol_userbounds_t_swigregister(qpol_userbounds_t)
+
+
+def qpol_userbounds_from_void(*args):
+ return _qpol.qpol_userbounds_from_void(*args)
+qpol_userbounds_from_void = _qpol.qpol_userbounds_from_void
+class qpol_default_object_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_default_object_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_default_object_t, name)
+ __repr__ = _swig_repr
+ def __init__(self):
+ this = _qpol.new_qpol_default_object_t()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_default_object_t
+ __del__ = lambda self : None;
+ def object_class(self, *args): return _qpol.qpol_default_object_t_object_class(self, *args)
+ def user_default(self, *args): return _qpol.qpol_default_object_t_user_default(self, *args)
+ def role_default(self, *args): return _qpol.qpol_default_object_t_role_default(self, *args)
+ def type_default(self, *args): return _qpol.qpol_default_object_t_type_default(self, *args)
+ def range_default(self, *args): return _qpol.qpol_default_object_t_range_default(self, *args)
+qpol_default_object_t_swigregister = _qpol.qpol_default_object_t_swigregister
+qpol_default_object_t_swigregister(qpol_default_object_t)
+
+
+def qpol_default_object_from_void(*args):
+ return _qpol.qpol_default_object_from_void(*args)
+qpol_default_object_from_void = _qpol.qpol_default_object_from_void
+# This file is compatible with both classic and new-style classes.
+
+
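
Taken together, the generated classes above follow one pattern: construct a
symbol from a policy handle plus a name, or walk one of the *_iter() methods,
which QpolGenerator turns into Python generators. A minimal usage sketch,
assuming `p` is an already-opened qpol_policy_t handle (its constructor is
defined earlier in this module) and that "httpd_t" exists in the loaded
policy:

    t = qpol_type_t(p, "httpd_t")        # look a type up by name
    print("%s (value %d)" % (t.name(p), t.value(p)))
    for attr in t.attr_iter(p):          # yields wrapped qpol_type_t objects
        print(attr.name(p))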
diff --git a/lib/python2.7/site-packages/setools/policyrep/rbacrule.py b/lib/python2.7/site-packages/setools/policyrep/rbacrule.py
new file mode 100644
index 0000000..aa6a0d0
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/policyrep/rbacrule.py
@@ -0,0 +1,92 @@
+# Copyright 2014, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+from . import exception
+from . import qpol
+from . import rule
+from . import role
+from . import typeattr
+
+
+def rbac_rule_factory(policy, name):
+ """Factory function for creating RBAC rule objects."""
+
+ if isinstance(name, qpol.qpol_role_allow_t):
+ return RoleAllow(policy, name)
+ elif isinstance(name, qpol.qpol_role_trans_t):
+ return RoleTransition(policy, name)
+ else:
+ raise TypeError("RBAC rules cannot be looked up.")
+
+
+def validate_ruletype(types):
+ """Validate RBAC rule types."""
+ for t in types:
+ if t not in ["allow", "role_transition"]:
+ raise exception.InvalidRBACRuleType("{0} is not a valid RBAC rule type.".format(t))
+
+
+class RoleAllow(rule.PolicyRule):
+
+ """A role allow rule."""
+
+ def __str__(self):
+ return "allow {0.source} {0.target};".format(self)
+
+ @property
+ def source(self):
+ """The rule's source role."""
+ return role.role_factory(self.policy, self.qpol_symbol.source_role(self.policy))
+
+ @property
+ def target(self):
+ """The rule's target role."""
+ return role.role_factory(self.policy, self.qpol_symbol.target_role(self.policy))
+
+ @property
+ def tclass(self):
+ """The rule's object class."""
+ raise exception.RuleUseError("Role allow rules do not have an object class.")
+
+ @property
+ def default(self):
+ """The rule's default role."""
+ raise exception.RuleUseError("Role allow rules do not have a default role.")
+
+
+class RoleTransition(rule.PolicyRule):
+
+ """A role_transition rule."""
+
+ def __str__(self):
+ return "role_transition {0.source} {0.target}:{0.tclass} {0.default};".format(self)
+
+ @property
+ def source(self):
+ """The rule's source role."""
+ return role.role_factory(self.policy, self.qpol_symbol.source_role(self.policy))
+
+ @property
+ def target(self):
+ """The rule's target type/attribute."""
+ return typeattr.type_or_attr_factory(self.policy, self.qpol_symbol.target_type(self.policy))
+
+ @property
+ def default(self):
+ """The rule's default role."""
+ return role.role_factory(self.policy, self.qpol_symbol.default_role(self.policy))
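
A short sketch of how this factory is meant to be driven (hypothetical:
`policy` is the loaded policy and `raw` a low-level rule yielded by the qpol
layer, e.g. from a role allow iterator):

    rule = rbac_rule_factory(policy, raw)
    print(rule)                      # e.g. "allow staff_r sysadm_r;"
    if rule.ruletype == "role_transition":
        print(rule.default)          # only role_transition has a default role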
diff --git a/lib/python2.7/site-packages/setools/policyrep/role.py b/lib/python2.7/site-packages/setools/policyrep/role.py
new file mode 100644
index 0000000..1d9fbe1
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/policyrep/role.py
@@ -0,0 +1,81 @@
+# Copyright 2014, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+from . import exception
+from . import qpol
+from . import symbol
+from . import typeattr
+
+
+def role_factory(qpol_policy, name):
+ """Factory function for creating Role objects."""
+
+ if isinstance(name, Role):
+ assert name.policy == qpol_policy
+ return name
+ elif isinstance(name, qpol.qpol_role_t):
+ return Role(qpol_policy, name)
+
+ try:
+ return Role(qpol_policy, qpol.qpol_role_t(qpol_policy, str(name)))
+ except ValueError:
+ raise exception.InvalidRole("{0} is not a valid role".format(name))
+
+
+class BaseRole(symbol.PolicySymbol):
+
+ """Role/role attribute base class."""
+
+ def expand(self):
+ raise NotImplementedError
+
+ def types(self):
+ raise NotImplementedError
+
+
+class Role(BaseRole):
+
+ """A role."""
+
+ def expand(self):
+ """Generator that expands this into its member roles."""
+ yield self
+
+ def types(self):
+ """Generator which yields the role's set of types."""
+
+ for type_ in self.qpol_symbol.type_iter(self.policy):
+ yield typeattr.type_or_attr_factory(self.policy, type_)
+
+ def statement(self):
+ types = list(str(t) for t in self.types())
+ stmt = "role {0}".format(self)
+ if types:
+ if len(types) > 1:
+ stmt += " types {{ {0} }}".format(' '.join(types))
+ else:
+ stmt += " types {0}".format(types[0])
+ stmt += ";"
+ return stmt
+
+
+class RoleAttribute(BaseRole):
+
+ """A role attribute."""
+
+ pass
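
For example, statement() emits the brace form only when a role has more than
one associated type (hypothetical policy and role names):

    r = role_factory(policy, "webadm_r")
    print(r.statement())
    # role webadm_r types { httpd_t httpd_config_t };   (multiple types)
    # role guest_r types guest_t;                       (single type)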
diff --git a/lib/python2.7/site-packages/setools/policyrep/rule.py b/lib/python2.7/site-packages/setools/policyrep/rule.py
new file mode 100644
index 0000000..73fc812
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/policyrep/rule.py
@@ -0,0 +1,72 @@
+# Copyright 2014, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+from . import exception
+from . import symbol
+from . import objclass
+
+
+class PolicyRule(symbol.PolicySymbol):
+
+ """This is base class for policy rules."""
+
+ def __str__(self):
+ raise NotImplementedError
+
+ @property
+ def ruletype(self):
+ """The rule type for the rule."""
+ return self.qpol_symbol.rule_type(self.policy)
+
+ @property
+ def source(self):
+ """
+ The source for the rule. This should be overridden by
+ subclasses.
+ """
+ raise NotImplementedError
+
+ @property
+ def target(self):
+ """
+ The target for the rule. This should be overridden by
+ subclasses.
+ """
+ raise NotImplementedError
+
+ @property
+ def tclass(self):
+ """The object class for the rule."""
+ return objclass.class_factory(self.policy, self.qpol_symbol.object_class(self.policy))
+
+ @property
+ def default(self):
+ """
+ The default for the rule. This should be overridden by
+ subclasses.
+ """
+ raise NotImplementedError
+
+ @property
+ def conditional(self):
+ """The conditional expression for this rule."""
+ # Most rules cannot be conditional.
+ raise exception.RuleNotConditional
+
+ def statement(self):
+ return str(self)
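
PolicyRule deliberately leaves source, target, and default abstract; a
concrete rule class only has to map them onto the matching qpol accessors,
as rbacrule.py above and terule.py below do. A reduced sketch of the pattern
(hypothetical subclass):

    class ExampleRule(PolicyRule):
        def __str__(self):
            return "{0.ruletype} {0.source} {0.target};".format(self)

        @property
        def source(self):
            # wrap the low-level accessor result in a high-level object
            return typeattr.type_or_attr_factory(
                self.policy, self.qpol_symbol.source_type(self.policy))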
diff --git a/lib/python2.7/site-packages/setools/policyrep/symbol.py b/lib/python2.7/site-packages/setools/policyrep/symbol.py
new file mode 100644
index 0000000..4712d7f
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/policyrep/symbol.py
@@ -0,0 +1,74 @@
+# Copyright 2014, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+
+
+class PolicySymbol(object):
+
+ """This is a base class for all policy objects."""
+
+ def __init__(self, policy, qpol_symbol):
+ """
+ Parameters:
+ policy The low-level policy object.
+ qpol_symbol The low-level policy symbol object.
+ """
+
+ assert qpol_symbol
+
+ self.policy = policy
+ self.qpol_symbol = qpol_symbol
+
+ def __str__(self):
+ return self.qpol_symbol.name(self.policy)
+
+ def __hash__(self):
+ return hash(self.qpol_symbol.name(self.policy))
+
+ def __eq__(self, other):
+ try:
+ return self.qpol_symbol.this == other.qpol_symbol.this
+ except AttributeError:
+ return str(self) == str(other)
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __lt__(self, other):
+ """Comparison used by Python sorting functions."""
+ return str(self) < str(other)
+
+ def __repr__(self):
+ return "<{0.__class__.__name__}(<qpol_policy_t id={1}>,\"{0}\")>".format(
+ self, id(self.policy))
+
+ def __deepcopy__(self, memo):
+ # shallow copy as all of the members are immutable
+ cls = self.__class__
+ newobj = cls.__new__(cls)
+ newobj.policy = self.policy
+ newobj.qpol_symbol = self.qpol_symbol
+ memo[id(self)] = newobj
+ return newobj
+
+ def statement(self):
+ """
+ A rendering of the policy statement. This should be
+ overridden by subclasses.
+ """
+ raise NotImplementedError
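
One consequence of __eq__ worth noting: when the other operand has no
qpol_symbol (the AttributeError branch), comparison falls back to string
equality, so a symbol compares equal to its own name. This is what lets
user.py below filter roles with item != "object_r". A sketch (hypothetical
policy):

    t = type_factory(policy, "httpd_t")
    assert t == "httpd_t"                        # string fallback
    assert t == type_factory(policy, "httpd_t")  # same .this pointer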
diff --git a/lib/python2.7/site-packages/setools/policyrep/terule.py b/lib/python2.7/site-packages/setools/policyrep/terule.py
new file mode 100644
index 0000000..d8a9e94
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/policyrep/terule.py
@@ -0,0 +1,155 @@
+# Copyright 2014, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+from . import exception
+from . import qpol
+from . import rule
+from . import typeattr
+from . import boolcond
+
+
+def te_rule_factory(policy, symbol):
+ """Factory function for creating TE rule objects."""
+
+ if isinstance(symbol, qpol.qpol_avrule_t):
+ return AVRule(policy, symbol)
+ elif isinstance(symbol, (qpol.qpol_terule_t, qpol.qpol_filename_trans_t)):
+ return TERule(policy, symbol)
+ else:
+ raise TypeError("TE rules cannot be looked-up.")
+
+
+def validate_ruletype(types):
+ """Validate TE Rule types."""
+ for t in types:
+ if t not in ["allow", "auditallow", "dontaudit", "neverallow",
+ "type_transition", "type_member", "type_change"]:
+ raise exception.InvalidTERuleType("{0} is not a valid TE rule type.".format(t))
+
+
+class BaseTERule(rule.PolicyRule):
+
+ """A type enforcement rule."""
+
+ @property
+ def source(self):
+ """The rule's source type/attribute."""
+ return typeattr.type_or_attr_factory(self.policy, self.qpol_symbol.source_type(self.policy))
+
+ @property
+ def target(self):
+ """The rule's target type/attribute."""
+ return typeattr.type_or_attr_factory(self.policy, self.qpol_symbol.target_type(self.policy))
+
+ @property
+ def filename(self):
+ raise NotImplementedError
+
+ @property
+ def conditional(self):
+ """The rule's conditional expression."""
+ try:
+ return boolcond.condexpr_factory(self.policy, self.qpol_symbol.cond(self.policy))
+ except (AttributeError, ValueError):
+ # AttributeError: named file transition (filetrans) rules cannot
+ # be conditional, so the member function does not exist
+ # ValueError: the rule is not conditional
+ raise exception.RuleNotConditional
+
+
+class AVRule(BaseTERule):
+
+ """An access vector type enforcement rule."""
+
+ def __str__(self):
+ rule_string = "{0.ruletype} {0.source} {0.target}:{0.tclass} ".format(
+ self)
+
+ perms = self.perms
+
+ # allow/dontaudit/auditallow/neverallow rules
+ if len(perms) > 1:
+ rule_string += "{{ {0} }};".format(' '.join(perms))
+ else:
+ # convert to list since sets cannot be indexed
+ rule_string += "{0};".format(list(perms)[0])
+
+ try:
+ rule_string += " [ {0} ]".format(self.conditional)
+ except exception.RuleNotConditional:
+ pass
+
+ return rule_string
+
+ @property
+ def perms(self):
+ """The rule's permission set."""
+ return set(self.qpol_symbol.perm_iter(self.policy))
+
+ @property
+ def default(self):
+ """The rule's default type."""
+ raise exception.RuleUseError("{0} rules do not have a default type.".format(self.ruletype))
+
+ @property
+ def filename(self):
+ raise exception.RuleUseError("{0} rules do not have file names".format(self.ruletype))
+
+
+class TERule(BaseTERule):
+
+ """A type_* type enforcement rule."""
+
+ def __str__(self):
+ rule_string = "{0.ruletype} {0.source} {0.target}:{0.tclass} {0.default}".format(self)
+
+ try:
+ rule_string += " \"{0}\";".format(self.filename)
+ except (exception.TERuleNoFilename, exception.RuleUseError):
+ # invalid use for type_change/member
+ rule_string += ";"
+
+ try:
+ rule_string += " [ {0} ]".format(self.conditional)
+ except exception.RuleNotConditional:
+ pass
+
+ return rule_string
+
+ @property
+ def perms(self):
+ """The rule's permission set."""
+ raise exception.RuleUseError(
+ "{0} rules do not have a permission set.".format(self.ruletype))
+
+ @property
+ def default(self):
+ """The rule's default type."""
+ return typeattr.type_factory(self.policy, self.qpol_symbol.default_type(self.policy))
+
+ @property
+ def filename(self):
+ """The type_transition rule's file name."""
+ try:
+ return self.qpol_symbol.filename(self.policy)
+ except AttributeError:
+ if self.ruletype == "type_transition":
+ raise exception.TERuleNoFilename
+ else:
+ raise exception.RuleUseError("{0} rules do not have file names".
+ format(self.ruletype))
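
Hypothetical renderings that the two __str__ implementations above would
produce for a policy containing such rules:

    for raw in raw_te_rules:          # low-level rules from the qpol layer
        print(te_rule_factory(policy, raw))
    # allow httpd_t httpd_config_t:file { read getattr };
    # type_transition httpd_t tmp_t:file httpd_tmp_t "cache";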
diff --git a/lib/python2.7/site-packages/setools/policyrep/typeattr.py b/lib/python2.7/site-packages/setools/policyrep/typeattr.py
new file mode 100644
index 0000000..a52c69a
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/policyrep/typeattr.py
@@ -0,0 +1,174 @@
+# Copyright 2014, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+from . import exception
+from . import qpol
+from . import symbol
+
+
+def _symbol_lookup(qpol_policy, name):
+ """Look up the low-level qpol policy reference"""
+ if isinstance(name, qpol.qpol_type_t):
+ return name
+
+ try:
+ return qpol.qpol_type_t(qpol_policy, str(name))
+ except ValueError:
+ raise exception.InvalidType("{0} is not a valid type/attribute".format(name))
+
+
+def attribute_factory(qpol_policy, name):
+ """Factory function for creating attribute objects."""
+
+ if isinstance(name, TypeAttribute):
+ assert name.policy == qpol_policy
+ return name
+
+ qpol_symbol = _symbol_lookup(qpol_policy, name)
+
+ if not qpol_symbol.isattr(qpol_policy):
+ raise TypeError("{0} is a type".format(qpol_symbol.name(qpol_policy)))
+
+ return TypeAttribute(qpol_policy, qpol_symbol)
+
+
+def type_factory(qpol_policy, name, deref=False):
+ """Factory function for creating type objects."""
+
+ if isinstance(name, Type):
+ assert name.policy == qpol_policy
+ return name
+
+ qpol_symbol = _symbol_lookup(qpol_policy, name)
+
+ if qpol_symbol.isattr(qpol_policy):
+ raise TypeError("{0} is an attribute".format(qpol_symbol.name(qpol_policy)))
+ elif qpol_symbol.isalias(qpol_policy) and not deref:
+ raise TypeError("{0} is an alias.".format(qpol_symbol.name(qpol_policy)))
+
+ return Type(qpol_policy, qpol_symbol)
+
+
+def type_or_attr_factory(qpol_policy, name, deref=False):
+ """Factory function for creating type or attribute objects."""
+
+ if isinstance(name, (Type, TypeAttribute)):
+ assert name.policy == qpol_policy
+ return name
+
+ qpol_symbol = _symbol_lookup(qpol_policy, name)
+
+ if qpol_symbol.isalias(qpol_policy) and not deref:
+ raise TypeError("{0} is an alias.".format(qpol_symbol.name(qpol_policy)))
+
+ if qpol_symbol.isattr(qpol_policy):
+ return TypeAttribute(qpol_policy, qpol_symbol)
+ else:
+ return Type(qpol_policy, qpol_symbol)
+
+
+class BaseType(symbol.PolicySymbol):
+
+ """Type/attribute base class."""
+
+ @property
+ def ispermissive(self):
+ raise NotImplementedError
+
+ def expand(self):
+ """Generator that expands this attribute into its member types."""
+ raise NotImplementedError
+
+ def attributes(self):
+ """Generator that yields all attributes for this type."""
+ raise NotImplementedError
+
+ def aliases(self):
+ """Generator that yields all aliases for this type."""
+ raise NotImplementedError
+
+
+class Type(BaseType):
+
+ """A type."""
+
+ @property
+ def ispermissive(self):
+ """(T/F) the type is permissive."""
+ return self.qpol_symbol.ispermissive(self.policy)
+
+ def expand(self):
+ """Generator that expands this into its member types."""
+ yield self
+
+ def attributes(self):
+ """Generator that yields all attributes for this type."""
+ for attr in self.qpol_symbol.attr_iter(self.policy):
+ yield attribute_factory(self.policy, attr)
+
+ def aliases(self):
+ """Generator that yields all aliases for this type."""
+ for alias in self.qpol_symbol.alias_iter(self.policy):
+ yield alias
+
+ def statement(self):
+ attrs = list(self.attributes())
+ aliases = list(self.aliases())
+ stmt = "type {0}".format(self)
+ if aliases:
+ if len(aliases) > 1:
+ stmt += " alias {{ {0} }}".format(' '.join(aliases))
+ else:
+ stmt += " alias {0}".format(aliases[0])
+ for attr in attrs:
+ stmt += ", {0}".format(attr)
+ stmt += ";"
+ return stmt
+
+
+class TypeAttribute(BaseType):
+
+ """An attribute."""
+
+ def __contains__(self, other):
+ for type_ in self.expand():
+ if other == type_:
+ return True
+
+ return False
+
+ def expand(self):
+ """Generator that expands this attribute into its member types."""
+ for type_ in self.qpol_symbol.type_iter(self.policy):
+ yield type_factory(self.policy, type_)
+
+ def attributes(self):
+ """Generator that yields all attributes for this type."""
+ raise TypeError("{0} is an attribute, thus does not have attributes.".format(self))
+
+ def aliases(self):
+ """Generator that yields all aliases for this type."""
+ raise TypeError("{0} is an attribute, thus does not have aliases.".format(self))
+
+ @property
+ def ispermissive(self):
+ """(T/F) the type is permissive."""
+ raise TypeError("{0} is an attribute, thus cannot be permissive.".format(self))
+
+ def statement(self):
+ return "attribute {0};".format(self)
diff --git a/lib/python2.7/site-packages/setools/policyrep/user.py b/lib/python2.7/site-packages/setools/policyrep/user.py
new file mode 100644
index 0000000..94f81bc
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/policyrep/user.py
@@ -0,0 +1,86 @@
+# Copyright 2014, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+from . import exception
+from . import qpol
+from . import role
+from . import mls
+from . import symbol
+
+
+def user_factory(qpol_policy, name):
+ """Factory function for creating User objects."""
+
+ if isinstance(name, User):
+ assert name.policy == qpol_policy
+ return name
+ elif isinstance(name, qpol.qpol_user_t):
+ return User(qpol_policy, name)
+
+ try:
+ return User(qpol_policy, qpol.qpol_user_t(qpol_policy, str(name)))
+ except ValueError:
+ raise exception.InvalidUser("{0} is not a valid user".format(name))
+
+
+class User(symbol.PolicySymbol):
+
+ """A user."""
+
+ @property
+ def roles(self):
+ """The user's set of roles."""
+
+ roleset = set()
+
+ for role_ in self.qpol_symbol.role_iter(self.policy):
+ item = role.role_factory(self.policy, role_)
+
+ # object_r is implicitly added to all roles by the compiler.
+ # technically it is incorrect to skip it, but policy writers
+ # and analysts don't expect to see it in results, and it
+ # would cause confusion, especially in role set equality user queries.
+ if item != "object_r":
+ roleset.add(item)
+
+ return roleset
+
+ @property
+ def mls_level(self):
+ """The user's default MLS level."""
+ return mls.level_factory(self.policy, self.qpol_symbol.dfltlevel(self.policy))
+
+ @property
+ def mls_range(self):
+ """The user's MLS range."""
+ return mls.range_factory(self.policy, self.qpol_symbol.range(self.policy))
+
+ def statement(self):
+ roles = list(str(r) for r in self.roles)
+ stmt = "user {0} roles ".format(self)
+ if len(roles) > 1:
+ stmt += "{{ {0} }}".format(' '.join(roles))
+ else:
+ stmt += roles[0]
+
+ try:
+ stmt += " level {0.mls_level} range {0.mls_range};".format(self)
+ except exception.MLSDisabled:
+ stmt += ";"
+
+ return stmt
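+
+# Rendered statement (illustrative; the names and MLS components are
+# hypothetical):
+#
+#   user_factory(qp, "staff_u").statement()
+#   -> 'user staff_u roles { staff_r sysadm_r } level s0 range s0 - s0:c0.c1023;'
+#
+# If the policy has no MLS support, MLSDisabled is caught and the
+# statement ends immediately after the role set.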
diff --git a/lib/python2.7/site-packages/setools/portconquery.py b/lib/python2.7/site-packages/setools/portconquery.py
new file mode 100644
index 0000000..798a828
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/portconquery.py
@@ -0,0 +1,146 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import logging
+from socket import IPPROTO_TCP, IPPROTO_UDP
+
+from . import contextquery
+from .policyrep.netcontext import port_range
+
+
+class PortconQuery(contextquery.ContextQuery):
+
+ """
+ Port context query.
+
+ Parameter:
+ policy The policy to query.
+
+ Keyword Parameters/Class attributes:
+ protocol The protocol to match (socket.IPPROTO_TCP for
+ TCP or socket.IPPROTO_UDP for UDP)
+
+ ports A 2-tuple of the port range to match. (Set both to
+ the same value for a single port)
+ ports_subset If true, the criteria will match if it is a subset
+ of the portcon's range.
+ ports_overlap If true, the criteria will match if it overlaps
+ any of the portcon's range.
+ ports_superset If true, the criteria will match if it is a superset
+ of the portcon's range.
+ ports_proper If true, use proper superset/subset operations.
+ No effect if not using set operations.
+
+ user The criteria to match the context's user.
+ user_regex If true, regular expression matching
+ will be used on the user.
+
+ role The criteria to match the context's role.
+ role_regex If true, regular expression matching
+ will be used on the role.
+
+ type_ The criteria to match the context's type.
+ type_regex If true, regular expression matching
+ will be used on the type.
+
+ range_ The criteria to match the context's range.
+ range_subset If true, the criteria will match if it is a subset
+ of the context's range.
+ range_overlap If true, the criteria will match if it overlaps
+ any of the context's range.
+ range_superset If true, the criteria will match if it is a superset
+ of the context's range.
+ range_proper If true, use proper superset/subset operations.
+ No effect if not using set operations.
+ """
+
+ _protocol = None
+ _ports = None
+ ports_subset = False
+ ports_overlap = False
+ ports_superset = False
+ ports_proper = False
+
+ @property
+ def ports(self):
+ return self._ports
+
+ @ports.setter
+ def ports(self, value):
+ pending_ports = port_range(*value)
+
+ if all(pending_ports):
+ if pending_ports.low < 1 or pending_ports.high < 1:
+ raise ValueError("Port numbers must be positive: {0.low}-{0.high}".
+ format(pending_ports))
+
+ if pending_ports.low > pending_ports.high:
+ raise ValueError(
+ "The low port must be smaller than the high port: {0.low}-{0.high}".
+ format(pending_ports))
+
+ self._ports = pending_ports
+ else:
+ self._ports = None
+
+ @property
+ def protocol(self):
+ return self._protocol
+
+ @protocol.setter
+ def protocol(self, value):
+ if value:
+ if value not in (IPPROTO_TCP, IPPROTO_UDP):
+ raise ValueError(
+ "The protocol must be {0} for TCP or {1} for UDP.".
+ format(IPPROTO_TCP, IPPROTO_UDP))
+
+ self._protocol = value
+ else:
+ self._protocol = None
+
+ def results(self):
+ """Generator which yields all matching portcons."""
+ self.log.info("Generating results from {0.policy}".format(self))
+ self.log.debug("Ports: {0.ports}, overlap: {0.ports_overlap}, "
+ "subset: {0.ports_subset}, superset: {0.ports_superset}, "
+ "proper: {0.ports_proper}".format(self))
+ self.log.debug("User: {0.user!r}, regex: {0.user_regex}".format(self))
+ self.log.debug("Role: {0.role!r}, regex: {0.role_regex}".format(self))
+ self.log.debug("Type: {0.type_!r}, regex: {0.type_regex}".format(self))
+ self.log.debug("Range: {0.range_!r}, subset: {0.range_subset}, overlap: {0.range_overlap}, "
+ "superset: {0.range_superset}, proper: {0.range_proper}".format(self))
+
+ for portcon in self.policy.portcons():
+
+ if self.ports and not self._match_range(
+ portcon.ports,
+ self.ports,
+ self.ports_subset,
+ self.ports_overlap,
+ self.ports_superset,
+ self.ports_proper):
+ continue
+
+ if self.protocol and self.protocol != portcon.protocol:
+ continue
+
+ if not self._match_context(portcon.context):
+ continue
+
+ yield portcon
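+
+# Example usage (illustrative sketch; "policy.bin" is a hypothetical
+# policy file, and PortconQuery is assumed to be exported from the
+# setools package as TERuleQuery is):
+#
+#   from socket import IPPROTO_TCP
+#   from setools import SELinuxPolicy, PortconQuery
+#
+#   p = SELinuxPolicy("policy.bin")
+#   q = PortconQuery(p, protocol=IPPROTO_TCP, ports=(80, 80))
+#   for portcon in q.results():
+#       print(portcon)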
diff --git a/lib/python2.7/site-packages/setools/query.py b/lib/python2.7/site-packages/setools/query.py
new file mode 100644
index 0000000..358a095
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/query.py
@@ -0,0 +1,192 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import logging
+
+
+class PolicyQuery(object):
+
+ """Base class for SELinux policy queries."""
+
+ def __init__(self, policy, **kwargs):
+ self.log = logging.getLogger(self.__class__.__name__)
+
+ self.policy = policy
+
+ # keys are sorted in reverse order so regex settings
+ # are set before the criteria, e.g. name_regex
+ # is set before name. This ensures correct behavior
+ # since the criteria descriptors are sensitive to
+ # regex settings.
+ for name in sorted(kwargs.keys(), reverse=True):
+ attr = getattr(self, name, None) # None is not callable
+ if callable(attr):
+ raise ValueError("Keyword parameter {0} conflicts with a callable.".format(name))
+
+ setattr(self, name, kwargs[name])
+
+ @staticmethod
+ def _match_regex(obj, criteria, regex):
+ """
+ Match the object with optional regular expression.
+
+ Parameters:
+ obj The object to match.
+ criteria The criteria to match.
+ regex If regular expression matching should be used.
+ """
+
+ if regex:
+ return bool(criteria.search(str(obj)))
+ else:
+ return obj == criteria
+
+ @staticmethod
+ def _match_set(obj, criteria, equal):
+ """
+ Match the object (a set) with optional set equality.
+
+ Parameters:
+ obj The object to match. (a set)
+ criteria The criteria to match. (a set)
+ equal If set equality should be used. Otherwise
+ any set intersection will match.
+ """
+
+ if equal:
+ return obj == criteria
+ else:
+ return bool(obj.intersection(criteria))
+
+ @staticmethod
+ def _match_in_set(obj, criteria, regex):
+ """
+ Match if the criteria is in the list, with optional
+ regular expression matching.
+
+ Parameters:
+ obj The object to match.
+ criteria The criteria to match.
+ regex If regular expression matching should be used.
+ """
+
+ if regex:
+ return [m for m in obj if criteria.search(str(m))]
+ else:
+ return criteria in obj
+
+ @staticmethod
+ def _match_indirect_regex(obj, criteria, indirect, regex):
+ """
+ Match the object with optional regular expression and indirection.
+
+ Parameters:
+ obj The object to match.
+ criteria The criteria to match.
+ regex If regular expression matching should be used.
+ indirect If object indirection should be used, e.g.
+ expanding an attribute.
+ """
+
+ if indirect:
+ return PolicyQuery._match_in_set(obj.expand(), criteria, regex)
+ else:
+ return PolicyQuery._match_regex(obj, criteria, regex)
+
+ @staticmethod
+ def _match_regex_or_set(obj, criteria, equal, regex):
+ """
+ Match the object (a set) with either set comparisons
+ (equality or intersection) or by regex matching of the
+ set members. Regular expression matching will override
+ the set equality option.
+
+ Parameters:
+ obj The object to match. (a set)
+ criteria The criteria to match.
+ equal If set equality should be used. Otherwise
+ any set intersection will match. Ignored
+ if regular expression matching is used.
+ regex If regular expression matching should be used.
+ """
+
+ if regex:
+ return [m for m in obj if criteria.search(str(m))]
+ else:
+ return PolicyQuery._match_set(obj, set(criteria), equal)
+
+ @staticmethod
+ def _match_range(obj, criteria, subset, overlap, superset, proper):
+ """
+ Match ranges of objects.
+
+ obj An object with attributes named "low" and "high", representing the range.
+ criteria An object with attributes named "low" and "high", representing the criteria.
+ subset If true, the criteria will match if it is a subset of obj's range.
+ overlap If true, the criteria will match if it overlaps any of the obj's range.
+ superset If true, the criteria will match if it is a superset of the obj's range.
+ proper If true, use proper superset/subset operations.
+ No effect if not using set operations.
+ """
+
+ if overlap:
+ return ((obj.low <= criteria.low <= obj.high) or (
+ obj.low <= criteria.high <= obj.high) or (
+ criteria.low <= obj.low and obj.high <= criteria.high))
+ elif subset:
+ if proper:
+ return ((obj.low < criteria.low and criteria.high <= obj.high) or (
+ obj.low <= criteria.low and criteria.high < obj.high))
+ else:
+ return obj.low <= criteria.low and criteria.high <= obj.high
+ elif superset:
+ if proper:
+ return ((criteria.low < obj.low and obj.high <= criteria.high) or (
+ criteria.low <= obj.low and obj.high < criteria.high))
+ else:
+ return (criteria.low <= obj.low and obj.high <= criteria.high)
+ else:
+ return criteria.low == obj.low and obj.high == criteria.high
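+ # Worked example (comment only): with obj = (1000, 2000) and
+ # criteria = (1500, 1600), subset and overlap match while superset
+ # does not; with criteria == (1000, 2000), only the exact
+ # (non-proper) comparisons match.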
+
+ @staticmethod
+ def _match_level(obj, criteria, dom, domby, incomp):
+ """
+ Match an MLS level.
+
+ obj The level to match.
+ criteria The criteria to match. (a level)
+ dom If true, the criteria will match if it dominates obj.
+ domby If true, the criteria will match if it is dominated by obj.
+ incomp If true, the criteria will match if it is incomparable to obj.
+ """
+
+ if dom:
+ return (criteria >= obj)
+ elif domby:
+ return (criteria <= obj)
+ elif incomp:
+ return (criteria ^ obj)
+ else:
+ return (criteria == obj)
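+ # Note (assumption): the level objects compared here implement the
+ # dominance operators used above (>=, <=, ^ for incomparable, ==).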
+
+ def results(self):
+ """
+ Generator which returns the matches for the query. This method
+ should be overridden by subclasses.
+ """
+ raise NotImplementedError
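+
+# Minimal subclass sketch (illustrative): a query accepts keyword
+# criteria through __init__ above and implements results() as a
+# generator.
+#
+#   class TypeNameQuery(PolicyQuery):
+#       name = None
+#
+#       def results(self):
+#           for t in self.policy.types():
+#               if self.name and not self._match_regex(t, self.name, False):
+#                   continue
+#               yield t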
diff --git a/lib/python2.7/site-packages/setools/rbacrulequery.py b/lib/python2.7/site-packages/setools/rbacrulequery.py
new file mode 100644
index 0000000..240b921
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/rbacrulequery.py
@@ -0,0 +1,147 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import logging
+import re
+
+from . import mixins, query
+from .descriptors import CriteriaDescriptor, CriteriaSetDescriptor, RuletypeDescriptor
+from .policyrep.exception import InvalidType, RuleUseError
+
+
+class RBACRuleQuery(mixins.MatchObjClass, query.PolicyQuery):
+
+ """
+ Query the RBAC rules.
+
+ Parameter:
+ policy The policy to query.
+
+ Keyword Parameters/Class attributes:
+ ruletype The list of rule type(s) to match.
+ source The name of the source role/attribute to match.
+ source_indirect If true, members of an attribute will be
+ matched rather than the attribute itself.
+ source_regex If true, regular expression matching will
+ be used on the source role/attribute.
+ Obeys the source_indirect option.
+ target The name of the target role/attribute to match.
+ target_indirect If true, members of an attribute will be
+ matched rather than the attribute itself.
+ target_regex If true, regular expression matching will
+ be used on the target role/attribute.
+ Obeys target_indirect option.
+ tclass The object class(es) to match.
+ tclass_regex If true, use a regular expression for
+ matching the rule's object class.
+ default The name of the default role to match.
+ default_regex If true, regular expression matching will
+ be used on the default role.
+ """
+
+ ruletype = RuletypeDescriptor("validate_rbac_ruletype")
+ source = CriteriaDescriptor("source_regex", "lookup_role")
+ source_regex = False
+ source_indirect = True
+ _target = None
+ target_regex = False
+ target_indirect = True
+ tclass = CriteriaSetDescriptor("tclass_regex", "lookup_class")
+ tclass_regex = False
+ default = CriteriaDescriptor("default_regex", "lookup_role")
+ default_regex = False
+
+ @property
+ def target(self):
+ return self._target
+
+ @target.setter
+ def target(self, value):
+ if not value:
+ self._target = None
+ elif self.target_regex:
+ self._target = re.compile(value)
+ else:
+ try:
+ self._target = self.policy.lookup_type_or_attr(value)
+ except InvalidType:
+ self._target = self.policy.lookup_role(value)
+
+ def results(self):
+ """Generator which yields all matching RBAC rules."""
+ self.log.info("Generating results from {0.policy}".format(self))
+ self.log.debug("Ruletypes: {0.ruletype}".format(self))
+ self.log.debug("Source: {0.source!r}, indirect: {0.source_indirect}, "
+ "regex: {0.source_regex}".format(self))
+ self.log.debug("Target: {0.target!r}, indirect: {0.target_indirect}, "
+ "regex: {0.target_regex}".format(self))
+ self.log.debug("Class: {0.tclass!r}, regex: {0.tclass_regex}".format(self))
+ self.log.debug("Default: {0.default!r}, regex: {0.default_regex}".format(self))
+
+ for rule in self.policy.rbacrules():
+ #
+ # Matching on rule type
+ #
+ if self.ruletype:
+ if rule.ruletype not in self.ruletype:
+ continue
+
+ #
+ # Matching on source role
+ #
+ if self.source and not self._match_indirect_regex(
+ rule.source,
+ self.source,
+ self.source_indirect,
+ self.source_regex):
+ continue
+
+ #
+ # Matching on target type (role_transition)/role(allow)
+ #
+ if self.target and not self._match_indirect_regex(
+ rule.target,
+ self.target,
+ self.target_indirect,
+ self.target_regex):
+ continue
+
+ #
+ # Matching on object class
+ #
+ try:
+ if not self._match_object_class(rule):
+ continue
+ except RuleUseError:
+ continue
+
+ #
+ # Matching on default role
+ #
+ if self.default:
+ try:
+ if not self._match_regex(
+ rule.default,
+ self.default,
+ self.default_regex):
+ continue
+ except RuleUseError:
+ continue
+
+ # if we get here, we have matched all available criteria
+ yield rule
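+
+# Example usage (illustrative sketch; assumes RBACRuleQuery is exported
+# from the setools package as TERuleQuery is, and "p" is an open
+# SELinuxPolicy):
+#
+#   q = RBACRuleQuery(p, ruletype=["allow"], source="staff_r")
+#   for rule in q.results():
+#       print(rule)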
diff --git a/lib/python2.7/site-packages/setools/rolequery.py b/lib/python2.7/site-packages/setools/rolequery.py
new file mode 100644
index 0000000..e95dfa6
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/rolequery.py
@@ -0,0 +1,77 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import logging
+import re
+
+from . import compquery
+from .descriptors import CriteriaSetDescriptor
+
+
+class RoleQuery(compquery.ComponentQuery):
+
+ """
+ Query SELinux policy roles.
+
+ Parameter:
+ policy The policy to query.
+
+ Keyword Parameters/Class attributes:
+ name The role name to match.
+ name_regex If true, regular expression matching
+ will be used on the role names.
+ types The type to match.
+ types_equal If true, only roles with type sets
+ that are equal to the criteria will
+ match. Otherwise, any intersection
+ will match.
+ types_regex If true, regular expression matching
+ will be used on the type names instead
+ of set logic.
+ """
+
+ types = CriteriaSetDescriptor("types_regex", "lookup_type")
+ types_equal = False
+ types_regex = False
+
+ def results(self):
+ """Generator which yields all matching roles."""
+ self.log.info("Generating results from {0.policy}".format(self))
+ self.log.debug("Name: {0.name!r}, regex: {0.name_regex}".format(self))
+ self.log.debug("Types: {0.types!r}, regex: {0.types_regex}, "
+ "eq: {0.types_equal}".format(self))
+
+ for r in self.policy.roles():
+ if r == "object_r":
+ # all types are implicitly added to object_r by the compiler.
+ # technically it is incorrect to skip it, but policy writers
+ # and analysts don't expect to see it in results, and it
+ # would cause confusion, especially in set equality type queries.
+ continue
+
+ if not self._match_name(r):
+ continue
+
+ if self.types and not self._match_regex_or_set(
+ set(r.types()),
+ self.types,
+ self.types_equal,
+ self.types_regex):
+ continue
+
+ yield r
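+
+# Example usage (illustrative sketch; "staff_t" is a hypothetical type):
+#
+#   q = RoleQuery(p, types=["staff_t"])
+#   for role_ in q.results():
+#       print(role_)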
diff --git a/lib/python2.7/site-packages/setools/sensitivityquery.py b/lib/python2.7/site-packages/setools/sensitivityquery.py
new file mode 100644
index 0000000..a102836
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/sensitivityquery.py
@@ -0,0 +1,74 @@
+# Copyright 2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import logging
+
+from . import compquery
+from . import mixins
+from .descriptors import CriteriaDescriptor
+
+
+class SensitivityQuery(mixins.MatchAlias, compquery.ComponentQuery):
+
+ """
+ Query MLS sensitivities.
+
+ Parameter:
+ policy The policy to query.
+
+ Keyword Parameters/Class attributes:
+ name The name of the sensitivity to match.
+ name_regex If true, regular expression matching will
+ be used for matching the name.
+ alias The alias name to match.
+ alias_regex If true, regular expression matching
+ will be used on the alias names.
+ sens The criteria to match the sensitivity by dominance.
+ sens_dom If true, the criteria will match if it dominates
+ the sensitivity.
+ sens_domby If true, the criteria will match if it is dominated
+ by the sensitivity.
+ """
+
+ sens = CriteriaDescriptor(lookup_function="lookup_sensitivity")
+ sens_dom = False
+ sens_domby = False
+
+ def results(self):
+ """Generator which yields all matching sensitivities."""
+ self.log.info("Generating results from {0.policy}".format(self))
+ self.log.debug("Name: {0.name!r}, regex: {0.name_regex}".format(self))
+ self.log.debug("Alias: {0.alias}, regex: {0.alias_regex}".format(self))
+ self.log.debug("Sens: {0.sens!r}, dom: {0.sens_dom}, domby: {0.sens_domby}".format(self))
+
+ for s in self.policy.sensitivities():
+ if not self._match_name(s):
+ continue
+
+ if not self._match_alias(s):
+ continue
+
+ if self.sens and not self._match_level(
+ s,
+ self.sens,
+ self.sens_dom,
+ self.sens_domby,
+ False):
+ continue
+
+ yield s
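+
+# Example usage (illustrative sketch; "s1" is a hypothetical
+# sensitivity and "p" an open SELinuxPolicy):
+#
+#   q = SensitivityQuery(p, sens="s1", sens_dom=True)
+#   for s in q.results():
+#       print(s)   # sensitivities that s1 dominates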
diff --git a/lib/python2.7/site-packages/setools/terulequery.py b/lib/python2.7/site-packages/setools/terulequery.py
new file mode 100644
index 0000000..7f3eccf
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/terulequery.py
@@ -0,0 +1,178 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import logging
+import re
+
+from . import mixins, query
+from .descriptors import CriteriaDescriptor, CriteriaSetDescriptor, RuletypeDescriptor
+from .policyrep.exception import RuleUseError, RuleNotConditional
+
+
+class TERuleQuery(mixins.MatchObjClass, mixins.MatchPermission, query.PolicyQuery):
+
+ """
+ Query the Type Enforcement rules.
+
+ Parameter:
+ policy The policy to query.
+
+ Keyword Parameters/Class attributes:
+ ruletype The list of rule type(s) to match.
+ source The name of the source type/attribute to match.
+ source_indirect If true, members of an attribute will be
+ matched rather than the attribute itself.
+ Default is true.
+ source_regex If true, regular expression matching will
+ be used on the source type/attribute.
+ Obeys the source_indirect option.
+ Default is false.
+ target The name of the target type/attribute to match.
+ target_indirect If true, members of an attribute will be
+ matched rather than the attribute itself.
+ Default is true.
+ target_regex If true, regular expression matching will
+ be used on the target type/attribute.
+ Obeys target_indirect option.
+ Default is false.
+ tclass The object class(es) to match.
+ tclass_regex If true, use a regular expression for
+ matching the rule's object class.
+ Default is false.
+ perms The set of permission(s) to match.
+ perms_equal If true, the permission set of the rule
+ must exactly match the permissions
+ criteria. If false, any set intersection
+ will match.
+ Default is false.
+ perms_regex If true, regular expression matching will be used
+ on the permission names instead of set logic.
+ default The name of the default type to match.
+ default_regex If true, regular expression matching will be
+ used on the default type.
+ Default is false.
+ boolean The set of boolean(s) to match.
+ boolean_regex If true, regular expression matching will be
+ used on the booleans.
+ Default is false.
+ boolean_equal If true, the booleans in the conditional
+ expression of the rule must exactly match the
+ criteria. If false, any set intersection
+ will match. Default is false.
+ """
+
+ ruletype = RuletypeDescriptor("validate_te_ruletype")
+ source = CriteriaDescriptor("source_regex", "lookup_type_or_attr")
+ source_regex = False
+ source_indirect = True
+ target = CriteriaDescriptor("target_regex", "lookup_type_or_attr")
+ target_regex = False
+ target_indirect = True
+ default = CriteriaDescriptor("default_regex", "lookup_type")
+ default_regex = False
+ boolean = CriteriaSetDescriptor("boolean_regex", "lookup_boolean")
+ boolean_regex = False
+ boolean_equal = False
+
+ def results(self):
+ """Generator which yields all matching TE rules."""
+ self.log.info("Generating results from {0.policy}".format(self))
+ self.log.debug("Ruletypes: {0.ruletype}".format(self))
+ self.log.debug("Source: {0.source!r}, indirect: {0.source_indirect}, "
+ "regex: {0.source_regex}".format(self))
+ self.log.debug("Target: {0.target!r}, indirect: {0.target_indirect}, "
+ "regex: {0.target_regex}".format(self))
+ self.log.debug("Class: {0.tclass!r}, regex: {0.tclass_regex}".format(self))
+ self.log.debug("Perms: {0.perms!r}, regex: {0.perms_regex}, eq: {0.perms_equal}".
+ format(self))
+ self.log.debug("Default: {0.default!r}, regex: {0.default_regex}".format(self))
+ self.log.debug("Boolean: {0.boolean!r}, eq: {0.boolean_equal}, "
+ "regex: {0.boolean_regex}".format(self))
+
+ for rule in self.policy.terules():
+ #
+ # Matching on rule type
+ #
+ if self.ruletype:
+ if rule.ruletype not in self.ruletype:
+ continue
+
+ #
+ # Matching on source type
+ #
+ if self.source and not self._match_indirect_regex(
+ rule.source,
+ self.source,
+ self.source_indirect,
+ self.source_regex):
+ continue
+
+ #
+ # Matching on target type
+ #
+ if self.target and not self._match_indirect_regex(
+ rule.target,
+ self.target,
+ self.target_indirect,
+ self.target_regex):
+ continue
+
+ #
+ # Matching on object class
+ #
+ if not self._match_object_class(rule):
+ continue
+
+ #
+ # Matching on permission set
+ #
+ try:
+ if not self._match_perms(rule):
+ continue
+ except RuleUseError:
+ continue
+
+ #
+ # Matching on default type
+ #
+ if self.default:
+ try:
+ if not self._match_regex(
+ rule.default,
+ self.default,
+ self.default_regex):
+ continue
+ except RuleUseError:
+ continue
+
+ #
+ # Match on Boolean in conditional expression
+ #
+ if self.boolean:
+ try:
+ if not self._match_regex_or_set(
+ rule.conditional.booleans,
+ self.boolean,
+ self.boolean_equal,
+ self.boolean_regex):
+ continue
+ except RuleNotConditional:
+ continue
+
+ # if we get here, we have matched all available criteria
+ yield rule
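+
+# Example usage (illustrative sketch; the type and class names are
+# hypothetical):
+#
+#   from setools import SELinuxPolicy, TERuleQuery
+#
+#   p = SELinuxPolicy("policy.bin")
+#   q = TERuleQuery(p, ruletype=["allow"], source="init_t",
+#                   tclass=["file"], perms=["read", "write"])
+#   for rule in q.results():
+#       print(rule)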
diff --git a/lib/python2.7/site-packages/setools/typeattrquery.py b/lib/python2.7/site-packages/setools/typeattrquery.py
new file mode 100644
index 0000000..a91026c
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/typeattrquery.py
@@ -0,0 +1,70 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import logging
+import re
+
+from . import compquery
+from .descriptors import CriteriaSetDescriptor
+
+
+class TypeAttributeQuery(compquery.ComponentQuery):
+
+ """
+ Query SELinux policy type attributes.
+
+ Parameter:
+ policy The policy to query.
+
+ Keyword Parameters/Class attributes:
+ name The type name to match.
+ name_regex If true, regular expression matching
+ will be used on the type names.
+ types The type to match.
+ types_equal If true, only attributes with type sets
+ that are equal to the criteria will
+ match. Otherwise, any intersection
+ will match.
+ types_regex If true, regular expression matching
+ will be used on the type names instead
+ of set logic.
+ """
+
+ types = CriteriaSetDescriptor("types_regex", "lookup_type")
+ types_equal = False
+ types_regex = False
+
+ def results(self):
+ """Generator which yields all matching types."""
+ self.log.info("Generating results from {0.policy}".format(self))
+ self.log.debug("Name: {0.name!r}, regex: {0.name_regex}".format(self))
+ self.log.debug("Types: {0.types!r}, regex: {0.types_regex}, "
+ "eq: {0.types_equal}".format(self))
+
+ for attr in self.policy.typeattributes():
+ if not self._match_name(attr):
+ continue
+
+ if self.types and not self._match_regex_or_set(
+ set(attr.expand()),
+ self.types,
+ self.types_equal,
+ self.types_regex):
+ continue
+
+ yield attr
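+
+# Example usage (illustrative sketch; "domain" is a hypothetical
+# attribute name):
+#
+#   q = TypeAttributeQuery(p, name="domain")
+#   for attr in q.results():
+#       print(attr, "->", ", ".join(str(t) for t in attr.expand()))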
diff --git a/lib/python2.7/site-packages/setools/typequery.py b/lib/python2.7/site-packages/setools/typequery.py
new file mode 100644
index 0000000..6634f76
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/typequery.py
@@ -0,0 +1,96 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import logging
+import re
+
+from . import compquery
+from . import mixins
+from .descriptors import CriteriaSetDescriptor
+
+
+class TypeQuery(mixins.MatchAlias, compquery.ComponentQuery):
+
+ """
+ Query SELinux policy types.
+
+ Parameter:
+ policy The policy to query.
+
+ Keyword Parameters/Class attributes:
+ name The type name to match.
+ name_regex If true, regular expression matching
+ will be used on the type names.
+ alias The alias name to match.
+ alias_regex If true, regular expression matching
+ will be used on the alias names.
+ attrs The attribute to match.
+ attrs_equal If true, only types with attribute sets
+ that are equal to the criteria will
+ match. Otherwise, any intersection
+ will match.
+ attrs_regex If true, regular expression matching
+ will be used on the attribute names instead
+ of set logic.
+ permissive The permissive state to match. If this
+ is None, the state is not matched.
+ """
+
+ attrs = CriteriaSetDescriptor("attrs_regex", "lookup_typeattr")
+ attrs_regex = False
+ attrs_equal = False
+ _permissive = None
+
+ @property
+ def permissive(self):
+ return self._permissive
+
+ @permissive.setter
+ def permissive(self, value):
+ if value is None:
+ self._permissive = None
+ else:
+ self._permissive = bool(value)
+
+ def results(self):
+ """Generator which yields all matching types."""
+ self.log.info("Generating results from {0.policy}".format(self))
+ self.log.debug("Name: {0.name!r}, regex: {0.name_regex}".format(self))
+ self.log.debug("Alias: {0.alias}, regex: {0.alias_regex}".format(self))
+ self.log.debug("Attrs: {0.attrs!r}, regex: {0.attrs_regex}, "
+ "eq: {0.attrs_equal}".format(self))
+ self.log.debug("Permissive: {0.permissive}".format(self))
+
+ for t in self.policy.types():
+ if not self._match_name(t):
+ continue
+
+ if not self._match_alias(t):
+ continue
+
+ if self.attrs and not self._match_regex_or_set(
+ set(t.attributes()),
+ self.attrs,
+ self.attrs_equal,
+ self.attrs_regex):
+ continue
+
+ if self.permissive is not None and t.ispermissive != self.permissive:
+ continue
+
+ yield t
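+
+# Example usage (illustrative sketch):
+#
+#   q = TypeQuery(p, permissive=True)
+#   for t in q.results():
+#       print(t)   # only types marked permissive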
diff --git a/lib/python2.7/site-packages/setools/userquery.py b/lib/python2.7/site-packages/setools/userquery.py
new file mode 100644
index 0000000..00910cf
--- /dev/null
+++ b/lib/python2.7/site-packages/setools/userquery.py
@@ -0,0 +1,116 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import logging
+import re
+
+from . import compquery
+from .descriptors import CriteriaDescriptor, CriteriaSetDescriptor
+
+
+class UserQuery(compquery.ComponentQuery):
+
+ """
+ Query SELinux policy users.
+
+ Parameter:
+ policy The policy to query.
+
+ Keyword Parameters/Class attributes:
+ name The user name to match.
+ name_regex If true, regular expression matching
+ will be used on the user names.
+ roles The role to match.
+ roles_equal If true, only users with role sets
+ that are equal to the criteria will
+ match. Otherwise, any intersection
+ will match.
+ roles_regex If true, regular expression matching
+ will be used on the role names instead
+ of set logic.
+ level The criteria to match the user's default level.
+ level_dom If true, the criteria will match if it dominates
+ the user's default level.
+ level_domby If true, the criteria will match if it is dominated
+ by the user's default level.
+ level_incomp If true, the criteria will match if it is incomparable
+ to the user's default level.
+ range_ The criteria to match the user's range.
+ range_subset If true, the criteria will match if it is a subset
+ of the user's range.
+ range_overlap If true, the criteria will match if it overlaps
+ any of the user's range.
+ range_superset If true, the criteria will match if it is a superset
+ of the user's range.
+ range_proper If true, use proper superset/subset operations.
+ No effect if not using set operations.
+ """
+
+ level = CriteriaDescriptor(lookup_function="lookup_level")
+ level_dom = False
+ level_domby = False
+ level_incomp = False
+ range_ = CriteriaDescriptor(lookup_function="lookup_range")
+ range_overlap = False
+ range_subset = False
+ range_superset = False
+ range_proper = False
+ roles = CriteriaSetDescriptor("roles_regex", "lookup_role")
+ roles_equal = False
+ roles_regex = False
+
+ def results(self):
+ """Generator which yields all matching users."""
+ self.log.info("Generating results from {0.policy}".format(self))
+ self.log.debug("Name: {0.name!r}, regex: {0.name_regex}".format(self))
+ self.log.debug("Roles: {0.roles!r}, regex: {0.roles_regex}, "
+ "eq: {0.roles_equal}".format(self))
+ self.log.debug("Level: {0.level!r}, dom: {0.level_dom}, domby: {0.level_domby}, "
+ "incomp: {0.level_incomp}".format(self))
+ self.log.debug("Range: {0.range_!r}, subset: {0.range_subset}, overlap: {0.range_overlap}, "
+ "superset: {0.range_superset}, proper: {0.range_proper}".format(self))
+
+ for user in self.policy.users():
+ if not self._match_name(user):
+ continue
+
+ if self.roles and not self._match_regex_or_set(
+ user.roles,
+ self.roles,
+ self.roles_equal,
+ self.roles_regex):
+ continue
+
+ if self.level and not self._match_level(
+ user.mls_level,
+ self.level,
+ self.level_dom,
+ self.level_domby,
+ self.level_incomp):
+ continue
+
+ if self.range_ and not self._match_range(
+ user.mls_range,
+ self.range_,
+ self.range_subset,
+ self.range_overlap,
+ self.range_superset,
+ self.range_proper):
+ continue
+
+ yield user
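+
+# Example usage (illustrative sketch; "staff_r" is a hypothetical role):
+#
+#   q = UserQuery(p, roles=["staff_r"])
+#   for user in q.results():
+#       print(user.statement())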
diff --git a/lib/python2.7/site-packages/setoolsgui/__init__.py b/lib/python2.7/site-packages/setoolsgui/__init__.py
new file mode 100644
index 0000000..ea702ec
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/__init__.py
@@ -0,0 +1,21 @@
+# Copyright 2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+
+from .apol import ApolMainWindow
+from . import widget
diff --git a/lib/python2.7/site-packages/setoolsgui/apol/__init__.py b/lib/python2.7/site-packages/setoolsgui/apol/__init__.py
new file mode 100644
index 0000000..22c8f40
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/apol/__init__.py
@@ -0,0 +1,24 @@
+# Copyright 2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+
+from .mainwindow import ApolMainWindow, ChooseAnalysis
+
+from .models import PermListModel, SEToolsListModel
+from .rulemodels import TERuleListModel
+from .terulequery import TERuleQueryTab
diff --git a/lib/python2.7/site-packages/setoolsgui/apol/mainwindow.py b/lib/python2.7/site-packages/setoolsgui/apol/mainwindow.py
new file mode 100644
index 0000000..53b9f87
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/apol/mainwindow.py
@@ -0,0 +1,261 @@
+# Copyright 2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+
+import logging
+
+from PyQt5.QtCore import Qt
+from PyQt5.QtWidgets import QAction, QDialog, QFileDialog, QLineEdit, QMainWindow, QMenu, \
+ QMessageBox, QTreeWidgetItem, QVBoxLayout, QWidget
+from setools import PermissionMap, SELinuxPolicy
+
+from ..widget import SEToolsWidget
+from .terulequery import TERuleQueryTab
+
+
+class ApolMainWindow(SEToolsWidget, QMainWindow):
+
+ def __init__(self, filename):
+ super(ApolMainWindow, self).__init__()
+ self.log = logging.getLogger(self.__class__.__name__)
+
+ if filename:
+ self._policy = SELinuxPolicy(filename)
+ else:
+ self._policy = None
+
+ try:
+ # try to load default permission map
+ self._permmap = PermissionMap()
+ except (IOError, OSError) as ex:
+ self.log.info("Failed to load default permission map: {0}".format(ex))
+ self._permmap = None
+
+ self.setupUi()
+
+ def setupUi(self):
+ self.load_ui("apol.ui")
+
+ self.tab_counter = 0
+
+ self.update_window_title()
+
+ # set up error message dialog
+ self.error_msg = QMessageBox(self)
+ self.error_msg.setStandardButtons(QMessageBox.Ok)
+
+ # set up tab name editor
+ self.tab_editor = QLineEdit(self.AnalysisTabs)
+ self.tab_editor.setWindowFlags(Qt.Popup)
+
+ # configure tab bar context menu
+ tabBar = self.AnalysisTabs.tabBar()
+ self.rename_tab_action = QAction("&Rename active tab", tabBar)
+ self.close_tab_action = QAction("&Close active tab", tabBar)
+ tabBar.addAction(self.rename_tab_action)
+ tabBar.addAction(self.close_tab_action)
+ tabBar.setContextMenuPolicy(Qt.ActionsContextMenu)
+
+ # connect signals
+ self.open_policy.triggered.connect(self.select_policy)
+ self.open_permmap.triggered.connect(self.select_permmap)
+ self.new_analysis.triggered.connect(self.choose_analysis)
+ self.AnalysisTabs.tabCloseRequested.connect(self.close_tab)
+ self.AnalysisTabs.tabBarDoubleClicked.connect(self.tab_name_editor)
+ self.tab_editor.editingFinished.connect(self.rename_tab)
+ self.rename_tab_action.triggered.connect(self.rename_active_tab)
+ self.close_tab_action.triggered.connect(self.close_active_tab)
+
+ self.show()
+
+ def update_window_title(self):
+ if self._policy:
+ self.setWindowTitle("{0} - apol".format(self._policy))
+ else:
+ self.setWindowTitle("apol")
+
+ def select_policy(self):
+ filename = QFileDialog.getOpenFileName(self, "Open policy file", ".")[0]
+ if filename:
+ try:
+ self._policy = SELinuxPolicy(filename)
+ except Exception as ex:
+ self.error_msg.critical(self, "Policy loading error", str(ex))
+ else:
+ self.update_window_title()
+
+ if self._permmap:
+ self._permmap.map_policy(self._policy)
+
+ def select_permmap(self):
+ filename = QFileDialog.getOpenFileName(self, "Open permission map file", ".")[0]
+ if filename:
+ try:
+ self._permmap = PermissionMap(filename)
+ except Exception as ex:
+ self.error_msg.critical(self, "Permission map loading error", str(ex))
+ else:
+
+ if self._policy:
+ self._permmap.map_policy(self._policy)
+
+ def choose_analysis(self):
+ if not self._policy:
+ self.error_msg.critical(self, "No open policy",
+ "Cannot start a new analysis. Please open a policy first.")
+
+ self.select_policy()
+
+ if self._policy:
+ # this check of self._policy is here in case someone
+ # tries to start an analysis with no policy open, but then
+ # cancels out of the policy file chooser or there is an
+ # error opening the policy file.
+ chooser = ChooseAnalysis(self)
+ chooser.show()
+
+ def create_new_analysis(self, tabtitle, tabclass):
+ self.tab_counter += 1
+ counted_name = "{0}: {1}".format(self.tab_counter, tabtitle)
+
+ newtab = QWidget()
+ newtab.setObjectName(counted_name)
+
+ newanalysis = tabclass(newtab, self._policy)
+
+ # create a vertical layout in the tab, place the analysis ui inside.
+ tabLayout = QVBoxLayout()
+ tabLayout.setContentsMargins(0, 0, 0, 0)
+ tabLayout.addWidget(newanalysis)
+ newtab.setLayout(tabLayout)
+
+ index = self.AnalysisTabs.addTab(newtab, counted_name)
+ self.AnalysisTabs.setTabToolTip(index, tabtitle)
+
+ def tab_name_editor(self, index):
+ if index >= 0:
+ tab_area = self.AnalysisTabs.tabBar().tabRect(index)
+ self.tab_editor.move(self.AnalysisTabs.mapToGlobal(tab_area.topLeft()))
+ self.tab_editor.setText(self.AnalysisTabs.tabText(index))
+ self.tab_editor.selectAll()
+ self.tab_editor.show()
+ self.tab_editor.setFocus()
+
+ def close_active_tab(self):
+ index = self.AnalysisTabs.currentIndex()
+ if index >= 0:
+ self.close_tab(index)
+
+ def rename_active_tab(self):
+ index = self.AnalysisTabs.currentIndex()
+ if index >= 0:
+ self.tab_name_editor(index)
+
+ def close_tab(self, index):
+ widget = self.AnalysisTabs.widget(index)
+ widget.close()
+ widget.deleteLater()
+ self.AnalysisTabs.removeTab(index)
+
+ def rename_tab(self):
+ # this should never be negative since the editor is modal
+ index = self.AnalysisTabs.currentIndex()
+
+ self.tab_editor.hide()
+ self.AnalysisTabs.setTabText(index, self.tab_editor.text())
+
+
+class ChooseAnalysis(SEToolsWidget, QDialog):
+
+ """
+ Dialog for choosing a new analysis
+
+ The below class attributes are used for populating
+ the GUI contents and mapping them to the appropriate
+ tab widget class for the analysis.
+
+ The item_mapping attribute will be populated to
+ map the tree list items to the analysis tab widgets.
+ """
+ _components_map = {"Attributes (Type)": TERuleQueryTab,
+ "Booleans": TERuleQueryTab,
+ "Categories": TERuleQueryTab,
+ "Common Permission Sets": TERuleQueryTab,
+ "Object Classes": TERuleQueryTab,
+ "Policy Capabilities": TERuleQueryTab,
+ "Roles": TERuleQueryTab,
+ "Types": TERuleQueryTab,
+ "Users": TERuleQueryTab}
+
+ _rule_map = {"TE Rules": TERuleQueryTab,
+ "RBAC Rules": TERuleQueryTab,
+ "MLS Rules": TERuleQueryTab,
+ "Constraints": TERuleQueryTab}
+
+ _analysis_map = {"Domain Transition Analysis": TERuleQueryTab,
+ "Information Flow Analysis": TERuleQueryTab}
+
+ _labeling_map = {"fs_use Statements": TERuleQueryTab,
+ "Genfscon Statements": TERuleQueryTab,
+ "Initial SID Statements": TERuleQueryTab,
+ "Netifcon Statements": TERuleQueryTab,
+ "Nodecon Statements": TERuleQueryTab,
+ "Portcon Statements": TERuleQueryTab}
+
+ _analysis_choices = {"Components": _components_map,
+ "Rules": _rule_map,
+ "Analysis": _analysis_map,
+ "Labeling Statements": _labeling_map}
+
+ def __init__(self, parent):
+ super(ChooseAnalysis, self).__init__(parent)
+ self.item_mapping = {}
+ self.parent = parent
+ self.setupUi()
+
+ def setupUi(self):
+ self.load_ui("choose_analysis.ui")
+ self.buttonBox.accepted.connect(self.ok_clicked)
+ self.analysisTypes.doubleClicked.connect(self.ok_clicked)
+
+ # populate the item list:
+ self.analysisTypes.clear()
+ for groupname, group in self._analysis_choices.items():
+ groupitem = QTreeWidgetItem(self.analysisTypes)
+ groupitem.setText(0, groupname)
+ groupitem._tab_class = None
+ for entryname, cls in group.items():
+ item = QTreeWidgetItem(groupitem)
+ item.setText(0, entryname)
+ item._tab_class = cls
+ groupitem.addChild(item)
+
+ self.analysisTypes.expandAll()
+
+ def ok_clicked(self):
+ try:
+ # .ui is set for single item selection.
+ item = self.analysisTypes.selectedItems()[0]
+ title = item.text(0)
+ self.parent.create_new_analysis(title, item._tab_class)
+ except (IndexError, TypeError):
+ # IndexError: nothing is selected
+ # TypeError: one of the group items was selected.
+ pass
+ else:
+ self.accept()
diff --git a/lib/python2.7/site-packages/setoolsgui/apol/models.py b/lib/python2.7/site-packages/setoolsgui/apol/models.py
new file mode 100644
index 0000000..2744ad6
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/apol/models.py
@@ -0,0 +1,103 @@
+# Copyright 2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+
+from PyQt5 import QtCore
+from PyQt5.QtCore import QAbstractListModel, QModelIndex, QStringListModel, Qt
+from setools.policyrep.exception import NoCommon
+
+
+class SEToolsListModel(QAbstractListModel):
+
+ """
+ List model whose items return their string representation
+ for Qt.DisplayRole and the item object itself for Qt.UserRole.
+ """
+
+ def __init__(self, parent):
+ super(SEToolsListModel, self).__init__(parent)
+ self._item_list = None
+
+ @property
+ def item_list(self):
+ return self._item_list
+
+ @item_list.setter
+ def item_list(self, item_list):
+ self.beginResetModel()
+ self._item_list = item_list
+ self.endResetModel()
+
+ def rowCount(self, parent=QModelIndex()):
+ if self.item_list:
+ return len(self.item_list)
+ else:
+ return 0
+
+ def columnCount(self, parent=QModelIndex()):
+ return 1
+
+ def data(self, index, role):
+ if self.item_list:
+ row = index.row()
+
+ if role == Qt.DisplayRole:
+ return str(self.item_list[row])
+ elif role == Qt.UserRole:
+ return self.item_list[row]
+
+
+class PermListModel(SEToolsListModel):
+
+ """
+ A model that will return the intersection of permissions
+ for the selected classes. If no classes are
+ set, all permissions in the policy will be returned.
+ """
+
+ def __init__(self, parent, policy):
+ super(PermListModel, self).__init__(parent)
+ self.policy = policy
+ self.set_classes()
+
+ def set_classes(self, classes=[]):
+ permlist = set()
+
+ # start with all permissions.
+ for cls in self.policy.classes():
+ permlist.update(cls.perms)
+
+ try:
+ permlist.update(cls.common.perms)
+ except NoCommon:
+ pass
+
+ # create intersection
+ for cls in classes:
+ cls_perms = cls.perms
+
+ try:
+ cls_perms.update(cls.common.perms)
+ except NoCommon:
+ pass
+
+ permlist.intersection_update(cls_perms)
+
+ self.item_list = sorted(permlist)
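+
+# Illustrative behavior: with set_classes([file_class, dir_class]) the
+# model lists only the permissions common to both classes (including
+# any inherited common permissions); with the default empty list it
+# lists every permission defined in the policy.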
diff --git a/lib/python2.7/site-packages/setoolsgui/apol/rulemodels.py b/lib/python2.7/site-packages/setoolsgui/apol/rulemodels.py
new file mode 100644
index 0000000..4367cfb
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/apol/rulemodels.py
@@ -0,0 +1,116 @@
+# Copyright 2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+
+from PyQt5.QtCore import Qt, QAbstractTableModel, QModelIndex
+from setools.policyrep.exception import RuleNotConditional, RuleUseError
+
+
+class RuleResultModel(QAbstractTableModel):
+ def __init__(self, parent):
+ super(RuleResultModel, self).__init__(parent)
+ self.resultlist = None
+
+ def rowCount(self, parent=QModelIndex()):
+ if self.resultlist:
+ return len(self.resultlist)
+ else:
+ return 0
+
+ def columnCount(self, parent=QModelIndex()):
+ return 5
+
+ def headerData(self, section, orientation, role):
+ raise NotImplementedError
+
+ def data(self, index, role):
+ if role == Qt.DisplayRole:
+ if not self.resultlist:
+ return None
+
+ row = index.row()
+ col = index.column()
+
+ if col == 0:
+ return self.resultlist[row].ruletype
+ elif col == 1:
+ return str(self.resultlist[row].source)
+ elif col == 2:
+ return str(self.resultlist[row].target)
+ elif col == 3:
+ try:
+ return str(self.resultlist[row].tclass)
+ except RuleUseError:
+ # role allow
+ return None
+ elif col == 4:
+ # most common: permissions
+ try:
+ return ", ".join(sorted(self.resultlist[row].perms))
+ except RuleUseError:
+ pass
+
+ # next most common: default
+ # TODO: figure out filename trans
+ try:
+ return str(self.resultlist[row].default)
+ except RuleUseError:
+ pass
+
+ # least common: nothing (role allow)
+ return None
+ elif col == 5:
+ try:
+ return str(self.resultlist[row].conditional)
+ except RuleNotConditional:
+ return None
+ else:
+ raise ValueError("Invalid column number")
+ elif role == Qt.UserRole:
+ # get the whole rule statement for the user role.
+ # index.row() is used directly because row is only
+ # assigned in the DisplayRole branch above.
+ return self.resultlist[index.row()].statement()
+
+ def set_rules(self, result_list):
+ self.beginResetModel()
+ self.resultlist = result_list
+ self.endResetModel()
+
+
+class TERuleListModel(RuleResultModel):
+
+ """Type Enforcement rule model. Represents rules as a column."""
+
+ def columnCount(self, parent=QModelIndex()):
+ return 6
+
+ def headerData(self, section, orientation, role):
+ if role == Qt.DisplayRole and orientation == Qt.Horizontal:
+ if section == 0:
+ return "Rule Type"
+ elif section == 1:
+ return "Source"
+ elif section == 2:
+ return "Target"
+ elif section == 3:
+ return "Object Class"
+ elif section == 4:
+ return "Permissons/Default Type"
+ elif section == 5:
+ return "Conditional Expression"
+ else:
+ raise ValueError("Invalid column number")
diff --git a/lib/python2.7/site-packages/setoolsgui/apol/terulequery.py b/lib/python2.7/site-packages/setoolsgui/apol/terulequery.py
new file mode 100644
index 0000000..75148fc
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/apol/terulequery.py
@@ -0,0 +1,271 @@
+# Copyright 2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+
+import logging
+
+from PyQt5.QtCore import Qt, QSortFilterProxyModel, QStringListModel
+from PyQt5.QtGui import QPalette, QTextCursor
+from PyQt5.QtWidgets import QCompleter, QHeaderView, QScrollArea
+from setools import TERuleQuery
+
+from ..widget import SEToolsWidget
+from .rulemodels import TERuleListModel
+from .models import PermListModel, SEToolsListModel
+
+
+class TERuleQueryTab(SEToolsWidget, QScrollArea):
+ def __init__(self, parent, policy):
+ super(TERuleQueryTab, self).__init__(parent)
+ self.log = logging.getLogger(self.__class__.__name__)
+ self.policy = policy
+ self.query = TERuleQuery(policy)
+ self.setupUi()
+
+ def setupUi(self):
+ self.load_ui("terulequery.ui")
+
+ # set up source/target autocompletion
+ typeattr_completion_list = [str(t) for t in self.policy.types()]
+ typeattr_completion_list.extend(str(a) for a in self.policy.typeattributes())
+ typeattr_completer_model = QStringListModel(self)
+ typeattr_completer_model.setStringList(sorted(typeattr_completion_list))
+ self.typeattr_completion = QCompleter()
+ self.typeattr_completion.setModel(typeattr_completer_model)
+ self.source.setCompleter(self.typeattr_completion)
+ self.target.setCompleter(self.typeattr_completion)
+
+ # set up default autocompletion
+ type_completion_list = [str(t) for t in self.policy.types()]
+ type_completer_model = QStringListModel(self)
+ type_completer_model.setStringList(sorted(type_completion_list))
+ self.type_completion = QCompleter()
+ self.type_completion.setModel(type_completer_model)
+ self.default_type.setCompleter(self.type_completion)
+
+ # setup indications of errors on source/target/default
+ self.orig_palette = self.source.palette()
+ self.error_palette = self.source.palette()
+ self.error_palette.setColor(QPalette.Base, Qt.red)
+ self.clear_source_error()
+ self.clear_target_error()
+ self.clear_default_error()
+
+ # populate class list
+ self.class_model = SEToolsListModel(self)
+ self.class_model.item_list = sorted(self.policy.classes())
+ self.tclass.setModel(self.class_model)
+
+ # populate perm list
+ self.perms_model = PermListModel(self, self.policy)
+ self.perms.setModel(self.perms_model)
+
+ # populate bool list
+ self.bool_model = SEToolsListModel(self)
+ self.bool_model.item_list = sorted(self.policy.bools())
+ self.bool_criteria.setModel(self.bool_model)
+
+ # set up results
+ self.table_results_model = TERuleListModel(self)
+ self.sort_proxy = QSortFilterProxyModel(self)
+ self.sort_proxy.setSourceModel(self.table_results_model)
+ self.table_results.setModel(self.sort_proxy)
+
+ # Ensure settings are consistent with the initial .ui state
+ self.set_source_regex(self.source_regex.isChecked())
+ self.set_target_regex(self.target_regex.isChecked())
+ self.set_default_regex(self.default_regex.isChecked())
+ self.criteria_frame.setHidden(not self.criteria_expander.isChecked())
+ self.results_frame.setHidden(not self.results_expander.isChecked())
+ self.notes.setHidden(not self.notes_expander.isChecked())
+
+ # connect signals
+ self.buttonBox.clicked.connect(self.run)
+ self.clear_ruletypes.clicked.connect(self.clear_all_ruletypes)
+ self.all_ruletypes.clicked.connect(self.set_all_ruletypes)
+ self.source.textEdited.connect(self.clear_source_error)
+ self.source.editingFinished.connect(self.set_source)
+ self.source_regex.toggled.connect(self.set_source_regex)
+ self.target.textEdited.connect(self.clear_target_error)
+ self.target.editingFinished.connect(self.set_target)
+ self.target_regex.toggled.connect(self.set_target_regex)
+ self.tclass.selectionModel().selectionChanged.connect(self.set_tclass)
+ self.perms.selectionModel().selectionChanged.connect(self.set_perms)
+ self.default_type.textEdited.connect(self.clear_default_error)
+ self.default_type.editingFinished.connect(self.set_default_type)
+ self.default_regex.toggled.connect(self.set_default_regex)
+ self.bool_criteria.selectionModel().selectionChanged.connect(self.set_bools)
+
+ #
+ # Ruletype criteria
+ #
+
+ def _set_ruletypes(self, value):
+ self.allow.setChecked(value)
+ self.auditallow.setChecked(value)
+ self.neverallow.setChecked(value)
+ self.dontaudit.setChecked(value)
+ self.type_transition.setChecked(value)
+ self.type_member.setChecked(value)
+ self.type_change.setChecked(value)
+
+ def set_all_ruletypes(self):
+ self._set_ruletypes(True)
+
+ def clear_all_ruletypes(self):
+ self._set_ruletypes(False)
+
+ #
+ # Source criteria
+ #
+
+ def clear_source_error(self):
+ self.source.setToolTip("Match the source type/attribute of the rule.")
+ self.source.setPalette(self.orig_palette)
+
+ def set_source(self):
+ try:
+ self.query.source = self.source.text()
+ except Exception as ex:
+ self.source.setToolTip("Error: " + str(ex))
+ self.source.setPalette(self.error_palette)
+
+ def set_source_regex(self, state):
+ self.log.debug("Setting source_regex {0}".format(state))
+ self.query.source_regex = state
+ self.clear_source_error()
+ self.set_source()
+
+ #
+ # Target criteria
+ #
+
+ def clear_target_error(self):
+ self.target.setToolTip("Match the target type/attribute of the rule.")
+ self.target.setPalette(self.orig_palette)
+
+ def set_target(self):
+ try:
+ self.query.target = self.target.text()
+ except Exception as ex:
+ self.target.setToolTip("Error: " + str(ex))
+ self.target.setPalette(self.error_palette)
+
+ def set_target_regex(self, state):
+ self.log.debug("Setting target_regex {0}".format(state))
+ self.query.target_regex = state
+ self.clear_target_error()
+ self.set_target()
+
+ #
+ # Class criteria
+ #
+
+ def set_tclass(self):
+ selected_classes = []
+ for index in self.tclass.selectionModel().selectedIndexes():
+ selected_classes.append(self.class_model.data(index, Qt.UserRole))
+
+ self.query.tclass = selected_classes
+ self.perms_model.set_classes(selected_classes)
+
+ #
+ # Permissions criteria
+ #
+
+ def set_perms(self):
+ selected_perms = []
+ for index in self.perms.selectionModel().selectedIndexes():
+ selected_perms.append(self.perms_model.data(index, Qt.UserRole))
+
+ self.query.perms = selected_perms
+
+ #
+ # Default criteria
+ #
+
+ def clear_default_error(self):
+ self.default_type.setToolTip("Match the default type of the rule.")
+ self.default_type.setPalette(self.orig_palette)
+
+ def set_default_type(self):
+ self.query.default_regex = self.default_regex.isChecked()
+
+ try:
+ self.query.default = self.default_type.text()
+ except Exception as ex:
+ self.default_type.setToolTip("Error: " + str(ex))
+ self.default_type.setPalette(self.error_palette)
+
+ def set_default_regex(self, state):
+ self.log.debug("Setting default_regex {0}".format(state))
+ self.query.default_regex = state
+ self.clear_default_error()
+ self.set_default_type()
+
+ #
+ # Boolean criteria
+ #
+
+ def set_bools(self):
+ selected_bools = []
+ for index in self.bool_criteria.selectionModel().selectedIndexes():
+ selected_bools.append(self.bool_model.data(index, Qt.UserRole))
+
+ self.query.boolean = selected_bools
+
+ #
+ # Results runner
+ #
+
+ def run(self, button):
+ # right now there is only one button.
+ rule_types = []
+
+ if self.allow.isChecked():
+ rule_types.append("allow")
+ if self.auditallow.isChecked():
+ rule_types.append("auditallow")
+ if self.neverallow.isChecked():
+ rule_types.append("neverallow")
+ if self.dontaudit.isChecked():
+ rule_types.append("dontaudit")
+ if self.type_transition.isChecked():
+ rule_types.append("type_transition")
+ if self.type_member.isChecked():
+ rule_types.append("type_member")
+ if self.type_change.isChecked():
+ rule_types.append("type_change")
+
+ self.query.ruletype = rule_types
+ self.query.source_indirect = self.source_indirect.isChecked()
+ self.query.target_indirect = self.target_indirect.isChecked()
+ self.query.perms_equal = self.perms_equal.isChecked()
+ self.query.boolean_equal = self.bools_equal.isChecked()
+
+ # update results table
+ results = list(self.query.results())
+ self.table_results_model.set_rules(results)
+ self.table_results.resizeColumnsToContents()
+
+ # update raw results
+ self.raw_results.clear()
+ for line in results:
+ self.raw_results.appendPlainText(str(line))
+
+ self.raw_results.moveCursor(QTextCursor.Start)
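+
+ # Equivalent non-GUI usage sketch (not part of the original file), mirroring
+ # what run() does with the criteria gathered from this tab; "init_t" is a
+ # hypothetical type name:
+ #
+ #     q = TERuleQuery(policy)
+ #     q.ruletype = ["allow"]
+ #     q.source = "init_t"
+ #     q.source_indirect = True
+ #     for rule in q.results():
+ #         print(rule.statement())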
diff --git a/lib/python2.7/site-packages/setoolsgui/libselinux.so.1 b/lib/python2.7/site-packages/setoolsgui/libselinux.so.1
new file mode 100755
index 0000000..dc9280d
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/libselinux.so.1
Binary files differ
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/__init__.py b/lib/python2.7/site-packages/setoolsgui/networkx/__init__.py
new file mode 100644
index 0000000..7090794
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/__init__.py
@@ -0,0 +1,85 @@
+"""
+NetworkX
+========
+
+ NetworkX (NX) is a Python package for the creation, manipulation, and
+ study of the structure, dynamics, and functions of complex networks.
+
+ https://networkx.lanl.gov/
+
+Using
+-----
+
+ Just write in Python
+
+ >>> import networkx as nx
+ >>> G=nx.Graph()
+ >>> G.add_edge(1,2)
+ >>> G.add_node(42)
+ >>> print(sorted(G.nodes()))
+ [1, 2, 42]
+ >>> print(sorted(G.edges()))
+ [(1, 2)]
+"""
+# Copyright (C) 2004-2010 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+#
+# Add platform dependent shared library path to sys.path
+#
+
+from __future__ import absolute_import
+
+import sys
+if sys.version_info[:2] < (2, 6):
+ m = "Python version 2.6 or later is required for NetworkX (%d.%d detected)."
+ raise ImportError(m % sys.version_info[:2])
+del sys
+
+# Release data
+from networkx import release
+
+__author__ = '%s <%s>\n%s <%s>\n%s <%s>' % \
+ ( release.authors['Hagberg'] + release.authors['Schult'] + \
+ release.authors['Swart'] )
+__license__ = release.license
+
+__date__ = release.date
+__version__ = release.version
+
+# These imports are order-dependent
+from networkx.exception import *
+import networkx.external
+import networkx.utils
+# these packages work with Python >= 2.6
+
+import networkx.classes
+from networkx.classes import *
+
+
+import networkx.convert
+from networkx.convert import *
+
+import networkx.relabel
+from networkx.relabel import *
+
+import networkx.generators
+from networkx.generators import *
+
+import networkx.readwrite
+from networkx.readwrite import *
+
+#Need to test with SciPy, when available
+import networkx.algorithms
+from networkx.algorithms import *
+import networkx.linalg
+
+from networkx.linalg import *
+from networkx.tests.test import run as test
+
+import networkx.drawing
+from networkx.drawing import *
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/__init__.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/__init__.py
new file mode 100644
index 0000000..6230da3
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/__init__.py
@@ -0,0 +1,51 @@
+from networkx.algorithms.assortativity import *
+from networkx.algorithms.block import *
+from networkx.algorithms.boundary import *
+from networkx.algorithms.centrality import *
+from networkx.algorithms.cluster import *
+from networkx.algorithms.clique import *
+from networkx.algorithms.community import *
+from networkx.algorithms.components import *
+from networkx.algorithms.connectivity import *
+from networkx.algorithms.core import *
+from networkx.algorithms.cycles import *
+from networkx.algorithms.dag import *
+from networkx.algorithms.distance_measures import *
+from networkx.algorithms.flow import *
+from networkx.algorithms.hierarchy import *
+from networkx.algorithms.matching import *
+from networkx.algorithms.mis import *
+from networkx.algorithms.mst import *
+from networkx.algorithms.link_analysis import *
+from networkx.algorithms.operators import *
+from networkx.algorithms.shortest_paths import *
+from networkx.algorithms.smetric import *
+from networkx.algorithms.traversal import *
+from networkx.algorithms.isolate import *
+from networkx.algorithms.euler import *
+from networkx.algorithms.vitality import *
+from networkx.algorithms.chordal import *
+from networkx.algorithms.richclub import *
+from networkx.algorithms.distance_regular import *
+from networkx.algorithms.swap import *
+from networkx.algorithms.graphical import *
+from networkx.algorithms.simple_paths import *
+
+import networkx.algorithms.assortativity
+import networkx.algorithms.bipartite
+import networkx.algorithms.centrality
+import networkx.algorithms.cluster
+import networkx.algorithms.clique
+import networkx.algorithms.components
+import networkx.algorithms.connectivity
+import networkx.algorithms.flow
+import networkx.algorithms.isomorphism
+import networkx.algorithms.link_analysis
+import networkx.algorithms.shortest_paths
+import networkx.algorithms.traversal
+import networkx.algorithms.chordal
+import networkx.algorithms.operators
+
+from networkx.algorithms.bipartite import projected_graph,project,is_bipartite
+from networkx.algorithms.isomorphism import is_isomorphic,could_be_isomorphic,\
+ fast_could_be_isomorphic,faster_could_be_isomorphic
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/__init__.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/__init__.py
new file mode 100644
index 0000000..eb797c2
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/__init__.py
@@ -0,0 +1,6 @@
+from networkx.algorithms.approximation.clique import *
+from networkx.algorithms.approximation.dominating_set import *
+from networkx.algorithms.approximation.independent_set import *
+from networkx.algorithms.approximation.matching import *
+from networkx.algorithms.approximation.ramsey import *
+from networkx.algorithms.approximation.vertex_cover import *
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/clique.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/clique.py
new file mode 100644
index 0000000..be363f6
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/clique.py
@@ -0,0 +1,97 @@
+# -*- coding: utf-8 -*-
+"""
+Cliques.
+"""
+# Copyright (C) 2011-2012 by
+# Nicholas Mancuso <nick.mancuso@gmail.com>
+# All rights reserved.
+# BSD license.
+import networkx as nx
+from networkx.algorithms.approximation import ramsey
+__author__ = """Nicholas Mancuso (nick.mancuso@gmail.com)"""
+__all__ = ["clique_removal","max_clique"]
+
+def max_clique(G):
+ r"""Find the Maximum Clique
+
+ Finds the `O(|V|/(log|V|)^2)` apx of maximum clique/independent set
+ in the worst case.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ Undirected graph
+
+ Returns
+ -------
+ clique : set
+ The apx-maximum clique of the graph
+
+ Notes
+ ------
+ A clique in an undirected graph G = (V, E) is a subset of the vertex set
+ `C \subseteq V`, such that for every two vertices in C, there exists an edge
+ connecting the two. This is equivalent to saying that the subgraph
+ induced by C is complete (in some cases, the term clique may also refer
+ to the subgraph).
+
+ A maximum clique is a clique of the largest possible size in a given graph.
+ The clique number `\omega(G)` of a graph G is the number of
+ vertices in a maximum clique in G. The intersection number of
+ G is the smallest number of cliques that together cover all edges of G.
+
+ http://en.wikipedia.org/wiki/Maximum_clique
+
+ References
+ ----------
+ .. [1] Boppana, R., & Halldórsson, M. M. (1992).
+ Approximating maximum independent sets by excluding subgraphs.
+ BIT Numerical Mathematics, 32(2), 180–196. Springer.
+ doi:10.1007/BF01994876
+ """
+ if G is None:
+ raise ValueError("Expected NetworkX graph!")
+
+ # finding the maximum clique in a graph is equivalent to finding
+ # the independent set in the complementary graph
+ cgraph = nx.complement(G)
+ iset, _ = clique_removal(cgraph)
+ return iset
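+
+ # Illustrative sketch (not part of the original file): on a complete graph
+ # the approximation recovers every vertex, since the complement graph's
+ # independent set is the whole vertex set.
+ #
+ #     >>> import networkx as nx
+ #     >>> len(max_clique(nx.complete_graph(5)))
+ #     5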
+
+def clique_removal(G):
+ """ Repeatedly remove cliques from the graph.
+
+ Results in a `O(|V|/(\log |V|)^2)` approximation of maximum clique
+ & independent set. Returns the largest independent set found, along
+ with found maximal cliques.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ Undirected graph
+
+ Returns
+ -------
+ max_ind_cliques : (set, list) tuple
+ Maximal independent set and list of maximal cliques (sets) in the graph.
+
+ References
+ ----------
+ .. [1] Boppana, R., & Halldórsson, M. M. (1992).
+ Approximating maximum independent sets by excluding subgraphs.
+ BIT Numerical Mathematics, 32(2), 180–196. Springer.
+ """
+ graph = G.copy()
+ c_i, i_i = ramsey.ramsey_R2(graph)
+ cliques = [c_i]
+ isets = [i_i]
+ while graph:
+ graph.remove_nodes_from(c_i)
+ c_i, i_i = ramsey.ramsey_R2(graph)
+ if c_i:
+ cliques.append(c_i)
+ if i_i:
+ isets.append(i_i)
+
+ maxiset = max(isets)
+ return maxiset, cliques
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/dominating_set.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/dominating_set.py
new file mode 100644
index 0000000..6a167e2
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/dominating_set.py
@@ -0,0 +1,114 @@
+# -*- coding: utf-8 -*-
+"""
+**************************************
+Minimum Vertex and Edge Dominating Set
+**************************************
+
+
+A dominating set for a graph G = (V, E) is a subset D of V such that every
+vertex not in D is joined to at least one member of D by some edge. The
+domination number gamma(G) is the number of vertices in a smallest dominating
+set for G. Given a graph G = (V, E) find a minimum weight dominating set V'.
+
+http://en.wikipedia.org/wiki/Dominating_set
+
+An edge dominating set for a graph G = (V, E) is a subset D of E such that
+every edge not in D is adjacent to at least one edge in D.
+
+http://en.wikipedia.org/wiki/Edge_dominating_set
+"""
+# Copyright (C) 2011-2012 by
+# Nicholas Mancuso <nick.mancuso@gmail.com>
+# All rights reserved.
+# BSD license.
+import networkx as nx
+__all__ = ["min_weighted_dominating_set",
+ "min_edge_dominating_set"]
+__author__ = """Nicholas Mancuso (nick.mancuso@gmail.com)"""
+
+
+def min_weighted_dominating_set(G, weight=None):
+ r"""Return minimum weight vertex dominating set.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ Undirected graph
+
+ weight : None or string, optional (default = None)
+ If None, every node has weight 1. If a string, use this
+ node attribute as the node weight. Any node attribute not present
+ defaults to 1.
+
+ Returns
+ -------
+ min_weight_dominating_set : set
+ Returns a set of vertices whose weight sum is no more than log w(V) * OPT
+
+ Notes
+ -----
+ This algorithm computes an approximate minimum weighted dominating set
+ for the graph G. The upper-bound on the size of the solution is
+ log w(V) * OPT. Runtime of the algorithm is `O(|E|)`.
+
+ References
+ ----------
+ .. [1] Vazirani, Vijay Approximation Algorithms (2001)
+ """
+ if not G:
+ raise ValueError("Expected non-empty NetworkX graph!")
+
+ # min cover = min dominating set
+ dom_set = set([])
+ cost_func = dict((node, nd.get(weight, 1)) \
+ for node, nd in G.nodes_iter(data=True))
+
+ vertices = set(G)
+ sets = dict((node, set([node]) | set(G[node])) for node in G)
+
+ def _cost(subset):
+ """ Our cost effectiveness function for sets given its weight
+ """
+ cost = sum(cost_func[node] for node in subset)
+ return cost / float(len(subset - dom_set))
+
+ while vertices:
+ # find the most cost-effective set, and the vertex for that set;
+ # the greedy choice compares sets by cost effectiveness alone
+ dom_node, min_set = min(sets.items(),
+ key=lambda x: _cost(x[1]))
+ alpha = _cost(min_set)
+
+ # reduce the cost for the rest
+ for node in min_set - dom_set:
+ cost_func[node] = alpha
+
+ # add the node to the dominating set and reduce what we must cover
+ dom_set.add(dom_node)
+ del sets[dom_node]
+ vertices = vertices - min_set
+
+ return dom_set
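+
+ # Illustrative check (not part of the original file): every vertex outside
+ # the returned set must have at least one neighbor inside it.
+ #
+ #     >>> import networkx as nx
+ #     >>> G = nx.star_graph(4)
+ #     >>> dom = min_weighted_dominating_set(G)
+ #     >>> all(v in dom or set(G[v]) & dom for v in G)
+ #     True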
+
+
+def min_edge_dominating_set(G):
+ r"""Return minimum cardinality edge dominating set.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ Undirected graph
+
+ Returns
+ -------
+ min_edge_dominating_set : set
+ Returns a set of dominating edges whose size is no more than 2 * OPT.
+
+ Notes
+ -----
+ The algorithm computes an approximate solution to the edge dominating set
+ problem. The result is no more than 2 * OPT in terms of size of the set.
+ Runtime of the algorithm is `O(|E|)`.
+ """
+ if not G:
+ raise ValueError("Expected non-empty NetworkX graph!")
+ return nx.maximal_matching(G)
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/independent_set.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/independent_set.py
new file mode 100644
index 0000000..3b18ade
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/independent_set.py
@@ -0,0 +1,63 @@
+# -*- coding: utf-8 -*-
+"""
+Independent Set
+
+Independent set or stable set is a set of vertices in a graph, no two of
+which are adjacent. That is, it is a set I of vertices such that for every
+two vertices in I, there is no edge connecting the two. Equivalently, each
+edge in the graph has at most one endpoint in I. The size of an independent
+set is the number of vertices it contains.
+
+A maximum independent set is a largest independent set for a given graph G
+and its size is denoted α(G). The problem of finding such a set is called
+the maximum independent set problem and is an NP-hard optimization problem.
+As such, it is unlikely that there exists an efficient algorithm for finding
+a maximum independent set of a graph.
+
+http://en.wikipedia.org/wiki/Independent_set_(graph_theory)
+
+Independent set algorithm is based on the following paper:
+
+`O(|V|/(log|V|)^2)` apx of maximum clique/independent set.
+
+Boppana, R., & Halldórsson, M. M. (1992).
+Approximating maximum independent sets by excluding subgraphs.
+BIT Numerical Mathematics, 32(2), 180–196. Springer.
+doi:10.1007/BF01994876
+
+"""
+# Copyright (C) 2011-2012 by
+# Nicholas Mancuso <nick.mancuso@gmail.com>
+# All rights reserved.
+# BSD license.
+from networkx.algorithms.approximation import clique_removal
+__all__ = ["maximum_independent_set"]
+__author__ = """Nicholas Mancuso (nick.mancuso@gmail.com)"""
+
+
+def maximum_independent_set(G):
+ """Return an approximate maximum independent set.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ Undirected graph
+
+ Returns
+ -------
+ iset : Set
+ The apx-maximum independent set
+
+ Notes
+ -----
+ Finds the `O(|V|/(log|V|)^2)` apx of independent set in the worst case.
+
+
+ References
+ ----------
+ .. [1] Boppana, R., & Halldórsson, M. M. (1992).
+ Approximating maximum independent sets by excluding subgraphs.
+ BIT Numerical Mathematics, 32(2), 180–196. Springer.
+ """
+ iset, _ = clique_removal(G)
+ return iset
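+
+ # Illustrative sketch (not part of the original file): in a complete graph
+ # no two vertices are non-adjacent, so the independent set has size 1.
+ #
+ #     >>> import networkx as nx
+ #     >>> len(maximum_independent_set(nx.complete_graph(4)))
+ #     1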
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/matching.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/matching.py
new file mode 100644
index 0000000..231d501
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/matching.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+"""
+**************
+Graph Matching
+**************
+
+Given a graph G = (V,E), a matching M in G is a set of pairwise non-adjacent
+edges; that is, no two edges share a common vertex.
+
+http://en.wikipedia.org/wiki/Matching_(graph_theory)
+"""
+# Copyright (C) 2011-2012 by
+# Nicholas Mancuso <nick.mancuso@gmail.com>
+# All rights reserved.
+# BSD license.
+import networkx as nx
+__all__ = ["min_maximal_matching"]
+__author__ = """Nicholas Mancuso (nick.mancuso@gmail.com)"""
+
+def min_maximal_matching(G):
+ r"""Returns the minimum maximal matching of G. That is, out of all maximal
+ matchings of the graph G, the smallest is returned.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ Undirected graph
+
+ Returns
+ -------
+ min_maximal_matching : set
+ Returns a set of edges such that no two edges share a common endpoint
+ and every edge not in the set shares some common endpoint in the set.
+ Cardinality will be 2*OPT in the worst case.
+
+ Notes
+ -----
+ The algorithm computes an approximate solution to the minimum maximal
+ cardinality matching problem. The solution is no more than 2 * OPT in size.
+ Runtime is `O(|E|)`.
+
+ References
+ ----------
+ .. [1] Vazirani, Vijay Approximation Algorithms (2001)
+ """
+ return nx.maximal_matching(G)
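+
+ # Worked sketch (not part of the original file): on the path 0-1-2-3,
+ # nx.maximal_matching greedily takes (0, 1), skips (1, 2) because node 1
+ # is already matched, then takes (2, 3). Two edges is within 2 * OPT,
+ # since the single edge (1, 2) is already a maximal matching.
+ #
+ #     >>> import networkx as nx
+ #     >>> sorted(min_maximal_matching(nx.path_graph(4)))
+ #     [(0, 1), (2, 3)]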
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/ramsey.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/ramsey.py
new file mode 100644
index 0000000..03535ce
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/ramsey.py
@@ -0,0 +1,37 @@
+# -*- coding: utf-8 -*-
+"""
+Ramsey numbers.
+"""
+# Copyright (C) 2011 by
+# Nicholas Mancuso <nick.mancuso@gmail.com>
+# All rights reserved.
+# BSD license.
+import networkx as nx
+__all__ = ["ramsey_R2"]
+__author__ = """Nicholas Mancuso (nick.mancuso@gmail.com)"""
+
+def ramsey_R2(G):
+ r"""Approximately computes the Ramsey number `R(2;s,t)` for graph.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ Undirected graph
+
+ Returns
+ -------
+ max_pair : (set, set) tuple
+ Maximum clique, Maximum independent set.
+ """
+ if not G:
+ return (set([]), set([]))
+
+ node = next(G.nodes_iter())
+ nbrs = nx.all_neighbors(G, node)
+ nnbrs = nx.non_neighbors(G, node)
+ c_1, i_1 = ramsey_R2(G.subgraph(nbrs))
+ c_2, i_2 = ramsey_R2(G.subgraph(nnbrs))
+
+ c_1.add(node)
+ i_2.add(node)
+ return (max([c_1, c_2]), max([i_1, i_2]))
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/tests/test_clique.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/tests/test_clique.py
new file mode 100644
index 0000000..0f384a5
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/tests/test_clique.py
@@ -0,0 +1,41 @@
+from nose.tools import *
+import networkx as nx
+import networkx.algorithms.approximation as apxa
+
+def test_clique_removal():
+ graph = nx.complete_graph(10)
+ i, cs = apxa.clique_removal(graph)
+ idens = nx.density(graph.subgraph(i))
+ eq_(idens, 0.0, "i-set not found by clique_removal!")
+ for clique in cs:
+ cdens = nx.density(graph.subgraph(clique))
+ eq_(cdens, 1.0, "clique not found by clique_removal!")
+
+ graph = nx.trivial_graph(nx.Graph())
+ i, cs = apxa.clique_removal(graph)
+ idens = nx.density(graph.subgraph(i))
+ eq_(idens, 0.0, "i-set not found by ramsey!")
+ # we should only have 1-cliques. Just singleton nodes.
+ for clique in cs:
+ cdens = nx.density(graph.subgraph(clique))
+ eq_(cdens, 0.0, "clique not found by clique_removal!")
+
+ graph = nx.barbell_graph(10, 5, nx.Graph())
+ i, cs = apxa.clique_removal(graph)
+ idens = nx.density(graph.subgraph(i))
+ eq_(idens, 0.0, "i-set not found by ramsey!")
+ for clique in cs:
+ cdens = nx.density(graph.subgraph(clique))
+ eq_(cdens, 1.0, "clique not found by clique_removal!")
+
+def test_max_clique_smoke():
+ # smoke test
+ G = nx.Graph()
+ assert_equal(len(apxa.max_clique(G)),0)
+
+def test_max_clique():
+ # create a complete graph
+ graph = nx.complete_graph(30)
+ # this should return the entire graph
+ mc = apxa.max_clique(graph)
+ assert_equals(30, len(mc))
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/tests/test_dominating_set.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/tests/test_dominating_set.py
new file mode 100644
index 0000000..0dbc79f
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/tests/test_dominating_set.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx as nx
+import networkx.algorithms.approximation as apxa
+
+
+class TestMinWeightDominatingSet:
+
+ def test_min_weighted_dominating_set(self):
+ graph = nx.Graph()
+ graph.add_edge(1, 2)
+ graph.add_edge(1, 5)
+ graph.add_edge(2, 3)
+ graph.add_edge(2, 5)
+ graph.add_edge(3, 4)
+ graph.add_edge(3, 6)
+ graph.add_edge(5, 6)
+
+ vertices = set([1, 2, 3, 4, 5, 6])
+ # due to ties, this might be hard to test tight bounds
+ dom_set = apxa.min_weighted_dominating_set(graph)
+ for vertex in vertices - dom_set:
+ neighbors = set(graph.neighbors(vertex))
+ ok_(len(neighbors & dom_set) > 0, "Non dominating set found!")
+
+ def test_min_edge_dominating_set(self):
+ graph = nx.path_graph(5)
+ dom_set = apxa.min_edge_dominating_set(graph)
+
+ # this is a crappy way to test, but good enough for now.
+ for edge in graph.edges_iter():
+ if edge in dom_set:
+ continue
+ else:
+ u, v = edge
+ found = False
+ for dom_edge in dom_set:
+ found |= u == dom_edge[0] or u == dom_edge[1]
+ ok_(found, "Non adjacent edge found!")
+
+ graph = nx.complete_graph(10)
+ dom_set = apxa.min_edge_dominating_set(graph)
+
+ # this is a crappy way to test, but good enough for now.
+ for edge in graph.edges_iter():
+ if edge in dom_set:
+ continue
+ else:
+ u, v = edge
+ found = False
+ for dom_edge in dom_set:
+ found |= u == dom_edge[0] or u == dom_edge[1]
+ ok_(found, "Non adjacent edge found!")
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/tests/test_independent_set.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/tests/test_independent_set.py
new file mode 100644
index 0000000..8825ec8
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/tests/test_independent_set.py
@@ -0,0 +1,8 @@
+from nose.tools import *
+import networkx as nx
+import networkx.algorithms.approximation as a
+
+def test_independent_set():
+ # smoke test
+ G = nx.Graph()
+ assert_equal(len(a.maximum_independent_set(G)),0)
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/tests/test_matching.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/tests/test_matching.py
new file mode 100644
index 0000000..b768c39
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/tests/test_matching.py
@@ -0,0 +1,8 @@
+from nose.tools import *
+import networkx as nx
+import networkx.algorithms.approximation as a
+
+def test_min_maximal_matching():
+ # smoke test
+ G = nx.Graph()
+ assert_equal(len(a.min_maximal_matching(G)),0)
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/tests/test_ramsey.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/tests/test_ramsey.py
new file mode 100644
index 0000000..7ab8dac
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/tests/test_ramsey.py
@@ -0,0 +1,27 @@
+from nose.tools import *
+import networkx as nx
+import networkx.algorithms.approximation as apxa
+
+def test_ramsey():
+ # this should only find the complete graph
+ graph = nx.complete_graph(10)
+ c, i = apxa.ramsey_R2(graph)
+ cdens = nx.density(graph.subgraph(c))
+ eq_(cdens, 1.0, "clique not found by ramsey!")
+ idens = nx.density(graph.subgraph(i))
+ eq_(idens, 0.0, "i-set not found by ramsey!")
+
+ # this trivial graph has no cliques; it should just find i-sets
+ graph = nx.trivial_graph(nx.Graph())
+ c, i = apxa.ramsey_R2(graph)
+ cdens = nx.density(graph.subgraph(c))
+ eq_(cdens, 0.0, "clique not found by ramsey!")
+ idens = nx.density(graph.subgraph(i))
+ eq_(idens, 0.0, "i-set not found by ramsey!")
+
+ graph = nx.barbell_graph(10, 5, nx.Graph())
+ c, i = apxa.ramsey_R2(graph)
+ cdens = nx.density(graph.subgraph(c))
+ eq_(cdens, 1.0, "clique not found by ramsey!")
+ idens = nx.density(graph.subgraph(i))
+ eq_(idens, 0.0, "i-set not found by ramsey!")
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/tests/test_vertex_cover.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/tests/test_vertex_cover.py
new file mode 100644
index 0000000..74b3f51
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/tests/test_vertex_cover.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx as nx
+from networkx.algorithms import approximation as a
+
+class TestMWVC:
+
+ def test_min_vertex_cover(self):
+ # create a simple star graph
+ size = 50
+ sg = nx.star_graph(size)
+ cover = a.min_weighted_vertex_cover(sg)
+ assert_equals(2, len(cover))
+ for u, v in sg.edges_iter():
+ ok_((u in cover or v in cover), "Edge not covered!")
+
+ wg = nx.Graph()
+ wg.add_node(0, weight=10)
+ wg.add_node(1, weight=1)
+ wg.add_node(2, weight=1)
+ wg.add_node(3, weight=1)
+ wg.add_node(4, weight=1)
+
+ wg.add_edge(0, 1)
+ wg.add_edge(0, 2)
+ wg.add_edge(0, 3)
+ wg.add_edge(0, 4)
+
+ wg.add_edge(1,2)
+ wg.add_edge(2,3)
+ wg.add_edge(3,4)
+ wg.add_edge(4,1)
+
+ cover = a.min_weighted_vertex_cover(wg, weight="weight")
+ csum = sum(wg.node[node]["weight"] for node in cover)
+ assert_equals(4, csum)
+
+ for u, v in wg.edges_iter():
+ ok_((u in cover or v in cover), "Edge not covered!")
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/vertex_cover.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/vertex_cover.py
new file mode 100644
index 0000000..c588e18
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/approximation/vertex_cover.py
@@ -0,0 +1,65 @@
+# -*- coding: utf-8 -*-
+"""
+************
+Vertex Cover
+************
+
+Given an undirected graph `G = (V, E)` and a function w assigning nonnegative
+weights to its vertices, find a minimum weight subset of V such that each edge
+in E is incident to at least one vertex in the subset.
+
+http://en.wikipedia.org/wiki/Vertex_cover
+"""
+# Copyright (C) 2011-2012 by
+# Nicholas Mancuso <nick.mancuso@gmail.com>
+# All rights reserved.
+# BSD license.
+from networkx.utils import *
+__all__ = ["min_weighted_vertex_cover"]
+__author__ = """Nicholas Mancuso (nick.mancuso@gmail.com)"""
+
+@not_implemented_for('directed')
+def min_weighted_vertex_cover(G, weight=None):
+ r"""2-OPT Local Ratio for Minimum Weighted Vertex Cover
+
+ Find an approximate minimum weighted vertex cover of a graph.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ Undirected graph
+
+ weight : None or string, optional (default = None)
+ If None, every edge has weight/distance/cost 1. If a string, use this
+ edge attribute as the edge weight. Any edge attribute not present
+ defaults to 1.
+
+ Returns
+ -------
+ min_weighted_cover : set
+ Returns a set of vertices whose weight sum is no more than 2 * OPT.
+
+ Notes
+ -----
+ Local-Ratio algorithm for computing an approximate vertex cover.
+ Algorithm greedily reduces the costs over edges and iteratively
+ builds a cover. Worst-case runtime is `O(|E|)`.
+
+ References
+ ----------
+ .. [1] Bar-Yehuda, R., & Even, S. (1985). A local-ratio theorem for
+ approximating the weighted vertex cover problem.
+ Annals of Discrete Mathematics, 25, 27–46
+ http://www.cs.technion.ac.il/~reuven/PDF/vc_lr.pdf
+ """
+ weight_func = lambda nd: nd.get(weight, 1)
+ cost = dict((n, weight_func(nd)) for n, nd in G.nodes(data=True))
+
+ # while there are edges uncovered, continue
+ for u,v in G.edges_iter():
+ # select some uncovered edge
+ min_cost = min([cost[u], cost[v]])
+ cost[u] -= min_cost
+ cost[v] -= min_cost
+
+ return set(u for u in cost if cost[u] == 0)
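+
+ # Worked sketch (not part of the original file): on a unit-weight path
+ # 0-1-2, processing edge (0, 1) zeroes the residual cost of both
+ # endpoints; edge (1, 2) then has minimum residual cost 0, so nothing
+ # changes and the cover is {0, 1}, within the 2 * OPT bound (OPT = {1}).
+ #
+ #     >>> import networkx as nx
+ #     >>> sorted(min_weighted_vertex_cover(nx.path_graph(3)))
+ #     [0, 1]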
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/__init__.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/__init__.py
new file mode 100644
index 0000000..4d98886
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/__init__.py
@@ -0,0 +1,5 @@
+from networkx.algorithms.assortativity.connectivity import *
+from networkx.algorithms.assortativity.correlation import *
+from networkx.algorithms.assortativity.mixing import *
+from networkx.algorithms.assortativity.neighbor_degree import *
+from networkx.algorithms.assortativity.pairs import *
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/connectivity.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/connectivity.py
new file mode 100644
index 0000000..17b0265
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/connectivity.py
@@ -0,0 +1,123 @@
+#-*- coding: utf-8 -*-
+# Copyright (C) 2011 by
+# Jordi Torrents <jtorrents@milnou.net>
+# Aric Hagberg <hagberg@lanl.gov>
+# All rights reserved.
+# BSD license.
+from collections import defaultdict
+import networkx as nx
+__author__ = """\n""".join(['Jordi Torrents <jtorrents@milnou.net>',
+ 'Aric Hagberg (hagberg@lanl.gov)'])
+__all__ = ['average_degree_connectivity',
+ 'k_nearest_neighbors']
+
+def _avg_deg_conn(G, neighbors, source_degree, target_degree,
+ nodes=None, weight=None):
+ # "k nearest neighbors, or neighbor_connectivity
+ dsum = defaultdict(float)
+ dnorm = defaultdict(float)
+ for n,k in source_degree(nodes).items():
+ nbrdeg = target_degree(neighbors(n))
+ if weight is None:
+ s = float(sum(nbrdeg.values()))
+ else: # weight nbr degree by weight of (n,nbr) edge
+ if neighbors == G.neighbors:
+ s = float(sum((G[n][nbr].get(weight,1)*d
+ for nbr,d in nbrdeg.items())))
+ elif neighbors == G.successors:
+ s = float(sum((G[n][nbr].get(weight,1)*d
+ for nbr,d in nbrdeg.items())))
+ elif neighbors == G.predecessors:
+ s = float(sum((G[nbr][n].get(weight,1)*d
+ for nbr,d in nbrdeg.items())))
+ dnorm[k] += source_degree(n, weight=weight)
+ dsum[k] += s
+
+ # normalize
+ dc = {}
+ for k,avg in dsum.items():
+ dc[k]=avg
+ norm = dnorm[k]
+ if avg > 0 and norm > 0:
+ dc[k]/=norm
+ return dc
+
+def average_degree_connectivity(G, source="in+out", target="in+out",
+ nodes=None, weight=None):
+ r"""Compute the average degree connectivity of graph.
+
+ The average degree connectivity is the average nearest neighbor degree of
+ nodes with degree k. For weighted graphs, an analogous measure can
+ be computed using the weighted average neighbors degree defined in
+ [1]_, for a node `i`, as:
+
+ .. math::
+
+ k_{nn,i}^{w} = \frac{1}{s_i} \sum_{j \in N(i)} w_{ij} k_j
+
+ where `s_i` is the weighted degree of node `i`,
+ `w_{ij}` is the weight of the edge that links `i` and `j`,
+ and `N(i)` are the neighbors of node `i`.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ source : "in"|"out"|"in+out" (default:"in+out")
+ Directed graphs only. Use "in"- or "out"-degree for source node.
+
+ target : "in"|"out"|"in+out" (default:"in+out"
+ Directed graphs only. Use "in"- or "out"-degree for target node.
+
+ nodes: list or iterable (optional)
+ Compute neighbor connectivity for these nodes. The default is all nodes.
+
+ weight : string or None, optional (default=None)
+ The edge attribute that holds the numerical value used as a weight.
+ If None, then each edge has weight 1.
+
+ Returns
+ -------
+ d: dict
+ A dictionary keyed by degree k with the value of average connectivity.
+
+ Examples
+ --------
+ >>> G=nx.path_graph(4)
+ >>> G.edge[1][2]['weight'] = 3
+ >>> nx.k_nearest_neighbors(G)
+ {1: 2.0, 2: 1.5}
+ >>> nx.k_nearest_neighbors(G, weight='weight')
+ {1: 2.0, 2: 1.75}
+
+ See also
+ --------
+ neighbors_average_degree
+
+ Notes
+ -----
+ This algorithm is sometimes called "k nearest neighbors".
+
+ References
+ ----------
+ .. [1] A. Barrat, M. Barthélemy, R. Pastor-Satorras, and A. Vespignani,
+ "The architecture of complex weighted networks".
+ PNAS 101 (11): 3747–3752 (2004).
+ """
+ source_degree = G.degree
+ target_degree = G.degree
+ neighbors = G.neighbors
+ if G.is_directed():
+ direction = {'out':G.out_degree,
+ 'in':G.in_degree,
+ 'in+out': G.degree}
+ source_degree = direction[source]
+ target_degree = direction[target]
+ if source == 'in':
+ neighbors=G.predecessors
+ elif source == 'out':
+ neighbors=G.successors
+ return _avg_deg_conn(G, neighbors, source_degree, target_degree,
+ nodes=nodes, weight=weight)
+
+k_nearest_neighbors=average_degree_connectivity
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/correlation.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/correlation.py
new file mode 100644
index 0000000..6d471c9
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/correlation.py
@@ -0,0 +1,298 @@
+#-*- coding: utf-8 -*-
+"""Node assortativity coefficients and correlation measures.
+"""
+import networkx as nx
+from networkx.algorithms.assortativity.mixing import degree_mixing_matrix, \
+ attribute_mixing_matrix, numeric_mixing_matrix
+from networkx.algorithms.assortativity.pairs import node_degree_xy, \
+ node_attribute_xy
+__author__ = ' '.join(['Aric Hagberg <aric.hagberg@gmail.com>',
+ 'Oleguer Sagarra <oleguer.sagarra@gmail.com>'])
+__all__ = ['degree_pearson_correlation_coefficient',
+ 'degree_assortativity_coefficient',
+ 'attribute_assortativity_coefficient',
+ 'numeric_assortativity_coefficient']
+
+def degree_assortativity_coefficient(G, x='out', y='in', weight=None,
+ nodes=None):
+ """Compute degree assortativity of graph.
+
+ Assortativity measures the similarity of connections
+ in the graph with respect to the node degree.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ x: string ('in','out')
+ The degree type for source node (directed graphs only).
+
+ y: string ('in','out')
+ The degree type for target node (directed graphs only).
+
+ weight: string or None, optional (default=None)
+ The edge attribute that holds the numerical value used
+ as a weight. If None, then each edge has weight 1.
+ The degree is the sum of the edge weights adjacent to the node.
+
+ nodes: list or iterable (optional)
+ Compute degree assortativity only for nodes in container.
+ The default is all nodes.
+
+ Returns
+ -------
+ r : float
+ Assortativity of graph by degree.
+
+ Examples
+ --------
+ >>> G=nx.path_graph(4)
+ >>> r=nx.degree_assortativity_coefficient(G)
+ >>> print("%3.1f"%r)
+ -0.5
+
+ See Also
+ --------
+ attribute_assortativity_coefficient
+ numeric_assortativity_coefficient
+ neighbor_connectivity
+ degree_mixing_dict
+ degree_mixing_matrix
+
+ Notes
+ -----
+ This computes Eq. (21) in Ref. [1]_ , where e is the joint
+ probability distribution (mixing matrix) of the degrees. If G is
+ directed than the matrix e is the joint probability of the
+ user-specified degree type for the source and target.
+
+ References
+ ----------
+ .. [1] M. E. J. Newman, Mixing patterns in networks,
+ Physical Review E, 67 026126, 2003
+ .. [2] Foster, J.G., Foster, D.V., Grassberger, P. & Paczuski, M.
+ Edge direction and the structure of networks, PNAS 107, 10815-20 (2010).
+ """
+ M = degree_mixing_matrix(G, x=x, y=y, nodes=nodes, weight=weight)
+ return numeric_ac(M)
+
+
+def degree_pearson_correlation_coefficient(G, x='out', y='in',
+ weight=None, nodes=None):
+ """Compute degree assortativity of graph.
+
+ Assortativity measures the similarity of connections
+ in the graph with respect to the node degree.
+
+ This is the same as degree_assortativity_coefficient but uses the
+ potentially faster scipy.stats.pearsonr function.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ x: string ('in','out')
+ The degree type for source node (directed graphs only).
+
+ y: string ('in','out')
+ The degree type for target node (directed graphs only).
+
+ weight: string or None, optional (default=None)
+ The edge attribute that holds the numerical value used
+ as a weight. If None, then each edge has weight 1.
+ The degree is the sum of the edge weights adjacent to the node.
+
+ nodes: list or iterable (optional)
+ Compute pearson correlation of degrees only for specified nodes.
+ The default is all nodes.
+
+ Returns
+ -------
+ r : float
+ Assortativity of graph by degree.
+
+ Examples
+ --------
+ >>> G=nx.path_graph(4)
+ >>> r=nx.degree_pearson_correlation_coefficient(G)
+ >>> r
+ -0.5
+
+ Notes
+ -----
+ This calls scipy.stats.pearsonr.
+
+ References
+ ----------
+ .. [1] M. E. J. Newman, Mixing patterns in networks
+ Physical Review E, 67 026126, 2003
+ .. [2] Foster, J.G., Foster, D.V., Grassberger, P. & Paczuski, M.
+ Edge direction and the structure of networks, PNAS 107, 10815-20 (2010).
+ """
+ try:
+ import scipy.stats as stats
+ except ImportError:
+ raise ImportError(
+ "Assortativity requires SciPy: http://scipy.org/ ")
+ xy=node_degree_xy(G, x=x, y=y, nodes=nodes, weight=weight)
+ x,y=zip(*xy)
+ return stats.pearsonr(x,y)[0]
+
+
+def attribute_assortativity_coefficient(G,attribute,nodes=None):
+ """Compute assortativity for node attributes.
+
+ Assortativity measures the similarity of connections
+ in the graph with respect to the given attribute.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ attribute : string
+ Node attribute key
+
+ nodes: list or iterable (optional)
+ Compute attribute assortativity for nodes in container.
+ The default is all nodes.
+
+ Returns
+ -------
+ r: float
+ Assortativity of graph for given attribute
+
+ Examples
+ --------
+ >>> G=nx.Graph()
+ >>> G.add_nodes_from([0,1],color='red')
+ >>> G.add_nodes_from([2,3],color='blue')
+ >>> G.add_edges_from([(0,1),(2,3)])
+ >>> print(nx.attribute_assortativity_coefficient(G,'color'))
+ 1.0
+
+ Notes
+ -----
+ This computes Eq. (2) in Ref. [1]_ , (trace(M)-sum(M))/(1-sum(M)),
+ where M is the joint probability distribution (mixing matrix)
+ of the specified attribute.
+
+ References
+ ----------
+ .. [1] M. E. J. Newman, Mixing patterns in networks,
+ Physical Review E, 67 026126, 2003
+ """
+ M = attribute_mixing_matrix(G,attribute,nodes)
+ return attribute_ac(M)
+
+
+def numeric_assortativity_coefficient(G, attribute, nodes=None):
+ """Compute assortativity for numerical node attributes.
+
+ Assortativity measures the similarity of connections
+ in the graph with respect to the given numeric attribute.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ attribute : string
+ Node attribute key
+
+ nodes: list or iterable (optional)
+ Compute numeric assortativity only for attributes of nodes in
+ container. The default is all nodes.
+
+ Returns
+ -------
+ r: float
+ Assortativity of graph for given attribute
+
+ Examples
+ --------
+ >>> G=nx.Graph()
+ >>> G.add_nodes_from([0,1],size=2)
+ >>> G.add_nodes_from([2,3],size=3)
+ >>> G.add_edges_from([(0,1),(2,3)])
+ >>> print(nx.numeric_assortativity_coefficient(G,'size'))
+ 1.0
+
+ Notes
+ -----
+ This computes Eq. (21) in Ref. [1]_ , for the mixing matrix
+ of the specified attribute.
+
+ References
+ ----------
+ .. [1] M. E. J. Newman, Mixing patterns in networks
+ Physical Review E, 67 026126, 2003
+ """
+ a = numeric_mixing_matrix(G,attribute,nodes)
+ return numeric_ac(a)
+
+
+def attribute_ac(M):
+ """Compute assortativity for attribute matrix M.
+
+ Parameters
+ ----------
+ M : numpy array or matrix
+ Attribute mixing matrix.
+
+ Notes
+ -----
+ This computes Eq. (2) in Ref. [1]_ , (trace(e)-sum(e))/(1-sum(e)),
+ where e is the joint probability distribution (mixing matrix)
+ of the specified attribute.
+
+ References
+ ----------
+ .. [1] M. E. J. Newman, Mixing patterns in networks,
+ Physical Review E, 67 026126, 2003
+ """
+ try:
+ import numpy
+ except ImportError:
+ raise ImportError(
+ "attribute_assortativity requires NumPy: http://scipy.org/ ")
+ if M.sum() != 1.0:
+ M=M/float(M.sum())
+ M=numpy.asmatrix(M)
+ s=(M*M).sum()
+ t=M.trace()
+ r=(t-s)/(1-s)
+ return float(r)
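+
+ # Worked check (not part of the original file): for the perfectly
+ # assortative mixing matrix M = [[0.5, 0], [0, 0.5]], trace(M) = 1 and
+ # sum(M*M) = 0.5, so r = (1 - 0.5) / (1 - 0.5) = 1.0, matching the
+ # attribute_assortativity_coefficient doctest above.
+ #
+ #     >>> import numpy
+ #     >>> attribute_ac(numpy.array([[0.5, 0.0], [0.0, 0.5]]))
+ #     1.0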
+
+
+def numeric_ac(M):
+ # M is a numpy matrix or array
+ # numeric assortativity coefficient, pearsonr
+ try:
+ import numpy
+ except ImportError:
+ raise ImportError('numeric_assortativity requires '
+ 'NumPy: http://scipy.org/')
+ if M.sum() != 1.0:
+ M=M/float(M.sum())
+ nx,ny=M.shape # nx=ny
+ x=numpy.arange(nx)
+ y=numpy.arange(ny)
+ a=M.sum(axis=0)
+ b=M.sum(axis=1)
+ vara=(a*x**2).sum()-((a*x).sum())**2
+ varb=(b*x**2).sum()-((b*x).sum())**2
+ xy=numpy.outer(x,y)
+ ab=numpy.outer(a,b)
+ return (xy*(M-ab)).sum()/numpy.sqrt(vara*varb)
+
+
+# fixture for nose tests
+def setup_module(module):
+ from nose import SkipTest
+ try:
+ import numpy
+ except ImportError:
+ raise SkipTest("NumPy not available")
+ try:
+ import scipy
+ except ImportError:
+ raise SkipTest("SciPy not available")
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/mixing.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/mixing.py
new file mode 100644
index 0000000..2c0e4f0
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/mixing.py
@@ -0,0 +1,248 @@
+#-*- coding: utf-8 -*-
+"""
+Mixing matrices for node attributes and degree.
+"""
+import networkx as nx
+from networkx.utils import dict_to_numpy_array
+from networkx.algorithms.assortativity.pairs import node_degree_xy, \
+ node_attribute_xy
+__author__ = ' '.join(['Aric Hagberg <aric.hagberg@gmail.com>'])
+__all__ = ['attribute_mixing_matrix',
+ 'attribute_mixing_dict',
+ 'degree_mixing_matrix',
+ 'degree_mixing_dict',
+ 'numeric_mixing_matrix',
+ 'mixing_dict']
+
+def attribute_mixing_dict(G,attribute,nodes=None,normalized=False):
+ """Return dictionary representation of mixing matrix for attribute.
+
+ Parameters
+ ----------
+ G : graph
+ NetworkX graph object.
+
+ attribute : string
+ Node attribute key.
+
+ nodes: list or iterable (optional)
+ Use nodes in container to build the dict. The default is all nodes.
+
+ normalized : bool (default=False)
+ Return counts if False or probabilities if True.
+
+ Examples
+ --------
+ >>> G=nx.Graph()
+ >>> G.add_nodes_from([0,1],color='red')
+ >>> G.add_nodes_from([2,3],color='blue')
+ >>> G.add_edge(1,3)
+ >>> d=nx.attribute_mixing_dict(G,'color')
+ >>> print(d['red']['blue'])
+ 1
+ >>> print(d['blue']['red']) # d symmetric for undirected graphs
+ 1
+
+ Returns
+ -------
+ d : dictionary
+ Counts or joint probability of occurrence of attribute pairs.
+ """
+ xy_iter=node_attribute_xy(G,attribute,nodes)
+ return mixing_dict(xy_iter,normalized=normalized)
+
+
+def attribute_mixing_matrix(G,attribute,nodes=None,mapping=None,
+ normalized=True):
+ """Return mixing matrix for attribute.
+
+ Parameters
+ ----------
+ G : graph
+ NetworkX graph object.
+
+ attribute : string
+ Node attribute key.
+
+ nodes: list or iterable (optional)
+ Use only nodes in container to build the matrix. The default is
+ all nodes.
+
+ mapping : dictionary, optional
+ Mapping from node attribute to integer index in matrix.
+ If not specified, an arbitrary ordering will be used.
+
+ normalized : bool (default=True)
+ Return counts if False or probabilities if True.
+
+ Returns
+ -------
+ m: numpy array
+ Counts or joint probability of occurrence of attribute pairs.
+ """
+ d=attribute_mixing_dict(G,attribute,nodes)
+ a=dict_to_numpy_array(d,mapping=mapping)
+ if normalized:
+ a=a/a.sum()
+ return a
+
+
+def degree_mixing_dict(G, x='out', y='in', weight=None,
+ nodes=None, normalized=False):
+ """Return dictionary representation of mixing matrix for degree.
+
+ Parameters
+ ----------
+ G : graph
+ NetworkX graph object.
+
+ x: string ('in','out')
+ The degree type for source node (directed graphs only).
+
+ y: string ('in','out')
+ The degree type for target node (directed graphs only).
+
+ weight: string or None, optional (default=None)
+ The edge attribute that holds the numerical value used
+ as a weight. If None, then each edge has weight 1.
+ The degree is the sum of the edge weights adjacent to the node.
+
+ normalized : bool (default=False)
+ Return counts if False or probabilities if True.
+
+ Returns
+ -------
+ d: dictionary
+ Counts or joint probability of occurrence of degree pairs.
+ """
+ xy_iter=node_degree_xy(G, x=x, y=y, nodes=nodes, weight=weight)
+ return mixing_dict(xy_iter,normalized=normalized)
+
+
+
+def degree_mixing_matrix(G, x='out', y='in', weight=None,
+ nodes=None, normalized=True):
+ """Return mixing matrix for attribute.
+
+ Parameters
+ ----------
+ G : graph
+ NetworkX graph object.
+
+ x: string ('in','out')
+ The degree type for source node (directed graphs only).
+
+ y: string ('in','out')
+ The degree type for target node (directed graphs only).
+
+ nodes: list or iterable (optional)
+ Build the matrix using only nodes in container.
+ The default is all nodes.
+
+ weight: string or None, optional (default=None)
+ The edge attribute that holds the numerical value used
+ as a weight. If None, then each edge has weight 1.
+ The degree is the sum of the edge weights adjacent to the node.
+
+ normalized : bool (default=True)
+ Return counts if False or probabilities if True.
+
+ Returns
+ -------
+ m: numpy array
+ Counts or joint probability of occurrence of node degree pairs.
+ """
+ d=degree_mixing_dict(G, x=x, y=y, nodes=nodes, weight=weight)
+ s=set(d.keys())
+ for k,v in d.items():
+ s.update(v.keys())
+ m=max(s)
+ mapping=dict(zip(range(m+1),range(m+1)))
+ a=dict_to_numpy_array(d,mapping=mapping)
+ if normalized:
+ a=a/a.sum()
+ return a
+
+def numeric_mixing_matrix(G,attribute,nodes=None,normalized=True):
+ """Return numeric mixing matrix for attribute.
+
+ Parameters
+ ----------
+ G : graph
+ NetworkX graph object.
+
+ attribute : string
+ Node attribute key.
+
+ nodes: list or iterable (optional)
+ Build the matrix only with nodes in container. The default is all nodes.
+
+ normalized : bool (default=True)
+ Return counts if False or probabilities if True.
+
+ Returns
+ -------
+ m: numpy array
+ Counts or joint probability of occurrence of node attribute pairs.
+ """
+ d=attribute_mixing_dict(G,attribute,nodes)
+ s=set(d.keys())
+ for k,v in d.items():
+ s.update(v.keys())
+ m=max(s)
+ mapping=dict(zip(range(m+1),range(m+1)))
+ a=dict_to_numpy_array(d,mapping=mapping)
+ if normalized:
+ a=a/a.sum()
+ return a
+
+def mixing_dict(xy,normalized=False):
+ """Return a dictionary representation of mixing matrix.
+
+ Parameters
+ ----------
+ xy : list or container of two-tuples
+ Pairs of (x,y) items.
+
+ normalized : bool (default=False)
+ Return counts if False or probabilities if True.
+
+ Returns
+ -------
+ d: dictionary
+ Counts or joint probability of occurrence of values in xy.
+ """
+ d={}
+ psum=0.0
+ for x,y in xy:
+ if x not in d:
+ d[x]={}
+ if y not in d:
+ d[y]={}
+ v = d[x].get(y,0)
+ d[x][y] = v+1
+ psum+=1
+
+
+ if normalized:
+ for k,jdict in d.items():
+ for j in jdict:
+ jdict[j]/=psum
+ return d
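+
+ # Illustrative sketch (not part of the original file): the dict counts
+ # ordered (x, y) occurrences, and every value also gets a top-level key.
+ #
+ #     >>> d = mixing_dict([('a', 'b'), ('a', 'b'), ('b', 'a')])
+ #     >>> d['a']['b'], d['b']['a']
+ #     (2, 1)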
+
+
+
+# fixture for nose tests
+def setup_module(module):
+ from nose import SkipTest
+ try:
+ import numpy
+ except ImportError:
+ raise SkipTest("NumPy not available")
+ try:
+ import scipy
+ except ImportError:
+ raise SkipTest("SciPy not available")
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/neighbor_degree.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/neighbor_degree.py
new file mode 100644
index 0000000..9257954
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/neighbor_degree.py
@@ -0,0 +1,133 @@
+#-*- coding: utf-8 -*-
+# Copyright (C) 2011 by
+# Jordi Torrents <jtorrents@milnou.net>
+# Aric Hagberg <hagberg@lanl.gov>
+# All rights reserved.
+# BSD license.
+import networkx as nx
+__author__ = """\n""".join(['Jordi Torrents <jtorrents@milnou.net>',
+ 'Aric Hagberg (hagberg@lanl.gov)'])
+__all__ = ["average_neighbor_degree"]
+
+
+def _average_nbr_deg(G, source_degree, target_degree, nodes=None, weight=None):
+ # average degree of neighbors
+ avg = {}
+ for n,deg in source_degree(nodes,weight=weight).items():
+ # normalize but not by zero degree
+ if deg == 0:
+ deg = 1
+ nbrdeg = target_degree(G[n])
+ if weight is None:
+ avg[n] = sum(nbrdeg.values())/float(deg)
+ else:
+ avg[n] = sum((G[n][nbr].get(weight,1)*d
+ for nbr,d in nbrdeg.items()))/float(deg)
+ return avg
+
+def average_neighbor_degree(G, source='out', target='out',
+ nodes=None, weight=None):
+ r"""Returns the average degree of the neighborhood of each node.
+
+ The average degree of a node `i` is
+
+ .. math::
+
+ k_{nn,i} = \frac{1}{|N(i)|} \sum_{j \in N(i)} k_j
+
+ where `N(i)` are the neighbors of node `i` and `k_j` is
+ the degree of node `j` which belongs to `N(i)`. For weighted
+ graphs, an analogous measure can be defined [1]_,
+
+ .. math::
+
+ k_{nn,i}^{w} = \frac{1}{s_i} \sum_{j \in N(i)} w_{ij} k_j
+
+ where `s_i` is the weighted degree of node `i`, `w_{ij}`
+ is the weight of the edge that links `i` and `j` and
+ `N(i)` are the neighbors of node `i`.
+
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ source : string ("in"|"out")
+ Directed graphs only.
+ Use "in"- or "out"-degree for source node.
+
+ target : string ("in"|"out")
+ Directed graphs only.
+ Use "in"- or "out"-degree for target node.
+
+ nodes : list or iterable, optional
+ Compute neighbor degree for specified nodes. The default is
+ all nodes in the graph.
+
+
+ weight : string or None, optional (default=None)
+ The edge attribute that holds the numerical value used as a weight.
+ If None, then each edge has weight 1.
+
+ Returns
+ -------
+ d: dict
+ A dictionary keyed by node with average neighbors degree value.
+
+ Examples
+ --------
+ >>> G=nx.path_graph(4)
+ >>> G.edge[0][1]['weight'] = 5
+ >>> G.edge[2][3]['weight'] = 3
+
+ >>> nx.average_neighbor_degree(G)
+ {0: 2.0, 1: 1.5, 2: 1.5, 3: 2.0}
+ >>> nx.average_neighbor_degree(G, weight='weight')
+ {0: 2.0, 1: 1.1666666666666667, 2: 1.25, 3: 2.0}
+
+ >>> G=nx.DiGraph()
+ >>> G.add_path([0,1,2,3])
+ >>> nx.average_neighbor_degree(G, source='in', target='in')
+ {0: 1.0, 1: 1.0, 2: 1.0, 3: 0.0}
+
+ >>> nx.average_neighbor_degree(G, source='out', target='out')
+ {0: 1.0, 1: 1.0, 2: 0.0, 3: 0.0}
+
+ Notes
+ -----
+ For directed graphs you can also specify in-degree or out-degree
+ by passing keyword arguments.
+
+ See Also
+ --------
+ average_degree_connectivity
+
+ References
+ ----------
+ .. [1] A. Barrat, M. Barthélemy, R. Pastor-Satorras, and A. Vespignani,
+ "The architecture of complex weighted networks".
+ PNAS 101 (11): 3747–3752 (2004).
+ """
+ source_degree = G.degree
+ target_degree = G.degree
+ if G.is_directed():
+ direction = {'out':G.out_degree,
+ 'in':G.in_degree}
+ source_degree = direction[source]
+ target_degree = direction[target]
+ return _average_nbr_deg(G, source_degree, target_degree,
+ nodes=nodes, weight=weight)
+
+# obsolete
+# def average_neighbor_in_degree(G, nodes=None, weight=None):
+# if not G.is_directed():
+# raise nx.NetworkXError("Not defined for undirected graphs.")
+# return _average_nbr_deg(G, G.in_degree, G.in_degree, nodes, weight)
+# average_neighbor_in_degree.__doc__=average_neighbor_degree.__doc__
+
+# def average_neighbor_out_degree(G, nodes=None, weight=None):
+# if not G.is_directed():
+# raise nx.NetworkXError("Not defined for undirected graphs.")
+# return _average_nbr_deg(G, G.out_degree, G.out_degree, nodes, weight)
+# average_neighbor_out_degree.__doc__=average_neighbor_degree.__doc__
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/pairs.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/pairs.py
new file mode 100644
index 0000000..0ed0fa9
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/pairs.py
@@ -0,0 +1,134 @@
+#-*- coding: utf-8 -*-
+"""Generators of x-y pairs of node data."""
+import networkx as nx
+from networkx.utils import dict_to_numpy_array
+__author__ = ' '.join(['Aric Hagberg <aric.hagberg@gmail.com>'])
+__all__ = ['node_attribute_xy',
+ 'node_degree_xy']
+
+def node_attribute_xy(G, attribute, nodes=None):
+ """Return iterator of node-attribute pairs for all edges in G.
+
+ Parameters
+ ----------
+ G: NetworkX graph
+
+ attribute: key
+ The node attribute key.
+
+ nodes: list or iterable (optional)
+        Use only edges that are adjacent to the specified nodes.
+ The default is all nodes.
+
+ Returns
+ -------
+ (x,y): 2-tuple
+ Generates 2-tuple of (attribute,attribute) values.
+
+ Examples
+ --------
+ >>> G = nx.DiGraph()
+ >>> G.add_node(1,color='red')
+ >>> G.add_node(2,color='blue')
+ >>> G.add_edge(1,2)
+ >>> list(nx.node_attribute_xy(G,'color'))
+ [('red', 'blue')]
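+
+    For an undirected graph the same edge yields a pair in each
+    orientation (a minimal sketch of the behavior described in the
+    Notes below):
+
+    >>> G = nx.Graph()
+    >>> G.add_node(1,color='red')
+    >>> G.add_node(2,color='blue')
+    >>> G.add_edge(1,2)
+    >>> sorted(nx.node_attribute_xy(G,'color'))
+    [('blue', 'red'), ('red', 'blue')]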
+
+ Notes
+ -----
+ For undirected graphs each edge is produced twice, once for each edge
+ representation (u,v) and (v,u), with the exception of self-loop edges
+ which only appear once.
+ """
+ if nodes is None:
+ nodes = set(G)
+ else:
+ nodes = set(nodes)
+ node = G.node
+ for u,nbrsdict in G.adjacency_iter():
+ if u not in nodes:
+ continue
+ uattr = node[u].get(attribute,None)
+ if G.is_multigraph():
+ for v,keys in nbrsdict.items():
+ vattr = node[v].get(attribute,None)
+ for k,d in keys.items():
+ yield (uattr,vattr)
+ else:
+ for v,eattr in nbrsdict.items():
+ vattr = node[v].get(attribute,None)
+ yield (uattr,vattr)
+
+
+def node_degree_xy(G, x='out', y='in', weight=None, nodes=None):
+ """Generate node degree-degree pairs for edges in G.
+
+ Parameters
+ ----------
+ G: NetworkX graph
+
+ x: string ('in','out')
+ The degree type for source node (directed graphs only).
+
+ y: string ('in','out')
+ The degree type for target node (directed graphs only).
+
+ weight: string or None, optional (default=None)
+ The edge attribute that holds the numerical value used
+ as a weight. If None, then each edge has weight 1.
+ The degree is the sum of the edge weights adjacent to the node.
+
+ nodes: list or iterable (optional)
+        Use only edges that are adjacent to the specified nodes.
+ The default is all nodes.
+
+ Returns
+ -------
+ (x,y): 2-tuple
+ Generates 2-tuple of (degree,degree) values.
+
+
+ Examples
+ --------
+ >>> G = nx.DiGraph()
+ >>> G.add_edge(1,2)
+ >>> list(nx.node_degree_xy(G,x='out',y='in'))
+ [(1, 1)]
+ >>> list(nx.node_degree_xy(G,x='in',y='out'))
+ [(0, 0)]
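+
+    For an undirected graph each edge is reported once per orientation
+    (a minimal sketch of the behavior described in the Notes below):
+
+    >>> G = nx.Graph()
+    >>> G.add_edge(1,2)
+    >>> list(nx.node_degree_xy(G))
+    [(1, 1), (1, 1)]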
+
+ Notes
+ -----
+ For undirected graphs each edge is produced twice, once for each edge
+ representation (u,v) and (v,u), with the exception of self-loop edges
+ which only appear once.
+ """
+ if nodes is None:
+ nodes = set(G)
+ else:
+ nodes = set(nodes)
+ xdeg = G.degree_iter
+ ydeg = G.degree_iter
+ if G.is_directed():
+ direction = {'out':G.out_degree_iter,
+ 'in':G.in_degree_iter}
+ xdeg = direction[x]
+ ydeg = direction[y]
+
+ for u,degu in xdeg(nodes, weight=weight):
+ neighbors = (nbr for _,nbr in G.edges_iter(u) if nbr in nodes)
+ for v,degv in ydeg(neighbors, weight=weight):
+ yield degu,degv
+
+
+# fixture for nose tests
+def setup_module(module):
+ from nose import SkipTest
+ try:
+ import numpy
+    except ImportError:
+ raise SkipTest("NumPy not available")
+ try:
+ import scipy
+    except ImportError:
+ raise SkipTest("SciPy not available")
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/tests/base_test.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/tests/base_test.py
new file mode 100644
index 0000000..2e16544
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/tests/base_test.py
@@ -0,0 +1,50 @@
+import networkx as nx
+
+class BaseTestAttributeMixing(object):
+
+ def setUp(self):
+ G=nx.Graph()
+ G.add_nodes_from([0,1],fish='one')
+ G.add_nodes_from([2,3],fish='two')
+ G.add_nodes_from([4],fish='red')
+ G.add_nodes_from([5],fish='blue')
+ G.add_edges_from([(0,1),(2,3),(0,4),(2,5)])
+ self.G=G
+
+ D=nx.DiGraph()
+ D.add_nodes_from([0,1],fish='one')
+ D.add_nodes_from([2,3],fish='two')
+ D.add_nodes_from([4],fish='red')
+ D.add_nodes_from([5],fish='blue')
+ D.add_edges_from([(0,1),(2,3),(0,4),(2,5)])
+ self.D=D
+
+ M=nx.MultiGraph()
+ M.add_nodes_from([0,1],fish='one')
+ M.add_nodes_from([2,3],fish='two')
+ M.add_nodes_from([4],fish='red')
+ M.add_nodes_from([5],fish='blue')
+ M.add_edges_from([(0,1),(0,1),(2,3)])
+ self.M=M
+
+ S=nx.Graph()
+ S.add_nodes_from([0,1],fish='one')
+ S.add_nodes_from([2,3],fish='two')
+ S.add_nodes_from([4],fish='red')
+ S.add_nodes_from([5],fish='blue')
+ S.add_edge(0,0)
+ S.add_edge(2,2)
+ self.S=S
+
+class BaseTestDegreeMixing(object):
+
+ def setUp(self):
+ self.P4=nx.path_graph(4)
+ self.D=nx.DiGraph()
+ self.D.add_edges_from([(0, 2), (0, 3), (1, 3), (2, 3)])
+ self.M=nx.MultiGraph()
+ self.M.add_path(list(range(4)))
+ self.M.add_edge(0,1)
+ self.S=nx.Graph()
+ self.S.add_edges_from([(0,0),(1,1)])
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/tests/test_connectivity.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/tests/test_connectivity.py
new file mode 100644
index 0000000..5091161
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/tests/test_connectivity.py
@@ -0,0 +1,121 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx as nx
+
+class TestNeighborConnectivity(object):
+
+ def test_degree_p4(self):
+ G=nx.path_graph(4)
+ answer={1:2.0,2:1.5}
+ nd = nx.average_degree_connectivity(G)
+ assert_equal(nd,answer)
+
+ D=G.to_directed()
+ answer={2:2.0,4:1.5}
+ nd = nx.average_degree_connectivity(D)
+ assert_equal(nd,answer)
+
+ answer={1:2.0,2:1.5}
+ D=G.to_directed()
+ nd = nx.average_degree_connectivity(D, source='in', target='in')
+ assert_equal(nd,answer)
+
+ D=G.to_directed()
+ nd = nx.average_degree_connectivity(D, source='in', target='in')
+ assert_equal(nd,answer)
+
+ def test_degree_p4_weighted(self):
+ G=nx.path_graph(4)
+ G[1][2]['weight']=4
+ answer={1:2.0,2:1.8}
+ nd = nx.average_degree_connectivity(G,weight='weight')
+ assert_equal(nd,answer)
+ answer={1:2.0,2:1.5}
+ nd = nx.average_degree_connectivity(G)
+ assert_equal(nd,answer)
+
+ D=G.to_directed()
+ answer={2:2.0,4:1.8}
+ nd = nx.average_degree_connectivity(D,weight='weight')
+ assert_equal(nd,answer)
+
+ answer={1:2.0,2:1.8}
+ D=G.to_directed()
+ nd = nx.average_degree_connectivity(D,weight='weight', source='in',
+ target='in')
+ assert_equal(nd,answer)
+
+ D=G.to_directed()
+ nd = nx.average_degree_connectivity(D,source='in',target='out',
+ weight='weight')
+ assert_equal(nd,answer)
+
+ def test_weight_keyword(self):
+ G=nx.path_graph(4)
+ G[1][2]['other']=4
+ answer={1:2.0,2:1.8}
+ nd = nx.average_degree_connectivity(G,weight='other')
+ assert_equal(nd,answer)
+ answer={1:2.0,2:1.5}
+ nd = nx.average_degree_connectivity(G,weight=None)
+ assert_equal(nd,answer)
+
+ D=G.to_directed()
+ answer={2:2.0,4:1.8}
+ nd = nx.average_degree_connectivity(D,weight='other')
+ assert_equal(nd,answer)
+
+ answer={1:2.0,2:1.8}
+ D=G.to_directed()
+ nd = nx.average_degree_connectivity(D,weight='other', source='in',
+ target='in')
+ assert_equal(nd,answer)
+
+ D=G.to_directed()
+ nd = nx.average_degree_connectivity(D,weight='other',source='in',
+ target='in')
+ assert_equal(nd,answer)
+
+ def test_degree_barrat(self):
+ G=nx.star_graph(5)
+ G.add_edges_from([(5,6),(5,7),(5,8),(5,9)])
+ G[0][5]['weight']=5
+ nd = nx.average_degree_connectivity(G)[5]
+ assert_equal(nd,1.8)
+ nd = nx.average_degree_connectivity(G,weight='weight')[5]
+ assert_almost_equal(nd,3.222222,places=5)
+ nd = nx.k_nearest_neighbors(G,weight='weight')[5]
+ assert_almost_equal(nd,3.222222,places=5)
+
+ def test_zero_deg(self):
+ G=nx.DiGraph()
+ G.add_edge(1,2)
+ G.add_edge(1,3)
+ G.add_edge(1,4)
+ c = nx.average_degree_connectivity(G)
+ assert_equal(c,{1:0,3:1})
+ c = nx.average_degree_connectivity(G, source='in', target='in')
+ assert_equal(c,{0:0,1:0})
+ c = nx.average_degree_connectivity(G, source='in', target='out')
+ assert_equal(c,{0:0,1:3})
+ c = nx.average_degree_connectivity(G, source='in', target='in+out')
+ assert_equal(c,{0:0,1:3})
+ c = nx.average_degree_connectivity(G, source='out', target='out')
+ assert_equal(c,{0:0,3:0})
+ c = nx.average_degree_connectivity(G, source='out', target='in')
+ assert_equal(c,{0:0,3:1})
+ c = nx.average_degree_connectivity(G, source='out', target='in+out')
+ assert_equal(c,{0:0,3:1})
+
+
+ def test_in_out_weight(self):
+ from itertools import permutations
+ G=nx.DiGraph()
+ G.add_edge(1,2,weight=1)
+ G.add_edge(1,3,weight=1)
+ G.add_edge(3,1,weight=1)
+ for s,t in permutations(['in','out','in+out'],2):
+ c = nx.average_degree_connectivity(G, source=s, target=t)
+ cw = nx.average_degree_connectivity(G,source=s, target=t,
+ weight='weight')
+ assert_equal(c,cw)
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/tests/test_correlation.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/tests/test_correlation.py
new file mode 100644
index 0000000..fbb2d51
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/tests/test_correlation.py
@@ -0,0 +1,101 @@
+#!/usr/bin/env python
+from nose.tools import *
+from nose import SkipTest
+import networkx as nx
+from base_test import BaseTestAttributeMixing,BaseTestDegreeMixing
+from networkx.algorithms.assortativity.correlation import attribute_ac
+
+
+class TestDegreeMixingCorrelation(BaseTestDegreeMixing):
+ @classmethod
+ def setupClass(cls):
+ global np
+ global npt
+ try:
+ import numpy as np
+ import numpy.testing as npt
+ except ImportError:
+ raise SkipTest('NumPy not available.')
+ try:
+ import scipy
+ import scipy.stats
+ except ImportError:
+ raise SkipTest('SciPy not available.')
+
+
+
+ def test_degree_assortativity_undirected(self):
+ r=nx.degree_assortativity_coefficient(self.P4)
+ npt.assert_almost_equal(r,-1.0/2,decimal=4)
+
+ def test_degree_assortativity_directed(self):
+ r=nx.degree_assortativity_coefficient(self.D)
+ npt.assert_almost_equal(r,-0.57735,decimal=4)
+
+ def test_degree_assortativity_multigraph(self):
+ r=nx.degree_assortativity_coefficient(self.M)
+ npt.assert_almost_equal(r,-1.0/7.0,decimal=4)
+
+
+    def test_degree_pearson_assortativity_undirected(self):
+ r=nx.degree_pearson_correlation_coefficient(self.P4)
+ npt.assert_almost_equal(r,-1.0/2,decimal=4)
+
+    def test_degree_pearson_assortativity_directed(self):
+ r=nx.degree_pearson_correlation_coefficient(self.D)
+ npt.assert_almost_equal(r,-0.57735,decimal=4)
+
+    def test_degree_pearson_assortativity_multigraph(self):
+ r=nx.degree_pearson_correlation_coefficient(self.M)
+ npt.assert_almost_equal(r,-1.0/7.0,decimal=4)
+
+
+
+class TestAttributeMixingCorrelation(BaseTestAttributeMixing):
+ @classmethod
+ def setupClass(cls):
+ global np
+ global npt
+ try:
+ import numpy as np
+ import numpy.testing as npt
+
+ except ImportError:
+ raise SkipTest('NumPy not available.')
+
+
+ def test_attribute_assortativity_undirected(self):
+ r=nx.attribute_assortativity_coefficient(self.G,'fish')
+ assert_equal(r,6.0/22.0)
+
+ def test_attribute_assortativity_directed(self):
+ r=nx.attribute_assortativity_coefficient(self.D,'fish')
+ assert_equal(r,1.0/3.0)
+
+ def test_attribute_assortativity_multigraph(self):
+ r=nx.attribute_assortativity_coefficient(self.M,'fish')
+ assert_equal(r,1.0)
+
+ def test_attribute_assortativity_coefficient(self):
+ # from "Mixing patterns in networks"
+ a=np.array([[0.258,0.016,0.035,0.013],
+ [0.012,0.157,0.058,0.019],
+ [0.013,0.023,0.306,0.035],
+ [0.005,0.007,0.024,0.016]])
+ r=attribute_ac(a)
+ npt.assert_almost_equal(r,0.623,decimal=3)
+
+ def test_attribute_assortativity_coefficient2(self):
+ a=np.array([[0.18,0.02,0.01,0.03],
+ [0.02,0.20,0.03,0.02],
+ [0.01,0.03,0.16,0.01],
+ [0.03,0.02,0.01,0.22]])
+
+ r=attribute_ac(a)
+ npt.assert_almost_equal(r,0.68,decimal=2)
+
+ def test_attribute_assortativity(self):
+ a=np.array([[50,50,0],[50,50,0],[0,0,2]])
+ r=attribute_ac(a)
+ npt.assert_almost_equal(r,0.029,decimal=3)
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/tests/test_mixing.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/tests/test_mixing.py
new file mode 100644
index 0000000..ce60a94
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/tests/test_mixing.py
@@ -0,0 +1,186 @@
+#!/usr/bin/env python
+from nose.tools import *
+from nose import SkipTest
+import networkx as nx
+from base_test import BaseTestAttributeMixing,BaseTestDegreeMixing
+
+
+class TestDegreeMixingDict(BaseTestDegreeMixing):
+
+
+ def test_degree_mixing_dict_undirected(self):
+ d=nx.degree_mixing_dict(self.P4)
+ d_result={1:{2:2},
+ 2:{1:2,2:2},
+ }
+ assert_equal(d,d_result)
+
+ def test_degree_mixing_dict_undirected_normalized(self):
+ d=nx.degree_mixing_dict(self.P4, normalized=True)
+ d_result={1:{2:1.0/3},
+ 2:{1:1.0/3,2:1.0/3},
+ }
+ assert_equal(d,d_result)
+
+ def test_degree_mixing_dict_directed(self):
+ d=nx.degree_mixing_dict(self.D)
+ print(d)
+ d_result={1:{3:2},
+ 2:{1:1,3:1},
+ 3:{}
+ }
+ assert_equal(d,d_result)
+
+ def test_degree_mixing_dict_multigraph(self):
+ d=nx.degree_mixing_dict(self.M)
+ d_result={1:{2:1},
+ 2:{1:1,3:3},
+ 3:{2:3}
+ }
+ assert_equal(d,d_result)
+
+
+class TestDegreeMixingMatrix(BaseTestDegreeMixing):
+
+ @classmethod
+ def setupClass(cls):
+ global np
+ global npt
+ try:
+ import numpy as np
+ import numpy.testing as npt
+
+ except ImportError:
+ raise SkipTest('NumPy not available.')
+
+ def test_degree_mixing_matrix_undirected(self):
+ a_result=np.array([[0,0,0],
+ [0,0,2],
+ [0,2,2]]
+ )
+ a=nx.degree_mixing_matrix(self.P4,normalized=False)
+ npt.assert_equal(a,a_result)
+ a=nx.degree_mixing_matrix(self.P4)
+ npt.assert_equal(a,a_result/float(a_result.sum()))
+
+ def test_degree_mixing_matrix_directed(self):
+ a_result=np.array([[0,0,0,0],
+ [0,0,0,2],
+ [0,1,0,1],
+ [0,0,0,0]]
+ )
+ a=nx.degree_mixing_matrix(self.D,normalized=False)
+ npt.assert_equal(a,a_result)
+ a=nx.degree_mixing_matrix(self.D)
+ npt.assert_equal(a,a_result/float(a_result.sum()))
+
+ def test_degree_mixing_matrix_multigraph(self):
+ a_result=np.array([[0,0,0,0],
+ [0,0,1,0],
+ [0,1,0,3],
+ [0,0,3,0]]
+ )
+ a=nx.degree_mixing_matrix(self.M,normalized=False)
+ npt.assert_equal(a,a_result)
+ a=nx.degree_mixing_matrix(self.M)
+ npt.assert_equal(a,a_result/float(a_result.sum()))
+
+
+ def test_degree_mixing_matrix_selfloop(self):
+ a_result=np.array([[0,0,0],
+ [0,0,0],
+ [0,0,2]]
+ )
+ a=nx.degree_mixing_matrix(self.S,normalized=False)
+ npt.assert_equal(a,a_result)
+ a=nx.degree_mixing_matrix(self.S)
+ npt.assert_equal(a,a_result/float(a_result.sum()))
+
+
+class TestAttributeMixingDict(BaseTestAttributeMixing):
+
+ def test_attribute_mixing_dict_undirected(self):
+ d=nx.attribute_mixing_dict(self.G,'fish')
+ d_result={'one':{'one':2,'red':1},
+ 'two':{'two':2,'blue':1},
+ 'red':{'one':1},
+ 'blue':{'two':1}
+ }
+ assert_equal(d,d_result)
+
+ def test_attribute_mixing_dict_directed(self):
+ d=nx.attribute_mixing_dict(self.D,'fish')
+ d_result={'one':{'one':1,'red':1},
+ 'two':{'two':1,'blue':1},
+ 'red':{},
+ 'blue':{}
+ }
+ assert_equal(d,d_result)
+
+
+ def test_attribute_mixing_dict_multigraph(self):
+ d=nx.attribute_mixing_dict(self.M,'fish')
+ d_result={'one':{'one':4},
+ 'two':{'two':2},
+ }
+ assert_equal(d,d_result)
+
+
+
+class TestAttributeMixingMatrix(BaseTestAttributeMixing):
+ @classmethod
+ def setupClass(cls):
+ global np
+ global npt
+ try:
+ import numpy as np
+ import numpy.testing as npt
+
+ except ImportError:
+ raise SkipTest('NumPy not available.')
+
+ def test_attribute_mixing_matrix_undirected(self):
+ mapping={'one':0,'two':1,'red':2,'blue':3}
+ a_result=np.array([[2,0,1,0],
+ [0,2,0,1],
+ [1,0,0,0],
+ [0,1,0,0]]
+ )
+ a=nx.attribute_mixing_matrix(self.G,'fish',
+ mapping=mapping,
+ normalized=False)
+ npt.assert_equal(a,a_result)
+ a=nx.attribute_mixing_matrix(self.G,'fish',
+ mapping=mapping)
+ npt.assert_equal(a,a_result/float(a_result.sum()))
+
+ def test_attribute_mixing_matrix_directed(self):
+ mapping={'one':0,'two':1,'red':2,'blue':3}
+ a_result=np.array([[1,0,1,0],
+ [0,1,0,1],
+ [0,0,0,0],
+ [0,0,0,0]]
+ )
+ a=nx.attribute_mixing_matrix(self.D,'fish',
+ mapping=mapping,
+ normalized=False)
+ npt.assert_equal(a,a_result)
+ a=nx.attribute_mixing_matrix(self.D,'fish',
+ mapping=mapping)
+ npt.assert_equal(a,a_result/float(a_result.sum()))
+
+ def test_attribute_mixing_matrix_multigraph(self):
+ mapping={'one':0,'two':1,'red':2,'blue':3}
+ a_result=np.array([[4,0,0,0],
+ [0,2,0,0],
+ [0,0,0,0],
+ [0,0,0,0]]
+ )
+ a=nx.attribute_mixing_matrix(self.M,'fish',
+ mapping=mapping,
+ normalized=False)
+ npt.assert_equal(a,a_result)
+ a=nx.attribute_mixing_matrix(self.M,'fish',
+ mapping=mapping)
+ npt.assert_equal(a,a_result/float(a_result.sum()))
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/tests/test_neighbor_degree.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/tests/test_neighbor_degree.py
new file mode 100644
index 0000000..7ab99fb
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/tests/test_neighbor_degree.py
@@ -0,0 +1,82 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx as nx
+
+class TestAverageNeighbor(object):
+
+ def test_degree_p4(self):
+ G=nx.path_graph(4)
+ answer={0:2,1:1.5,2:1.5,3:2}
+ nd = nx.average_neighbor_degree(G)
+ assert_equal(nd,answer)
+
+ D=G.to_directed()
+ nd = nx.average_neighbor_degree(D)
+ assert_equal(nd,answer)
+
+ D=G.to_directed()
+ nd = nx.average_neighbor_degree(D)
+ assert_equal(nd,answer)
+
+ D=G.to_directed()
+ nd = nx.average_neighbor_degree(D, source='in', target='in')
+ assert_equal(nd,answer)
+
+ def test_degree_p4_weighted(self):
+ G=nx.path_graph(4)
+ G[1][2]['weight']=4
+ answer={0:2,1:1.8,2:1.8,3:2}
+ nd = nx.average_neighbor_degree(G,weight='weight')
+ assert_equal(nd,answer)
+
+ D=G.to_directed()
+ nd = nx.average_neighbor_degree(D,weight='weight')
+ assert_equal(nd,answer)
+
+ D=G.to_directed()
+ nd = nx.average_neighbor_degree(D,weight='weight')
+ assert_equal(nd,answer)
+ nd = nx.average_neighbor_degree(D,source='out',target='out',
+ weight='weight')
+ assert_equal(nd,answer)
+
+ D=G.to_directed()
+ nd = nx.average_neighbor_degree(D,source='in',target='in',
+ weight='weight')
+ assert_equal(nd,answer)
+
+
+ def test_degree_k4(self):
+ G=nx.complete_graph(4)
+ answer={0:3,1:3,2:3,3:3}
+ nd = nx.average_neighbor_degree(G)
+ assert_equal(nd,answer)
+
+ D=G.to_directed()
+ nd = nx.average_neighbor_degree(D)
+ assert_equal(nd,answer)
+
+ D=G.to_directed()
+ nd = nx.average_neighbor_degree(D)
+ assert_equal(nd,answer)
+
+ D=G.to_directed()
+ nd = nx.average_neighbor_degree(D,source='in',target='in')
+ assert_equal(nd,answer)
+
+ def test_degree_k4_nodes(self):
+ G=nx.complete_graph(4)
+ answer={1:3.0,2:3.0}
+ nd = nx.average_neighbor_degree(G,nodes=[1,2])
+ assert_equal(nd,answer)
+
+ def test_degree_barrat(self):
+ G=nx.star_graph(5)
+ G.add_edges_from([(5,6),(5,7),(5,8),(5,9)])
+ G[0][5]['weight']=5
+ nd = nx.average_neighbor_degree(G)[5]
+ assert_equal(nd,1.8)
+ nd = nx.average_neighbor_degree(G,weight='weight')[5]
+ assert_almost_equal(nd,3.222222,places=5)
+
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/tests/test_pairs.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/tests/test_pairs.py
new file mode 100644
index 0000000..fa67a45
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/assortativity/tests/test_pairs.py
@@ -0,0 +1,113 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx as nx
+from base_test import BaseTestAttributeMixing,BaseTestDegreeMixing
+
+class TestAttributeMixingXY(BaseTestAttributeMixing):
+
+ def test_node_attribute_xy_undirected(self):
+ attrxy=sorted(nx.node_attribute_xy(self.G,'fish'))
+ attrxy_result=sorted([('one','one'),
+ ('one','one'),
+ ('two','two'),
+ ('two','two'),
+ ('one','red'),
+ ('red','one'),
+ ('blue','two'),
+ ('two','blue')
+ ])
+ assert_equal(attrxy,attrxy_result)
+
+ def test_node_attribute_xy_undirected_nodes(self):
+ attrxy=sorted(nx.node_attribute_xy(self.G,'fish',
+ nodes=['one','yellow']))
+ attrxy_result=sorted( [
+ ])
+ assert_equal(attrxy,attrxy_result)
+
+
+ def test_node_attribute_xy_directed(self):
+ attrxy=sorted(nx.node_attribute_xy(self.D,'fish'))
+ attrxy_result=sorted([('one','one'),
+ ('two','two'),
+ ('one','red'),
+ ('two','blue')
+ ])
+ assert_equal(attrxy,attrxy_result)
+
+ def test_node_attribute_xy_multigraph(self):
+ attrxy=sorted(nx.node_attribute_xy(self.M,'fish'))
+ attrxy_result=[('one','one'),
+ ('one','one'),
+ ('one','one'),
+ ('one','one'),
+ ('two','two'),
+ ('two','two')
+ ]
+ assert_equal(attrxy,attrxy_result)
+
+ def test_node_attribute_xy_selfloop(self):
+ attrxy=sorted(nx.node_attribute_xy(self.S,'fish'))
+ attrxy_result=[('one','one'),
+ ('two','two')
+ ]
+ assert_equal(attrxy,attrxy_result)
+
+
+class TestDegreeMixingXY(BaseTestDegreeMixing):
+
+ def test_node_degree_xy_undirected(self):
+ xy=sorted(nx.node_degree_xy(self.P4))
+ xy_result=sorted([(1,2),
+ (2,1),
+ (2,2),
+ (2,2),
+ (1,2),
+ (2,1)])
+ assert_equal(xy,xy_result)
+
+ def test_node_degree_xy_undirected_nodes(self):
+ xy=sorted(nx.node_degree_xy(self.P4,nodes=[0,1,-1]))
+ xy_result=sorted([(1,2),
+ (2,1),])
+ assert_equal(xy,xy_result)
+
+
+ def test_node_degree_xy_directed(self):
+ xy=sorted(nx.node_degree_xy(self.D))
+ xy_result=sorted([(2,1),
+ (2,3),
+ (1,3),
+ (1,3)])
+ assert_equal(xy,xy_result)
+
+ def test_node_degree_xy_multigraph(self):
+ xy=sorted(nx.node_degree_xy(self.M))
+ xy_result=sorted([(2,3),
+ (2,3),
+ (3,2),
+ (3,2),
+ (2,3),
+ (3,2),
+ (1,2),
+ (2,1)])
+ assert_equal(xy,xy_result)
+
+
+ def test_node_degree_xy_selfloop(self):
+ xy=sorted(nx.node_degree_xy(self.S))
+ xy_result=sorted([(2,2),
+ (2,2)])
+ assert_equal(xy,xy_result)
+
+ def test_node_degree_xy_weighted(self):
+ G = nx.Graph()
+ G.add_edge(1,2,weight=7)
+ G.add_edge(2,3,weight=10)
+ xy=sorted(nx.node_degree_xy(G,weight='weight'))
+ xy_result=sorted([(7,17),
+ (17,10),
+ (17,7),
+ (10,17)])
+ assert_equal(xy,xy_result)
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/__init__.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/__init__.py
new file mode 100644
index 0000000..53ba9d3
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/__init__.py
@@ -0,0 +1,93 @@
+r""" This module provides functions and operations for bipartite
+graphs. Bipartite graphs `B = (U, V, E)` have two node sets `U,V` and edges in
+`E` that only connect nodes from opposite sets. It is common in the literature
+to use a spatial analogy, referring to the two node sets as top and bottom nodes.
+
+The bipartite algorithms are not imported into the networkx namespace
+at the top level so the easiest way to use them is with:
+
+>>> import networkx as nx
+>>> from networkx.algorithms import bipartite
+
+NetworkX does not have a custom bipartite graph class but the Graph()
+or DiGraph() classes can be used to represent bipartite graphs. However,
+you have to keep track of which set each node belongs to, and make
+sure that there is no edge between nodes of the same set. The convention used
+in NetworkX is to use a node attribute named "bipartite" with values 0 or 1 to
+identify the sets each node belongs to.
+
+For example:
+
+>>> B = nx.Graph()
+>>> B.add_nodes_from([1,2,3,4], bipartite=0) # Add the node attribute "bipartite"
+>>> B.add_nodes_from(['a','b','c'], bipartite=1)
+>>> B.add_edges_from([(1,'a'), (1,'b'), (2,'b'), (2,'c'), (3,'c'), (4,'a')])
+
+Many algorithms of the bipartite module of NetworkX require, as an argument, a
+container with all the nodes that belong to one set, in addition to the bipartite
+graph `B`. If `B` is connected, you can find the node sets using a two-coloring
+algorithm:
+
+>>> nx.is_connected(B)
+True
+>>> bottom_nodes, top_nodes = bipartite.sets(B)
+
+list(top_nodes)
+[1, 2, 3, 4]
+list(bottom_nodes)
+['a', 'c', 'b']
+
+However, if the input graph is not connected, there is more than one possible
+coloring. Thus, the following result is also correct:
+
+>>> B.remove_edge(2,'c')
+>>> nx.is_connected(B)
+False
+>>> bottom_nodes, top_nodes = bipartite.sets(B)
+
+list(top_nodes)
+[1, 2, 4, 'c']
+list(bottom_nodes)
+['a', 3, 'b']
+
+Using the "bipartite" node attribute, you can easily get the two node sets:
+
+>>> top_nodes = set(n for n,d in B.nodes(data=True) if d['bipartite']==0)
+>>> bottom_nodes = set(B) - top_nodes
+
+list(top_nodes)
+[1, 2, 3, 4]
+list(bottom_nodes)
+['a', 'c', 'b']
+
+So you can easily use the bipartite algorithms that require, as an argument, a
+container with all nodes that belong to one node set:
+
+>>> print(round(bipartite.density(B, bottom_nodes),2))
+0.42
+>>> G = bipartite.projected_graph(B, top_nodes)
+>>> G.edges()
+[(1, 2), (1, 4)]
+
+All bipartite graph generators in NetworkX build bipartite graphs with the
+"bipartite" node attribute. Thus, you can use the same approach:
+
+>>> RB = nx.bipartite_random_graph(5, 7, 0.2)
+>>> RB_top = set(n for n,d in RB.nodes(data=True) if d['bipartite']==0)
+>>> RB_bottom = set(RB) - RB_top
+>>> list(RB_top)
+[0, 1, 2, 3, 4]
+>>> list(RB_bottom)
+[5, 6, 7, 8, 9, 10, 11]
+
+For other bipartite graph generators see the bipartite section of
+:doc:`generators`.
+
+"""
+
+from networkx.algorithms.bipartite.basic import *
+from networkx.algorithms.bipartite.centrality import *
+from networkx.algorithms.bipartite.cluster import *
+from networkx.algorithms.bipartite.projection import *
+from networkx.algorithms.bipartite.redundancy import *
+from networkx.algorithms.bipartite.spectral import *
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/basic.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/basic.py
new file mode 100644
index 0000000..e902889
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/basic.py
@@ -0,0 +1,335 @@
+# -*- coding: utf-8 -*-
+"""
+==========================
+Bipartite Graph Algorithms
+==========================
+"""
+# Copyright (C) 2012 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+import networkx as nx
+from itertools import count
+__author__ = """\n""".join(['Jordi Torrents <jtorrents@milnou.net>',
+ 'Aric Hagberg <aric.hagberg@gmail.com>'])
+__all__ = [ 'is_bipartite',
+ 'is_bipartite_node_set',
+ 'color',
+ 'sets',
+ 'density',
+ 'degrees',
+ 'biadjacency_matrix']
+
+def biadjacency_matrix(G, row_order, column_order=None,
+ weight='weight', dtype=None):
+ r"""Return the biadjacency matrix of the bipartite graph G.
+
+ Let `G = (U, V, E)` be a bipartite graph with node sets
+ `U = u_{1},...,u_{r}` and `V = v_{1},...,v_{s}`. The biadjacency
+ matrix [1] is the `r` x `s` matrix `B` in which `b_{i,j} = 1`
+ if, and only if, `(u_i, v_j) \in E`. If the parameter `weight` is
+ not `None` and matches the name of an edge attribute, its value is
+ used instead of 1.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph
+
+ row_order : list of nodes
+ The rows of the matrix are ordered according to the list of nodes.
+
+ column_order : list, optional
+ The columns of the matrix are ordered according to the list of nodes.
+ If column_order is None, then the ordering of columns is arbitrary.
+
+ weight : string or None, optional (default='weight')
+ The edge data key used to provide each value in the matrix.
+ If None, then each edge has weight 1.
+
+ dtype : NumPy data type, optional
+ A valid single NumPy data type used to initialize the array.
+ This must be a simple type such as int or numpy.float64 and
+ not a compound data type (see to_numpy_recarray)
+ If None, then the NumPy default is used.
+
+ Returns
+ -------
+ B : numpy matrix
+ Biadjacency matrix representation of the bipartite graph G.
+
+ Notes
+ -----
+ No attempt is made to check that the input graph is bipartite.
+
+ For directed bipartite graphs only successors are considered as neighbors.
+ To obtain an adjacency matrix with ones (or weight values) for both
+ predecessors and successors you have to generate two biadjacency matrices
+ where the rows of one of them are the columns of the other, and then add
+ one to the transpose of the other.
+
+ See Also
+ --------
+ to_numpy_matrix
+ adjacency_matrix
+
+ References
+ ----------
+    .. [1] http://en.wikipedia.org/wiki/Adjacency_matrix#Adjacency_matrix_of_a_bipartite_graph
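+
+    Examples
+    --------
+    A minimal sketch (editorial example; node labels chosen only for
+    illustration):
+
+    >>> from networkx.algorithms import bipartite
+    >>> B = nx.Graph()
+    >>> B.add_edges_from([(0,'a'), (0,'b'), (1,'b')])
+    >>> M = bipartite.biadjacency_matrix(B, [0,1], column_order=['a','b'])
+    >>> M[0,1], M[1,0]
+    (1.0, 0.0)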
+ """
+ try:
+ import numpy as np
+ except ImportError:
+        raise ImportError('biadjacency_matrix() requires numpy '
+                          'http://scipy.org/')
+ if column_order is None:
+ column_order = list(set(G) - set(row_order))
+ row = dict(zip(row_order,count()))
+ col = dict(zip(column_order,count()))
+ M = np.zeros((len(row),len(col)), dtype=dtype)
+ for u in row_order:
+ for v, d in G[u].items():
+ M[row[u],col[v]] = d.get(weight, 1)
+ return np.asmatrix(M)
+
+def color(G):
+ """Returns a two-coloring of the graph.
+
+ Raises an exception if the graph is not bipartite.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ Returns
+ -------
+ color : dictionary
+ A dictionary keyed by node with a 1 or 0 as data for each node color.
+
+ Raises
+ ------
+    NetworkXError
+        If the graph is not two-colorable.
+
+ Examples
+ --------
+ >>> from networkx.algorithms import bipartite
+ >>> G = nx.path_graph(4)
+ >>> c = bipartite.color(G)
+ >>> print(c)
+ {0: 1, 1: 0, 2: 1, 3: 0}
+
+    You can use this to set a node attribute indicating the bipartite set:
+
+ >>> nx.set_node_attributes(G, 'bipartite', c)
+ >>> print(G.node[0]['bipartite'])
+ 1
+ >>> print(G.node[1]['bipartite'])
+ 0
+ """
+ if G.is_directed():
+ import itertools
+ def neighbors(v):
+ return itertools.chain.from_iterable([G.predecessors_iter(v),
+ G.successors_iter(v)])
+ else:
+ neighbors=G.neighbors_iter
+
+ color = {}
+ for n in G: # handle disconnected graphs
+ if n in color or len(G[n])==0: # skip isolates
+ continue
+ queue = [n]
+ color[n] = 1 # nodes seen with color (1 or 0)
+ while queue:
+ v = queue.pop()
+ c = 1 - color[v] # opposite color of node v
+ for w in neighbors(v):
+ if w in color:
+ if color[w] == color[v]:
+ raise nx.NetworkXError("Graph is not bipartite.")
+ else:
+ color[w] = c
+ queue.append(w)
+ # color isolates with 0
+ color.update(dict.fromkeys(nx.isolates(G),0))
+ return color
+
+def is_bipartite(G):
+ """ Returns True if graph G is bipartite, False if not.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ Examples
+ --------
+ >>> from networkx.algorithms import bipartite
+ >>> G = nx.path_graph(4)
+ >>> print(bipartite.is_bipartite(G))
+ True
+
+ See Also
+ --------
+ color, is_bipartite_node_set
+ """
+ try:
+ color(G)
+ return True
+ except nx.NetworkXError:
+ return False
+
+def is_bipartite_node_set(G,nodes):
+ """Returns True if nodes and G/nodes are a bipartition of G.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ nodes: list or container
+      Check whether these nodes form one of the two sets of the bipartition.
+
+ Examples
+ --------
+ >>> from networkx.algorithms import bipartite
+ >>> G = nx.path_graph(4)
+ >>> X = set([1,3])
+ >>> bipartite.is_bipartite_node_set(G,X)
+ True
+
+ Notes
+ -----
+ For connected graphs the bipartite sets are unique. This function handles
+ disconnected graphs.
+ """
+ S=set(nodes)
+ for CC in nx.connected_component_subgraphs(G):
+ X,Y=sets(CC)
+ if not ( (X.issubset(S) and Y.isdisjoint(S)) or
+ (Y.issubset(S) and X.isdisjoint(S)) ):
+ return False
+ return True
+
+
+def sets(G):
+ """Returns bipartite node sets of graph G.
+
+ Raises an exception if the graph is not bipartite.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ Returns
+ -------
+ (X,Y) : two-tuple of sets
+ One set of nodes for each part of the bipartite graph.
+
+ Examples
+ --------
+ >>> from networkx.algorithms import bipartite
+ >>> G = nx.path_graph(4)
+ >>> X, Y = bipartite.sets(G)
+ >>> list(X)
+ [0, 2]
+ >>> list(Y)
+ [1, 3]
+
+ See Also
+ --------
+ color
+ """
+ c = color(G)
+ X = set(n for n in c if c[n]) # c[n] == 1
+ Y = set(n for n in c if not c[n]) # c[n] == 0
+ return (X, Y)
+
+def density(B, nodes):
+ """Return density of bipartite graph B.
+
+ Parameters
+ ----------
+    B : NetworkX graph
+
+ nodes: list or container
+ Nodes in one set of the bipartite graph.
+
+ Returns
+ -------
+ d : float
+ The bipartite density
+
+ Examples
+ --------
+ >>> from networkx.algorithms import bipartite
+ >>> G = nx.complete_bipartite_graph(3,2)
+ >>> X=set([0,1,2])
+ >>> bipartite.density(G,X)
+ 1.0
+ >>> Y=set([3,4])
+ >>> bipartite.density(G,Y)
+ 1.0
+
+ See Also
+ --------
+ color
+ """
+ n=len(B)
+ m=nx.number_of_edges(B)
+ nb=len(nodes)
+ nt=n-nb
+ if m==0: # includes cases n==0 and n==1
+ d=0.0
+ else:
+ if B.is_directed():
+ d=m/(2.0*float(nb*nt))
+ else:
+ d= m/float(nb*nt)
+ return d
+
+def degrees(B, nodes, weight=None):
+ """Return the degrees of the two node sets in the bipartite graph B.
+
+ Parameters
+ ----------
+    B : NetworkX graph
+
+ nodes: list or container
+ Nodes in one set of the bipartite graph.
+
+ weight : string or None, optional (default=None)
+ The edge attribute that holds the numerical value used as a weight.
+ If None, then each edge has weight 1.
+ The degree is the sum of the edge weights adjacent to the node.
+
+ Returns
+ -------
+ (degX,degY) : tuple of dictionaries
+ The degrees of the two bipartite sets as dictionaries keyed by node.
+
+ Examples
+ --------
+ >>> from networkx.algorithms import bipartite
+ >>> G = nx.complete_bipartite_graph(3,2)
+ >>> Y=set([3,4])
+ >>> degX,degY=bipartite.degrees(G,Y)
+ >>> degX
+ {0: 2, 1: 2, 2: 2}
+
+ See Also
+ --------
+ color, density
+ """
+ bottom=set(nodes)
+ top=set(B)-bottom
+ return (B.degree(top,weight),B.degree(bottom,weight))
+
+
+# fixture for nose tests
+def setup_module(module):
+ from nose import SkipTest
+ try:
+ import numpy
+    except ImportError:
+ raise SkipTest("NumPy not available")
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/centrality.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/centrality.py
new file mode 100644
index 0000000..0b885a7
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/centrality.py
@@ -0,0 +1,266 @@
+#-*- coding: utf-8 -*-
+# Copyright (C) 2011 by
+# Jordi Torrents <jtorrents@milnou.net>
+# Aric Hagberg <hagberg@lanl.gov>
+# All rights reserved.
+# BSD license.
+import networkx as nx
+__author__ = """\n""".join(['Jordi Torrents <jtorrents@milnou.net>',
+ 'Aric Hagberg (hagberg@lanl.gov)'])
+__all__=['degree_centrality',
+ 'betweenness_centrality',
+ 'closeness_centrality']
+
+def degree_centrality(G, nodes):
+ r"""Compute the degree centrality for nodes in a bipartite network.
+
+ The degree centrality for a node `v` is the fraction of nodes
+ connected to it.
+
+ Parameters
+ ----------
+ G : graph
+ A bipartite network
+
+ nodes : list or container
+ Container with all nodes in one bipartite node set.
+
+ Returns
+ -------
+ centrality : dictionary
+ Dictionary keyed by node with bipartite degree centrality as the value.
+
+ See Also
+ --------
+ betweenness_centrality,
+ closeness_centrality,
+ sets,
+ is_bipartite
+
+ Notes
+ -----
+    The nodes input parameter must contain all nodes in one bipartite node set,
+ but the dictionary returned contains all nodes from both bipartite node
+ sets.
+
+ For unipartite networks, the degree centrality values are
+ normalized by dividing by the maximum possible degree (which is
+ `n-1` where `n` is the number of nodes in G).
+
+ In the bipartite case, the maximum possible degree of a node in a
+ bipartite node set is the number of nodes in the opposite node set
+ [1]_. The degree centrality for a node `v` in the bipartite
+ sets `U` with `n` nodes and `V` with `m` nodes is
+
+ .. math::
+
+        d_{v} = \frac{deg(v)}{m}, \mbox{ for } v \in U ,
+
+        d_{v} = \frac{deg(v)}{n}, \mbox{ for } v \in V ,
+
+
+ where `deg(v)` is the degree of node `v`.
+
+ References
+ ----------
+ .. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation
+ Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook
+ of Social Network Analysis. Sage Publications.
+ http://www.steveborgatti.com/papers/bhaffiliations.pdf
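+
+    Examples
+    --------
+    A minimal sketch (editorial example): in a complete bipartite graph
+    every node is linked to the whole opposite set, so every centrality
+    value is 1:
+
+    >>> from networkx.algorithms import bipartite
+    >>> G = nx.complete_bipartite_graph(2,3)
+    >>> d = bipartite.degree_centrality(G, [0,1])
+    >>> d[0]
+    1.0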
+ """
+ top = set(nodes)
+ bottom = set(G) - top
+ s = 1.0/len(bottom)
+ centrality = dict((n,d*s) for n,d in G.degree_iter(top))
+ s = 1.0/len(top)
+ centrality.update(dict((n,d*s) for n,d in G.degree_iter(bottom)))
+ return centrality
+
+
+def betweenness_centrality(G, nodes):
+ r"""Compute betweenness centrality for nodes in a bipartite network.
+
+ Betweenness centrality of a node `v` is the sum of the
+ fraction of all-pairs shortest paths that pass through `v`.
+
+ Values of betweenness are normalized by the maximum possible
+ value which for bipartite graphs is limited by the relative size
+ of the two node sets [1]_.
+
+ Let `n` be the number of nodes in the node set `U` and
+ `m` be the number of nodes in the node set `V`, then
+ nodes in `U` are normalized by dividing by
+
+ .. math::
+
+ \frac{1}{2} [m^2 (s + 1)^2 + m (s + 1)(2t - s - 1) - t (2s - t + 3)] ,
+
+ where
+
+ .. math::
+
+ s = (n - 1) \div m , t = (n - 1) \mod m ,
+
+ and nodes in `V` are normalized by dividing by
+
+ .. math::
+
+ \frac{1}{2} [n^2 (p + 1)^2 + n (p + 1)(2r - p - 1) - r (2p - r + 3)] ,
+
+ where,
+
+ .. math::
+
+ p = (m - 1) \div n , r = (m - 1) \mod n .
+
+ Parameters
+ ----------
+ G : graph
+ A bipartite graph
+
+ nodes : list or container
+ Container with all nodes in one bipartite node set.
+
+ Returns
+ -------
+ betweenness : dictionary
+ Dictionary keyed by node with bipartite betweenness centrality
+ as the value.
+
+ See Also
+ --------
+ degree_centrality,
+ closeness_centrality,
+ sets,
+ is_bipartite
+
+ Notes
+ -----
+ The nodes input parameter must contain all nodes in one bipartite node set,
+ but the dictionary returned contains all nodes from both node sets.
+
+ References
+ ----------
+ .. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation
+ Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook
+ of Social Network Analysis. Sage Publications.
+ http://www.steveborgatti.com/papers/bhaffiliations.pdf
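+
+    Examples
+    --------
+    A minimal sketch (editorial example; path graphs are bipartite):
+
+    >>> from networkx.algorithms import bipartite
+    >>> G = nx.path_graph(4)
+    >>> d = bipartite.betweenness_centrality(G, [0,2])
+    >>> d[1]
+    1.0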
+ """
+ top = set(nodes)
+ bottom = set(G) - top
+ n = float(len(top))
+ m = float(len(bottom))
+ s = (n-1) // m
+ t = (n-1) % m
+ bet_max_top = (((m**2)*((s+1)**2))+
+ (m*(s+1)*(2*t-s-1))-
+ (t*((2*s)-t+3)))/2.0
+ p = (m-1) // n
+ r = (m-1) % n
+ bet_max_bot = (((n**2)*((p+1)**2))+
+ (n*(p+1)*(2*r-p-1))-
+ (r*((2*p)-r+3)))/2.0
+ betweenness = nx.betweenness_centrality(G, normalized=False,
+ weight=None)
+ for node in top:
+ betweenness[node]/=bet_max_top
+ for node in bottom:
+ betweenness[node]/=bet_max_bot
+ return betweenness
+
+def closeness_centrality(G, nodes, normalized=True):
+ r"""Compute the closeness centrality for nodes in a bipartite network.
+
+ The closeness of a node is the distance to all other nodes in the
+ graph or in the case that the graph is not connected to all other nodes
+ in the connected component containing that node.
+
+ Parameters
+ ----------
+ G : graph
+ A bipartite network
+
+ nodes : list or container
+ Container with all nodes in one bipartite node set.
+
+ normalized : bool, optional
+ If True (default) normalize by connected component size.
+
+ Returns
+ -------
+ closeness : dictionary
+ Dictionary keyed by node with bipartite closeness centrality
+ as the value.
+
+ See Also
+ --------
+ betweenness_centrality,
+ degree_centrality
+ sets,
+ is_bipartite
+
+ Notes
+ -----
+    The nodes input parameter must contain all nodes in one bipartite node set,
+ but the dictionary returned contains all nodes from both node sets.
+
+ Closeness centrality is normalized by the minimum distance possible.
+ In the bipartite case the minimum distance for a node in one bipartite
+ node set is 1 from all nodes in the other node set and 2 from all
+ other nodes in its own set [1]_. Thus the closeness centrality
+ for node `v` in the two bipartite sets `U` with
+ `n` nodes and `V` with `m` nodes is
+
+ .. math::
+
+        c_{v} = \frac{m + 2(n - 1)}{d}, \mbox{ for } v \in U,
+
+        c_{v} = \frac{n + 2(m - 1)}{d}, \mbox{ for } v \in V,
+
+ where `d` is the sum of the distances from `v` to all
+ other nodes.
+
+ Higher values of closeness indicate higher centrality.
+
+    As in the unipartite case, setting normalized=True causes the
+    values to be normalized further by (n-1)/(size(G)-1), where n is the
+    number of nodes in the connected part of the graph containing the
+ node. If the graph is not completely connected, this algorithm
+ computes the closeness centrality for each connected part
+ separately.
+
+ References
+ ----------
+ .. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation
+ Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook
+ of Social Network Analysis. Sage Publications.
+ http://www.steveborgatti.com/papers/bhaffiliations.pdf
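+
+    Examples
+    --------
+    A minimal sketch (editorial example):
+
+    >>> from networkx.algorithms import bipartite
+    >>> G = nx.complete_bipartite_graph(2,3)
+    >>> c = bipartite.closeness_centrality(G, [0,1])
+    >>> c[0]
+    1.0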
+ """
+ closeness={}
+ path_length=nx.single_source_shortest_path_length
+ top = set(nodes)
+ bottom = set(G) - top
+ n = float(len(top))
+ m = float(len(bottom))
+ for node in top:
+ sp=path_length(G,node)
+ totsp=sum(sp.values())
+ if totsp > 0.0 and len(G) > 1:
+ closeness[node]= (m + 2*(n-1)) / totsp
+ if normalized:
+ s=(len(sp)-1.0) / ( len(G) - 1 )
+ closeness[node] *= s
+ else:
+            closeness[node]=0.0
+ for node in bottom:
+ sp=path_length(G,node)
+ totsp=sum(sp.values())
+ if totsp > 0.0 and len(G) > 1:
+ closeness[node]= (n + 2*(m-1)) / totsp
+ if normalized:
+ s=(len(sp)-1.0) / ( len(G) - 1 )
+ closeness[node] *= s
+ else:
+            closeness[node]=0.0
+ return closeness
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/cluster.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/cluster.py
new file mode 100644
index 0000000..8adf92d
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/cluster.py
@@ -0,0 +1,266 @@
+#-*- coding: utf-8 -*-
+# Copyright (C) 2011 by
+# Jordi Torrents <jtorrents@milnou.net>
+# Aric Hagberg <hagberg@lanl.gov>
+# All rights reserved.
+# BSD license.
+import itertools
+import networkx as nx
+__author__ = """\n""".join(['Jordi Torrents <jtorrents@milnou.net>',
+ 'Aric Hagberg (hagberg@lanl.gov)'])
+__all__ = [ 'clustering',
+ 'average_clustering',
+ 'latapy_clustering',
+ 'robins_alexander_clustering']
+
+# functions for computing clustering of pairs
+def cc_dot(nu,nv):
+ return float(len(nu & nv))/len(nu | nv)
+
+def cc_max(nu,nv):
+ return float(len(nu & nv))/max(len(nu),len(nv))
+
+def cc_min(nu,nv):
+ return float(len(nu & nv))/min(len(nu),len(nv))
+
+modes={'dot':cc_dot,
+ 'min':cc_min,
+ 'max':cc_max}
+
+def latapy_clustering(G, nodes=None, mode='dot'):
+ r"""Compute a bipartite clustering coefficient for nodes.
+
+    The bipartite clustering coefficient is a measure of local density
+ of connections defined as [1]_:
+
+ .. math::
+
+        c_u = \frac{\sum_{v \in N(N(u))} c_{uv} }{|N(N(u))|}
+
+ where `N(N(u))` are the second order neighbors of `u` in `G` excluding `u`,
+ and `c_{uv}` is the pairwise clustering coefficient between nodes
+ `u` and `v`.
+
+ The mode selects the function for `c_{uv}` which can be:
+
+ `dot`:
+
+ .. math::
+
+ c_{uv}=\frac{|N(u)\cap N(v)|}{|N(u) \cup N(v)|}
+
+ `min`:
+
+ .. math::
+
+ c_{uv}=\frac{|N(u)\cap N(v)|}{min(|N(u)|,|N(v)|)}
+
+ `max`:
+
+ .. math::
+
+ c_{uv}=\frac{|N(u)\cap N(v)|}{max(|N(u)|,|N(v)|)}
+
+
+ Parameters
+ ----------
+ G : graph
+ A bipartite graph
+
+ nodes : list or iterable (optional)
+ Compute bipartite clustering for these nodes. The default
+ is all nodes in G.
+
+ mode : string
+        The pairwise bipartite clustering method to be used in the computation.
+ It must be "dot", "max", or "min".
+
+ Returns
+ -------
+ clustering : dictionary
+ A dictionary keyed by node with the clustering coefficient value.
+
+
+ Examples
+ --------
+ >>> from networkx.algorithms import bipartite
+ >>> G = nx.path_graph(4) # path graphs are bipartite
+ >>> c = bipartite.clustering(G)
+ >>> c[0]
+ 0.5
+ >>> c = bipartite.clustering(G,mode='min')
+ >>> c[0]
+ 1.0
+
+ See Also
+ --------
+ robins_alexander_clustering
+ square_clustering
+ average_clustering
+
+ References
+ ----------
+ .. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio (2008).
+ Basic notions for the analysis of large two-mode networks.
+ Social Networks 30(1), 31--48.
+ """
+ if not nx.algorithms.bipartite.is_bipartite(G):
+ raise nx.NetworkXError("Graph is not bipartite")
+
+ try:
+ cc_func = modes[mode]
+ except KeyError:
+ raise nx.NetworkXError(\
+ "Mode for bipartite clustering must be: dot, min or max")
+
+ if nodes is None:
+ nodes = G
+ ccs = {}
+ for v in nodes:
+ cc = 0.0
+ nbrs2=set([u for nbr in G[v] for u in G[nbr]])-set([v])
+ for u in nbrs2:
+ cc += cc_func(set(G[u]),set(G[v]))
+ if cc > 0.0: # len(nbrs2)>0
+ cc /= len(nbrs2)
+ ccs[v] = cc
+ return ccs
+
+clustering = latapy_clustering
+
+def average_clustering(G, nodes=None, mode='dot'):
+ r"""Compute the average bipartite clustering coefficient.
+
+ A clustering coefficient for the whole graph is the average,
+
+ .. math::
+
+ C = \frac{1}{n}\sum_{v \in G} c_v,
+
+ where `n` is the number of nodes in `G`.
+
+ Similar measures for the two bipartite sets can be defined [1]_
+
+ .. math::
+
+ C_X = \frac{1}{|X|}\sum_{v \in X} c_v,
+
+ where `X` is a bipartite set of `G`.
+
+ Parameters
+ ----------
+ G : graph
+ a bipartite graph
+
+ nodes : list or iterable, optional
+ A container of nodes to use in computing the average.
+ The nodes should be either the entire graph (the default) or one of the
+ bipartite sets.
+
+ mode : string
+        The pairwise bipartite clustering method.
+ It must be "dot", "max", or "min"
+
+ Returns
+ -------
+ clustering : float
+ The average bipartite clustering for the given set of nodes or the
+ entire graph if no nodes are specified.
+
+ Examples
+ --------
+ >>> from networkx.algorithms import bipartite
+ >>> G=nx.star_graph(3) # star graphs are bipartite
+ >>> bipartite.average_clustering(G)
+ 0.75
+ >>> X,Y=bipartite.sets(G)
+ >>> bipartite.average_clustering(G,X)
+ 0.0
+ >>> bipartite.average_clustering(G,Y)
+ 1.0
+
+ See Also
+ --------
+ clustering
+
+ Notes
+ -----
+ The container of nodes passed to this function must contain all of the nodes
+ in one of the bipartite sets ("top" or "bottom") in order to compute
+ the correct average bipartite clustering coefficients.
+
+ References
+ ----------
+ .. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio (2008).
+ Basic notions for the analysis of large two-mode networks.
+ Social Networks 30(1), 31--48.
+ """
+ if nodes is None:
+ nodes=G
+ ccs=latapy_clustering(G, nodes=nodes, mode=mode)
+ return float(sum(ccs[v] for v in nodes))/len(nodes)
+
+def robins_alexander_clustering(G):
+ r"""Compute the bipartite clustering of G.
+
+ Robins and Alexander [1]_ defined bipartite clustering coefficient as
+ four times the number of four cycles `C_4` divided by the number of
+ three paths `L_3` in a bipartite graph:
+
+ .. math::
+
+ CC_4 = \frac{4 * C_4}{L_3}
+
+ Parameters
+ ----------
+ G : graph
+ a bipartite graph
+
+ Returns
+ -------
+ clustering : float
+ The Robins and Alexander bipartite clustering for the input graph.
+
+ Examples
+ --------
+ >>> from networkx.algorithms import bipartite
+ >>> G = nx.davis_southern_women_graph()
+ >>> print(round(bipartite.robins_alexander_clustering(G), 3))
+ 0.468
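+
+    A smaller editorial sketch: on a 4-cycle every three-path closes
+    into a four-cycle (`C_4` = 1, `L_3` = 4), so the coefficient is
+    4*1/4:
+
+    >>> bipartite.robins_alexander_clustering(nx.cycle_graph(4))
+    1.0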
+
+ See Also
+ --------
+ latapy_clustering
+ square_clustering
+
+ References
+ ----------
+ .. [1] Robins, G. and M. Alexander (2004). Small worlds among interlocking
+ directors: Network structure and distance in bipartite graphs.
+ Computational & Mathematical Organization Theory 10(1), 69–94.
+
+ """
+ if G.order() < 4 or G.size() < 3:
+ return 0
+ L_3 = _threepaths(G)
+ if L_3 == 0:
+ return 0
+ C_4 = _four_cycles(G)
+ return (4. * C_4) / L_3
+
+def _four_cycles(G):
+ cycles = 0
+ for v in G:
+ for u, w in itertools.combinations(G[v], 2):
+ cycles += len((set(G[u]) & set(G[w])) - set([v]))
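+    # every four-cycle has been counted once at each of its four nodes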
+ return cycles / 4
+
+def _threepaths(G):
+ paths = 0
+ for v in G:
+ for u in G[v]:
+ for w in set(G[u]) - set([v]):
+ paths += len(set(G[w]) - set([v, u]))
+ # Divide by two because we count each three path twice
+ # one for each possible starting point
+ return paths / 2
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/projection.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/projection.py
new file mode 100644
index 0000000..7f08244
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/projection.py
@@ -0,0 +1,497 @@
+# -*- coding: utf-8 -*-
+"""One-mode (unipartite) projections of bipartite graphs.
+"""
+import networkx as nx
+# Copyright (C) 2011 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+__author__ = """\n""".join(['Aric Hagberg <aric.hagberg@gmail.com>',
+ 'Jordi Torrents <jtorrents@milnou.net>'])
+__all__ = ['project',
+ 'projected_graph',
+ 'weighted_projected_graph',
+ 'collaboration_weighted_projected_graph',
+ 'overlap_weighted_projected_graph',
+ 'generic_weighted_projected_graph']
+
+def projected_graph(B, nodes, multigraph=False):
+ r"""Returns the projection of B onto one of its node sets.
+
+ Returns the graph G that is the projection of the bipartite graph B
+    onto the specified nodes. The nodes retain their attributes and are connected
+ in G if they have a common neighbor in B.
+
+ Parameters
+ ----------
+ B : NetworkX graph
+ The input graph should be bipartite.
+
+ nodes : list or iterable
+ Nodes to project onto (the "bottom" nodes).
+
+ multigraph: bool (default=False)
+ If True return a multigraph where the multiple edges represent multiple
+        shared neighbors. The edge key in the multigraph is set to the
+        label of the shared neighbor.
+
+ Returns
+ -------
+ Graph : NetworkX graph or multigraph
+ A graph that is the projection onto the given nodes.
+
+ Examples
+ --------
+ >>> from networkx.algorithms import bipartite
+ >>> B = nx.path_graph(4)
+ >>> G = bipartite.projected_graph(B, [1,3])
+ >>> print(G.nodes())
+ [1, 3]
+ >>> print(G.edges())
+ [(1, 3)]
+
+    If nodes `a` and `b` are connected through both nodes 1 and 2 then
+ building a multigraph results in two edges in the projection onto
+ [`a`,`b`]:
+
+ >>> B = nx.Graph()
+ >>> B.add_edges_from([('a', 1), ('b', 1), ('a', 2), ('b', 2)])
+ >>> G = bipartite.projected_graph(B, ['a', 'b'], multigraph=True)
+ >>> print([sorted((u,v)) for u,v in G.edges()])
+ [['a', 'b'], ['a', 'b']]
+
+ Notes
+ ------
+ No attempt is made to verify that the input graph B is bipartite.
+ Returns a simple graph that is the projection of the bipartite graph B
+ onto the set of nodes given in list nodes. If multigraph=True then
+ a multigraph is returned with an edge for every shared neighbor.
+
+    Directed graphs are allowed as input. The output is then also a
+    directed graph, with an edge between two nodes whenever there is a
+    directed path of length two between them in B.
+
+ The graph and node properties are (shallow) copied to the projected graph.
+
+ See Also
+ --------
+ is_bipartite,
+ is_bipartite_node_set,
+ sets,
+ weighted_projected_graph,
+ collaboration_weighted_projected_graph,
+ overlap_weighted_projected_graph,
+ generic_weighted_projected_graph
+ """
+ if B.is_multigraph():
+ raise nx.NetworkXError("not defined for multigraphs")
+ if B.is_directed():
+ directed=True
+ if multigraph:
+ G=nx.MultiDiGraph()
+ else:
+ G=nx.DiGraph()
+ else:
+ directed=False
+ if multigraph:
+ G=nx.MultiGraph()
+ else:
+ G=nx.Graph()
+ G.graph.update(B.graph)
+ G.add_nodes_from((n,B.node[n]) for n in nodes)
+ for u in nodes:
+ nbrs2=set((v for nbr in B[u] for v in B[nbr])) -set([u])
+ if multigraph:
+ for n in nbrs2:
+ if directed:
+ links=set(B[u]) & set(B.pred[n])
+ else:
+ links=set(B[u]) & set(B[n])
+ for l in links:
+ if not G.has_edge(u,n,l):
+ G.add_edge(u,n,key=l)
+ else:
+ G.add_edges_from((u,n) for n in nbrs2)
+ return G
+
+def weighted_projected_graph(B, nodes, ratio=False):
+ r"""Returns a weighted projection of B onto one of its node sets.
+
+ The weighted projected graph is the projection of the bipartite
+ network B onto the specified nodes with weights representing the
+ number of shared neighbors or the ratio between actual shared
+ neighbors and possible shared neighbors if ratio=True [1]_. The
+ nodes retain their attributes and are connected in the resulting graph
+ if they have an edge to a common node in the original graph.
+
+ Parameters
+ ----------
+ B : NetworkX graph
+ The input graph should be bipartite.
+
+ nodes : list or iterable
+ Nodes to project onto (the "bottom" nodes).
+
+ ratio: Bool (default=False)
+ If True, edge weight is the ratio between actual shared neighbors
+        and possible shared neighbors. If False, edge weight is the number
+ of shared neighbors.
+
+ Returns
+ -------
+ Graph : NetworkX graph
+ A graph that is the projection onto the given nodes.
+
+ Examples
+ --------
+ >>> from networkx.algorithms import bipartite
+ >>> B = nx.path_graph(4)
+ >>> G = bipartite.weighted_projected_graph(B, [1,3])
+ >>> print(G.nodes())
+ [1, 3]
+ >>> print(G.edges(data=True))
+ [(1, 3, {'weight': 1})]
+ >>> G = bipartite.weighted_projected_graph(B, [1,3], ratio=True)
+ >>> print(G.edges(data=True))
+ [(1, 3, {'weight': 0.5})]
+
+ Notes
+ ------
+ No attempt is made to verify that the input graph B is bipartite.
+ The graph and node properties are (shallow) copied to the projected graph.
+
+ See Also
+ --------
+ is_bipartite,
+ is_bipartite_node_set,
+ sets,
+ collaboration_weighted_projected_graph,
+ overlap_weighted_projected_graph,
+ generic_weighted_projected_graph
+ projected_graph
+
+ References
+ ----------
+ .. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation
+ Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook
+ of Social Network Analysis. Sage Publications.
+ """
+ if B.is_multigraph():
+ raise nx.NetworkXError("not defined for multigraphs")
+ if B.is_directed():
+ pred=B.pred
+ G=nx.DiGraph()
+ else:
+ pred=B.adj
+ G=nx.Graph()
+ G.graph.update(B.graph)
+ G.add_nodes_from((n,B.node[n]) for n in nodes)
+ n_top = float(len(B) - len(nodes))
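+ # Note: this assumes `nodes` is the complete bottom node set, so that
+ # n_top is the size of the opposite ("top") set used when ratio=True.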
+ for u in nodes:
+ unbrs = set(B[u])
+ nbrs2 = set((n for nbr in unbrs for n in B[nbr])) - set([u])
+ for v in nbrs2:
+ vnbrs = set(pred[v])
+ common = unbrs & vnbrs
+ if not ratio:
+ weight = len(common)
+ else:
+ weight = len(common) / n_top
+ G.add_edge(u,v,weight=weight)
+ return G
+
+def collaboration_weighted_projected_graph(B, nodes):
+ r"""Newman's weighted projection of B onto one of its node sets.
+
+ The collaboration weighted projection is the projection of the
+ bipartite network B onto the specified nodes with weights assigned
+ using Newman's collaboration model [1]_:
+
+ .. math::
+
+ w_{v,u} = \sum_{w} \frac{\delta_{v}^{w} \delta_{u}^{w}}{k_w - 1}
+
+ where `v` and `u` are nodes from the same bipartite node set,
+ and `w` is a node of the opposite node set.
+ The value `k_w` is the degree of node `w` in the bipartite
+ network and `\delta_{v}^{w}` is 1 if node `v` is
+ linked to node `w` in the original bipartite graph or 0 otherwise.
+
+ The nodes retain their attributes and are connected in the resulting
+ graph if they have an edge to a common node in the original bipartite
+ graph.
+
+ Parameters
+ ----------
+ B : NetworkX graph
+ The input graph should be bipartite.
+
+ nodes : list or iterable
+ Nodes to project onto (the "bottom" nodes).
+
+ Returns
+ -------
+ Graph : NetworkX graph
+ A graph that is the projection onto the given nodes.
+
+ Examples
+ --------
+ >>> from networkx.algorithms import bipartite
+ >>> B = nx.path_graph(5)
+ >>> B.add_edge(1,5)
+ >>> G = bipartite.collaboration_weighted_projected_graph(B, [0, 2, 4, 5])
+ >>> print(G.nodes())
+ [0, 2, 4, 5]
+ >>> for edge in G.edges(data=True): print(edge)
+ ...
+ (0, 2, {'weight': 0.5})
+ (0, 5, {'weight': 0.5})
+ (2, 4, {'weight': 1.0})
+ (2, 5, {'weight': 0.5})
+
+ Notes
+ -----
+ No attempt is made to verify that the input graph B is bipartite.
+ The graph and node properties are (shallow) copied to the projected graph.
+
+ See Also
+ --------
+ is_bipartite,
+ is_bipartite_node_set,
+ sets,
+ weighted_projected_graph,
+ overlap_weighted_projected_graph,
+ generic_weighted_projected_graph,
+ projected_graph
+
+ References
+ ----------
+ .. [1] Scientific collaboration networks: II.
+ Shortest paths, weighted networks, and centrality,
+ M. E. J. Newman, Phys. Rev. E 64, 016132 (2001).
+ """
+ if B.is_multigraph():
+ raise nx.NetworkXError("not defined for multigraphs")
+ if B.is_directed():
+ pred=B.pred
+ G=nx.DiGraph()
+ else:
+ pred=B.adj
+ G=nx.Graph()
+ G.graph.update(B.graph)
+ G.add_nodes_from((n,B.node[n]) for n in nodes)
+ for u in nodes:
+ unbrs = set(B[u])
+ nbrs2 = set((n for nbr in unbrs for n in B[nbr])) - set([u])
+ for v in nbrs2:
+ vnbrs = set(pred[v])
+ common = unbrs & vnbrs
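+ # Each shared neighbor n contributes 1/(deg(n) - 1); neighbors of
+ # degree one are skipped to avoid division by zero.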
+ weight = sum([1.0/(len(B[n]) - 1) for n in common if len(B[n])>1])
+ G.add_edge(u,v,weight=weight)
+ return G
+
+def overlap_weighted_projected_graph(B, nodes, jaccard=True):
+ r"""Overlap weighted projection of B onto one of its node sets.
+
+ The overlap weighted projection is the projection of the bipartite
+ network B onto the specified nodes with weights representing
+ the Jaccard index between the neighborhoods of the two nodes in the
+ original bipartite network [1]_:
+
+ .. math::
+
+ w_{v,u} = \frac{|N(u) \cap N(v)|}{|N(u) \cup N(v)|}
+
+ or, if the parameter 'jaccard' is False, the number of common
+ neighbors divided by the smaller of the two nodes' degrees in the
+ original bipartite graph [1]_:
+
+ .. math::
+
+ w_{v,u} = \frac{|N(u) \cap N(v)|}{\min(|N(u)|,|N(v)|)}
+
+ The nodes retain their attributes and are connected in the resulting
+ graph if they have an edge to a common node in the original bipartite graph.
+
+ Parameters
+ ----------
+ B : NetworkX graph
+ The input graph should be bipartite.
+
+ nodes : list or iterable
+ Nodes to project onto (the "bottom" nodes).
+
+ jaccard: Bool (default=True)
+ If True, edge weight is the Jaccard index of the two neighborhoods;
+ if False, it is the number of shared neighbors divided by the smaller
+ of the two degrees.
+
+ Returns
+ -------
+ Graph : NetworkX graph
+ A graph that is the projection onto the given nodes.
+
+ Examples
+ --------
+ >>> from networkx.algorithms import bipartite
+ >>> B = nx.path_graph(5)
+ >>> G = bipartite.overlap_weighted_projected_graph(B, [0, 2, 4])
+ >>> print(G.nodes())
+ [0, 2, 4]
+ >>> print(G.edges(data=True))
+ [(0, 2, {'weight': 0.5}), (2, 4, {'weight': 0.5})]
+ >>> G = bipartite.overlap_weighted_projected_graph(B, [0, 2, 4], jaccard=False)
+ >>> print(G.edges(data=True))
+ [(0, 2, {'weight': 1.0}), (2, 4, {'weight': 1.0})]
+
+ Notes
+ -----
+ No attempt is made to verify that the input graph B is bipartite.
+ The graph and node properties are (shallow) copied to the projected graph.
+
+ See Also
+ --------
+ is_bipartite,
+ is_bipartite_node_set,
+ sets,
+ weighted_projected_graph,
+ collaboration_weighted_projected_graph,
+ generic_weighted_projected_graph,
+ projected_graph
+
+ References
+ ----------
+ .. [1] Borgatti, S.P. and Halgin, D. In press. Analyzing Affiliation
+ Networks. In Carrington, P. and Scott, J. (eds) The Sage Handbook
+ of Social Network Analysis. Sage Publications.
+
+ """
+ if B.is_multigraph():
+ raise nx.NetworkXError("not defined for multigraphs")
+ if B.is_directed():
+ pred=B.pred
+ G=nx.DiGraph()
+ else:
+ pred=B.adj
+ G=nx.Graph()
+ G.graph.update(B.graph)
+ G.add_nodes_from((n,B.node[n]) for n in nodes)
+ for u in nodes:
+ unbrs = set(B[u])
+ nbrs2 = set((n for nbr in unbrs for n in B[nbr])) - set([u])
+ for v in nbrs2:
+ vnbrs = set(pred[v])
+ if jaccard:
+ weight = float(len(unbrs & vnbrs)) / len(unbrs | vnbrs)
+ else:
+ weight = float(len(unbrs & vnbrs)) / min(len(unbrs),len(vnbrs))
+ G.add_edge(u,v,weight=weight)
+ return G
+
+def generic_weighted_projected_graph(B, nodes, weight_function=None):
+ r"""Weighted projection of B with a user-specified weight function.
+
+ The bipartite network B is projected onto the specified nodes
+ with weights computed by a user-specified function. This function
+ must accept the original graph and two nodes as parameters and
+ return an integer or a float.
+
+ The nodes retain their attributes and are connected in the resulting graph
+ if they have an edge to a common node in the original graph.
+
+ Parameters
+ ----------
+ B : NetworkX graph
+ The input graph should be bipartite.
+
+ nodes : list or iterable
+ Nodes to project onto (the "bottom" nodes).
+
+ weight_function: function
+ This function must accept as parameters the same input graph that is
+ passed to this function, and two nodes; it must return an integer or a float.
+ The default function computes the number of shared neighbors.
+
+ Returns
+ -------
+ Graph : NetworkX graph
+ A graph that is the projection onto the given nodes.
+
+ Examples
+ --------
+ >>> from networkx.algorithms import bipartite
+ >>> # Define some custom weight functions
+ >>> def jaccard(G, u, v):
+ ... unbrs = set(G[u])
+ ... vnbrs = set(G[v])
+ ... return float(len(unbrs & vnbrs)) / len(unbrs | vnbrs)
+ ...
+ >>> def my_weight(G, u, v, weight='weight'):
+ ... w = 0
+ ... for nbr in set(G[u]) & set(G[v]):
+ ... w += G.edge[u][nbr].get(weight, 1) + G.edge[v][nbr].get(weight, 1)
+ ... return w
+ ...
+ >>> # A complete bipartite graph with 4 nodes and 4 edges
+ >>> B = nx.complete_bipartite_graph(2,2)
+ >>> # Add some arbitrary weight to the edges
+ >>> for i,(u,v) in enumerate(B.edges()):
+ ... B.edge[u][v]['weight'] = i + 1
+ ...
+ >>> for edge in B.edges(data=True):
+ ... print(edge)
+ ...
+ (0, 2, {'weight': 1})
+ (0, 3, {'weight': 2})
+ (1, 2, {'weight': 3})
+ (1, 3, {'weight': 4})
+ >>> # Without specifying a function, the weight is the number of shared partners
+ >>> G = bipartite.generic_weighted_projected_graph(B, [0, 1])
+ >>> print(G.edges(data=True))
+ [(0, 1, {'weight': 2})]
+ >>> # To specify a custom weight function use the weight_function parameter
+ >>> G = bipartite.generic_weighted_projected_graph(B, [0, 1], weight_function=jaccard)
+ >>> print(G.edges(data=True))
+ [(0, 1, {'weight': 1.0})]
+ >>> G = bipartite.generic_weighted_projected_graph(B, [0, 1], weight_function=my_weight)
+ >>> print(G.edges(data=True))
+ [(0, 1, {'weight': 10})]
+
+ Notes
+ -----
+ No attempt is made to verify that the input graph B is bipartite.
+ The graph and node properties are (shallow) copied to the projected graph.
+
+ See Also
+ --------
+ is_bipartite,
+ is_bipartite_node_set,
+ sets,
+ weighted_projected_graph,
+ collaboration_weighted_projected_graph,
+ overlap_weighted_projected_graph,
+ projected_graph
+
+ """
+ if B.is_multigraph():
+ raise nx.NetworkXError("not defined for multigraphs")
+ if B.is_directed():
+ pred=B.pred
+ G=nx.DiGraph()
+ else:
+ pred=B.adj
+ G=nx.Graph()
+ if weight_function is None:
+ def weight_function(G, u, v):
+ # Notice that we use set(pred[v]) for handling the directed case.
+ return len(set(G[u]) & set(pred[v]))
+ G.graph.update(B.graph)
+ G.add_nodes_from((n,B.node[n]) for n in nodes)
+ for u in nodes:
+ nbrs2 = set((n for nbr in set(B[u]) for n in B[nbr])) - set([u])
+ for v in nbrs2:
+ weight = weight_function(B, u, v)
+ G.add_edge(u,v,weight=weight)
+ return G
+
+def project(B, nodes, create_using=None):
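+ # Backwards-compatible wrapper for projected_graph(); the create_using
+ # argument is accepted but ignored here.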
+ return projected_graph(B, nodes)
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/redundancy.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/redundancy.py
new file mode 100644
index 0000000..055fdcb
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/redundancy.py
@@ -0,0 +1,84 @@
+#-*- coding: utf-8 -*-
+"""Node redundancy for bipartite graphs."""
+# Copyright (C) 2011 by
+# Jordi Torrents <jtorrents@milnou.net>
+# Aric Hagberg <hagberg@lanl.gov>
+# All rights reserved.
+# BSD license.
+from itertools import combinations
+import networkx as nx
+
+__author__ = """\n""".join(['Jordi Torrents <jtorrents@milnou.net>',
+ 'Aric Hagberg (hagberg@lanl.gov)'])
+__all__ = ['node_redundancy']
+
+def node_redundancy(G, nodes=None):
+ r"""Compute bipartite node redundancy coefficient.
+
+ The redundancy coefficient of a node `v` is the fraction of pairs of
+ neighbors of `v` that are both linked to at least one node other than
+ `v`. In a one-mode projection these neighbor pairs would be linked
+ together even if `v` were not there.
+
+ .. math::
+
+ rc(v) = \frac{|\{\{u,w\} \subseteq N(v),
+ \: \exists v' \neq v,\: (v',u) \in E\:
+ \mathrm{and}\: (v',w) \in E\}|}{ \frac{|N(v)|(|N(v)|-1)}{2}}
+
+ where `N(v)` are the neighbors of `v` in `G`.
+
+ Parameters
+ ----------
+ G : graph
+ A bipartite graph
+
+ nodes : list or iterable (optional)
+ Compute redundancy for these nodes. The default is all nodes in G.
+
+ Returns
+ -------
+ redundancy : dictionary
+ A dictionary keyed by node with the node redundancy value.
+
+ Examples
+ --------
+ >>> from networkx.algorithms import bipartite
+ >>> G = nx.cycle_graph(4)
+ >>> rc = bipartite.node_redundancy(G)
+ >>> rc[0]
+ 1.0
+
+ Compute the average redundancy for the graph:
+
+ >>> sum(rc.values())/len(G)
+ 1.0
+
+ Compute the average redundancy for a set of nodes:
+
+ >>> nodes = [0, 2]
+ >>> sum(rc[n] for n in nodes)/len(nodes)
+ 1.0
+
+ References
+ ----------
+ .. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio (2008).
+ Basic notions for the analysis of large two-mode networks.
+ Social Networks 30(1), 31--48.
+ """
+ if nodes is None:
+ nodes = G
+ rc = {}
+ for v in nodes:
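+ # Count the pairs of v's neighbors that share at least one common
+ # neighbor other than v itself.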
+ overlap = 0.0
+ for u, w in combinations(G[v], 2):
+ if len((set(G[u]) & set(G[w])) - set([v])) > 0:
+ overlap += 1
+ if overlap > 0:
+ n = len(G[v])
+ norm = 2.0/(n*(n-1))
+ else:
+ norm = 1.0
+ rc[v] = overlap*norm
+ return rc
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/spectral.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/spectral.py
new file mode 100644
index 0000000..d0ebdd4
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/spectral.py
@@ -0,0 +1,88 @@
+# -*- coding: utf-8 -*-
+"""
+Spectral bipartivity measure.
+"""
+import networkx as nx
+__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
+# Copyright (C) 2011 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+__all__ = ['spectral_bipartivity']
+
+def spectral_bipartivity(G, nodes=None, weight='weight'):
+ """Returns the spectral bipartivity.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ nodes : list or container, optional (default is all nodes)
+ Nodes to return value of spectral bipartivity contribution.
+
+ weight : string or None, optional (default='weight')
+ Edge data key to use for edge weights. If None, weights set to 1.
+
+ Returns
+ -------
+ sb : float or dict
+ A single number if the keyword nodes is not specified, or
+ a dictionary keyed by node with the spectral bipartivity contribution
+ of that node as the value.
+
+ Examples
+ --------
+ >>> from networkx.algorithms import bipartite
+ >>> G = nx.path_graph(4)
+ >>> bipartite.spectral_bipartivity(G)
+ 1.0
+
+ Notes
+ -----
+ This implementation uses Numpy (dense) matrices which are not efficient
+ for storing large sparse graphs.
+
+ See Also
+ --------
+ color
+
+ References
+ ----------
+ .. [1] E. Estrada and J. A. Rodríguez-Velázquez, "Spectral measures of
+ bipartivity in complex networks", PhysRev E 72, 046105 (2005)
+ """
+ try:
+ import scipy.linalg
+ except ImportError:
+ raise ImportError('spectral_bipartivity() requires SciPy: '
+ 'http://scipy.org/')
+ nodelist = G.nodes() # ordering of nodes in matrix
+ A = nx.to_numpy_matrix(G, nodelist, weight=weight)
+ expA = scipy.linalg.expm(A)
+ expmA = scipy.linalg.expm(-A)
+ coshA = 0.5 * (expA + expmA)
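+ # cosh(A) counts only even-length closed walks; in a truly bipartite
+ # graph every closed walk is even, so the ratio below equals 1.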
+ if nodes is None:
+ # return single number for entire graph
+ return coshA.diagonal().sum() / expA.diagonal().sum()
+ else:
+ # contribution for individual nodes
+ index = dict(zip(nodelist, range(len(nodelist))))
+ sb = {}
+ for n in nodes:
+ i = index[n]
+ sb[n] = coshA[i, i] / expA[i, i]
+ return sb
+
+def setup_module(module):
+ """Fixture for nose tests."""
+ from nose import SkipTest
+ try:
+ import numpy
+ except ImportError:
+ raise SkipTest("NumPy not available")
+ try:
+ import scipy
+ except ImportError:
+ raise SkipTest("SciPy not available")
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/tests/test_basic.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/tests/test_basic.py
new file mode 100644
index 0000000..5b22e9a
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/tests/test_basic.py
@@ -0,0 +1,117 @@
+#!/usr/bin/env python
+from nose.tools import *
+from nose import SkipTest
+from nose.plugins.attrib import attr
+import networkx as nx
+from networkx.algorithms import bipartite
+class TestBipartiteBasic:
+
+ def test_is_bipartite(self):
+ assert_true(bipartite.is_bipartite(nx.path_graph(4)))
+ assert_true(bipartite.is_bipartite(nx.DiGraph([(1,0)])))
+ assert_false(bipartite.is_bipartite(nx.complete_graph(3)))
+
+
+ def test_bipartite_color(self):
+ G=nx.path_graph(4)
+ c=bipartite.color(G)
+ assert_equal(c,{0: 1, 1: 0, 2: 1, 3: 0})
+
+ @raises(nx.NetworkXError)
+ def test_not_bipartite_color(self):
+ c=bipartite.color(nx.complete_graph(4))
+
+
+ def test_bipartite_directed(self):
+ G = nx.bipartite_random_graph(10, 10, 0.1, directed=True)
+ assert_true(bipartite.is_bipartite(G))
+
+ def test_bipartite_sets(self):
+ G=nx.path_graph(4)
+ X,Y=bipartite.sets(G)
+ assert_equal(X,set([0,2]))
+ assert_equal(Y,set([1,3]))
+
+ def test_is_bipartite_node_set(self):
+ G=nx.path_graph(4)
+ assert_true(bipartite.is_bipartite_node_set(G,[0,2]))
+ assert_true(bipartite.is_bipartite_node_set(G,[1,3]))
+ assert_false(bipartite.is_bipartite_node_set(G,[1,2]))
+ G.add_path([10,20])
+ assert_true(bipartite.is_bipartite_node_set(G,[0,2,10]))
+ assert_true(bipartite.is_bipartite_node_set(G,[0,2,20]))
+ assert_true(bipartite.is_bipartite_node_set(G,[1,3,10]))
+ assert_true(bipartite.is_bipartite_node_set(G,[1,3,20]))
+
+ def test_bipartite_density(self):
+ G=nx.path_graph(5)
+ X,Y=bipartite.sets(G)
+ density=float(len(G.edges()))/(len(X)*len(Y))
+ assert_equal(bipartite.density(G,X),density)
+ D = nx.DiGraph(G.edges())
+ assert_equal(bipartite.density(D,X),density/2.0)
+ assert_equal(bipartite.density(nx.Graph(),{}),0.0)
+
+ def test_bipartite_degrees(self):
+ G=nx.path_graph(5)
+ X=set([1,3])
+ Y=set([0,2,4])
+ u,d=bipartite.degrees(G,Y)
+ assert_equal(u,{1:2,3:2})
+ assert_equal(d,{0:1,2:2,4:1})
+
+ def test_bipartite_weighted_degrees(self):
+ G=nx.path_graph(5)
+ G.add_edge(0,1,weight=0.1,other=0.2)
+ X=set([1,3])
+ Y=set([0,2,4])
+ u,d=bipartite.degrees(G,Y,weight='weight')
+ assert_equal(u,{1:1.1,3:2})
+ assert_equal(d,{0:0.1,2:2,4:1})
+ u,d=bipartite.degrees(G,Y,weight='other')
+ assert_equal(u,{1:1.2,3:2})
+ assert_equal(d,{0:0.2,2:2,4:1})
+
+
+ @attr('numpy')
+ def test_biadjacency_matrix_weight(self):
+ try:
+ import numpy
+ except ImportError:
+ raise SkipTest('numpy not available.')
+ G=nx.path_graph(5)
+ G.add_edge(0,1,weight=2,other=4)
+ X=[1,3]
+ Y=[0,2,4]
+ M = bipartite.biadjacency_matrix(G,X,weight='weight')
+ assert_equal(M[0,0], 2)
+ M = bipartite.biadjacency_matrix(G, X, weight='other')
+ assert_equal(M[0,0], 4)
+
+ @attr('numpy')
+ def test_biadjacency_matrix(self):
+ try:
+ import numpy
+ except ImportError:
+ raise SkipTest('numpy not available.')
+ tops = [2,5,10]
+ bots = [5,10,15]
+ for i in range(len(tops)):
+ G = nx.bipartite_random_graph(tops[i], bots[i], 0.2)
+ top = [n for n,d in G.nodes(data=True) if d['bipartite']==0]
+ M = bipartite.biadjacency_matrix(G, top)
+ assert_equal(M.shape[0],tops[i])
+ assert_equal(M.shape[1],bots[i])
+
+ @attr('numpy')
+ def test_biadjacency_matrix_order(self):
+ try:
+ import numpy
+ except ImportError:
+ raise SkipTest('numpy not available.')
+ G=nx.path_graph(5)
+ G.add_edge(0,1,weight=2)
+ X=[3,1]
+ Y=[4,2,0]
+ M = bipartite.biadjacency_matrix(G,X,Y,weight='weight')
+ assert_equal(M[1,2], 2)
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/tests/test_centrality.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/tests/test_centrality.py
new file mode 100644
index 0000000..992d643
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/tests/test_centrality.py
@@ -0,0 +1,169 @@
+from nose.tools import *
+import networkx as nx
+from networkx.algorithms import bipartite
+
+class TestBipartiteCentrality(object):
+
+ def setUp(self):
+ self.P4 = nx.path_graph(4)
+ self.K3 = nx.complete_bipartite_graph(3,3)
+ self.C4 = nx.cycle_graph(4)
+ self.davis = nx.davis_southern_women_graph()
+ self.top_nodes = [n for n,d in self.davis.nodes(data=True)
+ if d['bipartite']==0]
+
+ def test_degree_centrality(self):
+ d = bipartite.degree_centrality(self.P4, [1,3])
+ answer = {0: 0.5, 1: 1.0, 2: 1.0, 3: 0.5}
+ assert_equal(d, answer)
+ d = bipartite.degree_centrality(self.K3, [0,1,2])
+ answer = {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0, 5: 1.0}
+ assert_equal(d, answer)
+ d = bipartite.degree_centrality(self.C4, [0,2])
+ answer = {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0}
+ assert_equal(d,answer)
+
+ def test_betweenness_centrality(self):
+ c = bipartite.betweenness_centrality(self.P4, [1,3])
+ answer = {0: 0.0, 1: 1.0, 2: 1.0, 3: 0.0}
+ assert_equal(c, answer)
+ c = bipartite.betweenness_centrality(self.K3, [0,1,2])
+ answer = {0: 0.125, 1: 0.125, 2: 0.125, 3: 0.125, 4: 0.125, 5: 0.125}
+ assert_equal(c, answer)
+ c = bipartite.betweenness_centrality(self.C4, [0,2])
+ answer = {0: 0.25, 1: 0.25, 2: 0.25, 3: 0.25}
+ assert_equal(c, answer)
+
+ def test_closeness_centrality(self):
+ c = bipartite.closeness_centrality(self.P4, [1,3])
+ answer = {0: 2.0/3, 1: 1.0, 2: 1.0, 3:2.0/3}
+ assert_equal(c, answer)
+ c = bipartite.closeness_centrality(self.K3, [0,1,2])
+ answer = {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0, 5: 1.0}
+ assert_equal(c, answer)
+ c = bipartite.closeness_centrality(self.C4, [0,2])
+ answer = {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0}
+ assert_equal(c, answer)
+ G = nx.Graph()
+ G.add_node(0)
+ G.add_node(1)
+ c = bipartite.closeness_centrality(G, [0])
+ assert_equal(c, {1: 0.0})
+ c = bipartite.closeness_centrality(G, [1])
+ assert_equal(c, {1: 0.0})
+
+ def test_davis_degree_centrality(self):
+ G = self.davis
+ deg = bipartite.degree_centrality(G, self.top_nodes)
+ answer = {'E8':0.78,
+ 'E9':0.67,
+ 'E7':0.56,
+ 'Nora Fayette':0.57,
+ 'Evelyn Jefferson':0.57,
+ 'Theresa Anderson':0.57,
+ 'E6':0.44,
+ 'Sylvia Avondale':0.50,
+ 'Laura Mandeville':0.50,
+ 'Brenda Rogers':0.50,
+ 'Katherina Rogers':0.43,
+ 'E5':0.44,
+ 'Helen Lloyd':0.36,
+ 'E3':0.33,
+ 'Ruth DeSand':0.29,
+ 'Verne Sanderson':0.29,
+ 'E12':0.33,
+ 'Myra Liddel':0.29,
+ 'E11':0.22,
+ 'Eleanor Nye':0.29,
+ 'Frances Anderson':0.29,
+ 'Pearl Oglethorpe':0.21,
+ 'E4':0.22,
+ 'Charlotte McDowd':0.29,
+ 'E10':0.28,
+ 'Olivia Carleton':0.14,
+ 'Flora Price':0.14,
+ 'E2':0.17,
+ 'E1':0.17,
+ 'Dorothy Murchison':0.14,
+ 'E13':0.17,
+ 'E14':0.17}
+ for node, value in answer.items():
+ assert_almost_equal(value, deg[node], places=2)
+
+ def test_davis_betweenness_centrality(self):
+ G = self.davis
+ bet = bipartite.betweenness_centrality(G, self.top_nodes)
+ answer = {'E8':0.24,
+ 'E9':0.23,
+ 'E7':0.13,
+ 'Nora Fayette':0.11,
+ 'Evelyn Jefferson':0.10,
+ 'Theresa Anderson':0.09,
+ 'E6':0.07,
+ 'Sylvia Avondale':0.07,
+ 'Laura Mandeville':0.05,
+ 'Brenda Rogers':0.05,
+ 'Katherina Rogers':0.05,
+ 'E5':0.04,
+ 'Helen Lloyd':0.04,
+ 'E3':0.02,
+ 'Ruth DeSand':0.02,
+ 'Verne Sanderson':0.02,
+ 'E12':0.02,
+ 'Myra Liddel':0.02,
+ 'E11':0.02,
+ 'Eleanor Nye':0.01,
+ 'Frances Anderson':0.01,
+ 'Pearl Oglethorpe':0.01,
+ 'E4':0.01,
+ 'Charlotte McDowd':0.01,
+ 'E10':0.01,
+ 'Olivia Carleton':0.01,
+ 'Flora Price':0.01,
+ 'E2':0.00,
+ 'E1':0.00,
+ 'Dorothy Murchison':0.00,
+ 'E13':0.00,
+ 'E14':0.00}
+ for node, value in answer.items():
+ assert_almost_equal(value, bet[node], places=2)
+
+ def test_davis_closeness_centrality(self):
+ G = self.davis
+ clos = bipartite.closeness_centrality(G, self.top_nodes)
+ answer = {'E8':0.85,
+ 'E9':0.79,
+ 'E7':0.73,
+ 'Nora Fayette':0.80,
+ 'Evelyn Jefferson':0.80,
+ 'Theresa Anderson':0.80,
+ 'E6':0.69,
+ 'Sylvia Avondale':0.77,
+ 'Laura Mandeville':0.73,
+ 'Brenda Rogers':0.73,
+ 'Katherina Rogers':0.73,
+ 'E5':0.59,
+ 'Helen Lloyd':0.73,
+ 'E3':0.56,
+ 'Ruth DeSand':0.71,
+ 'Verne Sanderson':0.71,
+ 'E12':0.56,
+ 'Myra Liddel':0.69,
+ 'E11':0.54,
+ 'Eleanor Nye':0.67,
+ 'Frances Anderson':0.67,
+ 'Pearl Oglethorpe':0.67,
+ 'E4':0.54,
+ 'Charlotte McDowd':0.60,
+ 'E10':0.55,
+ 'Olivia Carleton':0.59,
+ 'Flora Price':0.59,
+ 'E2':0.52,
+ 'E1':0.52,
+ 'Dorothy Murchison':0.65,
+ 'E13':0.52,
+ 'E14':0.52}
+ for node, value in answer.items():
+ assert_almost_equal(value, clos[node], places=2)
+
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/tests/test_cluster.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/tests/test_cluster.py
new file mode 100644
index 0000000..aa158f9
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/tests/test_cluster.py
@@ -0,0 +1,70 @@
+import networkx as nx
+from nose.tools import *
+from networkx.algorithms.bipartite.cluster import cc_dot,cc_min,cc_max
+import networkx.algorithms.bipartite as bipartite
+
+def test_pairwise_bipartite_cc_functions():
+ # Test functions for different kinds of bipartite clustering coefficients
+ # between pairs of nodes using 3 example graphs from figure 5 p. 40
+ # Latapy et al (2008)
+ G1 = nx.Graph([(0,2),(0,3),(0,4),(0,5),(0,6),(1,5),(1,6),(1,7)])
+ G2 = nx.Graph([(0,2),(0,3),(0,4),(1,3),(1,4),(1,5)])
+ G3 = nx.Graph([(0,2),(0,3),(0,4),(0,5),(0,6),(1,5),(1,6),(1,7),(1,8),(1,9)])
+ result = {0:[1/3.0, 2/3.0, 2/5.0],
+ 1:[1/2.0, 2/3.0, 2/3.0],
+ 2:[2/8.0, 2/5.0, 2/5.0]}
+ for i, G in enumerate([G1, G2, G3]):
+ assert(bipartite.is_bipartite(G))
+ assert(cc_dot(set(G[0]), set(G[1])) == result[i][0])
+ assert(cc_min(set(G[0]), set(G[1])) == result[i][1])
+ assert(cc_max(set(G[0]), set(G[1])) == result[i][2])
+
+def test_star_graph():
+ G=nx.star_graph(3)
+ # all modes are the same
+ answer={0:0,1:1,2:1,3:1}
+ assert_equal(bipartite.clustering(G,mode='dot'),answer)
+ assert_equal(bipartite.clustering(G,mode='min'),answer)
+ assert_equal(bipartite.clustering(G,mode='max'),answer)
+
+@raises(nx.NetworkXError)
+def test_not_bipartite():
+ bipartite.clustering(nx.complete_graph(4))
+
+@raises(nx.NetworkXError)
+def test_bad_mode():
+ bipartite.clustering(nx.path_graph(4),mode='foo')
+
+def test_path_graph():
+ G=nx.path_graph(4)
+ answer={0:0.5,1:0.5,2:0.5,3:0.5}
+ assert_equal(bipartite.clustering(G,mode='dot'),answer)
+ assert_equal(bipartite.clustering(G,mode='max'),answer)
+ answer={0:1,1:1,2:1,3:1}
+ assert_equal(bipartite.clustering(G,mode='min'),answer)
+
+def test_average_path_graph():
+ G=nx.path_graph(4)
+ assert_equal(bipartite.average_clustering(G,mode='dot'),0.5)
+ assert_equal(bipartite.average_clustering(G,mode='max'),0.5)
+ assert_equal(bipartite.average_clustering(G,mode='min'),1)
+
+def test_ra_clustering_davis():
+ G = nx.davis_southern_women_graph()
+ cc4 = round(bipartite.robins_alexander_clustering(G), 3)
+ assert_equal(cc4, 0.468)
+
+def test_ra_clustering_square():
+ G = nx.path_graph(4)
+ G.add_edge(0, 3)
+ assert_equal(bipartite.robins_alexander_clustering(G), 1.0)
+
+def test_ra_clustering_zero():
+ G = nx.Graph()
+ assert_equal(bipartite.robins_alexander_clustering(G), 0)
+ G.add_nodes_from(range(4))
+ assert_equal(bipartite.robins_alexander_clustering(G), 0)
+ G.add_edges_from([(0,1),(2,3),(3,4)])
+ assert_equal(bipartite.robins_alexander_clustering(G), 0)
+ G.add_edge(1,2)
+ assert_equal(bipartite.robins_alexander_clustering(G), 0)
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/tests/test_project.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/tests/test_project.py
new file mode 100644
index 0000000..52a93b2
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/tests/test_project.py
@@ -0,0 +1,363 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx as nx
+from networkx.algorithms import bipartite
+from networkx.testing import *
+
+class TestBipartiteProject:
+
+ def test_path_projected_graph(self):
+ G=nx.path_graph(4)
+ P=bipartite.projected_graph(G,[1,3])
+ assert_equal(sorted(P.nodes()),[1,3])
+ assert_equal(sorted(P.edges()),[(1,3)])
+ P=bipartite.projected_graph(G,[0,2])
+ assert_equal(sorted(P.nodes()),[0,2])
+ assert_equal(sorted(P.edges()),[(0,2)])
+
+ def test_path_projected_properties_graph(self):
+ G=nx.path_graph(4)
+ G.add_node(1,name='one')
+ G.add_node(2,name='two')
+ P=bipartite.projected_graph(G,[1,3])
+ assert_equal(sorted(P.nodes()),[1,3])
+ assert_equal(sorted(P.edges()),[(1,3)])
+ assert_equal(P.node[1]['name'],G.node[1]['name'])
+ P=bipartite.projected_graph(G,[0,2])
+ assert_equal(sorted(P.nodes()),[0,2])
+ assert_equal(sorted(P.edges()),[(0,2)])
+ assert_equal(P.node[2]['name'],G.node[2]['name'])
+
+ def test_path_collaboration_projected_graph(self):
+ G=nx.path_graph(4)
+ P=bipartite.collaboration_weighted_projected_graph(G,[1,3])
+ assert_equal(sorted(P.nodes()),[1,3])
+ assert_equal(sorted(P.edges()),[(1,3)])
+ P[1][3]['weight']=1
+ P=bipartite.collaboration_weighted_projected_graph(G,[0,2])
+ assert_equal(sorted(P.nodes()),[0,2])
+ assert_equal(sorted(P.edges()),[(0,2)])
+ P[0][2]['weight']=1
+
+ def test_directed_path_collaboration_projected_graph(self):
+ G=nx.DiGraph()
+ G.add_path(list(range(4)))
+ P=bipartite.collaboration_weighted_projected_graph(G,[1,3])
+ assert_equal(sorted(P.nodes()),[1,3])
+ assert_equal(sorted(P.edges()),[(1,3)])
+ P[1][3]['weight']=1
+ P=bipartite.collaboration_weighted_projected_graph(G,[0,2])
+ assert_equal(sorted(P.nodes()),[0,2])
+ assert_equal(sorted(P.edges()),[(0,2)])
+ P[0][2]['weight']=1
+
+ def test_path_weighted_projected_graph(self):
+ G=nx.path_graph(4)
+ P=bipartite.weighted_projected_graph(G,[1,3])
+ assert_equal(sorted(P.nodes()),[1,3])
+ assert_equal(sorted(P.edges()),[(1,3)])
+ P[1][3]['weight']=1
+ P=bipartite.weighted_projected_graph(G,[0,2])
+ assert_equal(sorted(P.nodes()),[0,2])
+ assert_equal(sorted(P.edges()),[(0,2)])
+ P[0][2]['weight']=1
+
+ def test_path_weighted_projected_directed_graph(self):
+ G=nx.DiGraph()
+ G.add_path(list(range(4)))
+ P=bipartite.weighted_projected_graph(G,[1,3])
+ assert_equal(sorted(P.nodes()),[1,3])
+ assert_equal(sorted(P.edges()),[(1,3)])
+ P[1][3]['weight']=1
+ P=bipartite.weighted_projected_graph(G,[0,2])
+ assert_equal(sorted(P.nodes()),[0,2])
+ assert_equal(sorted(P.edges()),[(0,2)])
+ P[0][2]['weight']=1
+
+
+ def test_star_projected_graph(self):
+ G=nx.star_graph(3)
+ P=bipartite.projected_graph(G,[1,2,3])
+ assert_equal(sorted(P.nodes()),[1,2,3])
+ assert_equal(sorted(P.edges()),[(1,2),(1,3),(2,3)])
+ P=bipartite.weighted_projected_graph(G,[1,2,3])
+ assert_equal(sorted(P.nodes()),[1,2,3])
+ assert_equal(sorted(P.edges()),[(1,2),(1,3),(2,3)])
+
+ P=bipartite.projected_graph(G,[0])
+ assert_equal(sorted(P.nodes()),[0])
+ assert_equal(sorted(P.edges()),[])
+
+ def test_project_multigraph(self):
+ G=nx.Graph()
+ G.add_edge('a',1)
+ G.add_edge('b',1)
+ G.add_edge('a',2)
+ G.add_edge('b',2)
+ P=bipartite.projected_graph(G,'ab')
+ assert_edges_equal(P.edges(),[('a','b')])
+ P=bipartite.weighted_projected_graph(G,'ab')
+ assert_edges_equal(P.edges(),[('a','b')])
+ P=bipartite.projected_graph(G,'ab',multigraph=True)
+ assert_edges_equal(P.edges(),[('a','b'),('a','b')])
+
+ def test_project_collaboration(self):
+ G=nx.Graph()
+ G.add_edge('a',1)
+ G.add_edge('b',1)
+ G.add_edge('b',2)
+ G.add_edge('c',2)
+ G.add_edge('c',3)
+ G.add_edge('c',4)
+ G.add_edge('b',4)
+ P=bipartite.collaboration_weighted_projected_graph(G,'abc')
+ assert_equal(P['a']['b']['weight'],1)
+ assert_equal(P['b']['c']['weight'],2)
+
+ def test_directed_projection(self):
+ G=nx.DiGraph()
+ G.add_edge('A',1)
+ G.add_edge(1,'B')
+ G.add_edge('A',2)
+ G.add_edge('B',2)
+ P=bipartite.projected_graph(G,'AB')
+ assert_equal(sorted(P.edges()),[('A','B')])
+ P=bipartite.weighted_projected_graph(G,'AB')
+ assert_equal(sorted(P.edges()),[('A','B')])
+ assert_equal(P['A']['B']['weight'],1)
+
+ P=bipartite.projected_graph(G,'AB',multigraph=True)
+ assert_equal(sorted(P.edges()),[('A','B')])
+
+ G=nx.DiGraph()
+ G.add_edge('A',1)
+ G.add_edge(1,'B')
+ G.add_edge('A',2)
+ G.add_edge(2,'B')
+ P=bipartite.projected_graph(G,'AB')
+ assert_equal(sorted(P.edges()),[('A','B')])
+ P=bipartite.weighted_projected_graph(G,'AB')
+ assert_equal(sorted(P.edges()),[('A','B')])
+ assert_equal(P['A']['B']['weight'],2)
+
+ P=bipartite.projected_graph(G,'AB',multigraph=True)
+ assert_equal(sorted(P.edges()),[('A','B'),('A','B')])
+
+
+class TestBipartiteWeightedProjection:
+
+ def setUp(self):
+ # Tore Opsahl's example
+ # http://toreopsahl.com/2009/05/01/projecting-two-mode-networks-onto-weighted-one-mode-networks/
+ self.G=nx.Graph()
+ self.G.add_edge('A',1)
+ self.G.add_edge('A',2)
+ self.G.add_edge('B',1)
+ self.G.add_edge('B',2)
+ self.G.add_edge('B',3)
+ self.G.add_edge('B',4)
+ self.G.add_edge('B',5)
+ self.G.add_edge('C',1)
+ self.G.add_edge('D',3)
+ self.G.add_edge('E',4)
+ self.G.add_edge('E',5)
+ self.G.add_edge('E',6)
+ self.G.add_edge('F',6)
+ # Graph based on figure 6 from Newman (2001)
+ self.N=nx.Graph()
+ self.N.add_edge('A',1)
+ self.N.add_edge('A',2)
+ self.N.add_edge('A',3)
+ self.N.add_edge('B',1)
+ self.N.add_edge('B',2)
+ self.N.add_edge('B',3)
+ self.N.add_edge('C',1)
+ self.N.add_edge('D',1)
+ self.N.add_edge('E',3)
+
+ def test_project_weighted_shared(self):
+ edges=[('A','B',2),
+ ('A','C',1),
+ ('B','C',1),
+ ('B','D',1),
+ ('B','E',2),
+ ('E','F',1)]
+ Panswer=nx.Graph()
+ Panswer.add_weighted_edges_from(edges)
+ P=bipartite.weighted_projected_graph(self.G,'ABCDEF')
+ assert_equal(P.edges(),Panswer.edges())
+ for u,v in P.edges():
+ assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
+
+ edges=[('A','B',3),
+ ('A','E',1),
+ ('A','C',1),
+ ('A','D',1),
+ ('B','E',1),
+ ('B','C',1),
+ ('B','D',1),
+ ('C','D',1)]
+ Panswer=nx.Graph()
+ Panswer.add_weighted_edges_from(edges)
+ P=bipartite.weighted_projected_graph(self.N,'ABCDE')
+ assert_equal(P.edges(),Panswer.edges())
+ for u,v in P.edges():
+ assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
+
+ def test_project_weighted_newman(self):
+ edges=[('A','B',1.5),
+ ('A','C',0.5),
+ ('B','C',0.5),
+ ('B','D',1),
+ ('B','E',2),
+ ('E','F',1)]
+ Panswer=nx.Graph()
+ Panswer.add_weighted_edges_from(edges)
+ P=bipartite.collaboration_weighted_projected_graph(self.G,'ABCDEF')
+ assert_equal(P.edges(),Panswer.edges())
+ for u,v in P.edges():
+ assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
+
+ edges=[('A','B',11/6.0),
+ ('A','E',1/2.0),
+ ('A','C',1/3.0),
+ ('A','D',1/3.0),
+ ('B','E',1/2.0),
+ ('B','C',1/3.0),
+ ('B','D',1/3.0),
+ ('C','D',1/3.0)]
+ Panswer=nx.Graph()
+ Panswer.add_weighted_edges_from(edges)
+ P=bipartite.collaboration_weighted_projected_graph(self.N,'ABCDE')
+ assert_equal(P.edges(),Panswer.edges())
+ for u,v in P.edges():
+ assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
+
+ def test_project_weighted_ratio(self):
+ edges=[('A','B',2/6.0),
+ ('A','C',1/6.0),
+ ('B','C',1/6.0),
+ ('B','D',1/6.0),
+ ('B','E',2/6.0),
+ ('E','F',1/6.0)]
+ Panswer=nx.Graph()
+ Panswer.add_weighted_edges_from(edges)
+ P=bipartite.weighted_projected_graph(self.G, 'ABCDEF', ratio=True)
+ assert_equal(P.edges(),Panswer.edges())
+ for u,v in P.edges():
+ assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
+
+ edges=[('A','B',3/3.0),
+ ('A','E',1/3.0),
+ ('A','C',1/3.0),
+ ('A','D',1/3.0),
+ ('B','E',1/3.0),
+ ('B','C',1/3.0),
+ ('B','D',1/3.0),
+ ('C','D',1/3.0)]
+ Panswer=nx.Graph()
+ Panswer.add_weighted_edges_from(edges)
+ P=bipartite.weighted_projected_graph(self.N, 'ABCDE', ratio=True)
+ assert_equal(P.edges(),Panswer.edges())
+ for u,v in P.edges():
+ assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
+
+ def test_project_weighted_overlap(self):
+ edges=[('A','B',2/2.0),
+ ('A','C',1/1.0),
+ ('B','C',1/1.0),
+ ('B','D',1/1.0),
+ ('B','E',2/3.0),
+ ('E','F',1/1.0)]
+ Panswer=nx.Graph()
+ Panswer.add_weighted_edges_from(edges)
+ P=bipartite.overlap_weighted_projected_graph(self.G,'ABCDEF', jaccard=False)
+ assert_equal(P.edges(),Panswer.edges())
+ for u,v in P.edges():
+ assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
+
+ edges=[('A','B',3/3.0),
+ ('A','E',1/1.0),
+ ('A','C',1/1.0),
+ ('A','D',1/1.0),
+ ('B','E',1/1.0),
+ ('B','C',1/1.0),
+ ('B','D',1/1.0),
+ ('C','D',1/1.0)]
+ Panswer=nx.Graph()
+ Panswer.add_weighted_edges_from(edges)
+ P=bipartite.overlap_weighted_projected_graph(self.N,'ABCDE', jaccard=False)
+ assert_equal(P.edges(),Panswer.edges())
+ for u,v in P.edges():
+ assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
+
+ def test_project_weighted_jaccard(self):
+ edges=[('A','B',2/5.0),
+ ('A','C',1/2.0),
+ ('B','C',1/5.0),
+ ('B','D',1/5.0),
+ ('B','E',2/6.0),
+ ('E','F',1/3.0)]
+ Panswer=nx.Graph()
+ Panswer.add_weighted_edges_from(edges)
+ P=bipartite.overlap_weighted_projected_graph(self.G,'ABCDEF')
+ assert_equal(P.edges(),Panswer.edges())
+ for u,v in P.edges():
+ assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
+
+ edges=[('A','B',3/3.0),
+ ('A','E',1/3.0),
+ ('A','C',1/3.0),
+ ('A','D',1/3.0),
+ ('B','E',1/3.0),
+ ('B','C',1/3.0),
+ ('B','D',1/3.0),
+ ('C','D',1/1.0)]
+ Panswer=nx.Graph()
+ Panswer.add_weighted_edges_from(edges)
+ P=bipartite.overlap_weighted_projected_graph(self.N,'ABCDE')
+ assert_equal(P.edges(),Panswer.edges())
+ for u,v in P.edges():
+ assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
+
+ def test_generic_weighted_projected_graph_simple(self):
+ def shared(G, u, v):
+ return len(set(G[u]) & set(G[v]))
+ B = nx.path_graph(5)
+ G = bipartite.generic_weighted_projected_graph(B, [0, 2, 4], weight_function=shared)
+ assert_equal(sorted(G.nodes()), [0, 2, 4])
+ assert_equal(G.edges(data=True),
+ [(0, 2, {'weight': 1}), (2, 4, {'weight': 1})] )
+
+ G = bipartite.generic_weighted_projected_graph(B, [0, 2, 4])
+ assert_equal(sorted(G.nodes()), [0, 2, 4])
+ assert_equal(G.edges(data=True),
+ [(0, 2, {'weight': 1}), (2, 4, {'weight': 1})] )
+ B = nx.DiGraph()
+ B.add_path(list(range(5)))
+ G = bipartite.generic_weighted_projected_graph(B, [0, 2, 4])
+ assert_equal(sorted(G.nodes()), [0, 2, 4])
+ assert_equal(G.edges(data=True),
+ [(0, 2, {'weight': 1}), (2, 4, {'weight': 1})] )
+
+ def test_generic_weighted_projected_graph_custom(self):
+ def jaccard(G, u, v):
+ unbrs = set(G[u])
+ vnbrs = set(G[v])
+ return float(len(unbrs & vnbrs)) / len(unbrs | vnbrs)
+ def my_weight(G, u, v, weight='weight'):
+ w = 0
+ for nbr in set(G[u]) & set(G[v]):
+ w += G.edge[u][nbr].get(weight, 1) + G.edge[v][nbr].get(weight, 1)
+ return w
+ B = nx.complete_bipartite_graph(2,2)
+ for i,(u,v) in enumerate(B.edges()):
+ B.edge[u][v]['weight'] = i + 1
+ G = bipartite.generic_weighted_projected_graph(B, [0, 1],
+ weight_function=jaccard)
+ assert_equal(G.edges(data=True), [(0, 1, {'weight': 1.0})])
+ G = bipartite.generic_weighted_projected_graph(B, [0, 1],
+ weight_function=my_weight)
+ assert_equal(G.edges(data=True), [(0, 1, {'weight': 10})])
+ G = bipartite.generic_weighted_projected_graph(B, [0, 1])
+ assert_equal(G.edges(data=True), [(0, 1, {'weight': 2})])
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/tests/test_spectral_bipartivity.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/tests/test_spectral_bipartivity.py
new file mode 100644
index 0000000..e244a42
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/bipartite/tests/test_spectral_bipartivity.py
@@ -0,0 +1,93 @@
+# -*- coding: utf-8 -*-
+from nose import SkipTest
+from nose.tools import *
+import networkx as nx
+from networkx.algorithms.bipartite import spectral_bipartivity as sb
+
+# Examples from Figure 1
+# E. Estrada and J. A. Rodríguez-Velázquez, "Spectral measures of
+# bipartivity in complex networks", PhysRev E 72, 046105 (2005)
+
+class TestSpectralBipartivity(object):
+ @classmethod
+ def setupClass(cls):
+ global scipy
+ global assert_equal
+ global assert_almost_equal
+ try:
+ import scipy.linalg
+ except ImportError:
+ raise SkipTest('SciPy not available.')
+
+
+ def test_star_like(self):
+ # star-like
+
+ G=nx.star_graph(2)
+ G.add_edge(1,2)
+ assert_almost_equal(sb(G),0.843,places=3)
+
+ G=nx.star_graph(3)
+ G.add_edge(1,2)
+ assert_almost_equal(sb(G),0.871,places=3)
+
+ G=nx.star_graph(4)
+ G.add_edge(1,2)
+ assert_almost_equal(sb(G),0.890,places=3)
+
+
+ def test_k23_like(self):
+ # K2,3-like
+ G=nx.complete_bipartite_graph(2,3)
+ G.add_edge(0,1)
+ assert_almost_equal(sb(G),0.769,places=3)
+
+ G=nx.complete_bipartite_graph(2,3)
+ G.add_edge(2,4)
+ assert_almost_equal(sb(G),0.829,places=3)
+
+ G=nx.complete_bipartite_graph(2,3)
+ G.add_edge(2,4)
+ G.add_edge(3,4)
+ assert_almost_equal(sb(G),0.731,places=3)
+
+
+ G=nx.complete_bipartite_graph(2,3)
+ G.add_edge(0,1)
+ G.add_edge(2,4)
+ assert_almost_equal(sb(G),0.692,places=3)
+
+ G=nx.complete_bipartite_graph(2,3)
+ G.add_edge(2,4)
+ G.add_edge(3,4)
+ G.add_edge(0,1)
+ assert_almost_equal(sb(G),0.645,places=3)
+
+ G=nx.complete_bipartite_graph(2,3)
+ G.add_edge(2,4)
+ G.add_edge(3,4)
+ G.add_edge(2,3)
+ assert_almost_equal(sb(G),0.645,places=3)
+
+ G=nx.complete_bipartite_graph(2,3)
+ G.add_edge(2,4)
+ G.add_edge(3,4)
+ G.add_edge(2,3)
+ G.add_edge(0,1)
+ assert_almost_equal(sb(G),0.597,places=3)
+
+ def test_single_nodes(self):
+
+ # single nodes
+ G=nx.complete_bipartite_graph(2,3)
+ G.add_edge(2,4)
+ sbn=sb(G,nodes=[1,2])
+ assert_almost_equal(sbn[1],0.85,places=2)
+ assert_almost_equal(sbn[2],0.77,places=2)
+
+ G=nx.complete_bipartite_graph(2,3)
+ G.add_edge(0,1)
+ sbn=sb(G,nodes=[1,2])
+ assert_almost_equal(sbn[1],0.73,places=2)
+ assert_almost_equal(sbn[2],0.82,places=2)
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/block.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/block.py
new file mode 100644
index 0000000..cc2ff1d
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/block.py
@@ -0,0 +1,115 @@
+# encoding: utf-8
+"""
+Functions for creating network blockmodels from node partitions.
+
+Created by Drew Conway <drew.conway@nyu.edu>
+Copyright (c) 2010. All rights reserved.
+"""
+__author__ = """\n""".join(['Drew Conway <drew.conway@nyu.edu>',
+ 'Aric Hagberg <hagberg@lanl.gov>'])
+__all__=['blockmodel']
+
+import networkx as nx
+
+def blockmodel(G,partitions,multigraph=False):
+ """Returns a reduced graph constructed using the generalized block modeling
+ technique.
+
+ The blockmodel technique collapses nodes into blocks based on a
+ given partitioning of the node set. Each partition of nodes
+ (block) is represented as a single node in the reduced graph.
+
+ Edges between nodes in the block graph are added according to the
+ edges in the original graph. If the parameter multigraph is False
+ (the default) a single edge is added with a weight equal to the
+ sum of the edge weights between nodes in the original graph.
+ The default is a weight of 1 if weights are not specified. If the
+ parameter multigraph is True then multiple edges are added each
+ with the edge data from the original graph.
+
+ Parameters
+ ----------
+ G : graph
+ A networkx Graph or DiGraph
+ partitions : list of lists, or list of sets
+ The partition of the nodes. Must be non-overlapping.
+ multigraph : bool, optional
+ If True return a MultiGraph with the edge data of the original
+ graph applied to each corresponding edge in the new graph.
+ If False return a Graph with the sum of the edge weights, or a
+ count of the edges if the original graph is unweighted.
+
+ Returns
+ -------
+ blockmodel : a Networkx graph object
+
+ Examples
+ --------
+ >>> G=nx.path_graph(6)
+ >>> partition=[[0,1],[2,3],[4,5]]
+ >>> M=nx.blockmodel(G,partition)
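+ >>> M.edges(data=True) # cut edges collapse to single weighted edges
+ [(0, 1, {'weight': 1.0}), (1, 2, {'weight': 1.0})]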
+
+ References
+ ----------
+ .. [1] Patrick Doreian, Vladimir Batagelj, and Anuska Ferligoj
+ "Generalized Blockmodeling",Cambridge University Press, 2004.
+ """
+ # Create sets of node partitions
+ part=list(map(set,partitions))
+
+ # Check for overlapping node partitions
+ u=set()
+ for p1,p2 in zip(part[:-1],part[1:]):
+ u.update(p1)
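+ # u accumulates all earlier blocks, so each block is checked
+ # against the union of everything that came before it.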
+ #if not u.isdisjoint(p2): # Python 2.6 required
+ if len(u.intersection(p2)) > 0:
+ raise nx.NetworkXException("Overlapping node partitions.")
+
+ # Initialize blockmodel graph
+ if multigraph:
+ if G.is_directed():
+ M=nx.MultiDiGraph()
+ else:
+ M=nx.MultiGraph()
+ else:
+ if G.is_directed():
+ M=nx.DiGraph()
+ else:
+ M=nx.Graph()
+
+ # Add nodes and properties to blockmodel
+ # The blockmodel nodes are node-induced subgraphs of G
+ # Label them with integers starting at 0
+ for i, p in enumerate(part):
+ M.add_node(i)
+ # The node-induced subgraph is stored as the node 'graph' attribute
+ SG=G.subgraph(p)
+ M.node[i]['graph']=SG
+ M.node[i]['nnodes']=SG.number_of_nodes()
+ M.node[i]['nedges']=SG.number_of_edges()
+ M.node[i]['density']=nx.density(SG)
+
+ # Create mapping between original node labels and new blockmodel node labels
+ block_mapping={}
+ for n in M:
+ nodes_in_block=M.node[n]['graph'].nodes()
+ block_mapping.update(dict.fromkeys(nodes_in_block,n))
+
+ # Add edges to block graph
+ for u,v,d in G.edges(data=True):
+ bmu=block_mapping[u]
+ bmv=block_mapping[v]
+ if bmu==bmv: # no self loops
+ continue
+ if multigraph:
+ # For multigraphs add an edge for each edge in original graph
+ M.add_edge(bmu,bmv,attr_dict=d)
+ else:
+ # For graphs and digraphs add single weighted edge
+ weight=d.get('weight',1.0) # default to 1 if no weight specified
+ if M.has_edge(bmu,bmv):
+ M[bmu][bmv]['weight']+=weight
+ else:
+ M.add_edge(bmu,bmv,weight=weight)
+ return M
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/boundary.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/boundary.py
new file mode 100644
index 0000000..ec7b11c
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/boundary.py
@@ -0,0 +1,102 @@
+"""
+Routines to find the boundary of a set of nodes.
+
+Edge boundaries are edges that have only one end
+in the set of nodes.
+
+Node boundaries are nodes outside the set of nodes
+that have an edge to a node in the set.
+
+"""
+__author__ = """Aric Hagberg (hagberg@lanl.gov)\nPieter Swart (swart@lanl.gov)\nDan Schult (dschult@colgate.edu)"""
+# Copyright (C) 2004-2008 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+
+__all__=['edge_boundary','node_boundary']
+
+def edge_boundary(G, nbunch1, nbunch2=None):
+ """Return the edge boundary.
+
+ Edge boundaries are edges that have only one end
+ in the given set of nodes.
+
+ Parameters
+ ----------
+ G : graph
+ A networkx graph
+
+ nbunch1 : list, container
+ Interior node set
+
+ nbunch2 : list, container
+ Exterior node set. If None then it is set to all of the
+ nodes in G not in nbunch1.
+
+ Returns
+ -------
+ elist : list
+ List of edges
+
+ Notes
+ -----
+ Nodes in nbunch1 and nbunch2 that are not in G are ignored.
+
+ nbunch1 and nbunch2 are usually meant to be disjoint,
+ but in the interest of speed and generality, that is
+ not required here.
+
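+ Examples
+ --------
+ A minimal sketch of typical usage (edge order follows internal set
+ iteration order):
+
+ >>> import networkx as nx
+ >>> G = nx.path_graph(4)
+ >>> edge_boundary(G, [0, 1])
+ [(1, 2)]
+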
+ """
+ if nbunch2 is None: # Then nbunch2 is complement of nbunch1
+ nset1=set((n for n in nbunch1 if n in G))
+ return [(n1,n2) for n1 in nset1 for n2 in G[n1] \
+ if n2 not in nset1]
+
+ nset2=set(nbunch2)
+ return [(n1,n2) for n1 in nbunch1 if n1 in G for n2 in G[n1] \
+ if n2 in nset2]
+
+def node_boundary(G, nbunch1, nbunch2=None):
+ """Return the node boundary.
+
+ The node boundary is the set of all nodes outside the given node set
+ that are adjacent to at least one node in the set, i.e. the exterior
+ endpoints of the edge boundary.
+
+ Parameters
+ ----------
+ G : graph
+ A networkx graph
+
+ nbunch1 : list, container
+ Interior node set
+
+ nbunch2 : list, container
+ Exterior node set. If None then it is set to all of the
+ nodes in G not in nbunch1.
+
+ Returns
+ -------
+ nlist : list
+ List of nodes.
+
+ Notes
+ -----
+ Nodes in nbunch1 and nbunch2 that are not in G are ignored.
+
+ nbunch1 and nbunch2 are usually meant to be disjoint,
+ but in the interest of speed and generality, that is
+ not required here.
+
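+ Examples
+ --------
+ A minimal sketch of typical usage (node order in the returned list is
+ arbitrary):
+
+ >>> import networkx as nx
+ >>> G = nx.path_graph(4)
+ >>> node_boundary(G, [0, 1])
+ [2]
+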
+ """
+ nset1=set(n for n in nbunch1 if n in G)
+ bdy=set()
+ for n1 in nset1:
+ bdy.update(G[n1])
+ bdy -= nset1
+ if nbunch2 is not None: # else nbunch2 is complement of nbunch1
+ bdy &= set(nbunch2)
+ return list(bdy)
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/__init__.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/__init__.py
new file mode 100644
index 0000000..d60154b
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/__init__.py
@@ -0,0 +1,20 @@
+from networkx.algorithms.centrality.betweenness import *
+from networkx.algorithms.centrality.betweenness_subset import *
+from networkx.algorithms.centrality.closeness import *
+from networkx.algorithms.centrality.current_flow_closeness import *
+from networkx.algorithms.centrality.current_flow_betweenness import *
+from networkx.algorithms.centrality.current_flow_betweenness_subset import *
+from networkx.algorithms.centrality.degree_alg import *
+from networkx.algorithms.centrality.eigenvector import *
+from networkx.algorithms.centrality.katz import *
+from networkx.algorithms.centrality.load import *
+from networkx.algorithms.centrality.communicability_alg import *
+import networkx.algorithms.centrality.betweenness
+import networkx.algorithms.centrality.closeness
+import networkx.algorithms.centrality.current_flow_betweenness
+import networkx.algorithms.centrality.current_flow_closeness
+import networkx.algorithms.centrality.degree_alg
+import networkx.algorithms.centrality.eigenvector
+import networkx.algorithms.centrality.load
+import networkx.algorithms.centrality.communicability_alg
+import networkx.algorithms.centrality.katz
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/betweenness.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/betweenness.py
new file mode 100644
index 0000000..af2cf6a
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/betweenness.py
@@ -0,0 +1,334 @@
+"""
+Betweenness centrality measures.
+"""
+# Copyright (C) 2004-2011 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+import heapq
+import networkx as nx
+import random
+__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
+
+__all__ = ['betweenness_centrality',
+ 'edge_betweenness_centrality',
+ 'edge_betweenness']
+
+def betweenness_centrality(G, k=None, normalized=True, weight=None,
+ endpoints=False,
+ seed=None):
+ r"""Compute the shortest-path betweenness centrality for nodes.
+
+ Betweenness centrality of a node `v` is the sum of the
+ fraction of all-pairs shortest paths that pass through `v`:
+
+ .. math::
+
+ c_B(v) =\sum_{s,t \in V} \frac{\sigma(s, t|v)}{\sigma(s, t)}
+
+ where `V` is the set of nodes, `\sigma(s, t)` is the number of
+ shortest `(s, t)`-paths, and `\sigma(s, t|v)` is the number of those
+ paths passing through some node `v` other than `s, t`.
+ If `s = t`, `\sigma(s, t) = 1`, and if `v \in \{s, t\}`,
+ `\sigma(s, t|v) = 0` [2]_.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph
+
+ k : int, optional (default=None)
+ If k is not None use k node samples to estimate betweenness.
+ The value of k must be at most n, where n is the number of nodes in the graph.
+ Higher values give better approximation.
+
+ normalized : bool, optional
+ If True the betweenness values are normalized by `2/((n-1)(n-2))`
+ for undirected graphs, and `1/((n-1)(n-2))` for directed graphs where `n`
+ is the number of nodes in G.
+
+ weight : None or string, optional
+ If None, all edge weights are considered equal.
+ Otherwise holds the name of the edge attribute used as weight.
+
+ endpoints : bool, optional
+ If True include the endpoints in the shortest path counts.
+
+ Returns
+ -------
+ nodes : dictionary
+ Dictionary of nodes with betweenness centrality as the value.
+
+ See Also
+ --------
+ edge_betweenness_centrality
+ load_centrality
+
+ Notes
+ -----
+ The algorithm is from Ulrik Brandes [1]_.
+ See [2]_ for details on algorithms for variations and related metrics.
+
+ For approximate betweenness calculations set k=#samples to use
+ k nodes ("pivots") to estimate the betweenness values. For an estimate
+ of the number of pivots needed see [3]_.
+
+ For weighted graphs the edge weights must be greater than zero.
+ Zero edge weights can produce an infinite number of equal length
+ paths between pairs of nodes.
+
+ References
+ ----------
+ .. [1] A Faster Algorithm for Betweenness Centrality.
+ Ulrik Brandes,
+ Journal of Mathematical Sociology 25(2):163-177, 2001.
+ http://www.inf.uni-konstanz.de/algo/publications/b-fabc-01.pdf
+ .. [2] Ulrik Brandes: On Variants of Shortest-Path Betweenness
+ Centrality and their Generic Computation.
+ Social Networks 30(2):136-145, 2008.
+ http://www.inf.uni-konstanz.de/algo/publications/b-vspbc-08.pdf
+ .. [3] Ulrik Brandes and Christian Pich:
+ Centrality Estimation in Large Networks.
+ International Journal of Bifurcation and Chaos 17(7):2303-2318, 2007.
+ http://www.inf.uni-konstanz.de/algo/publications/bp-celn-06.pdf
+ """
+ betweenness=dict.fromkeys(G,0.0) # b[v]=0 for v in G
+ if k is None:
+ nodes = G
+ else:
+ random.seed(seed)
+ nodes = random.sample(G.nodes(), k)
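+ # Estimate betweenness from a random sample of k source nodes
+ # ("pivots"); _rescale() compensates by scaling sums up by n/k.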
+ for s in nodes:
+ # single source shortest paths
+ if weight is None: # use BFS
+ S,P,sigma=_single_source_shortest_path_basic(G,s)
+ else: # use Dijkstra's algorithm
+ S,P,sigma=_single_source_dijkstra_path_basic(G,s,weight)
+ # accumulation
+ if endpoints:
+ betweenness=_accumulate_endpoints(betweenness,S,P,sigma,s)
+ else:
+ betweenness=_accumulate_basic(betweenness,S,P,sigma,s)
+ # rescaling
+ betweenness=_rescale(betweenness, len(G),
+ normalized=normalized,
+ directed=G.is_directed(),
+ k=k)
+ return betweenness
+
+
+def edge_betweenness_centrality(G,normalized=True,weight=None):
+ r"""Compute betweenness centrality for edges.
+
+ Betweenness centrality of an edge `e` is the sum of the
+ fraction of all-pairs shortest paths that pass through `e`:
+
+ .. math::
+
+ c_B(e) = \sum_{s,t \in V} \frac{\sigma(s, t|e)}{\sigma(s, t)}
+
+ where `V` is the set of nodes, `\sigma(s, t)` is the number of
+ shortest `(s, t)`-paths, and `\sigma(s, t|e)` is the number of
+ those paths passing through edge `e` [2]_.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph
+
+ normalized : bool, optional
+ If True the betweenness values are normalized by `2/(n(n-1))`
+ for undirected graphs, and `1/(n(n-1))` for directed graphs where `n`
+ is the number of nodes in G.
+
+ weight : None or string, optional
+ If None, all edge weights are considered equal.
+ Otherwise holds the name of the edge attribute used as weight.
+
+ Returns
+ -------
+ edges : dictionary
+ Dictionary of edges with betweenness centrality as the value.
+
+ See Also
+ --------
+ betweenness_centrality
+ edge_load
+
+ Notes
+ -----
+ The algorithm is from Ulrik Brandes [1]_.
+
+ For weighted graphs the edge weights must be greater than zero.
+ Zero edge weights can produce an infinite number of equal length
+ paths between pairs of nodes.
+
+ References
+ ----------
+ .. [1] A Faster Algorithm for Betweenness Centrality. Ulrik Brandes,
+ Journal of Mathematical Sociology 25(2):163-177, 2001.
+ http://www.inf.uni-konstanz.de/algo/publications/b-fabc-01.pdf
+ .. [2] Ulrik Brandes: On Variants of Shortest-Path Betweenness
+ Centrality and their Generic Computation.
+ Social Networks 30(2):136-145, 2008.
+ http://www.inf.uni-konstanz.de/algo/publications/b-vspbc-08.pdf
+ """
+ betweenness=dict.fromkeys(G,0.0) # b[v]=0 for v in G
+ # b[e]=0 for e in G.edges()
+ betweenness.update(dict.fromkeys(G.edges(),0.0))
+ for s in G:
+ # single source shortest paths
+ if weight is None: # use BFS
+ S,P,sigma=_single_source_shortest_path_basic(G,s)
+ else: # use Dijkstra's algorithm
+ S,P,sigma=_single_source_dijkstra_path_basic(G,s,weight)
+ # accumulation
+ betweenness=_accumulate_edges(betweenness,S,P,sigma,s)
+ # rescaling
+ for n in G: # remove nodes to only return edges
+ del betweenness[n]
+ betweenness=_rescale_e(betweenness, len(G),
+ normalized=normalized,
+ directed=G.is_directed())
+ return betweenness
+
+# obsolete name
+def edge_betweenness(G,normalized=True,weight=None):
+ return edge_betweenness_centrality(G,normalized,weight)
+
+
+# helpers for betweenness centrality
+
+def _single_source_shortest_path_basic(G,s):
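+ # Brandes' BFS stage: S records nodes in order of non-decreasing
+ # distance from s, P maps each node to its shortest-path predecessors,
+ # and sigma counts the number of shortest paths from s.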
+ S=[]
+ P={}
+ for v in G:
+ P[v]=[]
+ sigma=dict.fromkeys(G,0.0) # sigma[v]=0 for v in G
+ D={}
+ sigma[s]=1.0
+ D[s]=0
+ Q=[s]
+ while Q: # use BFS to find shortest paths
+ v=Q.pop(0)
+ S.append(v)
+ Dv=D[v]
+ sigmav=sigma[v]
+ for w in G[v]:
+ if w not in D:
+ Q.append(w)
+ D[w]=Dv+1
+ if D[w]==Dv+1: # this is a shortest path, count paths
+ sigma[w] += sigmav
+ P[w].append(v) # predecessors
+ return S,P,sigma
+
+
+
+def _single_source_dijkstra_path_basic(G,s,weight='weight'):
+ # modified from Eppstein
+ S=[]
+ P={}
+ for v in G:
+ P[v]=[]
+ sigma=dict.fromkeys(G,0.0) # sigma[v]=0 for v in G
+ D={}
+ sigma[s]=1.0
+ push=heapq.heappush
+ pop=heapq.heappop
+ seen = {s:0}
+ Q=[] # use Q as heap with (distance,node id) tuples
+ push(Q,(0,s,s))
+ while Q:
+ (dist,pred,v)=pop(Q)
+ if v in D:
+ continue # already searched this node.
+ sigma[v] += sigma[pred] # count paths
+ S.append(v)
+ D[v] = dist
+ for w,edgedata in G[v].items():
+ vw_dist = dist + edgedata.get(weight,1)
+ if w not in D and (w not in seen or vw_dist < seen[w]):
+ seen[w] = vw_dist
+ push(Q,(vw_dist,v,w))
+ sigma[w]=0.0
+ P[w]=[v]
+ elif vw_dist==seen[w]: # handle equal paths
+ sigma[w] += sigma[v]
+ P[w].append(v)
+ return S,P,sigma
+
+def _accumulate_basic(betweenness,S,P,sigma,s):
+ delta=dict.fromkeys(S,0)
+ while S:
+ w=S.pop()
+ coeff=(1.0+delta[w])/sigma[w]
+ for v in P[w]:
+ delta[v] += sigma[v]*coeff
+ if w != s:
+ betweenness[w]+=delta[w]
+ return betweenness
+
+def _accumulate_endpoints(betweenness,S,P,sigma,s):
+ betweenness[s]+=len(S)-1
+ delta=dict.fromkeys(S,0)
+ while S:
+ w=S.pop()
+ coeff=(1.0+delta[w])/sigma[w]
+ for v in P[w]:
+ delta[v] += sigma[v]*coeff
+ if w != s:
+ betweenness[w] += delta[w]+1
+ return betweenness
+
+def _accumulate_edges(betweenness,S,P,sigma,s):
+ delta=dict.fromkeys(S,0)
+ while S:
+ w=S.pop()
+ coeff=(1.0+delta[w])/sigma[w]
+ for v in P[w]:
+ c=sigma[v]*coeff
+ if (v,w) not in betweenness:
+ betweenness[(w,v)]+=c
+ else:
+ betweenness[(v,w)]+=c
+ delta[v]+=c
+ if w != s:
+ betweenness[w]+=delta[w]
+ return betweenness
+
+def _rescale(betweenness,n,normalized,directed=False,k=None):
+ if normalized is True:
+ if n <=2:
+ scale=None # no normalization b=0 for all nodes
+ else:
+ scale=1.0/((n-1)*(n-2))
+ else: # rescale by 2 for undirected graphs
+ if not directed:
+ scale=1.0/2.0
+ else:
+ scale=None
+ if scale is not None:
+ if k is not None:
+ scale=scale*n/k
+ for v in betweenness:
+ betweenness[v] *= scale
+ return betweenness
+
+def _rescale_e(betweenness,n,normalized,directed=False):
+ if normalized is True:
+ if n <=1:
+ scale=None # no normalization b=0 for all nodes
+ else:
+ scale=1.0/(n*(n-1))
+ else: # rescale by 2 for undirected graphs
+ if not directed:
+ scale=1.0/2.0
+ else:
+ scale=None
+ if scale is not None:
+ for v in betweenness:
+ betweenness[v] *= scale
+ return betweenness
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/betweenness_subset.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/betweenness_subset.py
new file mode 100644
index 0000000..cd4f8a2
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/betweenness_subset.py
@@ -0,0 +1,263 @@
+"""
+Betweenness centrality measures for subsets of nodes.
+"""
+# Copyright (C) 2004-2011 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
+
+__all__ = ['betweenness_centrality_subset',
+ 'edge_betweenness_centrality_subset',
+ 'betweenness_centrality_source']
+
+import networkx as nx
+
+from networkx.algorithms.centrality.betweenness import\
+ _single_source_dijkstra_path_basic as dijkstra
+from networkx.algorithms.centrality.betweenness import\
+ _single_source_shortest_path_basic as shortest_path
+
+
+def betweenness_centrality_subset(G,sources,targets,
+ normalized=False,
+ weight=None):
+ """Compute betweenness centrality for a subset of nodes.
+
+ .. math::
+
+ c_B(v) =\sum_{s\in S, t \in T} \frac{\sigma(s, t|v)}{\sigma(s, t)}
+
+ where `S` is the set of sources, `T` is the set of targets,
+ `\sigma(s, t)` is the number of shortest `(s, t)`-paths,
+ and `\sigma(s, t|v)` is the number of those paths
+ passing through some node `v` other than `s, t`.
+ If `s = t`, `\sigma(s, t) = 1`,
+ and if `v \in {s, t}`, `\sigma(s, t|v) = 0` [2]_.
+
+
+ Parameters
+ ----------
+ G : graph
+
+ sources: list of nodes
+ Nodes to use as sources for shortest paths in betweenness
+
+ targets: list of nodes
+ Nodes to use as targets for shortest paths in betweenness
+
+ normalized : bool, optional
+ If True the betweenness values are normalized by `2/((n-1)(n-2))`
+      for undirected graphs, and `1/((n-1)(n-2))` for directed graphs where `n`
+ is the number of nodes in G.
+
+ weight : None or string, optional
+ If None, all edge weights are considered equal.
+ Otherwise holds the name of the edge attribute used as weight.
+
+ Returns
+ -------
+ nodes : dictionary
+ Dictionary of nodes with betweenness centrality as the value.
+
+ See Also
+ --------
+ edge_betweenness_centrality
+ load_centrality
+
+ Notes
+ -----
+ The basic algorithm is from [1]_.
+
+ For weighted graphs the edge weights must be greater than zero.
+ Zero edge weights can produce an infinite number of equal length
+ paths between pairs of nodes.
+
+ The normalization might seem a little strange but it is the same
+ as in betweenness_centrality() and is designed to make
+ betweenness_centrality(G) be the same as
+ betweenness_centrality_subset(G,sources=G.nodes(),targets=G.nodes()).
+
+
+ References
+ ----------
+ .. [1] Ulrik Brandes, A Faster Algorithm for Betweenness Centrality.
+ Journal of Mathematical Sociology 25(2):163-177, 2001.
+ http://www.inf.uni-konstanz.de/algo/publications/b-fabc-01.pdf
+ .. [2] Ulrik Brandes: On Variants of Shortest-Path Betweenness
+ Centrality and their Generic Computation.
+ Social Networks 30(2):136-145, 2008.
+ http://www.inf.uni-konstanz.de/algo/publications/b-vspbc-08.pdf
+ """
+ b=dict.fromkeys(G,0.0) # b[v]=0 for v in G
+ for s in sources:
+ # single source shortest paths
+ if weight is None: # use BFS
+ S,P,sigma=shortest_path(G,s)
+ else: # use Dijkstra's algorithm
+ S,P,sigma=dijkstra(G,s,weight)
+ b=_accumulate_subset(b,S,P,sigma,s,targets)
+ b=_rescale(b,len(G),normalized=normalized,directed=G.is_directed())
+ return b
+
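+# A minimal usage sketch (hedged; assumes networkx is importable as nx):
+# with a single source/target pair on a path graph, only the interior nodes
+# are credited, each with 1/2 after the undirected rescaling.
+#
+#     >>> import networkx as nx
+#     >>> G = nx.path_graph(4)
+#     >>> b = nx.betweenness_centrality_subset(G, sources=[0], targets=[3])
+#     >>> b[1] == b[2] == 0.5
+#     True
+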
+
+def edge_betweenness_centrality_subset(G,sources,targets,
+ normalized=False,
+ weight=None):
+ """Compute betweenness centrality for edges for a subset of nodes.
+
+ .. math::
+
+ c_B(v) =\sum_{s\in S,t \in T} \frac{\sigma(s, t|e)}{\sigma(s, t)}
+
+ where `S` is the set of sources, `T` is the set of targets,
+ `\sigma(s, t)` is the number of shortest `(s, t)`-paths,
+ and `\sigma(s, t|e)` is the number of those paths
+ passing through edge `e` [2]_.
+
+ Parameters
+ ----------
+ G : graph
+ A networkx graph
+
+ sources: list of nodes
+ Nodes to use as sources for shortest paths in betweenness
+
+ targets: list of nodes
+ Nodes to use as targets for shortest paths in betweenness
+
+ normalized : bool, optional
+ If True the betweenness values are normalized by `2/(n(n-1))`
+      for undirected graphs, and `1/(n(n-1))` for directed graphs where `n`
+ is the number of nodes in G.
+
+ weight : None or string, optional
+ If None, all edge weights are considered equal.
+ Otherwise holds the name of the edge attribute used as weight.
+
+ Returns
+ -------
+ edges : dictionary
+        Dictionary of edges with betweenness centrality as the value.
+
+ See Also
+ --------
+ betweenness_centrality
+ edge_load
+
+ Notes
+ -----
+ The basic algorithm is from [1]_.
+
+ For weighted graphs the edge weights must be greater than zero.
+ Zero edge weights can produce an infinite number of equal length
+ paths between pairs of nodes.
+
+ The normalization might seem a little strange but it is the same
+ as in edge_betweenness_centrality() and is designed to make
+ edge_betweenness_centrality(G) be the same as
+ edge_betweenness_centrality_subset(G,sources=G.nodes(),targets=G.nodes()).
+
+ References
+ ----------
+ .. [1] Ulrik Brandes, A Faster Algorithm for Betweenness Centrality.
+ Journal of Mathematical Sociology 25(2):163-177, 2001.
+ http://www.inf.uni-konstanz.de/algo/publications/b-fabc-01.pdf
+ .. [2] Ulrik Brandes: On Variants of Shortest-Path Betweenness
+ Centrality and their Generic Computation.
+ Social Networks 30(2):136-145, 2008.
+ http://www.inf.uni-konstanz.de/algo/publications/b-vspbc-08.pdf
+
+ """
+
+ b=dict.fromkeys(G,0.0) # b[v]=0 for v in G
+ b.update(dict.fromkeys(G.edges(),0.0)) # b[e] for e in G.edges()
+ for s in sources:
+ # single source shortest paths
+ if weight is None: # use BFS
+ S,P,sigma=shortest_path(G,s)
+ else: # use Dijkstra's algorithm
+ S,P,sigma=dijkstra(G,s,weight)
+ b=_accumulate_edges_subset(b,S,P,sigma,s,targets)
+ for n in G: # remove nodes to only return edges
+ del b[n]
+ b=_rescale_e(b,len(G),normalized=normalized,directed=G.is_directed())
+ return b
+
+# obsolete name
+def betweenness_centrality_source(G,normalized=True,weight=None,sources=None):
+ if sources is None:
+ sources=G.nodes()
+ targets=G.nodes()
+ return betweenness_centrality_subset(G,sources,targets,normalized,weight)
+
+
+def _accumulate_subset(betweenness,S,P,sigma,s,targets):
+ delta=dict.fromkeys(S,0)
+ target_set=set(targets)
+ while S:
+ w=S.pop()
+ for v in P[w]:
+ if w in target_set:
+ delta[v]+=(sigma[v]/sigma[w])*(1.0+delta[w])
+ else:
+ delta[v]+=delta[w]/len(P[w])
+ if w != s:
+ betweenness[w]+=delta[w]
+ return betweenness
+
+def _accumulate_edges_subset(betweenness,S,P,sigma,s,targets):
+ delta=dict.fromkeys(S,0)
+ target_set=set(targets)
+ while S:
+ w=S.pop()
+ for v in P[w]:
+ if w in target_set:
+ c=(sigma[v]/sigma[w])*(1.0+delta[w])
+ else:
+ c=delta[w]/len(P[w])
+ if (v,w) not in betweenness:
+ betweenness[(w,v)]+=c
+ else:
+ betweenness[(v,w)]+=c
+ delta[v]+=c
+ if w != s:
+ betweenness[w]+=delta[w]
+ return betweenness
+
+
+
+
+def _rescale(betweenness,n,normalized,directed=False):
+ if normalized is True:
+ if n <=2:
+ scale=None # no normalization b=0 for all nodes
+ else:
+ scale=1.0/((n-1)*(n-2))
+ else: # rescale by 2 for undirected graphs
+ if not directed:
+ scale=1.0/2.0
+ else:
+ scale=None
+ if scale is not None:
+ for v in betweenness:
+ betweenness[v] *= scale
+ return betweenness
+
+def _rescale_e(betweenness,n,normalized,directed=False):
+ if normalized is True:
+ if n <=1:
+ scale=None # no normalization b=0 for all nodes
+ else:
+ scale=1.0/(n*(n-1))
+ else: # rescale by 2 for undirected graphs
+ if not directed:
+ scale=1.0/2.0
+ else:
+ scale=None
+ if scale is not None:
+ for v in betweenness:
+ betweenness[v] *= scale
+ return betweenness
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/closeness.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/closeness.py
new file mode 100644
index 0000000..67d8089
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/closeness.py
@@ -0,0 +1,103 @@
+"""
+Closeness centrality measures.
+"""
+# Copyright (C) 2004-2013 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+import functools
+import networkx as nx
+__author__ = "\n".join(['Aric Hagberg <aric.hagberg@gmail.com>',
+ 'Pieter Swart (swart@lanl.gov)',
+ 'Sasha Gutfraind (ag362@cornell.edu)'])
+__all__ = ['closeness_centrality']
+
+
+def closeness_centrality(G, u=None, distance=None, normalized=True):
+ r"""Compute closeness centrality for nodes.
+
+ Closeness centrality [1]_ of a node `u` is the reciprocal of the
+ sum of the shortest path distances from `u` to all `n-1` other nodes.
+ Since the sum of distances depends on the number of nodes in the
+ graph, closeness is normalized by the sum of minimum possible
+ distances `n-1`.
+
+ .. math::
+
+ C(u) = \frac{n - 1}{\sum_{v=1}^{n} d(v, u)},
+
+ where `d(v, u)` is the shortest-path distance between `v` and `u`,
+ and `n` is the number of nodes in the graph.
+
+ Notice that higher values of closeness indicate higher centrality.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph
+ u : node, optional
+ Return only the value for node u
+ distance : edge attribute key, optional (default=None)
+ Use the specified edge attribute as the edge distance in shortest
+ path calculations
+ normalized : bool, optional
+ If True (default) normalize by the number of nodes in the connected
+ part of the graph.
+
+ Returns
+ -------
+ nodes : dictionary
+ Dictionary of nodes with closeness centrality as the value.
+
+ See Also
+ --------
+ betweenness_centrality, load_centrality, eigenvector_centrality,
+ degree_centrality
+
+ Notes
+ -----
+ The closeness centrality is normalized to `(n-1)/(|G|-1)` where
+ `n` is the number of nodes in the connected part of graph
+ containing the node. If the graph is not completely connected,
+ this algorithm computes the closeness centrality for each
+ connected part separately.
+
+ If the 'distance' keyword is set to an edge attribute key then the
+ shortest-path length will be computed using Dijkstra's algorithm with
+ that edge attribute as the edge weight.
+
+ References
+ ----------
+ .. [1] Freeman, L.C., 1979. Centrality in networks: I.
+ Conceptual clarification. Social Networks 1, 215--239.
+ http://www.soc.ucsb.edu/faculty/friedkin/Syllabi/Soc146/Freeman78.PDF
+ """
+ if distance is not None:
+ # use Dijkstra's algorithm with specified attribute as edge weight
+ path_length = functools.partial(nx.single_source_dijkstra_path_length,
+ weight=distance)
+ else:
+ path_length = nx.single_source_shortest_path_length
+
+ if u is None:
+ nodes = G.nodes()
+ else:
+ nodes = [u]
+ closeness_centrality = {}
+ for n in nodes:
+ sp = path_length(G,n)
+ totsp = sum(sp.values())
+ if totsp > 0.0 and len(G) > 1:
+ closeness_centrality[n] = (len(sp)-1.0) / totsp
+ # normalize to number of nodes-1 in connected part
+ if normalized:
+ s = (len(sp)-1.0) / ( len(G) - 1 )
+ closeness_centrality[n] *= s
+ else:
+ closeness_centrality[n] = 0.0
+ if u is not None:
+ return closeness_centrality[u]
+ else:
+ return closeness_centrality
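+
+# A minimal usage sketch (hedged; assumes networkx is importable as nx):
+# node 1 of a 4-node path graph has distances 1, 1, 2 to the other nodes,
+# so its normalized closeness is (4-1)/(1+1+2).
+#
+#     >>> import networkx as nx
+#     >>> nx.closeness_centrality(nx.path_graph(4))[1]
+#     0.75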
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/communicability_alg.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/communicability_alg.py
new file mode 100644
index 0000000..3c8fc6b
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/communicability_alg.py
@@ -0,0 +1,495 @@
+"""
+Communicability and centrality measures.
+"""
+# Copyright (C) 2011 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+import networkx as nx
+from networkx.utils import *
+__author__ = "\n".join(['Aric Hagberg (hagberg@lanl.gov)',
+                        'Franck Kalala (franckkalala@yahoo.fr)'])
+__all__ = ['communicability_centrality_exp',
+ 'communicability_centrality',
+ 'communicability_betweenness_centrality',
+ 'communicability',
+ 'communicability_exp',
+ 'estrada_index',
+ ]
+
+@require('scipy')
+@not_implemented_for('directed')
+@not_implemented_for('multigraph')
+def communicability_centrality_exp(G):
+ r"""Return the communicability centrality for each node of G
+
+ Communicability centrality, also called subgraph centrality, of a node `n`
+ is the sum of closed walks of all lengths starting and ending at node `n`.
+
+ Parameters
+ ----------
+ G: graph
+
+ Returns
+ -------
+ nodes:dictionary
+ Dictionary of nodes with communicability centrality as the value.
+
+ Raises
+ ------
+ NetworkXError
+ If the graph is not undirected and simple.
+
+ See Also
+ --------
+ communicability:
+ Communicability between all pairs of nodes in G.
+ communicability_centrality:
+ Communicability centrality for each node of G.
+
+ Notes
+ -----
+ This version of the algorithm exponentiates the adjacency matrix.
+ The communicability centrality of a node `u` in G can be found using
+ the matrix exponential of the adjacency matrix of G [1]_ [2]_,
+
+ .. math::
+
+ SC(u)=(e^A)_{uu} .
+
+ References
+ ----------
+ .. [1] Ernesto Estrada, Juan A. Rodriguez-Velazquez,
+ "Subgraph centrality in complex networks",
+ Physical Review E 71, 056103 (2005).
+ http://arxiv.org/abs/cond-mat/0504730
+
+ .. [2] Ernesto Estrada, Naomichi Hatano,
+ "Communicability in complex networks",
+ Phys. Rev. E 77, 036111 (2008).
+ http://arxiv.org/abs/0707.0756
+
+ Examples
+ --------
+ >>> G = nx.Graph([(0,1),(1,2),(1,5),(5,4),(2,4),(2,3),(4,3),(3,6)])
+ >>> sc = nx.communicability_centrality_exp(G)
+ """
+ # alternative implementation that calculates the matrix exponential
+ import scipy.linalg
+ nodelist = G.nodes() # ordering of nodes in matrix
+ A = nx.to_numpy_matrix(G,nodelist)
+ # convert to 0-1 matrix
+ A[A!=0.0] = 1
+ expA = scipy.linalg.expm(A)
+ # convert diagonal to dictionary keyed by node
+ sc = dict(zip(nodelist,map(float,expA.diagonal())))
+ return sc
+
+@require('numpy')
+@not_implemented_for('directed')
+@not_implemented_for('multigraph')
+def communicability_centrality(G):
+ r"""Return communicability centrality for each node in G.
+
+ Communicability centrality, also called subgraph centrality, of a node `n`
+ is the sum of closed walks of all lengths starting and ending at node `n`.
+
+ Parameters
+ ----------
+ G: graph
+
+ Returns
+ -------
+ nodes: dictionary
+ Dictionary of nodes with communicability centrality as the value.
+
+ Raises
+ ------
+ NetworkXError
+ If the graph is not undirected and simple.
+
+ See Also
+ --------
+ communicability:
+ Communicability between all pairs of nodes in G.
+    communicability_centrality_exp:
+       Communicability centrality for each node of G using the
+       matrix exponential.
+
+ Notes
+ -----
+ This version of the algorithm computes eigenvalues and eigenvectors
+ of the adjacency matrix.
+
+ Communicability centrality of a node `u` in G can be found using
+ a spectral decomposition of the adjacency matrix [1]_ [2]_,
+
+ .. math::
+
+ SC(u)=\sum_{j=1}^{N}(v_{j}^{u})^2 e^{\lambda_{j}},
+
+ where `v_j` is an eigenvector of the adjacency matrix `A` of G
+    corresponding to the eigenvalue `\lambda_j`.
+
+ Examples
+ --------
+ >>> G = nx.Graph([(0,1),(1,2),(1,5),(5,4),(2,4),(2,3),(4,3),(3,6)])
+ >>> sc = nx.communicability_centrality(G)
+
+ References
+ ----------
+ .. [1] Ernesto Estrada, Juan A. Rodriguez-Velazquez,
+ "Subgraph centrality in complex networks",
+ Physical Review E 71, 056103 (2005).
+ http://arxiv.org/abs/cond-mat/0504730
+ .. [2] Ernesto Estrada, Naomichi Hatano,
+ "Communicability in complex networks",
+ Phys. Rev. E 77, 036111 (2008).
+ http://arxiv.org/abs/0707.0756
+ """
+ import numpy
+ import numpy.linalg
+ nodelist = G.nodes() # ordering of nodes in matrix
+ A = nx.to_numpy_matrix(G,nodelist)
+ # convert to 0-1 matrix
+ A[A!=0.0] = 1
+ w,v = numpy.linalg.eigh(A)
+ vsquare = numpy.array(v)**2
+ expw = numpy.exp(w)
+ xg = numpy.dot(vsquare,expw)
+ # convert vector dictionary keyed by node
+ sc = dict(zip(nodelist,map(float,xg)))
+ return sc
+
+@require('scipy')
+@not_implemented_for('directed')
+@not_implemented_for('multigraph')
+def communicability_betweenness_centrality(G, normalized=True):
+ r"""Return communicability betweenness for all pairs of nodes in G.
+
+ Communicability betweenness measure makes use of the number of walks
+ connecting every pair of nodes as the basis of a betweenness centrality
+ measure.
+
+ Parameters
+ ----------
+ G: graph
+
+ Returns
+ -------
+ nodes:dictionary
+ Dictionary of nodes with communicability betweenness as the value.
+
+ Raises
+ ------
+ NetworkXError
+ If the graph is not undirected and simple.
+
+ See Also
+ --------
+ communicability:
+ Communicability between all pairs of nodes in G.
+    communicability_centrality_exp:
+       Communicability centrality for each node of G using the matrix
+       exponential.
+    communicability_centrality:
+       Communicability centrality for each node in G using
+       spectral decomposition.
+
+ Notes
+ -----
+ Let `G=(V,E)` be a simple undirected graph with `n` nodes and `m` edges,
+ and `A` denote the adjacency matrix of `G`.
+
+ Let `G(r)=(V,E(r))` be the graph resulting from
+ removing all edges connected to node `r` but not the node itself.
+
+ The adjacency matrix for `G(r)` is `A+E(r)`, where `E(r)` has nonzeros
+ only in row and column `r`.
+
+ The communicability betweenness of a node `r` is [1]_
+
+ .. math::
+ \omega_{r} = \frac{1}{C}\sum_{p}\sum_{q}\frac{G_{prq}}{G_{pq}},
+ p\neq q, q\neq r,
+
+ where
+    `G_{prq}=(e^{A})_{pq} - (e^{A+E(r)})_{pq}` is the number of walks
+    involving node r,
+    `G_{pq}=(e^{A})_{pq}` is the number of walks starting
+    at node `p` and ending at node `q`,
+ and `C=(n-1)^{2}-(n-1)` is a normalization factor equal to the
+ number of terms in the sum.
+
+ The resulting `\omega_{r}` takes values between zero and one.
+ The lower bound cannot be attained for a connected
+ graph, and the upper bound is attained in the star graph.
+
+ References
+ ----------
+ .. [1] Ernesto Estrada, Desmond J. Higham, Naomichi Hatano,
+ "Communicability Betweenness in Complex Networks"
+ Physica A 388 (2009) 764-774.
+ http://arxiv.org/abs/0905.4102
+
+ Examples
+ --------
+ >>> G = nx.Graph([(0,1),(1,2),(1,5),(5,4),(2,4),(2,3),(4,3),(3,6)])
+ >>> cbc = nx.communicability_betweenness_centrality(G)
+ """
+ import scipy
+ import scipy.linalg
+ nodelist = G.nodes() # ordering of nodes in matrix
+ n = len(nodelist)
+ A = nx.to_numpy_matrix(G,nodelist)
+ # convert to 0-1 matrix
+ A[A!=0.0] = 1
+ expA = scipy.linalg.expm(A)
+ mapping = dict(zip(nodelist,range(n)))
+ sc = {}
+ for v in G:
+ # remove row and col of node v
+ i = mapping[v]
+ row = A[i,:].copy()
+ col = A[:,i].copy()
+ A[i,:] = 0
+ A[:,i] = 0
+ B = (expA - scipy.linalg.expm(A)) / expA
+ # sum with row/col of node v and diag set to zero
+ B[i,:] = 0
+ B[:,i] = 0
+ B -= scipy.diag(scipy.diag(B))
+ sc[v] = float(B.sum())
+ # put row and col back
+ A[i,:] = row
+ A[:,i] = col
+ # rescaling
+ sc = _rescale(sc,normalized=normalized)
+ return sc
+
+def _rescale(sc,normalized):
+    # helper to rescale communicability betweenness centrality
+    if normalized is True:
+        order=len(sc)
+        if order <=2:
+            scale=None
+        else:
+            scale=1.0/((order-1.0)**2-(order-1.0))
+    else:
+        scale=None # no rescaling when normalized is False
+    if scale is not None:
+        for v in sc:
+            sc[v] *= scale
+    return sc
+
+
+@require('numpy','scipy')
+@not_implemented_for('directed')
+@not_implemented_for('multigraph')
+def communicability(G):
+ r"""Return communicability between all pairs of nodes in G.
+
+    The communicability between a pair of nodes u and v in G is the sum of
+    walks of different lengths starting at node u and ending at node v.
+
+ Parameters
+ ----------
+ G: graph
+
+ Returns
+ -------
+ comm: dictionary of dictionaries
+ Dictionary of dictionaries keyed by nodes with communicability
+ as the value.
+
+ Raises
+ ------
+ NetworkXError
+ If the graph is not undirected and simple.
+
+ See Also
+ --------
+ communicability_centrality_exp:
+ Communicability centrality for each node of G using matrix exponential.
+ communicability_centrality:
+ Communicability centrality for each node in G using spectral
+ decomposition.
+    communicability_exp:
+        Communicability between all pairs of nodes in G using the
+        matrix exponential.
+
+ Notes
+ -----
+ This algorithm uses a spectral decomposition of the adjacency matrix.
+ Let G=(V,E) be a simple undirected graph. Using the connection between
+ the powers of the adjacency matrix and the number of walks in the graph,
+ the communicability between nodes `u` and `v` based on the graph spectrum
+ is [1]_
+
+ .. math::
+ C(u,v)=\sum_{j=1}^{n}\phi_{j}(u)\phi_{j}(v)e^{\lambda_{j}},
+
+ where `\phi_{j}(u)` is the `u\rm{th}` element of the `j\rm{th}` orthonormal
+ eigenvector of the adjacency matrix associated with the eigenvalue
+ `\lambda_{j}`.
+
+ References
+ ----------
+ .. [1] Ernesto Estrada, Naomichi Hatano,
+ "Communicability in complex networks",
+ Phys. Rev. E 77, 036111 (2008).
+ http://arxiv.org/abs/0707.0756
+
+ Examples
+ --------
+ >>> G = nx.Graph([(0,1),(1,2),(1,5),(5,4),(2,4),(2,3),(4,3),(3,6)])
+ >>> c = nx.communicability(G)
+ """
+ import numpy
+ import scipy.linalg
+ nodelist = G.nodes() # ordering of nodes in matrix
+ A = nx.to_numpy_matrix(G,nodelist)
+ # convert to 0-1 matrix
+ A[A!=0.0] = 1
+ w,vec = numpy.linalg.eigh(A)
+ expw = numpy.exp(w)
+ mapping = dict(zip(nodelist,range(len(nodelist))))
+ sc={}
+ # computing communicabilities
+ for u in G:
+ sc[u]={}
+ for v in G:
+ s = 0
+ p = mapping[u]
+ q = mapping[v]
+ for j in range(len(nodelist)):
+ s += vec[:,j][p,0]*vec[:,j][q,0]*expw[j]
+ sc[u][v] = float(s)
+ return sc
+
+@require('scipy')
+@not_implemented_for('directed')
+@not_implemented_for('multigraph')
+def communicability_exp(G):
+ r"""Return communicability between all pairs of nodes in G.
+
+    The communicability between a pair of nodes (u,v) in G is the sum of
+    walks of different lengths starting at node u and ending at node v.
+
+ Parameters
+ ----------
+ G: graph
+
+ Returns
+ -------
+ comm: dictionary of dictionaries
+ Dictionary of dictionaries keyed by nodes with communicability
+ as the value.
+
+ Raises
+ ------
+ NetworkXError
+ If the graph is not undirected and simple.
+
+ See Also
+ --------
+ communicability_centrality_exp:
+ Communicability centrality for each node of G using matrix exponential.
+ communicability_centrality:
+ Communicability centrality for each node in G using spectral
+ decomposition.
+    communicability:
+       Communicability between all pairs of nodes in G using spectral
+       decomposition.
+
+ Notes
+ -----
+ This algorithm uses matrix exponentiation of the adjacency matrix.
+
+ Let G=(V,E) be a simple undirected graph. Using the connection between
+ the powers of the adjacency matrix and the number of walks in the graph,
+ the communicability between nodes u and v is [1]_,
+
+ .. math::
+ C(u,v) = (e^A)_{uv},
+
+ where `A` is the adjacency matrix of G.
+
+ References
+ ----------
+ .. [1] Ernesto Estrada, Naomichi Hatano,
+ "Communicability in complex networks",
+ Phys. Rev. E 77, 036111 (2008).
+ http://arxiv.org/abs/0707.0756
+
+ Examples
+ --------
+ >>> G = nx.Graph([(0,1),(1,2),(1,5),(5,4),(2,4),(2,3),(4,3),(3,6)])
+ >>> c = nx.communicability_exp(G)
+ """
+ import scipy.linalg
+ nodelist = G.nodes() # ordering of nodes in matrix
+ A = nx.to_numpy_matrix(G,nodelist)
+ # convert to 0-1 matrix
+ A[A!=0.0] = 1
+ # communicability matrix
+ expA = scipy.linalg.expm(A)
+ mapping = dict(zip(nodelist,range(len(nodelist))))
+ sc = {}
+ for u in G:
+ sc[u]={}
+ for v in G:
+ sc[u][v] = float(expA[mapping[u],mapping[v]])
+ return sc
+
+@require('numpy')
+def estrada_index(G):
+ r"""Return the Estrada index of a the graph G.
+
+ Parameters
+ ----------
+ G: graph
+
+ Returns
+ -------
+ estrada index: float
+
+ Raises
+ ------
+ NetworkXError
+ If the graph is not undirected and simple.
+
+    See also
+    --------
+    communicability_centrality
+
+ Notes
+ -----
+ Let `G=(V,E)` be a simple undirected graph with `n` nodes and let
+ `\lambda_{1}\leq\lambda_{2}\leq\cdots\lambda_{n}`
+    be a non-decreasing ordering of the eigenvalues of its adjacency
+ matrix `A`. The Estrada index is
+
+ .. math::
+ EE(G)=\sum_{j=1}^n e^{\lambda _j}.
+
+ References
+ ----------
+ .. [1] E. Estrada, Characterization of 3D molecular structure,
+ Chem. Phys. Lett. 319, 713 (2000).
+
+ Examples
+ --------
+ >>> G=nx.Graph([(0,1),(1,2),(1,5),(5,4),(2,4),(2,3),(4,3),(3,6)])
+ >>> ei=nx.estrada_index(G)
+ """
+ return sum(communicability_centrality(G).values())
+
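+# A consistency sketch (hedged; assumes SciPy and the doctest graph G and
+# index ei above): since EE(G) is the sum of e^{lambda_j}, it should equal
+# the trace of the matrix exponential of the adjacency matrix.
+#
+#     >>> import scipy.linalg
+#     >>> A = nx.to_numpy_matrix(G)
+#     >>> abs(ei - float(scipy.linalg.expm(A).trace())) < 1e-8
+#     True
+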
+# fixture for nose tests
+def setup_module(module):
+ from nose import SkipTest
+    try:
+        import numpy
+    except ImportError:
+        raise SkipTest("NumPy not available")
+    try:
+        import scipy
+    except ImportError:
+        raise SkipTest("SciPy not available")
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/current_flow_betweenness.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/current_flow_betweenness.py
new file mode 100644
index 0000000..23cb5bf
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/current_flow_betweenness.py
@@ -0,0 +1,361 @@
+"""
+Current-flow betweenness centrality measures.
+"""
+# Copyright (C) 2010-2012 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+import random
+import networkx as nx
+from networkx.algorithms.centrality.flow_matrix import *
+__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
+
+__all__ = ['current_flow_betweenness_centrality',
+ 'approximate_current_flow_betweenness_centrality',
+ 'edge_current_flow_betweenness_centrality']
+
+
+def approximate_current_flow_betweenness_centrality(G, normalized=True,
+ weight='weight',
+ dtype=float, solver='full',
+ epsilon=0.5, kmax=10000):
+ r"""Compute the approximate current-flow betweenness centrality for nodes.
+
+ Approximates the current-flow betweenness centrality within absolute
+ error of epsilon with high probability [1]_.
+
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph
+
+ normalized : bool, optional (default=True)
+ If True the betweenness values are normalized by 2/[(n-1)(n-2)] where
+ n is the number of nodes in G.
+
+ weight : string or None, optional (default='weight')
+ Key for edge data used as the edge weight.
+ If None, then use 1 as each edge weight.
+
+ dtype: data type (float)
+ Default data type for internal matrices.
+ Set to np.float32 for lower memory consumption.
+
+    solver: string (default='full')
+ Type of linear solver to use for computing the flow matrix.
+ Options are "full" (uses most memory), "lu" (recommended), and
+ "cg" (uses least memory).
+
+ epsilon: float
+ Absolute error tolerance.
+
+ kmax: int
+ Maximum number of sample node pairs to use for approximation.
+
+ Returns
+ -------
+ nodes : dictionary
+ Dictionary of nodes with betweenness centrality as the value.
+
+ See Also
+ --------
+ current_flow_betweenness_centrality
+
+ Notes
+ -----
+    The running time is `O((1/\epsilon^2) m \sqrt{k} \log n)`
+    and the space required is `O(m)` for `n` nodes and `m` edges.
+
+ If the edges have a 'weight' attribute they will be used as
+ weights in this algorithm. Unspecified weights are set to 1.
+
+ References
+ ----------
+ .. [1] Centrality Measures Based on Current Flow.
+ Ulrik Brandes and Daniel Fleischer,
+ Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
+ LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
+ http://www.inf.uni-konstanz.de/algo/publications/bf-cmbcf-05.pdf
+ """
+ from networkx.utils import reverse_cuthill_mckee_ordering
+    try:
+        import numpy as np
+    except ImportError:
+        raise ImportError('approximate_current_flow_betweenness_centrality '
+                          'requires NumPy: http://scipy.org/')
+    try:
+        from scipy import sparse
+        from scipy.sparse import linalg
+    except ImportError:
+        raise ImportError('approximate_current_flow_betweenness_centrality '
+                          'requires SciPy: http://scipy.org/')
+    if G.is_directed():
+        raise nx.NetworkXError('approximate_current_flow_betweenness_centrality() '
+                               'not defined for digraphs.')
+ if not nx.is_connected(G):
+ raise nx.NetworkXError("Graph not connected.")
+ solvername={"full" :FullInverseLaplacian,
+ "lu": SuperLUInverseLaplacian,
+ "cg": CGInverseLaplacian}
+ n = G.number_of_nodes()
+ ordering = list(reverse_cuthill_mckee_ordering(G))
+ # make a copy with integer labels according to rcm ordering
+ # this could be done without a copy if we really wanted to
+ H = nx.relabel_nodes(G,dict(zip(ordering,range(n))))
+ L = laplacian_sparse_matrix(H, nodelist=range(n), weight=weight,
+ dtype=dtype, format='csc')
+ C = solvername[solver](L, dtype=dtype) # initialize solver
+ betweenness = dict.fromkeys(H,0.0)
+ nb = (n-1.0)*(n-2.0) # normalization factor
+ cstar = n*(n-1)/nb
+ l = 1 # parameter in approximation, adjustable
+ k = l*int(np.ceil((cstar/epsilon)**2*np.log(n)))
+    if k > kmax:
+        raise nx.NetworkXError('Number of sample node pairs k > kmax (%d > %d): '
+                               'increase kmax or epsilon.' % (k, kmax))
+ cstar2k = cstar/(2*k)
+ for i in range(k):
+ s,t = random.sample(range(n),2)
+ b = np.zeros(n, dtype=dtype)
+ b[s] = 1
+ b[t] = -1
+ p = C.solve(b)
+ for v in H:
+ if v==s or v==t:
+ continue
+ for nbr in H[v]:
+ w = H[v][nbr].get(weight,1.0)
+ betweenness[v] += w*np.abs(p[v]-p[nbr])*cstar2k
+ if normalized:
+ factor = 1.0
+ else:
+ factor = nb/2.0
+ # remap to original node names and "unnormalize" if required
+ return dict((ordering[k],float(v*factor)) for k,v in betweenness.items())
+
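+# A hedged usage sketch (assumes networkx as nx plus NumPy/SciPy): the
+# estimate is randomized, so it should only be compared to the exact values
+# within the epsilon tolerance, and only with high probability.
+#
+#     >>> import networkx as nx
+#     >>> G = nx.grid_2d_graph(4, 4)
+#     >>> approx = nx.approximate_current_flow_betweenness_centrality(
+#     ...     G, epsilon=0.2)
+#     >>> exact = nx.current_flow_betweenness_centrality(G)
+#     >>> all(abs(approx[v] - exact[v]) < 0.2 for v in G)   # holds w.h.p.
+#     True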
+
+def current_flow_betweenness_centrality(G, normalized=True, weight='weight',
+ dtype=float, solver='full'):
+ r"""Compute current-flow betweenness centrality for nodes.
+
+ Current-flow betweenness centrality uses an electrical current
+ model for information spreading in contrast to betweenness
+ centrality which uses shortest paths.
+
+ Current-flow betweenness centrality is also known as
+ random-walk betweenness centrality [2]_.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph
+
+ normalized : bool, optional (default=True)
+ If True the betweenness values are normalized by 2/[(n-1)(n-2)] where
+ n is the number of nodes in G.
+
+ weight : string or None, optional (default='weight')
+ Key for edge data used as the edge weight.
+ If None, then use 1 as each edge weight.
+
+ dtype: data type (float)
+ Default data type for internal matrices.
+ Set to np.float32 for lower memory consumption.
+
+    solver: string (default='full')
+ Type of linear solver to use for computing the flow matrix.
+ Options are "full" (uses most memory), "lu" (recommended), and
+ "cg" (uses least memory).
+
+ Returns
+ -------
+ nodes : dictionary
+ Dictionary of nodes with betweenness centrality as the value.
+
+ See Also
+ --------
+ approximate_current_flow_betweenness_centrality
+ betweenness_centrality
+ edge_betweenness_centrality
+ edge_current_flow_betweenness_centrality
+
+ Notes
+ -----
+ Current-flow betweenness can be computed in `O(I(n-1)+mn \log n)`
+ time [1]_, where `I(n-1)` is the time needed to compute the
+ inverse Laplacian. For a full matrix this is `O(n^3)` but using
+    sparse methods you can achieve `O(nm\sqrt{k})` where `k` is the
+    Laplacian matrix condition number.
+
+    The space required is `O(nw)` where `w` is the width of the sparse
+    Laplacian matrix. Worst case is `w=n` for `O(n^2)`.
+
+ If the edges have a 'weight' attribute they will be used as
+ weights in this algorithm. Unspecified weights are set to 1.
+
+ References
+ ----------
+ .. [1] Centrality Measures Based on Current Flow.
+ Ulrik Brandes and Daniel Fleischer,
+ Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
+ LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
+ http://www.inf.uni-konstanz.de/algo/publications/bf-cmbcf-05.pdf
+
+ .. [2] A measure of betweenness centrality based on random walks,
+ M. E. J. Newman, Social Networks 27, 39-54 (2005).
+ """
+ from networkx.utils import reverse_cuthill_mckee_ordering
+    try:
+        import numpy as np
+    except ImportError:
+        raise ImportError('current_flow_betweenness_centrality requires '
+                          'NumPy: http://scipy.org/')
+    try:
+        import scipy
+    except ImportError:
+        raise ImportError('current_flow_betweenness_centrality requires '
+                          'SciPy: http://scipy.org/')
+    if G.is_directed():
+        raise nx.NetworkXError('current_flow_betweenness_centrality() '
+                               'not defined for digraphs.')
+ if not nx.is_connected(G):
+ raise nx.NetworkXError("Graph not connected.")
+ n = G.number_of_nodes()
+ ordering = list(reverse_cuthill_mckee_ordering(G))
+ # make a copy with integer labels according to rcm ordering
+ # this could be done without a copy if we really wanted to
+ H = nx.relabel_nodes(G,dict(zip(ordering,range(n))))
+ betweenness = dict.fromkeys(H,0.0) # b[v]=0 for v in H
+ for row,(s,t) in flow_matrix_row(H, weight=weight, dtype=dtype,
+ solver=solver):
+ pos = dict(zip(row.argsort()[::-1],range(n)))
+ for i in range(n):
+ betweenness[s] += (i-pos[i])*row[i]
+ betweenness[t] += (n-i-1-pos[i])*row[i]
+ if normalized:
+ nb = (n-1.0)*(n-2.0) # normalization factor
+ else:
+ nb = 2.0
+ for i,v in enumerate(H): # map integers to nodes
+ betweenness[v] = float((betweenness[v]-i)*2.0/nb)
+ return dict((ordering[k],v) for k,v in betweenness.items())
+
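+# A hedged usage sketch (assumes networkx as nx plus NumPy/SciPy): on a
+# tree all current follows the unique path between each pair, so the
+# current-flow values should coincide with shortest-path betweenness.
+#
+#     >>> import networkx as nx
+#     >>> T = nx.path_graph(5)
+#     >>> cf = nx.current_flow_betweenness_centrality(T)
+#     >>> sp = nx.betweenness_centrality(T)
+#     >>> all(abs(cf[v] - sp[v]) < 1e-6 for v in T)
+#     True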
+
+def edge_current_flow_betweenness_centrality(G, normalized=True,
+ weight='weight',
+ dtype=float, solver='full'):
+ """Compute current-flow betweenness centrality for edges.
+
+ Current-flow betweenness centrality uses an electrical current
+ model for information spreading in contrast to betweenness
+ centrality which uses shortest paths.
+
+ Current-flow betweenness centrality is also known as
+ random-walk betweenness centrality [2]_.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph
+
+ normalized : bool, optional (default=True)
+ If True the betweenness values are normalized by 2/[(n-1)(n-2)] where
+ n is the number of nodes in G.
+
+ weight : string or None, optional (default='weight')
+ Key for edge data used as the edge weight.
+ If None, then use 1 as each edge weight.
+
+ dtype: data type (float)
+ Default data type for internal matrices.
+ Set to np.float32 for lower memory consumption.
+
+    solver: string (default='full')
+ Type of linear solver to use for computing the flow matrix.
+ Options are "full" (uses most memory), "lu" (recommended), and
+ "cg" (uses least memory).
+
+ Returns
+ -------
+ nodes : dictionary
+ Dictionary of edge tuples with betweenness centrality as the value.
+
+ See Also
+ --------
+ betweenness_centrality
+ edge_betweenness_centrality
+ current_flow_betweenness_centrality
+
+ Notes
+ -----
+ Current-flow betweenness can be computed in `O(I(n-1)+mn \log n)`
+ time [1]_, where `I(n-1)` is the time needed to compute the
+ inverse Laplacian. For a full matrix this is `O(n^3)` but using
+    sparse methods you can achieve `O(nm\sqrt{k})` where `k` is the
+    Laplacian matrix condition number.
+
+    The space required is `O(nw)` where `w` is the width of the sparse
+    Laplacian matrix. Worst case is `w=n` for `O(n^2)`.
+
+ If the edges have a 'weight' attribute they will be used as
+ weights in this algorithm. Unspecified weights are set to 1.
+
+ References
+ ----------
+ .. [1] Centrality Measures Based on Current Flow.
+ Ulrik Brandes and Daniel Fleischer,
+ Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
+ LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
+ http://www.inf.uni-konstanz.de/algo/publications/bf-cmbcf-05.pdf
+
+ .. [2] A measure of betweenness centrality based on random walks,
+ M. E. J. Newman, Social Networks 27, 39-54 (2005).
+ """
+ from networkx.utils import reverse_cuthill_mckee_ordering
+    try:
+        import numpy as np
+    except ImportError:
+        raise ImportError('edge_current_flow_betweenness_centrality requires '
+                          'NumPy: http://scipy.org/')
+    try:
+        import scipy
+    except ImportError:
+        raise ImportError('edge_current_flow_betweenness_centrality requires '
+                          'SciPy: http://scipy.org/')
+    if G.is_directed():
+        raise nx.NetworkXError('edge_current_flow_betweenness_centrality '
+                               'not defined for digraphs.')
+ if not nx.is_connected(G):
+ raise nx.NetworkXError("Graph not connected.")
+ n = G.number_of_nodes()
+ ordering = list(reverse_cuthill_mckee_ordering(G))
+ # make a copy with integer labels according to rcm ordering
+ # this could be done without a copy if we really wanted to
+ H = nx.relabel_nodes(G,dict(zip(ordering,range(n))))
+    betweenness = dict.fromkeys(H.edges(), 0.0)
+ if normalized:
+ nb=(n-1.0)*(n-2.0) # normalization factor
+ else:
+ nb=2.0
+ for row,(e) in flow_matrix_row(H, weight=weight, dtype=dtype,
+ solver=solver):
+ pos=dict(zip(row.argsort()[::-1],range(1,n+1)))
+ for i in range(n):
+ betweenness[e]+=(i+1-pos[i])*row[i]
+ betweenness[e]+=(n-i-pos[i])*row[i]
+ betweenness[e]/=nb
+ return dict(((ordering[s],ordering[t]),float(v))
+ for (s,t),v in betweenness.items())
+
+
+# fixture for nose tests
+def setup_module(module):
+ from nose import SkipTest
+    try:
+        import numpy
+        import scipy
+    except ImportError:
+        raise SkipTest("NumPy/SciPy not available")
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/current_flow_betweenness_subset.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/current_flow_betweenness_subset.py
new file mode 100644
index 0000000..7902f30
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/current_flow_betweenness_subset.py
@@ -0,0 +1,263 @@
+"""
+Current-flow betweenness centrality measures for subsets of nodes.
+"""
+# Copyright (C) 2010-2011 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
+
+__all__ = ['current_flow_betweenness_centrality_subset',
+ 'edge_current_flow_betweenness_centrality_subset']
+
+import itertools
+import networkx as nx
+from networkx.algorithms.centrality.flow_matrix import *
+
+
+def current_flow_betweenness_centrality_subset(G,sources,targets,
+ normalized=True,
+ weight='weight',
+ dtype=float, solver='lu'):
+ r"""Compute current-flow betweenness centrality for subsets of nodes.
+
+ Current-flow betweenness centrality uses an electrical current
+ model for information spreading in contrast to betweenness
+ centrality which uses shortest paths.
+
+ Current-flow betweenness centrality is also known as
+ random-walk betweenness centrality [2]_.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph
+
+ sources: list of nodes
+ Nodes to use as sources for current
+
+ targets: list of nodes
+ Nodes to use as sinks for current
+
+ normalized : bool, optional (default=True)
+      If True the betweenness values are normalized by dividing by
+      `(n-1)(n-2)` where n is the number of nodes in G.
+
+ weight : string or None, optional (default='weight')
+ Key for edge data used as the edge weight.
+ If None, then use 1 as each edge weight.
+
+ dtype: data type (float)
+ Default data type for internal matrices.
+ Set to np.float32 for lower memory consumption.
+
+ solver: string (default='lu')
+ Type of linear solver to use for computing the flow matrix.
+ Options are "full" (uses most memory), "lu" (recommended), and
+ "cg" (uses least memory).
+
+ Returns
+ -------
+ nodes : dictionary
+ Dictionary of nodes with betweenness centrality as the value.
+
+ See Also
+ --------
+ approximate_current_flow_betweenness_centrality
+ betweenness_centrality
+ edge_betweenness_centrality
+ edge_current_flow_betweenness_centrality
+
+ Notes
+ -----
+ Current-flow betweenness can be computed in `O(I(n-1)+mn \log n)`
+ time [1]_, where `I(n-1)` is the time needed to compute the
+ inverse Laplacian. For a full matrix this is `O(n^3)` but using
+    sparse methods you can achieve `O(nm\sqrt{k})` where `k` is the
+    Laplacian matrix condition number.
+
+    The space required is `O(nw)` where `w` is the width of the sparse
+    Laplacian matrix. Worst case is `w=n` for `O(n^2)`.
+
+ If the edges have a 'weight' attribute they will be used as
+ weights in this algorithm. Unspecified weights are set to 1.
+
+ References
+ ----------
+ .. [1] Centrality Measures Based on Current Flow.
+ Ulrik Brandes and Daniel Fleischer,
+ Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
+ LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
+ http://www.inf.uni-konstanz.de/algo/publications/bf-cmbcf-05.pdf
+
+ .. [2] A measure of betweenness centrality based on random walks,
+ M. E. J. Newman, Social Networks 27, 39-54 (2005).
+ """
+ from networkx.utils import reverse_cuthill_mckee_ordering
+    try:
+        import numpy as np
+    except ImportError:
+        raise ImportError('current_flow_betweenness_centrality_subset requires '
+                          'NumPy: http://scipy.org/')
+    try:
+        import scipy
+    except ImportError:
+        raise ImportError('current_flow_betweenness_centrality_subset requires '
+                          'SciPy: http://scipy.org/')
+    if G.is_directed():
+        raise nx.NetworkXError('current_flow_betweenness_centrality_subset() '
+                               'not defined for digraphs.')
+ if not nx.is_connected(G):
+ raise nx.NetworkXError("Graph not connected.")
+ n = G.number_of_nodes()
+ ordering = list(reverse_cuthill_mckee_ordering(G))
+ # make a copy with integer labels according to rcm ordering
+ # this could be done without a copy if we really wanted to
+ mapping=dict(zip(ordering,range(n)))
+ H = nx.relabel_nodes(G,mapping)
+ betweenness = dict.fromkeys(H,0.0) # b[v]=0 for v in H
+ for row,(s,t) in flow_matrix_row(H, weight=weight, dtype=dtype,
+ solver=solver):
+ for ss in sources:
+ i=mapping[ss]
+ for tt in targets:
+ j=mapping[tt]
+ betweenness[s]+=0.5*np.abs(row[i]-row[j])
+ betweenness[t]+=0.5*np.abs(row[i]-row[j])
+ if normalized:
+ nb=(n-1.0)*(n-2.0) # normalization factor
+ else:
+ nb=2.0
+ for v in H:
+ betweenness[v]=betweenness[v]/nb+1.0/(2-n)
+ return dict((ordering[k],v) for k,v in betweenness.items())
+
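+# A minimal usage sketch (hedged; assumes networkx as nx plus NumPy/SciPy):
+# injecting current only at node 0 and extracting it only at node 2 of a
+# 4-cycle; by symmetry the two intermediate nodes should score equally.
+#
+#     >>> import networkx as nx
+#     >>> G = nx.cycle_graph(4)
+#     >>> b = nx.current_flow_betweenness_centrality_subset(
+#     ...     G, sources=[0], targets=[2])
+#     >>> abs(b[1] - b[3]) < 1e-12
+#     True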
+
+def edge_current_flow_betweenness_centrality_subset(G, sources, targets,
+ normalized=True,
+ weight='weight',
+ dtype=float, solver='lu'):
+ """Compute current-flow betweenness centrality for edges using subsets
+ of nodes.
+
+ Current-flow betweenness centrality uses an electrical current
+ model for information spreading in contrast to betweenness
+ centrality which uses shortest paths.
+
+ Current-flow betweenness centrality is also known as
+ random-walk betweenness centrality [2]_.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph
+
+ sources: list of nodes
+ Nodes to use as sources for current
+
+ targets: list of nodes
+ Nodes to use as sinks for current
+
+ normalized : bool, optional (default=True)
+      If True the betweenness values are normalized by dividing by
+      `(n-1)(n-2)` where n is the number of nodes in G.
+
+ weight : string or None, optional (default='weight')
+ Key for edge data used as the edge weight.
+ If None, then use 1 as each edge weight.
+
+ dtype: data type (float)
+ Default data type for internal matrices.
+ Set to np.float32 for lower memory consumption.
+
+ solver: string (default='lu')
+ Type of linear solver to use for computing the flow matrix.
+ Options are "full" (uses most memory), "lu" (recommended), and
+ "cg" (uses least memory).
+
+ Returns
+ -------
+ nodes : dictionary
+ Dictionary of edge tuples with betweenness centrality as the value.
+
+ See Also
+ --------
+ betweenness_centrality
+ edge_betweenness_centrality
+ current_flow_betweenness_centrality
+
+ Notes
+ -----
+ Current-flow betweenness can be computed in `O(I(n-1)+mn \log n)`
+ time [1]_, where `I(n-1)` is the time needed to compute the
+ inverse Laplacian. For a full matrix this is `O(n^3)` but using
+    sparse methods you can achieve `O(nm\sqrt{k})` where `k` is the
+    Laplacian matrix condition number.
+
+    The space required is `O(nw)` where `w` is the width of the sparse
+    Laplacian matrix. Worst case is `w=n` for `O(n^2)`.
+
+ If the edges have a 'weight' attribute they will be used as
+ weights in this algorithm. Unspecified weights are set to 1.
+
+ References
+ ----------
+ .. [1] Centrality Measures Based on Current Flow.
+ Ulrik Brandes and Daniel Fleischer,
+ Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
+ LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
+ http://www.inf.uni-konstanz.de/algo/publications/bf-cmbcf-05.pdf
+
+ .. [2] A measure of betweenness centrality based on random walks,
+ M. E. J. Newman, Social Networks 27, 39-54 (2005).
+ """
+ from networkx.utils import reverse_cuthill_mckee_ordering
+    try:
+        import numpy as np
+    except ImportError:
+        raise ImportError('edge_current_flow_betweenness_centrality_subset '
+                          'requires NumPy: http://scipy.org/')
+    try:
+        import scipy
+    except ImportError:
+        raise ImportError('edge_current_flow_betweenness_centrality_subset '
+                          'requires SciPy: http://scipy.org/')
+    if G.is_directed():
+        raise nx.NetworkXError('edge_current_flow_betweenness_centrality_subset '
+                               'not defined for digraphs.')
+ if not nx.is_connected(G):
+ raise nx.NetworkXError("Graph not connected.")
+ n = G.number_of_nodes()
+ ordering = list(reverse_cuthill_mckee_ordering(G))
+ # make a copy with integer labels according to rcm ordering
+ # this could be done without a copy if we really wanted to
+ mapping=dict(zip(ordering,range(n)))
+ H = nx.relabel_nodes(G,mapping)
+    betweenness = dict.fromkeys(H.edges(), 0.0)
+ if normalized:
+ nb=(n-1.0)*(n-2.0) # normalization factor
+ else:
+ nb=2.0
+ for row,(e) in flow_matrix_row(H, weight=weight, dtype=dtype,
+ solver=solver):
+ for ss in sources:
+ i=mapping[ss]
+ for tt in targets:
+ j=mapping[tt]
+ betweenness[e]+=0.5*np.abs(row[i]-row[j])
+ betweenness[e]/=nb
+ return dict(((ordering[s],ordering[t]),v)
+ for (s,t),v in betweenness.items())
+
+
+# fixture for nose tests
+def setup_module(module):
+ from nose import SkipTest
+    try:
+        import numpy
+        import scipy
+    except ImportError:
+        raise SkipTest("NumPy/SciPy not available")
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/current_flow_closeness.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/current_flow_closeness.py
new file mode 100644
index 0000000..1a484b2
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/current_flow_closeness.py
@@ -0,0 +1,127 @@
+"""
+Current-flow closeness centrality measures.
+
+"""
+# Copyright (C) 2010 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+__author__ = """Aric Hagberg <aric.hagberg@gmail.com>"""
+
+__all__ = ['current_flow_closeness_centrality','information_centrality']
+
+import networkx as nx
+from networkx.algorithms.centrality.flow_matrix import *
+
+
+def current_flow_closeness_centrality(G, normalized=True, weight='weight',
+ dtype=float, solver='lu'):
+ """Compute current-flow closeness centrality for nodes.
+
+ A variant of closeness centrality based on effective
+ resistance between nodes in a network. This metric
+ is also known as information centrality.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph
+
+    normalized : bool, optional
+      If True the values are normalized by 1/(n-1) where n is the
+      number of nodes in G.
+
+    weight : string or None, optional (default='weight')
+      Key for edge data used as the edge weight.
+      If None, then use 1 as each edge weight.
+
+ dtype: data type (float)
+ Default data type for internal matrices.
+ Set to np.float32 for lower memory consumption.
+
+ solver: string (default='lu')
+ Type of linear solver to use for computing the flow matrix.
+ Options are "full" (uses most memory), "lu" (recommended), and
+ "cg" (uses least memory).
+
+ Returns
+ -------
+ nodes : dictionary
+ Dictionary of nodes with current flow closeness centrality as the value.
+
+ See Also
+ --------
+ closeness_centrality
+
+ Notes
+ -----
+ The algorithm is from Brandes [1]_.
+
+ See also [2]_ for the original definition of information centrality.
+
+ References
+ ----------
+ .. [1] Ulrik Brandes and Daniel Fleischer,
+ Centrality Measures Based on Current Flow.
+ Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
+ LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
+ http://www.inf.uni-konstanz.de/algo/publications/bf-cmbcf-05.pdf
+
+ .. [2] Stephenson, K. and Zelen, M.
+ Rethinking centrality: Methods and examples.
+ Social Networks. Volume 11, Issue 1, March 1989, pp. 1-37
+ http://dx.doi.org/10.1016/0378-8733(89)90016-6
+ """
+ from networkx.utils import reverse_cuthill_mckee_ordering
+    try:
+        import numpy as np
+    except ImportError:
+        raise ImportError('current_flow_closeness_centrality requires '
+                          'NumPy: http://scipy.org/')
+    try:
+        import scipy
+    except ImportError:
+        raise ImportError('current_flow_closeness_centrality requires '
+                          'SciPy: http://scipy.org/')
+    if G.is_directed():
+        raise nx.NetworkXError('current_flow_closeness_centrality() '
+                               'not defined for digraphs.')
+ if not nx.is_connected(G):
+ raise nx.NetworkXError("Graph not connected.")
+ solvername={"full" :FullInverseLaplacian,
+ "lu": SuperLUInverseLaplacian,
+ "cg": CGInverseLaplacian}
+ n = G.number_of_nodes()
+ ordering = list(reverse_cuthill_mckee_ordering(G))
+ # make a copy with integer labels according to rcm ordering
+ # this could be done without a copy if we really wanted to
+ H = nx.relabel_nodes(G,dict(zip(ordering,range(n))))
+ betweenness = dict.fromkeys(H,0.0) # b[v]=0 for v in H
+ L = laplacian_sparse_matrix(H, nodelist=range(n), weight=weight,
+ dtype=dtype, format='csc')
+ C2 = solvername[solver](L, width=1, dtype=dtype) # initialize solver
+ for v in H:
+ col=C2.get_row(v)
+ for w in H:
+ betweenness[v]+=col[v]-2*col[w]
+ betweenness[w]+=col[v]
+
+ if normalized:
+ nb=len(betweenness)-1.0
+ else:
+ nb=1.0
+ for v in H:
+ betweenness[v]=nb/(betweenness[v])
+ return dict((ordering[k],float(v)) for k,v in betweenness.items())
+
+information_centrality=current_flow_closeness_centrality
+
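+# A minimal usage sketch (hedged; assumes networkx as nx plus NumPy/SciPy):
+# the hub of a star graph has the smallest total effective resistance to
+# the other nodes, so it should receive the largest value.
+#
+#     >>> import networkx as nx
+#     >>> cc = nx.current_flow_closeness_centrality(nx.star_graph(3))
+#     >>> cc[0] == max(cc.values())
+#     True
+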
+# fixture for nose tests
+def setup_module(module):
+ from nose import SkipTest
+ try:
+ import numpy
+    except ImportError:
+ raise SkipTest("NumPy not available")
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/degree_alg.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/degree_alg.py
new file mode 100644
index 0000000..c65d967
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/degree_alg.py
@@ -0,0 +1,131 @@
+"""
+Degree centrality measures.
+
+"""
+# Copyright (C) 2004-2010 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+__author__ = "\n".join(['Aric Hagberg (hagberg@lanl.gov)',
+ 'Pieter Swart (swart@lanl.gov)',
+ 'Sasha Gutfraind (ag362@cornell.edu)'])
+
+__all__ = ['degree_centrality',
+ 'in_degree_centrality',
+ 'out_degree_centrality']
+
+import networkx as nx
+
+def degree_centrality(G):
+ """Compute the degree centrality for nodes.
+
+ The degree centrality for a node v is the fraction of nodes it
+ is connected to.
+
+ Parameters
+ ----------
+ G : graph
+ A networkx graph
+
+ Returns
+ -------
+ nodes : dictionary
+ Dictionary of nodes with degree centrality as the value.
+
+ See Also
+ --------
+ betweenness_centrality, load_centrality, eigenvector_centrality
+
+ Notes
+ -----
+ The degree centrality values are normalized by dividing by the maximum
+ possible degree in a simple graph n-1 where n is the number of nodes in G.
+
+ For multigraphs or graphs with self loops the maximum degree might
+ be higher than n-1 and values of degree centrality greater than 1
+ are possible.
+ """
+    s=1.0/(len(G)-1.0)
+    centrality=dict((n,d*s) for n,d in G.degree_iter())
+ return centrality
+
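+# A minimal usage sketch (hedged; assumes networkx is importable as nx):
+# the hub of a star graph is adjacent to all n-1 other nodes, so its
+# degree centrality is exactly 1.0.
+#
+#     >>> import networkx as nx
+#     >>> nx.degree_centrality(nx.star_graph(3))[0]
+#     1.0
+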
+def in_degree_centrality(G):
+ """Compute the in-degree centrality for nodes.
+
+ The in-degree centrality for a node v is the fraction of nodes its
+ incoming edges are connected to.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph
+
+ Returns
+ -------
+ nodes : dictionary
+ Dictionary of nodes with in-degree centrality as values.
+
+ See Also
+ --------
+ degree_centrality, out_degree_centrality
+
+ Notes
+ -----
+ The degree centrality values are normalized by dividing by the maximum
+ possible degree in a simple graph n-1 where n is the number of nodes in G.
+
+ For multigraphs or graphs with self loops the maximum degree might
+ be higher than n-1 and values of degree centrality greater than 1
+ are possible.
+ """
+ if not G.is_directed():
+ raise nx.NetworkXError(\
+ "in_degree_centrality() not defined for undirected graphs.")
+    s=1.0/(len(G)-1.0)
+    centrality=dict((n,d*s) for n,d in G.in_degree_iter())
+ return centrality
+
+
+def out_degree_centrality(G):
+ """Compute the out-degree centrality for nodes.
+
+ The out-degree centrality for a node v is the fraction of nodes its
+ outgoing edges are connected to.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph
+
+ Returns
+ -------
+ nodes : dictionary
+ Dictionary of nodes with out-degree centrality as values.
+
+ See Also
+ --------
+ degree_centrality, in_degree_centrality
+
+ Notes
+ -----
+ The degree centrality values are normalized by dividing by the maximum
+ possible degree in a simple graph n-1 where n is the number of nodes in G.
+
+ For multigraphs or graphs with self loops the maximum degree might
+ be higher than n-1 and values of degree centrality greater than 1
+ are possible.
+ """
+ if not G.is_directed():
+ raise nx.NetworkXError(\
+ "out_degree_centrality() not defined for undirected graphs.")
+    s=1.0/(len(G)-1.0)
+    centrality=dict((n,d*s) for n,d in G.out_degree_iter())
+ return centrality
+
+
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/eigenvector.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/eigenvector.py
new file mode 100644
index 0000000..28e0013
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/eigenvector.py
@@ -0,0 +1,169 @@
+"""
+Eigenvector centrality.
+"""
+# Copyright (C) 2004-2011 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+import networkx as nx
+__author__ = "\n".join(['Aric Hagberg (hagberg@lanl.gov)',
+ 'Pieter Swart (swart@lanl.gov)',
+ 'Sasha Gutfraind (ag362@cornell.edu)'])
+__all__ = ['eigenvector_centrality',
+ 'eigenvector_centrality_numpy']
+
+def eigenvector_centrality(G,max_iter=100,tol=1.0e-6,nstart=None):
+ """Compute the eigenvector centrality for the graph G.
+
+ Uses the power method to find the eigenvector for the
+ largest eigenvalue of the adjacency matrix of G.
+
+ Parameters
+ ----------
+ G : graph
+ A networkx graph
+
+    max_iter : integer, optional
+ Maximum number of iterations in power method.
+
+ tol : float, optional
+ Error tolerance used to check convergence in power method iteration.
+
+ nstart : dictionary, optional
+ Starting value of eigenvector iteration for each node.
+
+ Returns
+ -------
+ nodes : dictionary
+ Dictionary of nodes with eigenvector centrality as the value.
+
+ Examples
+ --------
+ >>> G=nx.path_graph(4)
+ >>> centrality=nx.eigenvector_centrality(G)
+ >>> print(['%s %0.2f'%(node,centrality[node]) for node in centrality])
+ ['0 0.37', '1 0.60', '2 0.60', '3 0.37']
+
+ Notes
+ ------
+ The eigenvector calculation is done by the power iteration method
+ and has no guarantee of convergence. The iteration will stop
+ after max_iter iterations or an error tolerance of
+ number_of_nodes(G)*tol has been reached.
+
+    For directed graphs this is "right" eigenvector centrality. For
+ "left" eigenvector centrality, first reverse the graph with
+ G.reverse().
+
+ See Also
+ --------
+ eigenvector_centrality_numpy
+ pagerank
+ hits
+ """
+ from math import sqrt
+ if type(G) == nx.MultiGraph or type(G) == nx.MultiDiGraph:
+ raise nx.NetworkXException("Not defined for multigraphs.")
+
+ if len(G)==0:
+ raise nx.NetworkXException("Empty graph.")
+
+ if nstart is None:
+ # choose starting vector with entries of 1/len(G)
+ x=dict([(n,1.0/len(G)) for n in G])
+ else:
+ x=nstart
+ # normalize starting vector
+ s=1.0/sum(x.values())
+ for k in x: x[k]*=s
+ nnodes=G.number_of_nodes()
+ # make up to max_iter iterations
+ for i in range(max_iter):
+ xlast=x
+ x=dict.fromkeys(xlast, 0)
+ # do the multiplication y=Ax
+ for n in x:
+ for nbr in G[n]:
+ x[n]+=xlast[nbr]*G[n][nbr].get('weight',1)
+ # normalize vector
+ try:
+ s=1.0/sqrt(sum(v**2 for v in x.values()))
+ # this should never be zero?
+ except ZeroDivisionError:
+ s=1.0
+ for n in x: x[n]*=s
+ # check convergence
+ err=sum([abs(x[n]-xlast[n]) for n in x])
+ if err < nnodes*tol:
+ return x
+
+    raise nx.NetworkXError("eigenvector_centrality(): power iteration "
+                           "failed to converge in %d iterations." % (i + 1))
+
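+# Hedged sketch of the "left" variant mentioned in the Notes above
+# (illustrative 3-cycle; on this symmetric cycle both variants coincide):
+#
+#     G = nx.DiGraph([(0, 1), (1, 2), (2, 0)])
+#     right = eigenvector_centrality(G)
+#     left = eigenvector_centrality(G.reverse())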
+
+def eigenvector_centrality_numpy(G):
+ """Compute the eigenvector centrality for the graph G.
+
+ Parameters
+ ----------
+ G : graph
+ A networkx graph
+
+ Returns
+ -------
+ nodes : dictionary
+ Dictionary of nodes with eigenvector centrality as the value.
+
+ Examples
+ --------
+ >>> G=nx.path_graph(4)
+ >>> centrality=nx.eigenvector_centrality_numpy(G)
+ >>> print(['%s %0.2f'%(node,centrality[node]) for node in centrality])
+ ['0 0.37', '1 0.60', '2 0.60', '3 0.37']
+
+ Notes
+    -----
+ This algorithm uses the NumPy eigenvalue solver.
+
+    For directed graphs this is "right" eigenvector centrality. For
+ "left" eigenvector centrality, first reverse the graph with
+ G.reverse().
+
+ See Also
+ --------
+ eigenvector_centrality
+ pagerank
+ hits
+ """
+ try:
+ import numpy as np
+ except ImportError:
+ raise ImportError('Requires NumPy: http://scipy.org/')
+
+ if type(G) == nx.MultiGraph or type(G) == nx.MultiDiGraph:
+ raise nx.NetworkXException('Not defined for multigraphs.')
+
+ if len(G)==0:
+ raise nx.NetworkXException('Empty graph.')
+
+ A=nx.adj_matrix(G,nodelist=G.nodes())
+ eigenvalues,eigenvectors=np.linalg.eig(A)
+ # eigenvalue indices in reverse sorted order
+ ind=eigenvalues.argsort()[::-1]
+ # eigenvector of largest eigenvalue at ind[0], normalized
+ largest=np.array(eigenvectors[:,ind[0]]).flatten().real
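+    # dividing by sign(sum)*norm flips the vector, if needed, so the
+    # Perron eigenvector is returned with non-negative entries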
+ norm=np.sign(largest.sum())*np.linalg.norm(largest)
+ centrality=dict(zip(G,map(float,largest/norm)))
+ return centrality
+
+
+# fixture for nose tests
+def setup_module(module):
+ from nose import SkipTest
+ try:
+ import numpy
+ import numpy.linalg
+    except ImportError:
+ raise SkipTest("numpy not available")
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/flow_matrix.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/flow_matrix.py
new file mode 100644
index 0000000..d886182
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/flow_matrix.py
@@ -0,0 +1,139 @@
+# Helpers for current-flow betweenness and current-flow closeness
+# Lazy computations for inverse Laplacian and flow-matrix rows.
+import networkx as nx
+
+def flow_matrix_row(G, weight='weight', dtype=float, solver='lu'):
+ # Generate a row of the current-flow matrix
+ import numpy as np
+ from scipy import sparse
+ from scipy.sparse import linalg
+ solvername={"full" :FullInverseLaplacian,
+ "lu": SuperLUInverseLaplacian,
+ "cg": CGInverseLaplacian}
+ n = G.number_of_nodes()
+ L = laplacian_sparse_matrix(G, nodelist=range(n), weight=weight,
+ dtype=dtype, format='csc')
+ C = solvername[solver](L, dtype=dtype) # initialize solver
+ w = C.w # w is the Laplacian matrix width
+ # row-by-row flow matrix
+ for u,v,d in G.edges_iter(data=True):
+ B = np.zeros(w, dtype=dtype)
+ c = d.get(weight,1.0)
+ B[u%w] = c
+ B[v%w] = -c
+ # get only the rows needed in the inverse laplacian
+ # and multiply to get the flow matrix row
+ row = np.dot(B, C.get_rows(u,v))
+ yield row,(u,v)
+
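+# A minimal usage sketch (illustration only; assumes SciPy is installed and,
+# as flow_matrix_row requires, that G has integer node labels 0..n-1):
+#
+#     G = nx.path_graph(4)
+#     for row, (u, v) in flow_matrix_row(G, solver='full'):
+#         print((u, v), row)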
+
+# Class to compute the inverse laplacian only for specified rows
+# Allows computation of the current-flow matrix without storing entire
+# inverse laplacian matrix
+class InverseLaplacian(object):
+ def __init__(self, L, width=None, dtype=None):
+ global np
+ import numpy as np
+ (n,n) = L.shape
+ self.dtype = dtype
+ self.n = n
+ if width is None:
+ self.w = self.width(L)
+ else:
+ self.w = width
+ self.C = np.zeros((self.w,n), dtype=dtype)
+ self.L1 = L[1:,1:]
+ self.init_solver(L)
+
+ def init_solver(self,L):
+ pass
+
+    def solve(self,r):
+        raise NotImplementedError("Implement solver")
+
+    def solve_inverse(self,r):
+        raise NotImplementedError("Implement solver")
+
+
+ def get_rows(self, r1, r2):
+ for r in range(r1, r2+1):
+ self.C[r%self.w, 1:] = self.solve_inverse(r)
+ return self.C
+
+ def get_row(self, r):
+ self.C[r%self.w, 1:] = self.solve_inverse(r)
+ return self.C[r%self.w]
+
+
+ def width(self,L):
+ m=0
+ for i,row in enumerate(L):
+ w=0
+ x,y = np.nonzero(row)
+ if len(y) > 0:
+ v = y-i
+ w=v.max()-v.min()+1
+ m = max(w,m)
+ return m
+
+class FullInverseLaplacian(InverseLaplacian):
+ def init_solver(self,L):
+ self.IL = np.zeros(L.shape, dtype=self.dtype)
+ self.IL[1:,1:] = np.linalg.inv(self.L1.todense())
+
+ def solve(self,rhs):
+ s = np.zeros(rhs.shape, dtype=self.dtype)
+ s = np.dot(self.IL,rhs)
+ return s
+
+ def solve_inverse(self,r):
+ return self.IL[r,1:]
+
+
+class SuperLUInverseLaplacian(InverseLaplacian):
+ def init_solver(self,L):
+ from scipy.sparse import linalg
+ self.lusolve = linalg.factorized(self.L1.tocsc())
+
+ def solve_inverse(self,r):
+ rhs = np.zeros(self.n, dtype=self.dtype)
+ rhs[r]=1
+ return self.lusolve(rhs[1:])
+
+ def solve(self,rhs):
+ s = np.zeros(rhs.shape, dtype=self.dtype)
+ s[1:]=self.lusolve(rhs[1:])
+ return s
+
+
+
+class CGInverseLaplacian(InverseLaplacian):
+ def init_solver(self,L):
+ global linalg
+ from scipy.sparse import linalg
+ ilu= linalg.spilu(self.L1.tocsc())
+ n=self.n-1
+ self.M = linalg.LinearOperator(shape=(n,n), matvec=ilu.solve)
+
+ def solve(self,rhs):
+ s = np.zeros(rhs.shape, dtype=self.dtype)
+ s[1:]=linalg.cg(self.L1, rhs[1:], M=self.M)[0]
+ return s
+
+ def solve_inverse(self,r):
+ rhs = np.zeros(self.n, self.dtype)
+ rhs[r] = 1
+ return linalg.cg(self.L1, rhs[1:], M=self.M)[0]
+
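+# Hedged sketch of driving a solver class directly (laplacian_sparse_matrix
+# is defined just below; flow_matrix_row uses the 'lu' solver by default):
+#
+#     L = laplacian_sparse_matrix(G, nodelist=range(len(G)), format='csc')
+#     C = SuperLUInverseLaplacian(L)
+#     row1 = C.get_row(1)    # row 1 of the inverse Laplacian, solved lazily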
+
+# graph laplacian, sparse version, will move to linalg/laplacianmatrix.py
+def laplacian_sparse_matrix(G, nodelist=None, weight='weight', dtype=None,
+ format='csr'):
+ import numpy as np
+ import scipy.sparse
+ A = nx.to_scipy_sparse_matrix(G, nodelist=nodelist, weight=weight,
+ dtype=dtype, format=format)
+ (n,n) = A.shape
+ data = np.asarray(A.sum(axis=1).T)
+ D = scipy.sparse.spdiags(data,0,n,n, format=format)
+ return D - A
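+
+# Quick sanity check of the construction above (hedged sketch; assumes SciPy):
+#
+#     G = nx.path_graph(3)
+#     L = laplacian_sparse_matrix(G, nodelist=range(3))
+#     # L.todense() -> matrix([[ 1, -1,  0], [-1,  2, -1], [ 0, -1,  1]])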
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/katz.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/katz.py
new file mode 100644
index 0000000..4babe25
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/katz.py
@@ -0,0 +1,296 @@
+"""
+Katz centrality.
+"""
+# Copyright (C) 2004-2013 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+import networkx as nx
+from networkx.utils import *
+__author__ = "\n".join(['Aric Hagberg (hagberg@lanl.gov)',
+ 'Pieter Swart (swart@lanl.gov)',
+ 'Sasha Gutfraind (ag362@cornell.edu)',
+ 'Vincent Gauthier (vgauthier@luxbulb.org)'])
+
+__all__ = ['katz_centrality',
+ 'katz_centrality_numpy']
+
+@not_implemented_for('multigraph')
+def katz_centrality(G, alpha=0.1, beta=1.0,
+ max_iter=1000, tol=1.0e-6, nstart=None, normalized=True):
+ r"""Compute the Katz centrality for the nodes of the graph G.
+
+
+ Katz centrality is related to eigenvalue centrality and PageRank.
+ The Katz centrality for node `i` is
+
+ .. math::
+
+ x_i = \alpha \sum_{j} A_{ij} x_j + \beta,
+
+ where `A` is the adjacency matrix of the graph G with eigenvalues `\lambda`.
+
+ The parameter `\beta` controls the initial centrality and
+
+ .. math::
+
+ \alpha < \frac{1}{\lambda_{max}}.
+
+
+ Katz centrality computes the relative influence of a node within a
+ network by measuring the number of the immediate neighbors (first
+ degree nodes) and also all other nodes in the network that connect
+ to the node under consideration through these immediate neighbors.
+
+ Extra weight can be provided to immediate neighbors through the
+ parameter :math:`\beta`. Connections made with distant neighbors
+ are, however, penalized by an attenuation factor `\alpha` which
+ should be strictly less than the inverse largest eigenvalue of the
+ adjacency matrix in order for the Katz centrality to be computed
+ correctly. More information is provided in [1]_ .
+
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph
+
+ alpha : float
+ Attenuation factor
+
+ beta : scalar or dictionary, optional (default=1.0)
+        Weight attributed to the immediate neighborhood. If not a scalar,
+        the dictionary must have a value for every node.
+
+ max_iter : integer, optional (default=1000)
+ Maximum number of iterations in power method.
+
+ tol : float, optional (default=1.0e-6)
+ Error tolerance used to check convergence in power method iteration.
+
+ nstart : dictionary, optional
+ Starting value of Katz iteration for each node.
+
+ normalized : bool, optional (default=True)
+ If True normalize the resulting values.
+
+ Returns
+ -------
+ nodes : dictionary
+ Dictionary of nodes with Katz centrality as the value.
+
+ Examples
+ --------
+ >>> import math
+ >>> G = nx.path_graph(4)
+ >>> phi = (1+math.sqrt(5))/2.0 # largest eigenvalue of adj matrix
+ >>> centrality = nx.katz_centrality(G,1/phi-0.01)
+ >>> for n,c in sorted(centrality.items()):
+ ... print("%d %0.2f"%(n,c))
+ 0 0.37
+ 1 0.60
+ 2 0.60
+ 3 0.37
+
+ Notes
+ -----
+    This algorithm uses the power method to find the eigenvector
+ corresponding to the largest eigenvalue of the adjacency matrix of G.
+ The constant alpha should be strictly less than the inverse of largest
+ eigenvalue of the adjacency matrix for the algorithm to converge.
+ The iteration will stop after max_iter iterations or an error tolerance of
+ number_of_nodes(G)*tol has been reached.
+
+ When `\alpha = 1/\lambda_{max}` and `\beta=1` Katz centrality is the same as
+ eigenvector centrality.
+
+ References
+ ----------
+ .. [1] M. Newman, Networks: An Introduction. Oxford University Press,
+ USA, 2010, p. 720.
+
+ See Also
+ --------
+ katz_centrality_numpy
+ eigenvector_centrality
+ eigenvector_centrality_numpy
+ pagerank
+ hits
+ """
+ from math import sqrt
+
+ if len(G)==0:
+ return {}
+
+ nnodes=G.number_of_nodes()
+
+ if nstart is None:
+ # choose starting vector with entries of 0
+ x=dict([(n,0) for n in G])
+ else:
+ x=nstart
+
+ try:
+ b = dict.fromkeys(G,float(beta))
+ except (TypeError,ValueError):
+ b = beta
+ if set(beta) != set(G):
+ raise nx.NetworkXError('beta dictionary '
+ 'must have a value for every node')
+
+ # make up to max_iter iterations
+ for i in range(max_iter):
+ xlast=x
+ x=dict.fromkeys(xlast, 0)
+        # do the multiplication y = alpha * Ax + beta
+ for n in x:
+ for nbr in G[n]:
+ x[n] += xlast[nbr] * G[n][nbr].get('weight',1)
+ x[n] = alpha*x[n] + b[n]
+
+ # check convergence
+ err=sum([abs(x[n]-xlast[n]) for n in x])
+ if err < nnodes*tol:
+ if normalized:
+ # normalize vector
+ try:
+ s=1.0/sqrt(sum(v**2 for v in x.values()))
+ # this should never be zero?
+ except ZeroDivisionError:
+ s=1.0
+ else:
+ s = 1
+ for n in x:
+ x[n]*=s
+ return x
+
+    raise nx.NetworkXError('Power iteration failed to converge in '
+                           '%d iterations.' % (i + 1))
+
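+# Hedged sketch: alpha must stay below 1/lambda_max for the iteration to
+# converge (NumPy is used only for this illustration, not by katz_centrality):
+#
+#     import numpy as np
+#     G = nx.path_graph(4)
+#     lmax = max(np.linalg.eigvals(nx.to_numpy_matrix(G)).real)
+#     c = katz_centrality(G, alpha=0.9 / lmax)
+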
+@not_implemented_for('multigraph')
+def katz_centrality_numpy(G, alpha=0.1, beta=1.0, normalized=True):
+ r"""Compute the Katz centrality for the graph G.
+
+
+ Katz centrality is related to eigenvalue centrality and PageRank.
+ The Katz centrality for node `i` is
+
+ .. math::
+
+ x_i = \alpha \sum_{j} A_{ij} x_j + \beta,
+
+ where `A` is the adjacency matrix of the graph G with eigenvalues `\lambda`.
+
+ The parameter `\beta` controls the initial centrality and
+
+ .. math::
+
+ \alpha < \frac{1}{\lambda_{max}}.
+
+
+ Katz centrality computes the relative influence of a node within a
+ network by measuring the number of the immediate neighbors (first
+ degree nodes) and also all other nodes in the network that connect
+ to the node under consideration through these immediate neighbors.
+
+ Extra weight can be provided to immediate neighbors through the
+ parameter :math:`\beta`. Connections made with distant neighbors
+ are, however, penalized by an attenuation factor `\alpha` which
+ should be strictly less than the inverse largest eigenvalue of the
+ adjacency matrix in order for the Katz centrality to be computed
+ correctly. More information is provided in [1]_ .
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph
+
+ alpha : float
+ Attenuation factor
+
+ beta : scalar or dictionary, optional (default=1.0)
+        Weight attributed to the immediate neighborhood. If not a scalar,
+        the dictionary must have a value for every node.
+
+ normalized : bool
+ If True normalize the resulting values.
+
+ Returns
+ -------
+ nodes : dictionary
+ Dictionary of nodes with Katz centrality as the value.
+
+ Examples
+ --------
+ >>> import math
+ >>> G = nx.path_graph(4)
+ >>> phi = (1+math.sqrt(5))/2.0 # largest eigenvalue of adj matrix
+ >>> centrality = nx.katz_centrality_numpy(G,1/phi)
+ >>> for n,c in sorted(centrality.items()):
+ ... print("%d %0.2f"%(n,c))
+ 0 0.37
+ 1 0.60
+ 2 0.60
+ 3 0.37
+
+ Notes
+    -----
+ This algorithm uses a direct linear solver to solve the above equation.
+ The constant alpha should be strictly less than the inverse of largest
+ eigenvalue of the adjacency matrix for there to be a solution. When
+ `\alpha = 1/\lambda_{max}` and `\beta=1` Katz centrality is the same as
+ eigenvector centrality.
+
+ References
+ ----------
+ .. [1] M. Newman, Networks: An Introduction. Oxford University Press,
+ USA, 2010, p. 720.
+
+ See Also
+ --------
+ katz_centrality
+ eigenvector_centrality_numpy
+ eigenvector_centrality
+ pagerank
+ hits
+ """
+ try:
+ import numpy as np
+ except ImportError:
+ raise ImportError('Requires NumPy: http://scipy.org/')
+ if len(G)==0:
+ return {}
+ try:
+ nodelist = beta.keys()
+ if set(nodelist) != set(G):
+ raise nx.NetworkXError('beta dictionary '
+ 'must have a value for every node')
+ b = np.array(list(beta.values()),dtype=float)
+ except AttributeError:
+ nodelist = G.nodes()
+ try:
+ b = np.ones((len(nodelist),1))*float(beta)
+ except (TypeError,ValueError):
+ raise nx.NetworkXError('beta must be a number')
+
+ A=nx.adj_matrix(G, nodelist=nodelist)
+ n = np.array(A).shape[0]
+ centrality = np.linalg.solve( np.eye(n,n) - (alpha * A) , b)
+ if normalized:
+ norm = np.sign(sum(centrality)) * np.linalg.norm(centrality)
+ else:
+ norm = 1.0
+ centrality=dict(zip(nodelist, map(float,centrality/norm)))
+ return centrality
+
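+# Hedged sketch: beta supplied per node as a dictionary (values are
+# illustrative only):
+#
+#     G = nx.path_graph(3)
+#     c = katz_centrality_numpy(G, alpha=0.1, beta={0: 1.0, 1: 2.0, 2: 1.0})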
+
+# fixture for nose tests
+def setup_module(module):
+ from nose import SkipTest
+ try:
+ import numpy
+ import numpy.linalg
+    except ImportError:
+ raise SkipTest("numpy not available")
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/load.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/load.py
new file mode 100644
index 0000000..2d279e8
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/load.py
@@ -0,0 +1,190 @@
+"""
+Load centrality.
+
+"""
+# Copyright (C) 2004-2010 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+__author__ = "\n".join(['Aric Hagberg (hagberg@lanl.gov)',
+ 'Pieter Swart (swart@lanl.gov)',
+ 'Sasha Gutfraind (ag362@cornell.edu)'])
+
+__all__ = ['load_centrality',
+ 'edge_load']
+
+import networkx as nx
+
+def newman_betweenness_centrality(G,v=None,cutoff=None,
+ normalized=True,
+ weight=None):
+ """Compute load centrality for nodes.
+
+ The load centrality of a node is the fraction of all shortest
+ paths that pass through that node.
+
+ Parameters
+ ----------
+ G : graph
+ A networkx graph
+
+ normalized : bool, optional
+        If True the betweenness values are normalized by b=b/((n-1)(n-2)) where
+ n is the number of nodes in G.
+
+ weight : None or string, optional
+ If None, edge weights are ignored.
+ Otherwise holds the name of the edge attribute used as weight.
+
+    cutoff : int, optional
+ If specified, only consider paths of length <= cutoff.
+
+ Returns
+ -------
+ nodes : dictionary
+ Dictionary of nodes with centrality as the value.
+
+
+ See Also
+ --------
+ betweenness_centrality()
+
+ Notes
+ -----
+    Load centrality is slightly different from betweenness.
+    For this load algorithm see the reference:
+ Scientific collaboration networks: II.
+ Shortest paths, weighted networks, and centrality,
+ M. E. J. Newman, Phys. Rev. E 64, 016132 (2001).
+
+ """
+ if v is not None: # only one node
+ betweenness=0.0
+ for source in G:
+ ubetween = _node_betweenness(G, source, cutoff, False, weight)
+ betweenness += ubetween[v] if v in ubetween else 0
+ if normalized:
+ order = G.order()
+ if order <= 2:
+ return betweenness # no normalization b=0 for all nodes
+ betweenness *= 1.0 / ((order-1) * (order-2))
+ return betweenness
+ else:
+ betweenness = {}.fromkeys(G,0.0)
+ for source in betweenness:
+ ubetween = _node_betweenness(G, source, cutoff, False, weight)
+ for vk in ubetween:
+ betweenness[vk] += ubetween[vk]
+ if normalized:
+ order = G.order()
+ if order <= 2:
+ return betweenness # no normalization b=0 for all nodes
+ scale = 1.0 / ((order-1) * (order-2))
+ for v in betweenness:
+ betweenness[v] *= scale
+ return betweenness # all nodes
+
+def _node_betweenness(G,source,cutoff=False,normalized=True,weight=None):
+ """Node betweenness helper:
+ see betweenness_centrality for what you probably want.
+
+ This actually computes "load" and not betweenness.
+ See https://networkx.lanl.gov/ticket/103
+
+ This calculates the load of each node for paths from a single source.
+    (The fraction of the shortest paths from the source that pass
+    through each node.)
+
+ To get the load for a node you need to do all-pairs shortest paths.
+
+ If weight is not None then use Dijkstra for finding shortest paths.
+ In this case a cutoff is not implemented and so is ignored.
+
+ """
+
+ # get the predecessor and path length data
+ if weight is None:
+ (pred,length)=nx.predecessor(G,source,cutoff=cutoff,return_seen=True)
+ else:
+ (pred,length)=nx.dijkstra_predecessor_and_distance(G,source,weight=weight)
+
+ # order the nodes by path length
+ onodes = [ (l,vert) for (vert,l) in length.items() ]
+ onodes.sort()
+ onodes[:] = [vert for (l,vert) in onodes if l>0]
+
+    # initialize betweenness
+ between={}.fromkeys(length,1.0)
+
+ while onodes:
+ v=onodes.pop()
+ if v in pred:
+ num_paths=len(pred[v]) # Discount betweenness if more than
+ for x in pred[v]: # one shortest path.
+ if x==source: # stop if hit source because all remaining v
+ break # also have pred[v]==[source]
+ between[x]+=between[v]/float(num_paths)
+ # remove source
+ for v in between:
+ between[v]-=1
+ # rescale to be between 0 and 1
+ if normalized:
+ l=len(between)
+ if l > 2:
+ scale=1.0/float((l-1)*(l-2)) # 1/the number of possible paths
+ for v in between:
+ between[v] *= scale
+ return between
+
+
+load_centrality=newman_betweenness_centrality
+
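+# Hedged comparison sketch: load and betweenness centrality agree on many
+# graphs but can differ where shortest paths branch unevenly (see Newman
+# 2001, cited above); the values are graph-dependent, none asserted here:
+#
+#     G = nx.krackhardt_kite_graph()
+#     lc = load_centrality(G)
+#     bc = nx.betweenness_centrality(G)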
+
+def edge_load(G,nodes=None,cutoff=False):
+ """Compute edge load.
+
+ WARNING:
+
+ This module is for demonstration and testing purposes.
+
+ """
+ betweenness={}
+ if not nodes: # find betweenness for every node in graph
+ nodes=G.nodes() # that probably is what you want...
+ for source in nodes:
+ ubetween=_edge_betweenness(G,source,nodes,cutoff=cutoff)
+ for v in ubetween.keys():
+ b=betweenness.setdefault(v,0) # get or set default
+ betweenness[v]=ubetween[v]+b # cumulative total
+ return betweenness
+
+def _edge_betweenness(G,source,nodes,cutoff=False):
+ """
+ Edge betweenness helper.
+ """
+ between={}
+ # get the predecessor data
+ #(pred,length)=_fast_predecessor(G,source,cutoff=cutoff)
+ (pred,length)=nx.predecessor(G,source,cutoff=cutoff,return_seen=True)
+ # order the nodes by path length
+ onodes = [ nn for dd,nn in sorted( (dist,n) for n,dist in length.items() )]
+    # initialize betweenness; doesn't account for any edge weights
+ for u,v in G.edges(nodes):
+ between[(u,v)]=1.0
+ between[(v,u)]=1.0
+
+ while onodes: # work through all paths
+ v=onodes.pop()
+ if v in pred:
+ num_paths=len(pred[v]) # Discount betweenness if more than
+ for w in pred[v]: # one shortest path.
+ if w in pred:
+ num_paths=len(pred[w]) # Discount betweenness, mult path
+ for x in pred[w]:
+ between[(w,x)]+=between[(v,w)]/num_paths
+ between[(x,w)]+=between[(w,v)]/num_paths
+ return between
+
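+# Minimal edge_load sketch (demonstration only, per the WARNING above;
+# note that both orientations of each undirected edge appear as keys):
+#
+#     G = nx.path_graph(3)
+#     edge_load(G)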
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/tests/test_betweenness_centrality.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/tests/test_betweenness_centrality.py
new file mode 100644
index 0000000..b0169a9
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/tests/test_betweenness_centrality.py
@@ -0,0 +1,462 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx as nx
+
+def weighted_G():
+    G=nx.Graph()
+ G.add_edge(0,1,weight=3)
+ G.add_edge(0,2,weight=2)
+ G.add_edge(0,3,weight=6)
+ G.add_edge(0,4,weight=4)
+ G.add_edge(1,3,weight=5)
+ G.add_edge(1,5,weight=5)
+ G.add_edge(2,4,weight=1)
+ G.add_edge(3,4,weight=2)
+ G.add_edge(3,5,weight=1)
+ G.add_edge(4,5,weight=4)
+
+ return G
+
+
+class TestBetweennessCentrality(object):
+
+ def test_K5(self):
+ """Betweenness centrality: K5"""
+ G=nx.complete_graph(5)
+ b=nx.betweenness_centrality(G,
+ weight=None,
+ normalized=False)
+ b_answer={0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0}
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n])
+
+ def test_K5_endpoints(self):
+ """Betweenness centrality: K5 endpoints"""
+ G=nx.complete_graph(5)
+ b=nx.betweenness_centrality(G,
+ weight=None,
+ normalized=False,
+ endpoints=True)
+ b_answer={0: 4.0, 1: 4.0, 2: 4.0, 3: 4.0, 4: 4.0}
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n])
+
+
+ def test_P3_normalized(self):
+ """Betweenness centrality: P3 normalized"""
+ G=nx.path_graph(3)
+ b=nx.betweenness_centrality(G,
+ weight=None,
+ normalized=True)
+ b_answer={0: 0.0, 1: 1.0, 2: 0.0}
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n])
+
+
+ def test_P3(self):
+ """Betweenness centrality: P3"""
+ G=nx.path_graph(3)
+ b_answer={0: 0.0, 1: 1.0, 2: 0.0}
+ b=nx.betweenness_centrality(G,
+ weight=None,
+ normalized=False)
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n])
+
+ def test_P3_endpoints(self):
+ """Betweenness centrality: P3 endpoints"""
+ G=nx.path_graph(3)
+ b_answer={0: 2.0, 1: 3.0, 2: 2.0}
+ b=nx.betweenness_centrality(G,
+ weight=None,
+ normalized=False,
+ endpoints=True)
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n])
+
+
+ def test_krackhardt_kite_graph(self):
+ """Betweenness centrality: Krackhardt kite graph"""
+ G=nx.krackhardt_kite_graph()
+ b_answer={0: 1.667,1: 1.667,2: 0.000,3: 7.333,4: 0.000,
+ 5: 16.667,6: 16.667,7: 28.000,8: 16.000,9: 0.000}
+ for b in b_answer:
+ b_answer[b]/=2.0
+ b=nx.betweenness_centrality(G,
+ weight=None,
+ normalized=False)
+
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n],places=3)
+
+
+ def test_krackhardt_kite_graph_normalized(self):
+ """Betweenness centrality: Krackhardt kite graph normalized"""
+ G=nx.krackhardt_kite_graph()
+ b_answer={0:0.023,1:0.023,2:0.000,3:0.102,4:0.000,
+ 5:0.231,6:0.231,7:0.389,8:0.222,9:0.000}
+ b=nx.betweenness_centrality(G,
+ weight=None,
+ normalized=True)
+
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n],places=3)
+
+
+ def test_florentine_families_graph(self):
+ """Betweenness centrality: Florentine families graph"""
+ G=nx.florentine_families_graph()
+ b_answer=\
+ {'Acciaiuoli': 0.000,
+ 'Albizzi': 0.212,
+ 'Barbadori': 0.093,
+ 'Bischeri': 0.104,
+ 'Castellani': 0.055,
+ 'Ginori': 0.000,
+ 'Guadagni': 0.255,
+ 'Lamberteschi': 0.000,
+ 'Medici': 0.522,
+ 'Pazzi': 0.000,
+ 'Peruzzi': 0.022,
+ 'Ridolfi': 0.114,
+ 'Salviati': 0.143,
+ 'Strozzi': 0.103,
+ 'Tornabuoni': 0.092}
+
+ b=nx.betweenness_centrality(G,
+ weight=None,
+ normalized=True)
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n],places=3)
+
+
+ def test_ladder_graph(self):
+ """Betweenness centrality: Ladder graph"""
+ G = nx.Graph() # ladder_graph(3)
+ G.add_edges_from([(0,1), (0,2), (1,3), (2,3),
+ (2,4), (4,5), (3,5)])
+ b_answer={0:1.667,1: 1.667,2: 6.667,
+ 3: 6.667,4: 1.667,5: 1.667}
+ for b in b_answer:
+ b_answer[b]/=2.0
+ b=nx.betweenness_centrality(G,
+ weight=None,
+ normalized=False)
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n],places=3)
+
+ def test_disconnected_path(self):
+ """Betweenness centrality: disconnected path"""
+ G=nx.Graph()
+ G.add_path([0,1,2])
+ G.add_path([3,4,5,6])
+ b_answer={0:0,1:1,2:0,3:0,4:2,5:2,6:0}
+ b=nx.betweenness_centrality(G,
+ weight=None,
+ normalized=False)
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n])
+
+ def test_disconnected_path_endpoints(self):
+ """Betweenness centrality: disconnected path endpoints"""
+ G=nx.Graph()
+ G.add_path([0,1,2])
+ G.add_path([3,4,5,6])
+ b_answer={0:2,1:3,2:2,3:3,4:5,5:5,6:3}
+ b=nx.betweenness_centrality(G,
+ weight=None,
+ normalized=False,
+ endpoints=True)
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n])
+
+
+ def test_directed_path(self):
+ """Betweenness centrality: directed path"""
+ G=nx.DiGraph()
+ G.add_path([0,1,2])
+ b=nx.betweenness_centrality(G,
+ weight=None,
+ normalized=False)
+ b_answer={0: 0.0, 1: 1.0, 2: 0.0}
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n])
+
+ def test_directed_path_normalized(self):
+ """Betweenness centrality: directed path normalized"""
+ G=nx.DiGraph()
+ G.add_path([0,1,2])
+ b=nx.betweenness_centrality(G,
+ weight=None,
+ normalized=True)
+ b_answer={0: 0.0, 1: 0.5, 2: 0.0}
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n])
+
+
+
+class TestWeightedBetweennessCentrality(object):
+
+ def test_K5(self):
+ """Weighted betweenness centrality: K5"""
+ G=nx.complete_graph(5)
+ b=nx.betweenness_centrality(G,
+ weight='weight',
+ normalized=False)
+ b_answer={0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0}
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n])
+
+ def test_P3_normalized(self):
+ """Weighted betweenness centrality: P3 normalized"""
+ G=nx.path_graph(3)
+ b=nx.betweenness_centrality(G,
+ weight='weight',
+ normalized=True)
+ b_answer={0: 0.0, 1: 1.0, 2: 0.0}
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n])
+
+
+ def test_P3(self):
+ """Weighted betweenness centrality: P3"""
+ G=nx.path_graph(3)
+ b_answer={0: 0.0, 1: 1.0, 2: 0.0}
+ b=nx.betweenness_centrality(G,
+ weight='weight',
+ normalized=False)
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n])
+
+ def test_krackhardt_kite_graph(self):
+ """Weighted betweenness centrality: Krackhardt kite graph"""
+ G=nx.krackhardt_kite_graph()
+ b_answer={0: 1.667,1: 1.667,2: 0.000,3: 7.333,4: 0.000,
+ 5: 16.667,6: 16.667,7: 28.000,8: 16.000,9: 0.000}
+ for b in b_answer:
+ b_answer[b]/=2.0
+
+ b=nx.betweenness_centrality(G,
+ weight='weight',
+ normalized=False)
+
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n],places=3)
+
+
+ def test_krackhardt_kite_graph_normalized(self):
+ """Weighted betweenness centrality:
+ Krackhardt kite graph normalized
+ """
+ G=nx.krackhardt_kite_graph()
+ b_answer={0:0.023,1:0.023,2:0.000,3:0.102,4:0.000,
+ 5:0.231,6:0.231,7:0.389,8:0.222,9:0.000}
+ b=nx.betweenness_centrality(G,
+ weight='weight',
+ normalized=True)
+
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n],places=3)
+
+
+ def test_florentine_families_graph(self):
+ """Weighted betweenness centrality:
+ Florentine families graph"""
+ G=nx.florentine_families_graph()
+ b_answer=\
+ {'Acciaiuoli': 0.000,
+ 'Albizzi': 0.212,
+ 'Barbadori': 0.093,
+ 'Bischeri': 0.104,
+ 'Castellani': 0.055,
+ 'Ginori': 0.000,
+ 'Guadagni': 0.255,
+ 'Lamberteschi': 0.000,
+ 'Medici': 0.522,
+ 'Pazzi': 0.000,
+ 'Peruzzi': 0.022,
+ 'Ridolfi': 0.114,
+ 'Salviati': 0.143,
+ 'Strozzi': 0.103,
+ 'Tornabuoni': 0.092}
+
+ b=nx.betweenness_centrality(G,
+ weight='weight',
+ normalized=True)
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n],places=3)
+
+
+ def test_ladder_graph(self):
+ """Weighted betweenness centrality: Ladder graph"""
+ G = nx.Graph() # ladder_graph(3)
+ G.add_edges_from([(0,1), (0,2), (1,3), (2,3),
+ (2,4), (4,5), (3,5)])
+ b_answer={0:1.667,1: 1.667,2: 6.667,
+ 3: 6.667,4: 1.667,5: 1.667}
+ for b in b_answer:
+ b_answer[b]/=2.0
+ b=nx.betweenness_centrality(G,
+ weight='weight',
+ normalized=False)
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n],places=3)
+
+ def test_G(self):
+ """Weighted betweenness centrality: G"""
+ G = weighted_G()
+ b_answer={0: 2.0, 1: 0.0, 2: 4.0, 3: 3.0, 4: 4.0, 5: 0.0}
+ b=nx.betweenness_centrality(G,
+ weight='weight',
+ normalized=False)
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n])
+
+ def test_G2(self):
+ """Weighted betweenness centrality: G2"""
+ G=nx.DiGraph()
+ G.add_weighted_edges_from([('s','u',10) ,('s','x',5) ,
+ ('u','v',1) ,('u','x',2) ,
+ ('v','y',1) ,('x','u',3) ,
+ ('x','v',5) ,('x','y',2) ,
+ ('y','s',7) ,('y','v',6)])
+
+ b_answer={'y':5.0,'x':5.0,'s':4.0,'u':2.0,'v':2.0}
+
+ b=nx.betweenness_centrality(G,
+ weight='weight',
+ normalized=False)
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n])
+
+
+class TestEdgeBetweennessCentrality(object):
+
+ def test_K5(self):
+ """Edge betweenness centrality: K5"""
+ G=nx.complete_graph(5)
+ b=nx.edge_betweenness_centrality(G, weight=None, normalized=False)
+ b_answer=dict.fromkeys(G.edges(),1)
+ for n in sorted(G.edges()):
+ assert_almost_equal(b[n],b_answer[n])
+
+ def test_normalized_K5(self):
+ """Edge betweenness centrality: K5"""
+ G=nx.complete_graph(5)
+ b=nx.edge_betweenness_centrality(G, weight=None, normalized=True)
+ b_answer=dict.fromkeys(G.edges(),1/10.0)
+ for n in sorted(G.edges()):
+ assert_almost_equal(b[n],b_answer[n])
+
+
+ def test_C4(self):
+ """Edge betweenness centrality: C4"""
+ G=nx.cycle_graph(4)
+ b=nx.edge_betweenness_centrality(G, weight=None, normalized=True)
+ b_answer={(0, 1):2,(0, 3):2, (1, 2):2, (2, 3): 2}
+ for n in sorted(G.edges()):
+ assert_almost_equal(b[n],b_answer[n]/6.0)
+
+ def test_P4(self):
+ """Edge betweenness centrality: P4"""
+ G=nx.path_graph(4)
+ b=nx.edge_betweenness_centrality(G, weight=None, normalized=False)
+ b_answer={(0, 1):3,(1, 2):4, (2, 3):3}
+ for n in sorted(G.edges()):
+ assert_almost_equal(b[n],b_answer[n])
+
+ def test_normalized_P4(self):
+ """Edge betweenness centrality: P4"""
+ G=nx.path_graph(4)
+ b=nx.edge_betweenness_centrality(G, weight=None, normalized=True)
+ b_answer={(0, 1):3,(1, 2):4, (2, 3):3}
+ for n in sorted(G.edges()):
+ assert_almost_equal(b[n],b_answer[n]/6.0)
+
+
+ def test_balanced_tree(self):
+ """Edge betweenness centrality: balanced tree"""
+ G=nx.balanced_tree(r=2,h=2)
+ b=nx.edge_betweenness_centrality(G, weight=None, normalized=False)
+ b_answer={(0, 1):12,(0, 2):12,
+ (1, 3):6,(1, 4):6,(2, 5):6,(2,6):6}
+ for n in sorted(G.edges()):
+ assert_almost_equal(b[n],b_answer[n])
+
+class TestWeightedEdgeBetweennessCentrality(object):
+
+ def test_K5(self):
+ """Edge betweenness centrality: K5"""
+ G=nx.complete_graph(5)
+ b=nx.edge_betweenness_centrality(G, weight='weight', normalized=False)
+ b_answer=dict.fromkeys(G.edges(),1)
+ for n in sorted(G.edges()):
+ assert_almost_equal(b[n],b_answer[n])
+
+ def test_C4(self):
+ """Edge betweenness centrality: C4"""
+ G=nx.cycle_graph(4)
+ b=nx.edge_betweenness_centrality(G, weight='weight', normalized=False)
+ b_answer={(0, 1):2,(0, 3):2, (1, 2):2, (2, 3): 2}
+ for n in sorted(G.edges()):
+ assert_almost_equal(b[n],b_answer[n])
+
+ def test_P4(self):
+ """Edge betweenness centrality: P4"""
+ G=nx.path_graph(4)
+ b=nx.edge_betweenness_centrality(G, weight='weight', normalized=False)
+ b_answer={(0, 1):3,(1, 2):4, (2, 3):3}
+ for n in sorted(G.edges()):
+ assert_almost_equal(b[n],b_answer[n])
+
+
+ def test_balanced_tree(self):
+ """Edge betweenness centrality: balanced tree"""
+ G=nx.balanced_tree(r=2,h=2)
+ b=nx.edge_betweenness_centrality(G, weight='weight', normalized=False)
+ b_answer={(0, 1):12,(0, 2):12,
+ (1, 3):6,(1, 4):6,(2, 5):6,(2,6):6}
+ for n in sorted(G.edges()):
+ assert_almost_equal(b[n],b_answer[n])
+
+ def test_weighted_graph(self):
+ eList = [(0, 1, 5), (0, 2, 4), (0, 3, 3),
+ (0, 4, 2), (1, 2, 4), (1, 3, 1),
+ (1, 4, 3), (2, 4, 5), (3, 4, 4)]
+ G = nx.Graph()
+ G.add_weighted_edges_from(eList)
+ b = nx.edge_betweenness_centrality(G, weight='weight', normalized=False)
+ b_answer={(0, 1):0.0,
+ (0, 2):1.0,
+ (0, 3):2.0,
+ (0, 4):1.0,
+ (1, 2):2.0,
+ (1, 3):3.5,
+ (1, 4):1.5,
+ (2, 4):1.0,
+ (3, 4):0.5}
+
+ for n in sorted(G.edges()):
+ assert_almost_equal(b[n],b_answer[n])
+
+ def test_normalized_weighted_graph(self):
+ eList = [(0, 1, 5), (0, 2, 4), (0, 3, 3),
+ (0, 4, 2), (1, 2, 4), (1, 3, 1),
+ (1, 4, 3), (2, 4, 5), (3, 4, 4)]
+ G = nx.Graph()
+ G.add_weighted_edges_from(eList)
+ b = nx.edge_betweenness_centrality(G, weight='weight', normalized=True)
+ b_answer={(0, 1):0.0,
+ (0, 2):1.0,
+ (0, 3):2.0,
+ (0, 4):1.0,
+ (1, 2):2.0,
+ (1, 3):3.5,
+ (1, 4):1.5,
+ (2, 4):1.0,
+ (3, 4):0.5}
+
+ norm = len(G)*(len(G)-1)/2.0
+ for n in sorted(G.edges()):
+ assert_almost_equal(b[n],b_answer[n]/norm)
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/tests/test_betweenness_centrality_subset.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/tests/test_betweenness_centrality_subset.py
new file mode 100644
index 0000000..762b873
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/tests/test_betweenness_centrality_subset.py
@@ -0,0 +1,258 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx
+from networkx import betweenness_centrality_subset,\
+ edge_betweenness_centrality_subset
+
+class TestSubsetBetweennessCentrality:
+
+ def test_K5(self):
+ """Betweenness centrality: K5"""
+ G=networkx.complete_graph(5)
+ b=betweenness_centrality_subset(G,
+ sources=[0],
+ targets=[1,3],
+ weight=None)
+ b_answer={0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0}
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n])
+
+ def test_P5_directed(self):
+ """Betweenness centrality: P5 directed"""
+ G=networkx.DiGraph()
+ G.add_path(list(range(5)))
+        b_answer={0:0,1:1,2:1,3:0,4:0}
+ b=betweenness_centrality_subset(G,
+ sources=[0],
+ targets=[3],
+ weight=None)
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n])
+
+ def test_P5(self):
+ """Betweenness centrality: P5"""
+ G=networkx.Graph()
+ G.add_path(list(range(5)))
+        b_answer={0:0,1:0.5,2:0.5,3:0,4:0}
+ b=betweenness_centrality_subset(G,
+ sources=[0],
+ targets=[3],
+ weight=None)
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n])
+
+ def test_P5_multiple_target(self):
+ """Betweenness centrality: P5 multiple target"""
+ G=networkx.Graph()
+ G.add_path(list(range(5)))
+        b_answer={0:0,1:1,2:1,3:0.5,4:0}
+ b=betweenness_centrality_subset(G,
+ sources=[0],
+ targets=[3,4],
+ weight=None)
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n])
+
+ def test_box(self):
+ """Betweenness centrality: box"""
+ G=networkx.Graph()
+ G.add_edge(0,1)
+ G.add_edge(0,2)
+ G.add_edge(1,3)
+ G.add_edge(2,3)
+ b_answer={0:0,1:0.25,2:0.25,3:0}
+ b=betweenness_centrality_subset(G,
+ sources=[0],
+ targets=[3],
+ weight=None)
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n])
+
+ def test_box_and_path(self):
+ """Betweenness centrality: box and path"""
+ G=networkx.Graph()
+ G.add_edge(0,1)
+ G.add_edge(0,2)
+ G.add_edge(1,3)
+ G.add_edge(2,3)
+ G.add_edge(3,4)
+ G.add_edge(4,5)
+ b_answer={0:0,1:0.5,2:0.5,3:0.5,4:0,5:0}
+ b=betweenness_centrality_subset(G,
+ sources=[0],
+ targets=[3,4],
+ weight=None)
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n])
+
+
+ def test_box_and_path2(self):
+ """Betweenness centrality: box and path multiple target"""
+ G=networkx.Graph()
+ G.add_edge(0,1)
+ G.add_edge(1,2)
+ G.add_edge(2,3)
+ G.add_edge(1,20)
+ G.add_edge(20,3)
+ G.add_edge(3,4)
+ b_answer={0:0,1:1.0,2:0.5,20:0.5,3:0.5,4:0}
+ b=betweenness_centrality_subset(G,
+ sources=[0],
+ targets=[3,4])
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n])
+
+
+class TestBetweennessCentralitySources:
+ def test_K5(self):
+ """Betweenness centrality: K5"""
+ G=networkx.complete_graph(5)
+ b=networkx.betweenness_centrality_source(G,
+ weight=None,
+ normalized=False)
+ b_answer={0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0}
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n])
+
+ def test_P3(self):
+ """Betweenness centrality: P3"""
+ G=networkx.path_graph(3)
+ b_answer={0: 0.0, 1: 1.0, 2: 0.0}
+ b=networkx.betweenness_centrality_source(G,
+ weight=None,
+ normalized=True)
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n])
+
+
+
+
+class TestEdgeSubsetBetweennessCentrality:
+
+ def test_K5(self):
+ """Edge betweenness centrality: K5"""
+ G=networkx.complete_graph(5)
+ b=edge_betweenness_centrality_subset(G,
+ sources=[0],
+ targets=[1,3],
+ weight=None)
+ b_answer=dict.fromkeys(G.edges(),0)
+ b_answer[(0,3)]=0.5
+ b_answer[(0,1)]=0.5
+ for n in sorted(G.edges()):
+ assert_almost_equal(b[n],b_answer[n])
+
+ def test_P5_directed(self):
+ """Edge betweenness centrality: P5 directed"""
+ G=networkx.DiGraph()
+ G.add_path(list(range(5)))
+ b_answer=dict.fromkeys(G.edges(),0)
+ b_answer[(0,1)]=1
+ b_answer[(1,2)]=1
+ b_answer[(2,3)]=1
+ b=edge_betweenness_centrality_subset(G,
+ sources=[0],
+ targets=[3],
+ weight=None)
+ for n in sorted(G.edges()):
+ assert_almost_equal(b[n],b_answer[n])
+
+ def test_P5(self):
+ """Edge betweenness centrality: P5"""
+ G=networkx.Graph()
+ G.add_path(list(range(5)))
+ b_answer=dict.fromkeys(G.edges(),0)
+ b_answer[(0,1)]=0.5
+ b_answer[(1,2)]=0.5
+ b_answer[(2,3)]=0.5
+ b=edge_betweenness_centrality_subset(G,
+ sources=[0],
+ targets=[3],
+ weight=None)
+ for n in sorted(G.edges()):
+ assert_almost_equal(b[n],b_answer[n])
+
+ def test_P5_multiple_target(self):
+ """Edge betweenness centrality: P5 multiple target"""
+ G=networkx.Graph()
+ G.add_path(list(range(5)))
+ b_answer=dict.fromkeys(G.edges(),0)
+ b_answer[(0,1)]=1
+ b_answer[(1,2)]=1
+ b_answer[(2,3)]=1
+ b_answer[(3,4)]=0.5
+ b=edge_betweenness_centrality_subset(G,
+ sources=[0],
+ targets=[3,4],
+ weight=None)
+ for n in sorted(G.edges()):
+ assert_almost_equal(b[n],b_answer[n])
+
+ def test_box(self):
+ """Edge etweenness centrality: box"""
+ G=networkx.Graph()
+ G.add_edge(0,1)
+ G.add_edge(0,2)
+ G.add_edge(1,3)
+ G.add_edge(2,3)
+ b_answer=dict.fromkeys(G.edges(),0)
+
+ b_answer[(0,1)]=0.25
+ b_answer[(0,2)]=0.25
+ b_answer[(1,3)]=0.25
+ b_answer[(2,3)]=0.25
+ b=edge_betweenness_centrality_subset(G,
+ sources=[0],
+ targets=[3],
+ weight=None)
+ for n in sorted(G.edges()):
+ assert_almost_equal(b[n],b_answer[n])
+
+ def test_box_and_path(self):
+ """Edge etweenness centrality: box and path"""
+ G=networkx.Graph()
+ G.add_edge(0,1)
+ G.add_edge(0,2)
+ G.add_edge(1,3)
+ G.add_edge(2,3)
+ G.add_edge(3,4)
+ G.add_edge(4,5)
+ b_answer=dict.fromkeys(G.edges(),0)
+ b_answer[(0,1)]=1.0/2
+ b_answer[(0,2)]=1.0/2
+ b_answer[(1,3)]=1.0/2
+ b_answer[(2,3)]=1.0/2
+ b_answer[(3,4)]=1.0/2
+ b=edge_betweenness_centrality_subset(G,
+ sources=[0],
+ targets=[3,4],
+ weight=None)
+ for n in sorted(G.edges()):
+ assert_almost_equal(b[n],b_answer[n])
+
+
+ def test_box_and_path2(self):
+ """Edge betweenness centrality: box and path multiple target"""
+ G=networkx.Graph()
+ G.add_edge(0,1)
+ G.add_edge(1,2)
+ G.add_edge(2,3)
+ G.add_edge(1,20)
+ G.add_edge(20,3)
+ G.add_edge(3,4)
+ b_answer=dict.fromkeys(G.edges(),0)
+ b_answer[(0,1)]=1.0
+ b_answer[(1,20)]=1.0/2
+ b_answer[(3,20)]=1.0/2
+ b_answer[(1,2)]=1.0/2
+ b_answer[(2,3)]=1.0/2
+ b_answer[(3,4)]=1.0/2
+ b=edge_betweenness_centrality_subset(G,
+ sources=[0],
+ targets=[3,4],
+ weight=None)
+ for n in sorted(G.edges()):
+ assert_almost_equal(b[n],b_answer[n])
+
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/tests/test_closeness_centrality.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/tests/test_closeness_centrality.py
new file mode 100644
index 0000000..71b0009
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/tests/test_closeness_centrality.py
@@ -0,0 +1,93 @@
+"""
+Tests for closeness centrality.
+"""
+from nose.tools import *
+import networkx as nx
+
+class TestClosenessCentrality:
+ def setUp(self):
+
+ self.K = nx.krackhardt_kite_graph()
+ self.P3 = nx.path_graph(3)
+ self.P4 = nx.path_graph(4)
+ self.K5 = nx.complete_graph(5)
+
+ self.C4=nx.cycle_graph(4)
+ self.T=nx.balanced_tree(r=2, h=2)
+ self.Gb = nx.Graph()
+ self.Gb.add_edges_from([(0,1), (0,2), (1,3), (2,3),
+ (2,4), (4,5), (3,5)])
+
+
+ F = nx.florentine_families_graph()
+ self.F = F
+
+
+ def test_k5_closeness(self):
+ c=nx.closeness_centrality(self.K5)
+ d={0: 1.000,
+ 1: 1.000,
+ 2: 1.000,
+ 3: 1.000,
+ 4: 1.000}
+ for n in sorted(self.K5):
+ assert_almost_equal(c[n],d[n],places=3)
+
+ def test_p3_closeness(self):
+ c=nx.closeness_centrality(self.P3)
+ d={0: 0.667,
+ 1: 1.000,
+ 2: 0.667}
+ for n in sorted(self.P3):
+ assert_almost_equal(c[n],d[n],places=3)
+
+ def test_krackhardt_closeness(self):
+ c=nx.closeness_centrality(self.K)
+ d={0: 0.529,
+ 1: 0.529,
+ 2: 0.500,
+ 3: 0.600,
+ 4: 0.500,
+ 5: 0.643,
+ 6: 0.643,
+ 7: 0.600,
+ 8: 0.429,
+ 9: 0.310}
+ for n in sorted(self.K):
+ assert_almost_equal(c[n],d[n],places=3)
+
+ def test_florentine_families_closeness(self):
+ c=nx.closeness_centrality(self.F)
+ d={'Acciaiuoli': 0.368,
+ 'Albizzi': 0.483,
+ 'Barbadori': 0.4375,
+ 'Bischeri': 0.400,
+ 'Castellani': 0.389,
+ 'Ginori': 0.333,
+ 'Guadagni': 0.467,
+ 'Lamberteschi': 0.326,
+ 'Medici': 0.560,
+ 'Pazzi': 0.286,
+ 'Peruzzi': 0.368,
+ 'Ridolfi': 0.500,
+ 'Salviati': 0.389,
+ 'Strozzi': 0.4375,
+ 'Tornabuoni': 0.483}
+ for n in sorted(self.F):
+ assert_almost_equal(c[n],d[n],places=3)
+
+ def test_weighted_closeness(self):
+ XG=nx.Graph()
+ XG.add_weighted_edges_from([('s','u',10), ('s','x',5), ('u','v',1),
+ ('u','x',2), ('v','y',1), ('x','u',3),
+ ('x','v',5), ('x','y',2), ('y','s',7),
+ ('y','v',6)])
+ c=nx.closeness_centrality(XG,distance='weight')
+ d={'y': 0.200,
+ 'x': 0.286,
+ 's': 0.138,
+ 'u': 0.235,
+ 'v': 0.200}
+ for n in sorted(XG):
+ assert_almost_equal(c[n],d[n],places=3)
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/tests/test_communicability.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/tests/test_communicability.py
new file mode 100644
index 0000000..d03be65
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/tests/test_communicability.py
@@ -0,0 +1,134 @@
+from collections import defaultdict
+from nose.tools import *
+from nose import SkipTest
+import networkx as nx
+from networkx.algorithms.centrality.communicability_alg import *
+
+class TestCommunicability:
+ @classmethod
+ def setupClass(cls):
+ global numpy
+ global scipy
+ try:
+ import numpy
+ except ImportError:
+ raise SkipTest('NumPy not available.')
+ try:
+ import scipy
+ except ImportError:
+ raise SkipTest('SciPy not available.')
+
+
+ def test_communicability_centrality(self):
+ answer={0: 1.5430806348152433, 1: 1.5430806348152433}
+ result=communicability_centrality(nx.path_graph(2))
+ for k,v in result.items():
+ assert_almost_equal(answer[k],result[k],places=7)
+
+ answer1={'1': 1.6445956054135658,
+ 'Albert': 2.4368257358712189,
+ 'Aric': 2.4368257358712193,
+ 'Dan':3.1306328496328168,
+ 'Franck': 2.3876142275231915}
+ G1=nx.Graph([('Franck','Aric'),('Aric','Dan'),('Dan','Albert'),
+ ('Albert','Franck'),('Dan','1'),('Franck','Albert')])
+ result1=communicability_centrality(G1)
+ for k,v in result1.items():
+ assert_almost_equal(answer1[k],result1[k],places=7)
+ result1=communicability_centrality_exp(G1)
+ for k,v in result1.items():
+ assert_almost_equal(answer1[k],result1[k],places=7)
+
+ def test_communicability_betweenness_centrality(self):
+ answer={0: 0.07017447951484615, 1: 0.71565598701107991,
+ 2: 0.71565598701107991, 3: 0.07017447951484615}
+ result=communicability_betweenness_centrality(nx.path_graph(4))
+ for k,v in result.items():
+ assert_almost_equal(answer[k],result[k],places=7)
+
+ answer1={'1': 0.060039074193949521,
+ 'Albert': 0.315470761661372,
+ 'Aric': 0.31547076166137211,
+ 'Dan': 0.68297778678316201,
+ 'Franck': 0.21977926617449497}
+ G1=nx.Graph([('Franck','Aric'),
+ ('Aric','Dan'),('Dan','Albert'),('Albert','Franck'),
+ ('Dan','1'),('Franck','Albert')])
+ result1=communicability_betweenness_centrality(G1)
+ for k,v in result1.items():
+ assert_almost_equal(answer1[k],result1[k],places=7)
+
+ def test_communicability_betweenness_centrality_small(self):
+ G = nx.Graph([(1,2)])
+ result=communicability_betweenness_centrality(G)
+ assert_equal(result, {1:0,2:0})
+
+
+ def test_communicability(self):
+ answer={0 :{0: 1.5430806348152435,
+ 1: 1.1752011936438012
+ },
+ 1 :{0: 1.1752011936438012,
+ 1: 1.5430806348152435
+ }
+ }
+# answer={(0, 0): 1.5430806348152435,
+# (0, 1): 1.1752011936438012,
+# (1, 0): 1.1752011936438012,
+# (1, 1): 1.5430806348152435}
+
+ result=communicability(nx.path_graph(2))
+ for k1,val in result.items():
+ for k2 in val:
+ assert_almost_equal(answer[k1][k2],result[k1][k2],places=7)
+
+ def test_communicability2(self):
+
+ answer_orig ={('1', '1'): 1.6445956054135658,
+ ('1', 'Albert'): 0.7430186221096251,
+ ('1', 'Aric'): 0.7430186221096251,
+ ('1', 'Dan'): 1.6208126320442937,
+ ('1', 'Franck'): 0.42639707170035257,
+ ('Albert', '1'): 0.7430186221096251,
+ ('Albert', 'Albert'): 2.4368257358712189,
+ ('Albert', 'Aric'): 1.4368257358712191,
+ ('Albert', 'Dan'): 2.0472097037446453,
+ ('Albert', 'Franck'): 1.8340111678944691,
+ ('Aric', '1'): 0.7430186221096251,
+ ('Aric', 'Albert'): 1.4368257358712191,
+ ('Aric', 'Aric'): 2.4368257358712193,
+ ('Aric', 'Dan'): 2.0472097037446457,
+ ('Aric', 'Franck'): 1.8340111678944691,
+ ('Dan', '1'): 1.6208126320442937,
+ ('Dan', 'Albert'): 2.0472097037446453,
+ ('Dan', 'Aric'): 2.0472097037446457,
+ ('Dan', 'Dan'): 3.1306328496328168,
+ ('Dan', 'Franck'): 1.4860372442192515,
+ ('Franck', '1'): 0.42639707170035257,
+ ('Franck', 'Albert'): 1.8340111678944691,
+ ('Franck', 'Aric'): 1.8340111678944691,
+ ('Franck', 'Dan'): 1.4860372442192515,
+ ('Franck', 'Franck'): 2.3876142275231915}
+
+ answer=defaultdict(dict)
+ for (k1,k2),v in answer_orig.items():
+ answer[k1][k2]=v
+
+ G1=nx.Graph([('Franck','Aric'),('Aric','Dan'),('Dan','Albert'),
+ ('Albert','Franck'),('Dan','1'),('Franck','Albert')])
+
+ result=communicability(G1)
+ for k1,val in result.items():
+ for k2 in val:
+ assert_almost_equal(answer[k1][k2],result[k1][k2],places=7)
+
+ result=communicability_exp(G1)
+ for k1,val in result.items():
+ for k2 in val:
+ assert_almost_equal(answer[k1][k2],result[k1][k2],places=7)
+
+
+ def test_estrada_index(self):
+ answer=1041.2470334195475
+ result=estrada_index(nx.karate_club_graph())
+ assert_almost_equal(answer,result,places=7)
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality.py
new file mode 100644
index 0000000..46e8a2d
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality.py
@@ -0,0 +1,211 @@
+#!/usr/bin/env python
+from nose.tools import *
+from nose import SkipTest
+import networkx
+from nose.plugins.attrib import attr
+
+from networkx import edge_current_flow_betweenness_centrality \
+ as edge_current_flow
+
+from networkx import approximate_current_flow_betweenness_centrality \
+ as approximate_cfbc
+
+class TestFlowBetweennessCentrality(object):
+ numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
+ @classmethod
+ def setupClass(cls):
+ global np
+ try:
+ import numpy as np
+ import scipy
+ except ImportError:
+ raise SkipTest('NumPy not available.')
+
+ def test_K4_normalized(self):
+ """Betweenness centrality: K4"""
+ G=networkx.complete_graph(4)
+ b=networkx.current_flow_betweenness_centrality(G,normalized=True)
+ b_answer={0: 0.25, 1: 0.25, 2: 0.25, 3: 0.25}
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n])
+ G.add_edge(0,1,{'weight':0.5,'other':0.3})
+ b=networkx.current_flow_betweenness_centrality(G,normalized=True,weight=None)
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n])
+ wb_answer={0: 0.2222222, 1: 0.2222222, 2: 0.30555555, 3: 0.30555555}
+ b=networkx.current_flow_betweenness_centrality(G,normalized=True)
+ for n in sorted(G):
+ assert_almost_equal(b[n],wb_answer[n])
+ wb_answer={0: 0.2051282, 1: 0.2051282, 2: 0.33974358, 3: 0.33974358}
+ b=networkx.current_flow_betweenness_centrality(G,normalized=True,weight='other')
+ for n in sorted(G):
+ assert_almost_equal(b[n],wb_answer[n])
+
+ def test_K4(self):
+ """Betweenness centrality: K4"""
+ G=networkx.complete_graph(4)
+ for solver in ['full','lu','cg']:
+ b=networkx.current_flow_betweenness_centrality(G, normalized=False,
+ solver=solver)
+ b_answer={0: 0.75, 1: 0.75, 2: 0.75, 3: 0.75}
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n])
+
+
+ def test_P4_normalized(self):
+ """Betweenness centrality: P4 normalized"""
+ G=networkx.path_graph(4)
+ b=networkx.current_flow_betweenness_centrality(G,normalized=True)
+ b_answer={0: 0, 1: 2./3, 2: 2./3, 3:0}
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n])
+
+
+ def test_P4(self):
+ """Betweenness centrality: P4"""
+ G=networkx.path_graph(4)
+ b=networkx.current_flow_betweenness_centrality(G,normalized=False)
+ b_answer={0: 0, 1: 2, 2: 2, 3: 0}
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n])
+
+ def test_star(self):
+ """Betweenness centrality: star """
+ G=networkx.Graph()
+ G.add_star(['a','b','c','d'])
+ b=networkx.current_flow_betweenness_centrality(G,normalized=True)
+ b_answer={'a': 1.0, 'b': 0.0, 'c': 0.0, 'd':0.0}
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n])
+
+
+
+    def test_solvers(self):
+ """Betweenness centrality: alternate solvers"""
+ G=networkx.complete_graph(4)
+ for solver in ['full','lu','cg']:
+ b=networkx.current_flow_betweenness_centrality(G,normalized=False,
+ solver=solver)
+ b_answer={0: 0.75, 1: 0.75, 2: 0.75, 3: 0.75}
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n])
+
+
+
+class TestApproximateFlowBetweennessCentrality(object):
+ numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
+ @classmethod
+ def setupClass(cls):
+ global np
+ global assert_allclose
+ try:
+ import numpy as np
+ import scipy
+ from numpy.testing import assert_allclose
+ except ImportError:
+ raise SkipTest('NumPy not available.')
+
+ def test_K4_normalized(self):
+ "Approximate current-flow betweenness centrality: K4 normalized"
+ G=networkx.complete_graph(4)
+ b=networkx.current_flow_betweenness_centrality(G,normalized=True)
+ epsilon=0.1
+ ba = approximate_cfbc(G,normalized=True, epsilon=0.5*epsilon)
+ for n in sorted(G):
+ assert_allclose(b[n],ba[n],atol=epsilon)
+
+ def test_K4(self):
+ "Approximate current-flow betweenness centrality: K4"
+ G=networkx.complete_graph(4)
+ b=networkx.current_flow_betweenness_centrality(G,normalized=False)
+ epsilon=0.1
+ ba = approximate_cfbc(G,normalized=False, epsilon=0.5*epsilon)
+ for n in sorted(G):
+ assert_allclose(b[n],ba[n],atol=epsilon*len(G)**2)
+
+ def test_star(self):
+ "Approximate current-flow betweenness centrality: star"
+ G=networkx.Graph()
+ G.add_star(['a','b','c','d'])
+ b=networkx.current_flow_betweenness_centrality(G,normalized=True)
+ epsilon=0.1
+ ba = approximate_cfbc(G,normalized=True, epsilon=0.5*epsilon)
+ for n in sorted(G):
+ assert_allclose(b[n],ba[n],atol=epsilon)
+
+ def test_grid(self):
+ "Approximate current-flow betweenness centrality: 2d grid"
+ G=networkx.grid_2d_graph(4,4)
+ b=networkx.current_flow_betweenness_centrality(G,normalized=True)
+ epsilon=0.1
+ ba = approximate_cfbc(G,normalized=True, epsilon=0.5*epsilon)
+ for n in sorted(G):
+ assert_allclose(b[n],ba[n],atol=epsilon)
+
+ def test_solvers(self):
+ "Approximate current-flow betweenness centrality: solvers"
+ G=networkx.complete_graph(4)
+ epsilon=0.1
+ for solver in ['full','lu','cg']:
+ b=approximate_cfbc(G,normalized=False,solver=solver,
+ epsilon=0.5*epsilon)
+ b_answer={0: 0.75, 1: 0.75, 2: 0.75, 3: 0.75}
+ for n in sorted(G):
+ assert_allclose(b[n],b_answer[n],atol=epsilon)
+
+
+
+
+
+class TestWeightedFlowBetweennessCentrality(object):
+ pass
+
+
+class TestEdgeFlowBetweennessCentrality(object):
+ numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
+ @classmethod
+ def setupClass(cls):
+ global np
+ try:
+ import numpy as np
+ import scipy
+ except ImportError:
+ raise SkipTest('NumPy not available.')
+
+    def test_K4_normalized(self):
+        """Edge flow betweenness centrality: K4 normalized"""
+ G=networkx.complete_graph(4)
+ b=edge_current_flow(G,normalized=True)
+ b_answer=dict.fromkeys(G.edges(),0.25)
+ for (s,t),v1 in b_answer.items():
+ v2=b.get((s,t),b.get((t,s)))
+ assert_almost_equal(v1,v2)
+
+    def test_K4(self):
+        """Edge flow betweenness centrality: K4"""
+ G=networkx.complete_graph(4)
+ b=edge_current_flow(G,normalized=False)
+ b_answer=dict.fromkeys(G.edges(),0.75)
+ for (s,t),v1 in b_answer.items():
+ v2=b.get((s,t),b.get((t,s)))
+ assert_almost_equal(v1,v2)
+
+ def test_C4(self):
+ """Edge flow betweenness centrality: C4"""
+ G=networkx.cycle_graph(4)
+ b=edge_current_flow(G,normalized=False)
+ b_answer={(0, 1):1.25,(0, 3):1.25, (1, 2):1.25, (2, 3): 1.25}
+ for (s,t),v1 in b_answer.items():
+ v2=b.get((s,t),b.get((t,s)))
+ assert_almost_equal(v1,v2)
+
+
+ def test_P4(self):
+ """Edge betweenness centrality: P4"""
+ G=networkx.path_graph(4)
+ b=edge_current_flow(G,normalized=False)
+ b_answer={(0, 1):1.5,(1, 2):2.0, (2, 3):1.5}
+ for (s,t),v1 in b_answer.items():
+ v2=b.get((s,t),b.get((t,s)))
+ assert_almost_equal(v1,v2)
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality_subset.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality_subset.py
new file mode 100644
index 0000000..4c7acd6
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality_subset.py
@@ -0,0 +1,181 @@
+#!/usr/bin/env python
+from nose.tools import *
+from nose import SkipTest
+import networkx
+from nose.plugins.attrib import attr
+
+from networkx import edge_current_flow_betweenness_centrality \
+ as edge_current_flow
+
+from networkx import edge_current_flow_betweenness_centrality_subset \
+ as edge_current_flow_subset
+
+class TestFlowBetweennessCentrality(object):
+ numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
+ @classmethod
+ def setupClass(cls):
+ global np
+ try:
+ import numpy as np
+ import scipy
+ except ImportError:
+ raise SkipTest('NumPy not available.')
+
+
+ def test_K4_normalized(self):
+ """Betweenness centrality: K4"""
+ G=networkx.complete_graph(4)
+ b=networkx.current_flow_betweenness_centrality_subset(G,
+ G.nodes(),
+ G.nodes(),
+ normalized=True)
+ b_answer=networkx.current_flow_betweenness_centrality(G,normalized=True)
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n])
+
+
+ def test_K4(self):
+ """Betweenness centrality: K4"""
+ G=networkx.complete_graph(4)
+ b=networkx.current_flow_betweenness_centrality_subset(G,
+ G.nodes(),
+ G.nodes(),
+ normalized=True)
+ b_answer=networkx.current_flow_betweenness_centrality(G,normalized=True)
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n])
+ # test weighted network
+ G.add_edge(0,1,{'weight':0.5,'other':0.3})
+ b=networkx.current_flow_betweenness_centrality_subset(G,
+ G.nodes(),
+ G.nodes(),
+ normalized=True,
+ weight=None)
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n])
+ b=networkx.current_flow_betweenness_centrality_subset(G,
+ G.nodes(),
+ G.nodes(),
+ normalized=True)
+ b_answer=networkx.current_flow_betweenness_centrality(G,normalized=True)
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n])
+ b=networkx.current_flow_betweenness_centrality_subset(G,
+ G.nodes(),
+ G.nodes(),
+ normalized=True,
+ weight='other')
+ b_answer=networkx.current_flow_betweenness_centrality(G,normalized=True,weight='other')
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n])
+
+
+ def test_P4_normalized(self):
+ """Betweenness centrality: P4 normalized"""
+ G=networkx.path_graph(4)
+ b=networkx.current_flow_betweenness_centrality_subset(G,
+ G.nodes(),
+ G.nodes(),
+ normalized=True)
+ b_answer=networkx.current_flow_betweenness_centrality(G,normalized=True)
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n])
+
+
+ def test_P4(self):
+ """Betweenness centrality: P4"""
+ G=networkx.path_graph(4)
+ b=networkx.current_flow_betweenness_centrality_subset(G,
+ G.nodes(),
+ G.nodes(),
+ normalized=True)
+ b_answer=networkx.current_flow_betweenness_centrality(G,normalized=True)
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n])
+
+ def test_star(self):
+ """Betweenness centrality: star """
+ G=networkx.Graph()
+ G.add_star(['a','b','c','d'])
+ b=networkx.current_flow_betweenness_centrality_subset(G,
+ G.nodes(),
+ G.nodes(),
+ normalized=True)
+ b_answer=networkx.current_flow_betweenness_centrality(G,normalized=True)
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n])
+
+
+
+# class TestWeightedFlowBetweennessCentrality():
+# pass
+
+
+class TestEdgeFlowBetweennessCentrality(object):
+ numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
+ @classmethod
+ def setupClass(cls):
+ global np
+ try:
+ import numpy as np
+ import scipy
+ except ImportError:
+ raise SkipTest('NumPy or SciPy not available.')
+
+ def test_K4_normalized(self):
+ """Betweenness centrality: K4"""
+ G=networkx.complete_graph(4)
+ b=edge_current_flow_subset(G,G.nodes(),G.nodes(),normalized=True)
+ b_answer=edge_current_flow(G,normalized=True)
+ for (s,t),v1 in b_answer.items():
+ v2=b.get((s,t),b.get((t,s)))
+ assert_almost_equal(v1,v2)
+
+ def test_K4(self):
+ """Betweenness centrality: K4"""
+ G=networkx.complete_graph(4)
+ b=edge_current_flow_subset(G,G.nodes(),G.nodes(),normalized=False)
+ b_answer=edge_current_flow(G,normalized=False)
+ for (s,t),v1 in b_answer.items():
+ v2=b.get((s,t),b.get((t,s)))
+ assert_almost_equal(v1,v2)
+ # test weighted network
+ G.add_edge(0,1,{'weight':0.5,'other':0.3})
+ b=edge_current_flow_subset(G,G.nodes(),G.nodes(),normalized=False,weight=None)
+ # weight is None => same as unweighted network
+ for (s,t),v1 in b_answer.items():
+ v2=b.get((s,t),b.get((t,s)))
+ assert_almost_equal(v1,v2)
+
+ b=edge_current_flow_subset(G,G.nodes(),G.nodes(),normalized=False)
+ b_answer=edge_current_flow(G,normalized=False)
+ for (s,t),v1 in b_answer.items():
+ v2=b.get((s,t),b.get((t,s)))
+ assert_almost_equal(v1,v2)
+
+ b=edge_current_flow_subset(G,G.nodes(),G.nodes(),normalized=False,weight='other')
+ b_answer=edge_current_flow(G,normalized=False,weight='other')
+ for (s,t),v1 in b_answer.items():
+ v2=b.get((s,t),b.get((t,s)))
+ assert_almost_equal(v1,v2)
+
+
+ def test_C4(self):
+ """Edge betweenness centrality: C4"""
+ G=networkx.cycle_graph(4)
+ b=edge_current_flow_subset(G,G.nodes(),G.nodes(),normalized=True)
+ b_answer=edge_current_flow(G,normalized=True)
+ for (s,t),v1 in b_answer.items():
+ v2=b.get((s,t),b.get((t,s)))
+ assert_almost_equal(v1,v2)
+
+
+ def test_P4(self):
+ """Edge betweenness centrality: P4"""
+ G=networkx.path_graph(4)
+ b=edge_current_flow_subset(G,G.nodes(),G.nodes(),normalized=True)
+ b_answer=edge_current_flow(G,normalized=True)
+ for (s,t),v1 in b_answer.items():
+ v2=b.get((s,t),b.get((t,s)))
+ assert_almost_equal(v1,v2)
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/tests/test_current_flow_closeness.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/tests/test_current_flow_closeness.py
new file mode 100644
index 0000000..28598d4
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/tests/test_current_flow_closeness.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+from nose.tools import *
+from nose import SkipTest
+import networkx
+
+class TestFlowClosenessCentrality(object):
+ numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
+ @classmethod
+ def setupClass(cls):
+ global np
+ try:
+ import numpy as np
+ import scipy
+ except ImportError:
+ raise SkipTest('NumPy or SciPy not available.')
+
+
+ def test_K4(self):
+ """Closeness centrality: K4"""
+ G=networkx.complete_graph(4)
+ b=networkx.current_flow_closeness_centrality(G,normalized=True)
+ b_answer={0: 2.0, 1: 2.0, 2: 2.0, 3: 2.0}
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n])
+
+
+ def test_P4_normalized(self):
+ """Closeness centrality: P4 normalized"""
+ G=networkx.path_graph(4)
+ b=networkx.current_flow_closeness_centrality(G,normalized=True)
+ b_answer={0: 1./2, 1: 3./4, 2: 3./4, 3:1./2}
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n])
+
+
+ def test_P4(self):
+ """Closeness centrality: P4"""
+ G=networkx.path_graph(4)
+ b=networkx.current_flow_closeness_centrality(G,normalized=False)
+ b_answer={0: 1.0/6, 1: 1.0/4, 2: 1.0/4, 3:1.0/6}
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n])
+
+ def test_star(self):
+ """Closeness centrality: star """
+ G=networkx.Graph()
+ G.add_star(['a','b','c','d'])
+ b=networkx.current_flow_closeness_centrality(G,normalized=True)
+ b_answer={'a': 1.0, 'b': 0.6, 'c': 0.6, 'd':0.6}
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n])
+
+
+
+class TestWeightedFlowClosenessCentrality(object):
+ pass
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/tests/test_degree_centrality.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/tests/test_degree_centrality.py
new file mode 100644
index 0000000..9109ddc
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/tests/test_degree_centrality.py
@@ -0,0 +1,92 @@
+"""
+ Unit tests for degree centrality.
+"""
+
+from nose.tools import *
+
+import networkx as nx
+
+
+class TestDegreeCentrality:
+ def __init__(self):
+
+ self.K = nx.krackhardt_kite_graph()
+ self.P3 = nx.path_graph(3)
+ self.K5 = nx.complete_graph(5)
+
+ F = nx.Graph() # Florentine families
+ F.add_edge('Acciaiuoli','Medici')
+ F.add_edge('Castellani','Peruzzi')
+ F.add_edge('Castellani','Strozzi')
+ F.add_edge('Castellani','Barbadori')
+ F.add_edge('Medici','Barbadori')
+ F.add_edge('Medici','Ridolfi')
+ F.add_edge('Medici','Tornabuoni')
+ F.add_edge('Medici','Albizzi')
+ F.add_edge('Medici','Salviati')
+ F.add_edge('Salviati','Pazzi')
+ F.add_edge('Peruzzi','Strozzi')
+ F.add_edge('Peruzzi','Bischeri')
+ F.add_edge('Strozzi','Ridolfi')
+ F.add_edge('Strozzi','Bischeri')
+ F.add_edge('Ridolfi','Tornabuoni')
+ F.add_edge('Tornabuoni','Guadagni')
+ F.add_edge('Albizzi','Ginori')
+ F.add_edge('Albizzi','Guadagni')
+ F.add_edge('Bischeri','Guadagni')
+ F.add_edge('Guadagni','Lamberteschi')
+ self.F = F
+
+ G = nx.DiGraph()
+ G.add_edge(0,5)
+ G.add_edge(1,5)
+ G.add_edge(2,5)
+ G.add_edge(3,5)
+ G.add_edge(4,5)
+ G.add_edge(5,6)
+ G.add_edge(5,7)
+ G.add_edge(5,8)
+ self.G = G
+
+ def test_degree_centrality_1(self):
+ d = nx.degree_centrality(self.K5)
+ exact = dict(zip(range(5), [1]*5))
+ for n,dc in d.items():
+ assert_almost_equal(exact[n], dc)
+
+ def test_degree_centrality_2(self):
+ d = nx.degree_centrality(self.P3)
+ exact = {0:0.5, 1:1, 2:0.5}
+ for n,dc in d.items():
+ assert_almost_equal(exact[n], dc)
+
+ def test_degree_centrality_3(self):
+ d = nx.degree_centrality(self.K)
+ exact = {0:.444, 1:.444, 2:.333, 3:.667, 4:.333,
+ 5:.556, 6:.556, 7:.333, 8:.222, 9:.111}
+ for n,dc in d.items():
+ assert_almost_equal(exact[n], float("%5.3f" % dc))
+
+ def test_degree_centrality_4(self):
+ d = nx.degree_centrality(self.F)
+ names = sorted(self.F.nodes())
+ dcs = [0.071, 0.214, 0.143, 0.214, 0.214, 0.071, 0.286,
+ 0.071, 0.429, 0.071, 0.214, 0.214, 0.143, 0.286, 0.214]
+ exact = dict(zip(names, dcs))
+ for n,dc in d.items():
+ assert_almost_equal(exact[n], float("%5.3f" % dc))
+
+ def test_indegree_centrality(self):
+ d = nx.in_degree_centrality(self.G)
+ exact = {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0,
+ 5: 0.625, 6: 0.125, 7: 0.125, 8: 0.125}
+ for n,dc in d.items():
+ assert_almost_equal(exact[n], dc)
+
+ def test_outdegree_centrality(self):
+ d = nx.out_degree_centrality(self.G)
+ exact = {0: 0.125, 1: 0.125, 2: 0.125, 3: 0.125,
+ 4: 0.125, 5: 0.375, 6: 0.0, 7: 0.0, 8: 0.0}
+ for n,dc in d.items():
+ assert_almost_equal(exact[n], dc)
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/tests/test_eigenvector_centrality.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/tests/test_eigenvector_centrality.py
new file mode 100644
index 0000000..22b859c
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/tests/test_eigenvector_centrality.py
@@ -0,0 +1,123 @@
+#!/usr/bin/env python
+import math
+from nose import SkipTest
+from nose.tools import *
+import networkx
+
+class TestEigenvectorCentrality(object):
+ numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
+ @classmethod
+ def setupClass(cls):
+ global np
+ try:
+ import numpy as np
+ except ImportError:
+ raise SkipTest('NumPy not available.')
+
+ def test_K5(self):
+ """Eigenvector centrality: K5"""
+ G=networkx.complete_graph(5)
+ b=networkx.eigenvector_centrality(G)
+ v=math.sqrt(1/5.0)
+ b_answer=dict.fromkeys(G,v)
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n])
+ nstart = dict([(n,1) for n in G])
+ b=networkx.eigenvector_centrality(G,nstart=nstart)
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n])
+
+
+ b=networkx.eigenvector_centrality_numpy(G)
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n],places=3)
+
+
+ def test_P3(self):
+ """Eigenvector centrality: P3"""
+ G=networkx.path_graph(3)
+ b_answer={0: 0.5, 1: 0.7071, 2: 0.5}
+ b=networkx.eigenvector_centrality_numpy(G)
+ for n in sorted(G):
+ assert_almost_equal(b[n],b_answer[n],places=4)
+
+
+ @raises(networkx.NetworkXError)
+ def test_maxiter(self):
+ G=networkx.path_graph(3)
+ b=networkx.eigenvector_centrality(G,max_iter=0)
+
+class TestEigenvectorCentralityDirected(object):
+ numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
+ @classmethod
+ def setupClass(cls):
+ global np
+ try:
+ import numpy as np
+ except ImportError:
+ raise SkipTest('NumPy not available.')
+
+ def setUp(self):
+
+ G=networkx.DiGraph()
+
+ edges=[(1,2),(1,3),(2,4),(3,2),(3,5),(4,2),(4,5),(4,6),\
+ (5,6),(5,7),(5,8),(6,8),(7,1),(7,5),\
+ (7,8),(8,6),(8,7)]
+
+ G.add_edges_from(edges,weight=2.0)
+ self.G=G
+ self.G.evc=[0.25368793, 0.19576478, 0.32817092, 0.40430835,
+ 0.48199885, 0.15724483, 0.51346196, 0.32475403]
+
+ H=networkx.DiGraph()
+
+ edges=[(1,2),(1,3),(2,4),(3,2),(3,5),(4,2),(4,5),(4,6),\
+ (5,6),(5,7),(5,8),(6,8),(7,1),(7,5),\
+ (7,8),(8,6),(8,7)]
+
+ H.add_edges_from(edges)
+ self.H=H
+ self.H.evc=[0.25368793, 0.19576478, 0.32817092, 0.40430835,
+ 0.48199885, 0.15724483, 0.51346196, 0.32475403]
+
+
+ def test_eigenvector_centrality_weighted(self):
+ G=self.G
+ p=networkx.eigenvector_centrality_numpy(G)
+ for (a,b) in zip(list(p.values()),self.G.evc):
+ assert_almost_equal(a,b)
+
+ def test_eigenvector_centrality_unweighted(self):
+ G=self.H
+ p=networkx.eigenvector_centrality_numpy(G)
+ for (a,b) in zip(list(p.values()),self.H.evc):
+ assert_almost_equal(a,b)
+
+
+class TestEigenvectorCentralityExceptions(object):
+ numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
+ @classmethod
+ def setupClass(cls):
+ global np
+ try:
+ import numpy as np
+ except ImportError:
+ raise SkipTest('NumPy not available.')
+ @raises(networkx.NetworkXException)
+ def test_multigraph(self):
+ e = networkx.eigenvector_centrality(networkx.MultiGraph())
+
+ @raises(networkx.NetworkXException)
+ def test_multigraph_numpy(self):
+ e = networkx.eigenvector_centrality_numpy(networkx.MultiGraph())
+
+
+ @raises(networkx.NetworkXException)
+ def test_empty(self):
+ e = networkx.eigenvector_centrality(networkx.Graph())
+
+ @raises(networkx.NetworkXException)
+ def test_empty_numpy(self):
+ e = networkx.eigenvector_centrality_numpy(networkx.Graph())
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/tests/test_katz_centrality.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/tests/test_katz_centrality.py
new file mode 100644
index 0000000..9e8e6d2
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/tests/test_katz_centrality.py
@@ -0,0 +1,289 @@
+# -*- coding: utf-8 -*-
+import math
+from nose import SkipTest
+from nose.tools import *
+import networkx
+
+class TestKatzCentrality(object):
+
+ def test_K5(self):
+ """Katz centrality: K5"""
+ G = networkx.complete_graph(5)
+ alpha = 0.1
+ b = networkx.katz_centrality(G, alpha)
+ v = math.sqrt(1 / 5.0)
+ b_answer = dict.fromkeys(G, v)
+ for n in sorted(G):
+ assert_almost_equal(b[n], b_answer[n])
+ nstart = dict([(n, 1) for n in G])
+ b = networkx.katz_centrality(G, alpha, nstart=nstart)
+ for n in sorted(G):
+ assert_almost_equal(b[n], b_answer[n])
+
+ def test_P3(self):
+ """Katz centrality: P3"""
+ alpha = 0.1
+ G = networkx.path_graph(3)
+ b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449,
+ 2: 0.5598852584152162}
+ b = networkx.katz_centrality(G, alpha)
+ for n in sorted(G):
+ assert_almost_equal(b[n], b_answer[n], places=4)
+
+ @raises(networkx.NetworkXError)
+ def test_maxiter(self):
+ alpha = 0.1
+ G = networkx.path_graph(3)
+ b = networkx.katz_centrality(G, alpha, max_iter=0)
+
+ def test_beta_as_scalar(self):
+ alpha = 0.1
+ beta = 0.1
+ b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449,
+ 2: 0.5598852584152162}
+ G = networkx.path_graph(3)
+ b = networkx.katz_centrality(G, alpha, beta)
+ for n in sorted(G):
+ assert_almost_equal(b[n], b_answer[n], places=4)
+
+ def test_beta_as_dict(self):
+ alpha = 0.1
+ beta = {0: 1.0, 1: 1.0, 2: 1.0}
+ b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449,
+ 2: 0.5598852584152162}
+ G = networkx.path_graph(3)
+ b = networkx.katz_centrality(G, alpha, beta)
+ for n in sorted(G):
+ assert_almost_equal(b[n], b_answer[n], places=4)
+
+
+ def test_multiple_alpha(self):
+ alpha_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]
+ for alpha in alpha_list:
+ b_answer = {0.1: {0: 0.5598852584152165, 1: 0.6107839182711449,
+ 2: 0.5598852584152162},
+ 0.2: {0: 0.5454545454545454, 1: 0.6363636363636365,
+ 2: 0.5454545454545454},
+ 0.3: {0: 0.5333964609104419, 1: 0.6564879518897746,
+ 2: 0.5333964609104419},
+ 0.4: {0: 0.5232045649263551, 1: 0.6726915834767423,
+ 2: 0.5232045649263551},
+ 0.5: {0: 0.5144957746691622, 1: 0.6859943117075809,
+ 2: 0.5144957746691622},
+ 0.6: {0: 0.5069794004195823, 1: 0.6970966755769258,
+ 2: 0.5069794004195823}}
+ G = networkx.path_graph(3)
+ b = networkx.katz_centrality(G, alpha)
+ for n in sorted(G):
+ assert_almost_equal(b[n], b_answer[alpha][n], places=4)
+
+ @raises(networkx.NetworkXException)
+ def test_multigraph(self):
+ e = networkx.katz_centrality(networkx.MultiGraph(), 0.1)
+
+ def test_empty(self):
+ e = networkx.katz_centrality(networkx.Graph(), 0.1)
+ assert_equal(e, {})
+
+ @raises(networkx.NetworkXException)
+ def test_bad_beta(self):
+ G = networkx.Graph([(0,1)])
+ beta = {0:77}
+ e = networkx.katz_centrality(G, 0.1,beta=beta)
+
+ @raises(networkx.NetworkXException)
+ def test_bad_beta_number(self):
+ G = networkx.Graph([(0,1)])
+ e = networkx.katz_centrality(G, 0.1,beta='foo')
+
+
+class TestKatzCentralityNumpy(object):
+ numpy = 1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
+ @classmethod
+ def setupClass(cls):
+ global np
+ try:
+ import numpy as np
+ except ImportError:
+ raise SkipTest('NumPy not available.')
+
+ def test_K5(self):
+ """Katz centrality: K5"""
+ G = networkx.complete_graph(5)
+ alpha = 0.1
+ b = networkx.katz_centrality_numpy(G, alpha)
+ v = math.sqrt(1 / 5.0)
+ b_answer = dict.fromkeys(G, v)
+ for n in sorted(G):
+ assert_almost_equal(b[n], b_answer[n])
+ # cross-check: on K5 the normalized eigenvector centrality coincides
+ # with the Katz values by symmetry
+ b = networkx.eigenvector_centrality_numpy(G)
+ for n in sorted(G):
+ assert_almost_equal(b[n], b_answer[n], places=3)
+
+ def test_P3(self):
+ """Katz centrality: P3"""
+ alpha = 0.1
+ G = networkx.path_graph(3)
+ b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449,
+ 2: 0.5598852584152162}
+ b = networkx.katz_centrality_numpy(G, alpha)
+ for n in sorted(G):
+ assert_almost_equal(b[n], b_answer[n], places=4)
+
+
+ def test_beta_as_scalar(self):
+ alpha = 0.1
+ beta = 0.1
+ b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449,
+ 2: 0.5598852584152162}
+ G = networkx.path_graph(3)
+ b = networkx.katz_centrality_numpy(G, alpha, beta)
+ for n in sorted(G):
+ assert_almost_equal(b[n], b_answer[n], places=4)
+
+
+ def test_beta_as_dict(self):
+ alpha = 0.1
+ beta = {0: 1.0, 1: 1.0, 2: 1.0}
+ b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449,
+ 2: 0.5598852584152162}
+ G = networkx.path_graph(3)
+ b = networkx.katz_centrality_numpy(G, alpha, beta)
+ for n in sorted(G):
+ assert_almost_equal(b[n], b_answer[n], places=4)
+
+
+ def test_multiple_alpha(self):
+ alpha_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]
+ for alpha in alpha_list:
+ b_answer = {0.1: {0: 0.5598852584152165, 1: 0.6107839182711449,
+ 2: 0.5598852584152162},
+ 0.2: {0: 0.5454545454545454, 1: 0.6363636363636365,
+ 2: 0.5454545454545454},
+ 0.3: {0: 0.5333964609104419, 1: 0.6564879518897746,
+ 2: 0.5333964609104419},
+ 0.4: {0: 0.5232045649263551, 1: 0.6726915834767423,
+ 2: 0.5232045649263551},
+ 0.5: {0: 0.5144957746691622, 1: 0.6859943117075809,
+ 2: 0.5144957746691622},
+ 0.6: {0: 0.5069794004195823, 1: 0.6970966755769258,
+ 2: 0.5069794004195823}}
+ G = networkx.path_graph(3)
+ b = networkx.katz_centrality_numpy(G, alpha)
+ for n in sorted(G):
+ assert_almost_equal(b[n], b_answer[alpha][n], places=4)
+
+ @raises(networkx.NetworkXException)
+ def test_multigraph(self):
+ e = networkx.katz_centrality(networkx.MultiGraph(), 0.1)
+
+ def test_empty(self):
+ e = networkx.katz_centrality(networkx.Graph(), 0.1)
+ assert_equal(e, {})
+
+ @raises(networkx.NetworkXException)
+ def test_bad_beta(self):
+ G = networkx.Graph([(0,1)])
+ beta = {0:77}
+ e = networkx.katz_centrality_numpy(G, 0.1,beta=beta)
+
+ @raises(networkx.NetworkXException)
+ def test_bad_beta_number(self):
+ G = networkx.Graph([(0,1)])
+ e = networkx.katz_centrality_numpy(G, 0.1,beta='foo')
+
+
+class TestKatzCentralityDirected(object):
+ def setUp(self):
+ G = networkx.DiGraph()
+ edges = [(1, 2),(1, 3),(2, 4),(3, 2),(3, 5),(4, 2),(4, 5),(4, 6),(5, 6),
+ (5, 7),(5, 8),(6, 8),(7, 1),(7, 5),(7, 8),(8, 6),(8, 7)]
+ G.add_edges_from(edges, weight=2.0)
+ self.G = G
+ self.G.alpha = 0.1
+ self.G.evc = [
+ 0.3289589783189635,
+ 0.2832077296243516,
+ 0.3425906003685471,
+ 0.3970420865198392,
+ 0.41074871061646284,
+ 0.272257430756461,
+ 0.4201989685435462,
+ 0.34229059218038554,
+ ]
+
+ H = networkx.DiGraph(edges)
+ self.H = H
+ self.H.alpha = 0.1
+ self.H.evc = [
+ 0.3289589783189635,
+ 0.2832077296243516,
+ 0.3425906003685471,
+ 0.3970420865198392,
+ 0.41074871061646284,
+ 0.272257430756461,
+ 0.4201989685435462,
+ 0.34229059218038554,
+ ]
+
+ def test_eigenvector_centrality_weighted(self):
+ G = self.G
+ alpha = self.G.alpha
+ p = networkx.katz_centrality(G, alpha)
+ for (a, b) in zip(list(p.values()), self.G.evc):
+ assert_almost_equal(a, b)
+
+ def test_eigenvector_centrality_unweighted(self):
+ G = self.H
+ alpha = self.H.alpha
+ p = networkx.katz_centrality(G, alpha)
+ for (a, b) in zip(list(p.values()), self.H.evc):
+ assert_almost_equal(a, b)
+
+
+class TestKatzCentralityDirectedNumpy(TestKatzCentralityDirected):
+ numpy = 1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
+
+ @classmethod
+ def setupClass(cls):
+ global np
+ try:
+ import numpy as np
+ except ImportError:
+ raise SkipTest('NumPy not available.')
+
+ def test_eigenvector_centrality_weighted(self):
+ G = self.G
+ alpha = self.G.alpha
+ p = networkx.katz_centrality_numpy(G, alpha)
+ for (a, b) in zip(list(p.values()), self.G.evc):
+ assert_almost_equal(a, b)
+
+ def test_eigenvector_centrality_unweighted(self):
+ G = self.H
+ alpha = self.H.alpha
+ p = networkx.katz_centrality_numpy(G, alpha)
+ for (a, b) in zip(list(p.values()), self.H.evc):
+ assert_almost_equal(a, b)
+
+class TestKatzEigenvectorVKatz(object):
+ numpy = 1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
+
+ @classmethod
+ def setupClass(cls):
+ global np
+ global eigvals
+ try:
+ import numpy as np
+ from numpy.linalg import eigvals
+ except ImportError:
+ raise SkipTest('NumPy not available.')
+
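+ # At alpha = 1/lambda_max the Katz system (I - alpha*A) x = beta*1 is
+ # (numerically) singular and its solution is dominated by the principal
+ # eigenvector, so Katz and eigenvector centrality should agree here.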
+ def test_eigenvector_v_katz_random(self):
+ G = networkx.gnp_random_graph(10,0.5)
+ l = float(max(eigvals(networkx.adjacency_matrix(G))))
+ e = networkx.eigenvector_centrality_numpy(G)
+ k = networkx.katz_centrality_numpy(G, 1.0/l)
+ for n in G:
+ assert_almost_equal(e[n], k[n])
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/tests/test_load_centrality.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/tests/test_load_centrality.py
new file mode 100644
index 0000000..8939d26
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/centrality/tests/test_load_centrality.py
@@ -0,0 +1,273 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx as nx
+
+
+class TestLoadCentrality:
+
+ def setUp(self):
+
+ G=nx.Graph()
+ G.add_edge(0,1,weight=3)
+ G.add_edge(0,2,weight=2)
+ G.add_edge(0,3,weight=6)
+ G.add_edge(0,4,weight=4)
+ G.add_edge(1,3,weight=5)
+ G.add_edge(1,5,weight=5)
+ G.add_edge(2,4,weight=1)
+ G.add_edge(3,4,weight=2)
+ G.add_edge(3,5,weight=1)
+ G.add_edge(4,5,weight=4)
+ self.G=G
+ self.exact_weighted={0: 4.0, 1: 0.0, 2: 8.0, 3: 6.0, 4: 8.0, 5: 0.0}
+ self.K = nx.krackhardt_kite_graph()
+ self.P3 = nx.path_graph(3)
+ self.P4 = nx.path_graph(4)
+ self.K5 = nx.complete_graph(5)
+
+ self.C4=nx.cycle_graph(4)
+ self.T=nx.balanced_tree(r=2, h=2)
+ self.Gb = nx.Graph()
+ self.Gb.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3),
+ (2, 4), (4, 5), (3, 5)])
+ self.F = nx.florentine_families_graph()
+ self.D = nx.cycle_graph(3, create_using=nx.DiGraph())
+ self.D.add_edges_from([(3, 0), (4, 3)])
+
+ def test_not_strongly_connected(self):
+ b = nx.load_centrality(self.D)
+ result = {0: 5./12,
+ 1: 1./4,
+ 2: 1./12,
+ 3: 1./4,
+ 4: 0.000}
+ for n in sorted(self.D):
+ assert_almost_equal(result[n], b[n], places=3)
+ assert_almost_equal(result[n], nx.load_centrality(self.D, n), places=3)
+
+ def test_weighted_load(self):
+ b=nx.load_centrality(self.G,weight='weight',normalized=False)
+ for n in sorted(self.G):
+ assert_equal(b[n],self.exact_weighted[n])
+
+ def test_k5_load(self):
+ G=self.K5
+ c=nx.load_centrality(G)
+ d={0: 0.000,
+ 1: 0.000,
+ 2: 0.000,
+ 3: 0.000,
+ 4: 0.000}
+ for n in sorted(G):
+ assert_almost_equal(c[n],d[n],places=3)
+
+ def test_p3_load(self):
+ G=self.P3
+ c=nx.load_centrality(G)
+ d={0: 0.000,
+ 1: 1.000,
+ 2: 0.000}
+ for n in sorted(G):
+ assert_almost_equal(c[n],d[n],places=3)
+ c=nx.load_centrality(G,v=1)
+ assert_almost_equal(c,1.0)
+ c=nx.load_centrality(G,v=1,normalized=True)
+ assert_almost_equal(c,1.0)
+
+
+ def test_p2_load(self):
+ G=nx.path_graph(2)
+ c=nx.load_centrality(G)
+ d={0: 0.000,
+ 1: 0.000}
+ for n in sorted(G):
+ assert_almost_equal(c[n],d[n],places=3)
+
+
+ def test_krackhardt_load(self):
+ G=self.K
+ c=nx.load_centrality(G)
+ d={0: 0.023,
+ 1: 0.023,
+ 2: 0.000,
+ 3: 0.102,
+ 4: 0.000,
+ 5: 0.231,
+ 6: 0.231,
+ 7: 0.389,
+ 8: 0.222,
+ 9: 0.000}
+ for n in sorted(G):
+ assert_almost_equal(c[n],d[n],places=3)
+
+ def test_florentine_families_load(self):
+ G=self.F
+ c=nx.load_centrality(G)
+ d={'Acciaiuoli': 0.000,
+ 'Albizzi': 0.211,
+ 'Barbadori': 0.093,
+ 'Bischeri': 0.104,
+ 'Castellani': 0.055,
+ 'Ginori': 0.000,
+ 'Guadagni': 0.251,
+ 'Lamberteschi': 0.000,
+ 'Medici': 0.522,
+ 'Pazzi': 0.000,
+ 'Peruzzi': 0.022,
+ 'Ridolfi': 0.117,
+ 'Salviati': 0.143,
+ 'Strozzi': 0.106,
+ 'Tornabuoni': 0.090}
+ for n in sorted(G):
+ assert_almost_equal(c[n],d[n],places=3)
+
+
+ def test_unnormalized_k5_load(self):
+ G=self.K5
+ c=nx.load_centrality(G,normalized=False)
+ d={0: 0.000,
+ 1: 0.000,
+ 2: 0.000,
+ 3: 0.000,
+ 4: 0.000}
+ for n in sorted(G):
+ assert_almost_equal(c[n],d[n],places=3)
+
+ def test_unnormalized_p3_load(self):
+ G=self.P3
+ c=nx.load_centrality(G,normalized=False)
+ d={0: 0.000,
+ 1: 2.000,
+ 2: 0.000}
+ for n in sorted(G):
+ assert_almost_equal(c[n],d[n],places=3)
+
+
+ def test_unnormalized_krackhardt_load(self):
+ G=self.K
+ c=nx.load_centrality(G,normalized=False)
+ d={0: 1.667,
+ 1: 1.667,
+ 2: 0.000,
+ 3: 7.333,
+ 4: 0.000,
+ 5: 16.667,
+ 6: 16.667,
+ 7: 28.000,
+ 8: 16.000,
+ 9: 0.000}
+
+ for n in sorted(G):
+ assert_almost_equal(c[n],d[n],places=3)
+
+ def test_unnormalized_florentine_families_load(self):
+ G=self.F
+ c=nx.load_centrality(G,normalized=False)
+
+ d={'Acciaiuoli': 0.000,
+ 'Albizzi': 38.333,
+ 'Barbadori': 17.000,
+ 'Bischeri': 19.000,
+ 'Castellani': 10.000,
+ 'Ginori': 0.000,
+ 'Guadagni': 45.667,
+ 'Lamberteschi': 0.000,
+ 'Medici': 95.000,
+ 'Pazzi': 0.000,
+ 'Peruzzi': 4.000,
+ 'Ridolfi': 21.333,
+ 'Salviati': 26.000,
+ 'Strozzi': 19.333,
+ 'Tornabuoni': 16.333}
+ for n in sorted(G):
+ assert_almost_equal(c[n],d[n],places=3)
+
+
+ def test_load_betweenness_difference(self):
+ # Difference Between Load and Betweenness
+ # --------------------------------------- The smallest graph
+ # that shows the difference between load and betweenness is
+ # G=ladder_graph(3) (Graph B below)
+
+ # Graph A and B are from Tao Zhou, Jian-Guo Liu, Bing-Hong
+ # Wang: Comment on ``Scientific collaboration
+ # networks. II. Shortest paths, weighted networks, and
+ # centrality". http://arxiv.org/pdf/physics/0511084
+
+ # Notice that, unlike here, their calculation adds 1 to the
+ # betweenness of every node i for every path from i to every
+ # other node. This is exactly what it should be, based on
+ # Eqn. (1) in their paper: the eqn is B(v) = \sum_{s\neq t,
+ # s\neq v}{\frac{\sigma_{st}(v)}{\sigma_{st}}}; therefore,
+ # they allow v to be the target node.
+
+ # We follow Brandes 2001, who follows Freeman 1977 in making
+ # the sum for the betweenness of v exclude paths where v is
+ # either the source or target node. To agree with their numbers,
+ # we must additionally remove edge (4,8) from the graph; see the
+ # commented-out graph A example below (there is a mistake in the
+ # figure in their paper - personal communication).
+
+ # A = nx.Graph()
+ # A.add_edges_from([(0,1), (1,2), (1,3), (2,4),
+ # (3,5), (4,6), (4,7), (4,8),
+ # (5,8), (6,9), (7,9), (8,9)])
+ B = nx.Graph() # ladder_graph(3)
+ B.add_edges_from([(0,1), (0,2), (1,3), (2,3), (2,4), (4,5), (3,5)])
+ c = nx.load_centrality(B,normalized=False)
+ d={0: 1.750,
+ 1: 1.750,
+ 2: 6.500,
+ 3: 6.500,
+ 4: 1.750,
+ 5: 1.750}
+ for n in sorted(B):
+ assert_almost_equal(c[n],d[n],places=3)
+
+
+ def test_c4_edge_load(self):
+ G=self.C4
+ c = nx.edge_load(G)
+ d={(0, 1): 6.000,
+ (0, 3): 6.000,
+ (1, 2): 6.000,
+ (2, 3): 6.000}
+ for n in G.edges():
+ assert_almost_equal(c[n],d[n],places=3)
+
+ def test_p4_edge_load(self):
+ G=self.P4
+ c = nx.edge_load(G)
+ d={(0, 1): 6.000,
+ (1, 2): 8.000,
+ (2, 3): 6.000}
+ for n in G.edges():
+ assert_almost_equal(c[n],d[n],places=3)
+
+ def test_k5_edge_load(self):
+ G=self.K5
+ c = nx.edge_load(G)
+ d={(0, 1): 5.000,
+ (0, 2): 5.000,
+ (0, 3): 5.000,
+ (0, 4): 5.000,
+ (1, 2): 5.000,
+ (1, 3): 5.000,
+ (1, 4): 5.000,
+ (2, 3): 5.000,
+ (2, 4): 5.000,
+ (3, 4): 5.000}
+ for n in G.edges():
+ assert_almost_equal(c[n],d[n],places=3)
+
+ def test_tree_edge_load(self):
+ G=self.T
+ c = nx.edge_load(G)
+ d={(0, 1): 24.000,
+ (0, 2): 24.000,
+ (1, 3): 12.000,
+ (1, 4): 12.000,
+ (2, 5): 12.000,
+ (2, 6): 12.000}
+ for n in G.edges():
+ assert_almost_equal(c[n],d[n],places=3)
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/chordal/__init__.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/chordal/__init__.py
new file mode 100644
index 0000000..cf8e951
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/chordal/__init__.py
@@ -0,0 +1,3 @@
+from networkx.algorithms.chordal.chordal_alg import *
+
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/chordal/chordal_alg.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/chordal/chordal_alg.py
new file mode 100644
index 0000000..8eb6404
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/chordal/chordal_alg.py
@@ -0,0 +1,347 @@
+# -*- coding: utf-8 -*-
+"""
+Algorithms for chordal graphs.
+
+A graph is chordal if every cycle of length at least 4 has a chord
+(an edge joining two nodes not adjacent in the cycle).
+http://en.wikipedia.org/wiki/Chordal_graph
+"""
+import networkx as nx
+import random
+import sys
+
+__authors__ = "\n".join(['Jesus Cerquides <cerquide@iiia.csic.es>'])
+# Copyright (C) 2010 by
+# Jesus Cerquides <cerquide@iiia.csic.es>
+# All rights reserved.
+# BSD license.
+
+__all__ = ['is_chordal',
+ 'find_induced_nodes',
+ 'chordal_graph_cliques',
+ 'chordal_graph_treewidth',
+ 'NetworkXTreewidthBoundExceeded']
+
+class NetworkXTreewidthBoundExceeded(nx.NetworkXException):
+ """Exception raised when a treewidth bound has been provided and it has
+ been exceeded"""
+
+
+def is_chordal(G):
+ """Checks whether G is a chordal graph.
+
+ A graph is chordal if every cycle of length at least 4 has a chord
+ (an edge joining two nodes not adjacent in the cycle).
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph.
+
+ Returns
+ -------
+ chordal : bool
+ True if G is a chordal graph and False otherwise.
+
+ Raises
+ ------
+ NetworkXError
+ The algorithm does not support DiGraph, MultiGraph and MultiDiGraph.
+ If the input graph is an instance of one of these classes, a
+ NetworkXError is raised.
+
+ Examples
+ --------
+ >>> import networkx as nx
+ >>> e=[(1,2),(1,3),(2,3),(2,4),(3,4),(3,5),(3,6),(4,5),(4,6),(5,6)]
+ >>> G=nx.Graph(e)
+ >>> nx.is_chordal(G)
+ True
+
+ Notes
+ -----
+ The routine tries to go through every node following maximum cardinality
+ search. It returns False when it finds that the separator for any node
+ is not a clique. Based on the algorithms in [1]_.
+
+ References
+ ----------
+ .. [1] R. E. Tarjan and M. Yannakakis, Simple linear-time algorithms
+ to test chordality of graphs, test acyclicity of hypergraphs, and
+ selectively reduce acyclic hypergraphs, SIAM J. Comput., 13 (1984),
+ pp. 566–579.
+ """
+ if G.is_directed():
+ raise nx.NetworkXError('Directed graphs not supported')
+ if G.is_multigraph():
+ raise nx.NetworkXError('Multiply connected graphs not supported.')
+ return len(_find_chordality_breaker(G)) == 0
+
+def find_induced_nodes(G,s,t,treewidth_bound=sys.maxsize):
+ """Returns the set of induced nodes in the path from s to t.
+
+ Parameters
+ ----------
+ G : graph
+ A chordal NetworkX graph
+ s : node
+ Source node to look for induced nodes
+ t : node
+ Destination node to look for induced nodes
+ treewidth_bound: float
+ Maximum treewidth acceptable for the graph H. The search
+ for induced nodes will end as soon as the treewidth_bound is exceeded.
+
+ Returns
+ -------
+ I : Set of nodes
+ The set of induced nodes in the path from s to t in G
+
+ Raises
+ ------
+ NetworkXError
+ The algorithm does not support DiGraph, MultiGraph and MultiDiGraph.
+ If the input graph is an instance of one of these classes, a
+ NetworkXError is raised.
+ The algorithm can only be applied to chordal graphs. If
+ the input graph is found to be non-chordal, a NetworkXError is raised.
+
+ Examples
+ --------
+ >>> import networkx as nx
+ >>> G=nx.Graph()
+ >>> G = nx.generators.classic.path_graph(10)
+ >>> I = nx.find_induced_nodes(G,1,9,2)
+ >>> list(I)
+ [1, 2, 3, 4, 5, 6, 7, 8, 9]
+
+ Notes
+ -----
+ G must be a chordal graph and (s,t) an edge that is not in G.
+
+ If a treewidth_bound is provided, the search for induced nodes will end
+ as soon as the treewidth_bound is exceeded.
+
+ The algorithm is inspired by Algorithm 4 in [1]_.
+ A formal definition of induced node can also be found on that reference.
+
+ References
+ ----------
+ .. [1] Learning Bounded Treewidth Bayesian Networks.
+ Gal Elidan, Stephen Gould; JMLR, 9(Dec):2699--2731, 2008.
+ http://jmlr.csail.mit.edu/papers/volume9/elidan08a/elidan08a.pdf
+ """
+ if not is_chordal(G):
+ raise nx.NetworkXError("Input graph is not chordal.")
+
+ H = nx.Graph(G)
+ H.add_edge(s,t)
+ I = set()
+ triplet = _find_chordality_breaker(H,s,treewidth_bound)
+ while triplet:
+ (u,v,w) = triplet
+ I.update(triplet)
+ for n in triplet:
+ if n!=s:
+ H.add_edge(s,n)
+ triplet = _find_chordality_breaker(H,s,treewidth_bound)
+ if I:
+ # Add t and the second node in the induced path from s to t.
+ I.add(t)
+ for u in G[s]:
+ if len(I & set(G[u]))==2:
+ I.add(u)
+ break
+ return I
+
+def chordal_graph_cliques(G):
+ """Returns the set of maximal cliques of a chordal graph.
+
+ The algorithm breaks the graph into connected components and performs a
+ maximum cardinality search in each component to get the cliques.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph
+
+ Returns
+ -------
+ cliques : A set containing the maximal cliques in G.
+
+ Raises
+ ------
+ NetworkXError
+ The algorithm does not support DiGraph, MultiGraph and MultiDiGraph.
+ If the input graph is an instance of one of these classes, a
+ NetworkXError is raised.
+ The algorithm can only be applied to chordal graphs. If the
+ input graph is found to be non-chordal, a NetworkXError is raised.
+
+ Examples
+ --------
+ >>> import networkx as nx
+ >>> e= [(1,2),(1,3),(2,3),(2,4),(3,4),(3,5),(3,6),(4,5),(4,6),(5,6),(7,8)]
+ >>> G = nx.Graph(e)
+ >>> G.add_node(9)
+ >>> setlist = nx.chordal_graph_cliques(G)
+ """
+ if not is_chordal(G):
+ raise nx.NetworkXError("Input graph is not chordal.")
+
+ cliques = set()
+ for C in nx.connected.connected_component_subgraphs(G):
+ cliques |= _connected_chordal_graph_cliques(C)
+
+ return cliques
+
+
+def chordal_graph_treewidth(G):
+ """Returns the treewidth of the chordal graph G.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph
+
+ Returns
+ -------
+ treewidth : int
+ The size of the largest clique in the graph minus one.
+
+ Raises
+ ------
+ NetworkXError
+ The algorithm does not support DiGraph, MultiGraph and MultiDiGraph.
+ If the input graph is an instance of one of these classes, a
+ NetworkXError is raised.
+ The algorithm can only be applied to chordal graphs. If
+ the input graph is found to be non-chordal, a NetworkXError is raised.
+
+ Examples
+ --------
+ >>> import networkx as nx
+ >>> e = [(1,2),(1,3),(2,3),(2,4),(3,4),(3,5),(3,6),(4,5),(4,6),(5,6),(7,8)]
+ >>> G = nx.Graph(e)
+ >>> G.add_node(9)
+ >>> nx.chordal_graph_treewidth(G)
+ 3
+
+ References
+ ----------
+ .. [1] http://en.wikipedia.org/wiki/Tree_decomposition#Treewidth
+ """
+ if not is_chordal(G):
+ raise nx.NetworkXError("Input graph is not chordal.")
+
+ max_clique = -1
+ for clique in nx.chordal_graph_cliques(G):
+ max_clique = max(max_clique,len(clique))
+ return max_clique - 1
+
+def _is_complete_graph(G):
+ """Returns True if G is a complete graph."""
+ if G.number_of_selfloops()>0:
+ raise nx.NetworkXError("Self loop found in _is_complete_graph()")
+ n = G.number_of_nodes()
+ if n < 2:
+ return True
+ e = G.number_of_edges()
+ max_edges = ((n * (n-1))/2)
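+ # e.g. n = 4 gives max_edges = 4*3/2 = 6, exactly the edge count of K4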
+ return e == max_edges
+
+
+def _find_missing_edge(G):
+ """ Given a non-complete graph G, returns a missing edge."""
+ nodes=set(G)
+ for u in G:
+ missing=nodes-set(list(G[u].keys())+[u])
+ if missing:
+ return (u,missing.pop())
+
+
+def _max_cardinality_node(G,choices,wanna_connect):
+ """Returns a the node in choices that has more connections in G
+ to nodes in wanna_connect.
+ """
+# max_number = None
+ max_number = -1
+ for x in choices:
+ number=len([y for y in G[x] if y in wanna_connect])
+ if number > max_number:
+ max_number = number
+ max_cardinality_node = x
+ return max_cardinality_node
+
+
+def _find_chordality_breaker(G,s=None,treewidth_bound=sys.maxsize):
+ """ Given a graph G, starts a max cardinality search
+ (starting from s if s is given and from a random node otherwise)
+ trying to find a non-chordal cycle.
+
+ If it does find one, it returns (u,v,w) where u,v,w are the three
+ nodes that together with s are involved in the cycle.
+ """
+
+ unnumbered = set(G)
+ if s is None:
+ s = random.choice(list(unnumbered))
+ unnumbered.remove(s)
+ numbered = set([s])
+# current_treewidth = None
+ current_treewidth = -1
+ while unnumbered:# and current_treewidth <= treewidth_bound:
+ v = _max_cardinality_node(G,unnumbered,numbered)
+ unnumbered.remove(v)
+ numbered.add(v)
+ clique_wanna_be = set(G[v]) & numbered
+ sg = G.subgraph(clique_wanna_be)
+ if _is_complete_graph(sg):
+ # The graph seems to be chordal by now. We update the treewidth
+ current_treewidth = max(current_treewidth,len(clique_wanna_be))
+ if current_treewidth > treewidth_bound:
+ raise nx.NetworkXTreewidthBoundExceeded(\
+ "treewidth_bound exceeded: %s"%current_treewidth)
+ else:
+ # sg is not a clique,
+ # look for an edge that is not included in sg
+ (u,w) = _find_missing_edge(sg)
+ return (u,v,w)
+ return ()
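+
+# Illustrative example (the helper above is private; without a fixed start
+# node s its return value can vary between runs):
+#
+# G = nx.cycle_graph(4) # C4, the smallest non-chordal graph
+# u, v, w = _find_chordality_breaker(G, s=0)
+# # u and w are two already-numbered, non-adjacent neighbors of v,
+# # witnessing a chordless 4-cycle, so is_chordal(G) is False.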
+
+
+
+def _connected_chordal_graph_cliques(G):
+ """Return the set of maximal cliques of a connected chordal graph."""
+ if G.number_of_nodes() == 1:
+ x = frozenset(G.nodes())
+ return set([x])
+ else:
+ cliques = set()
+ unnumbered = set(G.nodes())
+ v = random.choice(list(unnumbered))
+ unnumbered.remove(v)
+ numbered = set([v])
+ clique_wanna_be = set([v])
+ while unnumbered:
+ v = _max_cardinality_node(G,unnumbered,numbered)
+ unnumbered.remove(v)
+ numbered.add(v)
+ new_clique_wanna_be = set(G.neighbors(v)) & numbered
+ sg = G.subgraph(clique_wanna_be)
+ if _is_complete_graph(sg):
+ new_clique_wanna_be.add(v)
+ if not new_clique_wanna_be >= clique_wanna_be:
+ cliques.add(frozenset(clique_wanna_be))
+ clique_wanna_be = new_clique_wanna_be
+ else:
+ raise nx.NetworkXError("Input graph is not chordal.")
+ cliques.add(frozenset(clique_wanna_be))
+ return cliques
+
+
+
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/chordal/tests/test_chordal.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/chordal/tests/test_chordal.py
new file mode 100644
index 0000000..4ec0b5b
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/chordal/tests/test_chordal.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx as nx
+
+class TestMCS:
+
+ def setUp(self):
+ # simple graph
+ connected_chordal_G=nx.Graph()
+ connected_chordal_G.add_edges_from([(1,2),(1,3),(2,3),(2,4),(3,4),
+ (3,5),(3,6),(4,5),(4,6),(5,6)])
+ self.connected_chordal_G=connected_chordal_G
+
+ chordal_G = nx.Graph()
+ chordal_G.add_edges_from([(1,2),(1,3),(2,3),(2,4),(3,4),
+ (3,5),(3,6),(4,5),(4,6),(5,6),(7,8)])
+ chordal_G.add_node(9)
+ self.chordal_G=chordal_G
+
+ non_chordal_G = nx.Graph()
+ non_chordal_G.add_edges_from([(1,2),(1,3),(2,4),(2,5),(3,4),(3,5)])
+ self.non_chordal_G = non_chordal_G
+
+ def test_is_chordal(self):
+ assert_false(nx.is_chordal(self.non_chordal_G))
+ assert_true(nx.is_chordal(self.chordal_G))
+ assert_true(nx.is_chordal(self.connected_chordal_G))
+ assert_true(nx.is_chordal(nx.complete_graph(3)))
+ assert_true(nx.is_chordal(nx.cycle_graph(3)))
+ assert_false(nx.is_chordal(nx.cycle_graph(5)))
+
+ def test_induced_nodes(self):
+ G = nx.generators.classic.path_graph(10)
+ I = nx.find_induced_nodes(G,1,9,2)
+ assert_equal(I,set([1,2,3,4,5,6,7,8,9]))
+ assert_raises(nx.NetworkXTreewidthBoundExceeded,
+ nx.find_induced_nodes,G,1,9,1)
+ I = nx.find_induced_nodes(self.chordal_G,1,6)
+ assert_equal(I,set([1,2,4,6]))
+ assert_raises(nx.NetworkXError,
+ nx.find_induced_nodes,self.non_chordal_G,1,5)
+
+ def test_chordal_find_cliques(self):
+ cliques = set([frozenset([9]),frozenset([7,8]),frozenset([1,2,3]),
+ frozenset([2,3,4]),frozenset([3,4,5,6])])
+ assert_equal(nx.chordal_graph_cliques(self.chordal_G),cliques)
+
+ def test_chordal_find_cliques_path(self):
+ G = nx.path_graph(10)
+ cliqueset = nx.chordal_graph_cliques(G)
+ for (u,v) in G.edges_iter():
+ assert_true(frozenset([u,v]) in cliqueset
+ or frozenset([v,u]) in cliqueset)
+
+ def test_chordal_find_cliquesCC(self):
+ cliques = set([frozenset([1,2,3]),frozenset([2,3,4]),
+ frozenset([3,4,5,6])])
+ assert_equal(nx.chordal_graph_cliques(self.connected_chordal_G),cliques)
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/clique.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/clique.py
new file mode 100644
index 0000000..08d9ade
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/clique.py
@@ -0,0 +1,516 @@
+"""
+=======
+Cliques
+=======
+
+Find and manipulate cliques of graphs.
+
+Note that finding the largest clique of a graph has been
+shown to be an NP-complete problem; the algorithms here
+could take a long time to run.
+
+http://en.wikipedia.org/wiki/Clique_problem
+"""
+# Copyright (C) 2004-2008 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+import networkx
+from networkx.utils.decorators import *
+__author__ = """Dan Schult (dschult@colgate.edu)"""
+__all__ = ['find_cliques', 'find_cliques_recursive', 'make_max_clique_graph',
+ 'make_clique_bipartite' ,'graph_clique_number',
+ 'graph_number_of_cliques', 'node_clique_number',
+ 'number_of_cliques', 'cliques_containing_node',
+ 'project_down', 'project_up']
+
+
+@not_implemented_for('directed')
+def find_cliques(G):
+ """Search for all maximal cliques in a graph.
+
+ A maximal clique is a complete subgraph that is not contained in
+ any larger complete subgraph. The largest maximal clique is
+ sometimes called the maximum clique.
+
+ Returns
+ -------
+ generator of lists: a generator of the member list for each maximal clique
+
+ See Also
+ --------
+ find_cliques_recursive :
+ A recursive version of the same algorithm
+
+ Notes
+ -----
+ To obtain a list of cliques, use list(find_cliques(G)).
+
+ Based on the algorithm published by Bron & Kerbosch (1973) [1]_
+ as adapted by Tomita, Tanaka and Takahashi (2006) [2]_
+ and discussed in Cazals and Karande (2008) [3]_.
+ The method essentially unrolls the recursion used in
+ the references to avoid issues of recursion stack depth.
+
+ This algorithm is not suitable for directed graphs.
+
+ This algorithm ignores self-loops and parallel edges, as a
+ clique is not conventionally defined with such edges.
+
+ There are often many cliques in graphs. This algorithm can
+ run out of memory for large graphs.
+
+ References
+ ----------
+ .. [1] Bron, C. and Kerbosch, J. 1973.
+ Algorithm 457: finding all cliques of an undirected graph.
+ Commun. ACM 16, 9 (Sep. 1973), 575-577.
+ http://portal.acm.org/citation.cfm?doid=362342.362367
+
+ .. [2] Etsuji Tomita, Akira Tanaka, Haruhisa Takahashi,
+ The worst-case time complexity for generating all maximal
+ cliques and computational experiments,
+ Theoretical Computer Science, Volume 363, Issue 1,
+ Computing and Combinatorics,
+ 10th Annual International Conference on
+ Computing and Combinatorics (COCOON 2004), 25 October 2006, Pages 28-42
+ http://dx.doi.org/10.1016/j.tcs.2006.06.015
+
+ .. [3] F. Cazals, C. Karande,
+ A note on the problem of reporting maximal cliques,
+ Theoretical Computer Science,
+ Volume 407, Issues 1-3, 6 November 2008, Pages 564-568,
+ http://dx.doi.org/10.1016/j.tcs.2008.05.010
+ """
+ # Cache nbrs and find first pivot (highest degree)
+ maxconn=-1
+ nnbrs={}
+ pivotnbrs=set() # handle empty graph
+ for n,nbrs in G.adjacency_iter():
+ nbrs=set(nbrs)
+ nbrs.discard(n)
+ conn = len(nbrs)
+ if conn > maxconn:
+ nnbrs[n] = pivotnbrs = nbrs
+ maxconn = conn
+ else:
+ nnbrs[n] = nbrs
+ # Initial setup
+ cand=set(nnbrs)
+ smallcand = set(cand - pivotnbrs)
+ done=set()
+ stack=[]
+ clique_so_far=[]
+ # Start main loop
+ while smallcand or stack:
+ try:
+ # Any nodes left to check?
+ n=smallcand.pop()
+ except KeyError:
+ # back out clique_so_far
+ cand,done,smallcand = stack.pop()
+ clique_so_far.pop()
+ continue
+ # Add next node to clique
+ clique_so_far.append(n)
+ cand.remove(n)
+ done.add(n)
+ nn=nnbrs[n]
+ new_cand = cand & nn
+ new_done = done & nn
+ # check if we have more to search
+ if not new_cand:
+ if not new_done:
+ # Found a clique!
+ yield clique_so_far[:]
+ clique_so_far.pop()
+ continue
+ # Shortcut--only one node left!
+ if not new_done and len(new_cand)==1:
+ yield clique_so_far + list(new_cand)
+ clique_so_far.pop()
+ continue
+ # find pivot node (max connected in cand)
+ # look in done nodes first
+ numb_cand=len(new_cand)
+ maxconndone=-1
+ for n in new_done:
+ cn = new_cand & nnbrs[n]
+ conn=len(cn)
+ if conn > maxconndone:
+ pivotdonenbrs=cn
+ maxconndone=conn
+ if maxconndone==numb_cand:
+ break
+ # Shortcut--this part of tree already searched
+ if maxconndone == numb_cand:
+ clique_so_far.pop()
+ continue
+ # still finding pivot node
+ # look in cand nodes second
+ maxconn=-1
+ for n in new_cand:
+ cn = new_cand & nnbrs[n]
+ conn=len(cn)
+ if conn > maxconn:
+ pivotnbrs=cn
+ maxconn=conn
+ if maxconn == numb_cand-1:
+ break
+ # pivot node is max connected in cand from done or cand
+ if maxconndone > maxconn:
+ pivotnbrs = pivotdonenbrs
+ # save search status for later backout
+ stack.append( (cand, done, smallcand) )
+ cand=new_cand
+ done=new_done
+ smallcand = cand - pivotnbrs
+
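+# Minimal usage sketch (the order of the cliques, and of the members within
+# each clique, is not guaranteed -- hence the sorting):
+#
+# G = networkx.complete_graph(4)
+# G.add_edge(3, 4)
+# sorted(sorted(c) for c in find_cliques(G))
+# # -> [[0, 1, 2, 3], [3, 4]]
+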
+
+def find_cliques_recursive(G):
+ """Recursive search for all maximal cliques in a graph.
+
+ A maximal clique is a complete subgraph that is not contained in
+ any larger complete subgraph. The largest maximal clique is
+ sometimes called the maximum clique.
+
+ Returns
+ -------
+ list of lists: list of members in each maximal clique
+
+ See Also
+ --------
+ find_cliques : A nonrecursive version of the same algorithm
+
+ Notes
+ -----
+ Based on the algorithm published by Bron & Kerbosch (1973) [1]_
+ as adapted by Tomita, Tanaka and Takahashi (2006) [2]_
+ and discussed in Cazals and Karande (2008) [3]_.
+
+ This implementation returns a list of lists each of
+ which contains the members of a maximal clique.
+
+ This algorithm ignores self-loops and parallel edges, as a
+ clique is not conventionally defined with such edges.
+
+ References
+ ----------
+ .. [1] Bron, C. and Kerbosch, J. 1973.
+ Algorithm 457: finding all cliques of an undirected graph.
+ Commun. ACM 16, 9 (Sep. 1973), 575-577.
+ http://portal.acm.org/citation.cfm?doid=362342.362367
+
+ .. [2] Etsuji Tomita, Akira Tanaka, Haruhisa Takahashi,
+ The worst-case time complexity for generating all maximal
+ cliques and computational experiments,
+ Theoretical Computer Science, Volume 363, Issue 1,
+ Computing and Combinatorics,
+ 10th Annual International Conference on
+ Computing and Combinatorics (COCOON 2004), 25 October 2006, Pages 28-42
+ http://dx.doi.org/10.1016/j.tcs.2006.06.015
+
+ .. [3] F. Cazals, C. Karande,
+ A note on the problem of reporting maximal cliques,
+ Theoretical Computer Science,
+ Volume 407, Issues 1-3, 6 November 2008, Pages 564-568,
+ http://dx.doi.org/10.1016/j.tcs.2008.05.010
+ """
+ nnbrs={}
+ for n,nbrs in G.adjacency_iter():
+ nbrs=set(nbrs)
+ nbrs.discard(n)
+ nnbrs[n]=nbrs
+ if not nnbrs: return [] # empty graph
+ cand=set(nnbrs)
+ done=set()
+ clique_so_far=[]
+ cliques=[]
+ _extend(nnbrs,cand,done,clique_so_far,cliques)
+ return cliques
+
+def _extend(nnbrs,cand,done,so_far,cliques):
+ # find pivot node (max connections in cand)
+ maxconn=-1
+ numb_cand=len(cand)
+ for n in done:
+ cn = cand & nnbrs[n]
+ conn=len(cn)
+ if conn > maxconn:
+ pivotnbrs=cn
+ maxconn=conn
+ if conn==numb_cand:
+ # All possible cliques already found
+ return
+ for n in cand:
+ cn = cand & nnbrs[n]
+ conn=len(cn)
+ if conn > maxconn:
+ pivotnbrs=cn
+ maxconn=conn
+ # Use pivot to reduce number of nodes to examine
+ smallercand = set(cand - pivotnbrs)
+ for n in smallercand:
+ cand.remove(n)
+ so_far.append(n)
+ nn=nnbrs[n]
+ new_cand=cand & nn
+ new_done=done & nn
+ if not new_cand and not new_done:
+ # Found the clique
+ cliques.append(so_far[:])
+ elif not new_done and len(new_cand) == 1:
+ # shortcut if only one node left
+ cliques.append(so_far+list(new_cand))
+ else:
+ _extend(nnbrs, new_cand, new_done, so_far, cliques)
+ done.add(so_far.pop())
+
+
+def make_max_clique_graph(G,create_using=None,name=None):
+ """ Create the maximal clique graph of a graph.
+
+ Finds the maximal cliques and treats these as nodes.
+ The nodes are connected if they have common members in
+ the original graph. Theory has done a lot with clique
+ graphs, but I haven't seen much on maximal clique graphs.
+
+ Notes
+ -----
+ This should be the same as make_clique_bipartite followed
+ by project_up, but it saves all the intermediate steps.
+ """
+ cliq=list(map(set,find_cliques(G)))
+ if create_using:
+ B=create_using
+ B.clear()
+ else:
+ B=networkx.Graph()
+ if name is not None:
+ B.name=name
+
+ for i,cl in enumerate(cliq):
+ B.add_node(i+1)
+ for j,other_cl in enumerate(cliq[:i]):
+ # if not cl.isdisjoint(other_cl): #Requires 2.6
+ intersect=cl & other_cl
+ if intersect: # Not empty
+ B.add_edge(i+1,j+1)
+ return B
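+
+# Minimal sketch: two triangles sharing node 2 become two clique-nodes
+# (numbered 1 and 2, as assigned above) joined by an edge:
+#
+# G = networkx.Graph([(0,1),(0,2),(1,2),(2,3),(2,4),(3,4)])
+# B = make_max_clique_graph(G)
+# sorted(B.edges()) # -> [(1, 2)]; cliques {0,1,2} and {2,3,4} share node 2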
+
+def make_clique_bipartite(G,fpos=None,create_using=None,name=None):
+ """Create a bipartite clique graph from a graph G.
+
+ Nodes of G are retained as the "bottom nodes" of B and
+ cliques of G become "top nodes" of B.
+ Edges are present if a bottom node belongs to the clique
+ represented by the top node.
+
+ Returns a Graph with additional attribute dict B.node_type
+ which is keyed by nodes to "Bottom" or "Top" appropriately.
+
+ if fpos is not None, a second additional attribute dict B.pos
+ is created to hold the position tuple of each node for viewing
+ the bipartite graph.
+ """
+ cliq=list(find_cliques(G))
+ if create_using:
+ B=create_using
+ B.clear()
+ else:
+ B=networkx.Graph()
+ if name is not None:
+ B.name=name
+
+ B.add_nodes_from(G)
+ B.node_type={} # New Attribute for B
+ for n in B:
+ B.node_type[n]="Bottom"
+
+ if fpos:
+ B.pos={} # New Attribute for B
+ delta_cpos=1./len(cliq)
+ delta_ppos=1./G.order()
+ cpos=0.
+ ppos=0.
+ for i,cl in enumerate(cliq):
+ name= -i-1 # Top nodes get negative names
+ B.add_node(name)
+ B.node_type[name]="Top"
+ if fpos:
+ if name not in B.pos:
+ B.pos[name]=(0.2,cpos)
+ cpos +=delta_cpos
+ for v in cl:
+ B.add_edge(name,v)
+ if fpos is not None:
+ if v not in B.pos:
+ B.pos[v]=(0.8,ppos)
+ ppos +=delta_ppos
+ return B
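+
+# Sketch of the encoding (top clique-nodes get negative names, as assigned
+# above; the original nodes of G stay "Bottom"):
+#
+# G = networkx.path_graph(3) # maximal cliques: {0,1} and {1,2}
+# B = make_clique_bipartite(G)
+# B.node_type[-1], B.node_type[0] # -> ('Top', 'Bottom')
+# # node 1 is adjacent to both top nodes, since it lies in both cliques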
+
+def project_down(B,create_using=None,name=None):
+ """Project a bipartite graph B down onto its "bottom nodes".
+
+ The nodes retain their names and are connected if they
+ share a common top node in the bipartite graph.
+
+ Returns a Graph.
+ """
+ if create_using:
+ G=create_using
+ G.clear()
+ else:
+ G=networkx.Graph()
+ if name is not None:
+ G.name=name
+
+ for v,Bvnbrs in B.adjacency_iter():
+ if B.node_type[v]=="Bottom":
+ G.add_node(v)
+ for cv in Bvnbrs:
+ G.add_edges_from([(v,u) for u in B[cv] if u!=v])
+ return G
+
+def project_up(B,create_using=None,name=None):
+ """Project a bipartite graph B down onto its "bottom nodes".
+
+ The nodes retain their names and are connected if they
+ share a common Bottom Node in the Bipartite Graph.
+
+ Returns a Graph.
+ """
+ if create_using:
+ G=create_using
+ G.clear()
+ else:
+ G=networkx.Graph()
+ if name is not None:
+ G.name=name
+
+ for v,Bvnbrs in B.adjacency_iter():
+ if B.node_type[v]=="Top":
+ vname= -v #Change sign of name for Top Nodes
+ G.add_node(vname)
+ for cv in Bvnbrs:
+ # Note: -u changes the name (not Top node anymore)
+ G.add_edges_from([(vname,-u) for u in B[cv] if u!=v])
+ return G
+
+def graph_clique_number(G,cliques=None):
+ """Return the clique number (size of the largest clique) for G.
+
+ An optional list of cliques can be input if already computed.
+ """
+ if cliques is None:
+ cliques=find_cliques(G)
+ return max( [len(c) for c in cliques] )
+
+
+def graph_number_of_cliques(G,cliques=None):
+ """Returns the number of maximal cliques in G.
+
+ An optional list of cliques can be input if already computed.
+ """
+ if cliques is None:
+ cliques=list(find_cliques(G))
+ return len(cliques)
+
+
+def node_clique_number(G,nodes=None,cliques=None):
+ """ Returns the size of the largest maximal clique containing
+ each given node.
+
+ Returns a single value if one node is given, otherwise a dict keyed by node.
+ Optional list of cliques can be input if already computed.
+ """
+ if cliques is None:
+ if nodes is not None:
+ # Use ego_graph to decrease size of graph
+ if isinstance(nodes,list):
+ d={}
+ for n in nodes:
+ H=networkx.ego_graph(G,n)
+ d[n]=max( (len(c) for c in find_cliques(H)) )
+ else:
+ H=networkx.ego_graph(G,nodes)
+ d=max( (len(c) for c in find_cliques(H)) )
+ return d
+ # nodes is None--find all cliques
+ cliques=list(find_cliques(G))
+
+ if nodes is None:
+ nodes=G.nodes() # none, get entire graph
+
+ if not isinstance(nodes, list): # check for a list
+ v=nodes
+ # assume it is a single value
+ d=max([len(c) for c in cliques if v in c])
+ else:
+ d={}
+ for v in nodes:
+ d[v]=max([len(c) for c in cliques if v in c])
+ return d
+
+ # if nodes is None: # none, use entire graph
+ # nodes=G.nodes()
+ # elif not isinstance(nodes, list): # check for a list
+ # nodes=[nodes] # assume it is a single value
+
+ # if cliques is None:
+ # cliques=list(find_cliques(G))
+ # d={}
+ # for v in nodes:
+ # d[v]=max([len(c) for c in cliques if v in c])
+
+ # if nodes in G:
+ # return d[v] #return single value
+ # return d
+
+
+def number_of_cliques(G,nodes=None,cliques=None):
+ """Returns the number of maximal cliques for each node.
+
+ Returns a single count if one node is given, otherwise a dict keyed by node.
+ Optional list of cliques can be input if already computed.
+ """
+ if cliques is None:
+ cliques=list(find_cliques(G))
+
+ if nodes is None:
+ nodes=G.nodes() # none, get entire graph
+
+ if not isinstance(nodes, list): # check for a list
+ v=nodes
+ # assume it is a single value
+ numcliq=len([1 for c in cliques if v in c])
+ else:
+ numcliq={}
+ for v in nodes:
+ numcliq[v]=len([1 for c in cliques if v in c])
+ return numcliq
+
+
+def cliques_containing_node(G,nodes=None,cliques=None):
+ """Returns a list of cliques containing the given node.
+
+ Returns a single list if one node is given, otherwise a dict of lists keyed by node.
+ Optional list of cliques can be input if already computed.
+ """
+ if cliques is None:
+ cliques=list(find_cliques(G))
+
+ if nodes is None:
+ nodes=G.nodes() # none, get entire graph
+
+ if not isinstance(nodes, list): # check for a list
+ v=nodes
+ # assume it is a single value
+ vcliques=[c for c in cliques if v in c]
+ else:
+ vcliques={}
+ for v in nodes:
+ vcliques[v]=[c for c in cliques if v in c]
+ return vcliques
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/cluster.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/cluster.py
new file mode 100644
index 0000000..a442431
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/cluster.py
@@ -0,0 +1,363 @@
+# -*- coding: utf-8 -*-
+"""Algorithms to characterize the number of triangles in a graph."""
+from itertools import combinations
+import networkx as nx
+from networkx import NetworkXError
+__author__ = """\n""".join(['Aric Hagberg <aric.hagberg@gmail.com>',
+ 'Dan Schult (dschult@colgate.edu)',
+ 'Pieter Swart (swart@lanl.gov)',
+ 'Jordi Torrents <jtorrents@milnou.net>'])
+# Copyright (C) 2004-2011 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+__all__= ['triangles', 'average_clustering', 'clustering', 'transitivity',
+ 'square_clustering']
+
+def triangles(G, nodes=None):
+ """Compute the number of triangles.
+
+ Finds the number of triangles that include a node as one vertex.
+
+ Parameters
+ ----------
+ G : graph
+ A networkx graph
+ nodes : container of nodes, optional (default= all nodes in G)
+ Compute triangles for nodes in this container.
+
+ Returns
+ -------
+ out : dictionary
+ Number of triangles keyed by node label.
+
+ Examples
+ --------
+ >>> G=nx.complete_graph(5)
+ >>> print(nx.triangles(G,0))
+ 6
+ >>> print(nx.triangles(G))
+ {0: 6, 1: 6, 2: 6, 3: 6, 4: 6}
+ >>> print(list(nx.triangles(G,(0,1)).values()))
+ [6, 6]
+
+ Notes
+ -----
+ When computing triangles for the entire graph each triangle is counted
+ three times, once at each node. Self loops are ignored.
+
+ """
+ if G.is_directed():
+ raise NetworkXError("triangles() is not defined for directed graphs.")
+ if nodes in G:
+ # return single value
+ return next(_triangles_and_degree_iter(G,nodes))[2] // 2
+ return dict( (v,t // 2) for v,d,t in _triangles_and_degree_iter(G,nodes))
+
+def _triangles_and_degree_iter(G,nodes=None):
+ """ Return an iterator of (node, degree, triangles).
+
+ This double counts triangles so you may want to divide by 2.
+ See degree() and triangles() for definitions and details.
+
+ """
+ if G.is_multigraph():
+ raise NetworkXError("Not defined for multigraphs.")
+
+ if nodes is None:
+ nodes_nbrs = G.adj.items()
+ else:
+ nodes_nbrs= ( (n,G[n]) for n in G.nbunch_iter(nodes) )
+
+ for v,v_nbrs in nodes_nbrs:
+ vs=set(v_nbrs)-set([v])
+ ntriangles=0
+ for w in vs:
+ ws=set(G[w])-set([w])
+ ntriangles+=len(vs.intersection(ws))
+ yield (v,len(vs),ntriangles)
+
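+# Illustrative sketch (not from the upstream module): in a triangle each
+# node has degree 2 and its single triangle is counted twice, once per
+# neighbor, so the iterator yields (node, 2, 2) for every node.
+#
+#     >>> G = nx.complete_graph(3)
+#     >>> sorted(_triangles_and_degree_iter(G))
+#     [(0, 2, 2), (1, 2, 2), (2, 2, 2)]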
+
+def _weighted_triangles_and_degree_iter(G, nodes=None, weight='weight'):
+ """ Return an iterator of (node, degree, weighted_triangles).
+
+ Used for weighted clustering.
+
+ """
+ if G.is_multigraph():
+ raise NetworkXError("Not defined for multigraphs.")
+
+ if weight is None or G.edges()==[]:
+ max_weight=1.0
+ else:
+ max_weight=float(max(d.get(weight,1.0)
+ for u,v,d in G.edges(data=True)))
+ if nodes is None:
+ nodes_nbrs = G.adj.items()
+ else:
+ nodes_nbrs= ( (n,G[n]) for n in G.nbunch_iter(nodes) )
+
+ for i,nbrs in nodes_nbrs:
+ inbrs=set(nbrs)-set([i])
+ weighted_triangles=0.0
+ seen=set()
+ for j in inbrs:
+ wij=G[i][j].get(weight,1.0)/max_weight
+ seen.add(j)
+ jnbrs=set(G[j])-seen # this keeps from double counting
+ for k in inbrs&jnbrs:
+ wjk=G[j][k].get(weight,1.0)/max_weight
+ wki=G[i][k].get(weight,1.0)/max_weight
+ weighted_triangles+=(wij*wjk*wki)**(1.0/3.0)
+ yield (i,len(inbrs),weighted_triangles*2)
+
+
+def average_clustering(G, nodes=None, weight=None, count_zeros=True):
+ r"""Compute the average clustering coefficient for the graph G.
+
+ The clustering coefficient for the graph is the average,
+
+ .. math::
+
+ C = \frac{1}{n}\sum_{v \in G} c_v,
+
+ where `n` is the number of nodes in `G`.
+
+ Parameters
+ ----------
+ G : graph
+
+ nodes : container of nodes, optional (default=all nodes in G)
+ Compute average clustering for nodes in this container.
+
+ weight : string or None, optional (default=None)
+ The edge attribute that holds the numerical value used as a weight.
+ If None, then each edge has weight 1.
+
+    count_zeros : bool (default=True)
+        If False, only nodes with nonzero clustering are included in the average.
+
+ Returns
+ -------
+ avg : float
+ Average clustering
+
+ Examples
+ --------
+ >>> G=nx.complete_graph(5)
+ >>> print(nx.average_clustering(G))
+ 1.0
+
+ Notes
+ -----
+ This is a space saving routine; it might be faster
+ to use the clustering function to get a list and then take the average.
+
+ Self loops are ignored.
+
+ References
+ ----------
+ .. [1] Generalizations of the clustering coefficient to weighted
+ complex networks by J. Saramäki, M. Kivelä, J.-P. Onnela,
+ K. Kaski, and J. Kertész, Physical Review E, 75 027105 (2007).
+ http://jponnela.com/web_documents/a9.pdf
+ .. [2] Marcus Kaiser, Mean clustering coefficients: the role of isolated
+ nodes and leafs on clustering measures for small-world networks.
+ http://arxiv.org/abs/0802.2512
+ """
+ c=clustering(G,nodes,weight=weight).values()
+ if not count_zeros:
+ c = [v for v in c if v > 0]
+ return sum(c)/float(len(c))
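+
+# Illustrative sketch of count_zeros (not from the upstream module): a
+# triangle with a pendant node has clustering values 1.0, 1.0, 1/3, 0.0,
+# so dropping the zero raises the average from 7/12 to 7/9.
+#
+#     >>> G = nx.Graph([(0, 1), (0, 2), (1, 2), (2, 3)])
+#     >>> round(nx.average_clustering(G), 3)
+#     0.583
+#     >>> round(nx.average_clustering(G, count_zeros=False), 3)
+#     0.778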
+
+def clustering(G, nodes=None, weight=None):
+ r"""Compute the clustering coefficient for nodes.
+
+ For unweighted graphs, the clustering of a node `u`
+ is the fraction of possible triangles through that node that exist,
+
+ .. math::
+
+ c_u = \frac{2 T(u)}{deg(u)(deg(u)-1)},
+
+ where `T(u)` is the number of triangles through node `u` and
+ `deg(u)` is the degree of `u`.
+
+ For weighted graphs, the clustering is defined
+ as the geometric average of the subgraph edge weights [1]_,
+
+ .. math::
+
+       c_u = \frac{1}{deg(u)(deg(u)-1)}
+             \sum_{vw} (\hat{w}_{uv} \hat{w}_{uw} \hat{w}_{vw})^{1/3},
+
+    where the sum runs over the pairs `vw` of distinct neighbors of `u`.
+    The edge weights `\hat{w}_{uv}` are normalized by the maximum weight
+    in the network, `\hat{w}_{uv} = w_{uv}/\max(w)`.
+
+ The value of `c_u` is assigned to 0 if `deg(u) < 2`.
+
+ Parameters
+ ----------
+ G : graph
+
+ nodes : container of nodes, optional (default=all nodes in G)
+ Compute clustering for nodes in this container.
+
+ weight : string or None, optional (default=None)
+ The edge attribute that holds the numerical value used as a weight.
+ If None, then each edge has weight 1.
+
+ Returns
+ -------
+ out : float, or dictionary
+ Clustering coefficient at specified nodes
+
+ Examples
+ --------
+ >>> G=nx.complete_graph(5)
+ >>> print(nx.clustering(G,0))
+ 1.0
+ >>> print(nx.clustering(G))
+ {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0}
+
+ Notes
+ -----
+ Self loops are ignored.
+
+ References
+ ----------
+ .. [1] Generalizations of the clustering coefficient to weighted
+ complex networks by J. Saramäki, M. Kivelä, J.-P. Onnela,
+ K. Kaski, and J. Kertész, Physical Review E, 75 027105 (2007).
+ http://jponnela.com/web_documents/a9.pdf
+ """
+ if G.is_directed():
+        raise NetworkXError('Clustering algorithms are not defined '
+                            'for directed graphs.')
+ if weight is not None:
+ td_iter=_weighted_triangles_and_degree_iter(G,nodes,weight)
+ else:
+ td_iter=_triangles_and_degree_iter(G,nodes)
+
+ clusterc={}
+
+ for v,d,t in td_iter:
+ if t==0:
+ clusterc[v]=0.0
+ else:
+ clusterc[v]=t/float(d*(d-1))
+
+ if nodes in G:
+ return list(clusterc.values())[0] # return single value
+ return clusterc
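+
+# Illustrative sketch (not from the upstream module): with all edge
+# weights equal, the geometric-mean formula reduces to the unweighted
+# coefficient.
+#
+#     >>> G = nx.complete_graph(3)
+#     >>> for u, v in G.edges():
+#     ...     G[u][v]['weight'] = 2.0
+#     >>> nx.clustering(G, 0, weight='weight')
+#     1.0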
+
+def transitivity(G):
+ r"""Compute graph transitivity, the fraction of all possible triangles
+ present in G.
+
+ Possible triangles are identified by the number of "triads"
+ (two edges with a shared vertex).
+
+ The transitivity is
+
+ .. math::
+
+ T = 3\frac{\#triangles}{\#triads}.
+
+ Parameters
+ ----------
+ G : graph
+
+ Returns
+ -------
+ out : float
+ Transitivity
+
+ Examples
+ --------
+ >>> G = nx.complete_graph(5)
+ >>> print(nx.transitivity(G))
+ 1.0
+ """
+ triangles=0 # 6 times number of triangles
+ contri=0 # 2 times number of connected triples
+ for v,d,t in _triangles_and_degree_iter(G):
+ contri += d*(d-1)
+ triangles += t
+ if triangles==0: # we had no triangles or possible triangles
+ return 0.0
+ else:
+ return triangles/float(contri)
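+
+# Illustrative sketch (not from the upstream module): transitivity is a
+# global ratio and can differ from average_clustering on the same graph.
+# A triangle with a pendant node has 1 triangle and 5 triads, so
+# T = 3*1/5 = 0.6.
+#
+#     >>> G = nx.Graph([(0, 1), (0, 2), (1, 2), (2, 3)])
+#     >>> nx.transitivity(G)
+#     0.6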
+
+def square_clustering(G, nodes=None):
+ r""" Compute the squares clustering coefficient for nodes.
+
+ For each node return the fraction of possible squares that exist at
+ the node [1]_
+
+ .. math::
+ C_4(v) = \frac{ \sum_{u=1}^{k_v}
+ \sum_{w=u+1}^{k_v} q_v(u,w) }{ \sum_{u=1}^{k_v}
+ \sum_{w=u+1}^{k_v} [a_v(u,w) + q_v(u,w)]},
+
+ where `q_v(u,w)` are the number of common neighbors of `u` and `w`
+    other than `v` (i.e. squares), and
+ `a_v(u,w) = (k_u - (1+q_v(u,w)+\theta_{uv}))(k_w - (1+q_v(u,w)+\theta_{uw}))`,
+ where `\theta_{uw} = 1` if `u` and `w` are connected and 0 otherwise.
+
+ Parameters
+ ----------
+ G : graph
+
+ nodes : container of nodes, optional (default=all nodes in G)
+ Compute clustering for nodes in this container.
+
+ Returns
+ -------
+ c4 : dictionary
+ A dictionary keyed by node with the square clustering coefficient value.
+
+ Examples
+ --------
+ >>> G=nx.complete_graph(5)
+ >>> print(nx.square_clustering(G,0))
+ 1.0
+ >>> print(nx.square_clustering(G))
+ {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0}
+
+ Notes
+ -----
+ While `C_3(v)` (triangle clustering) gives the probability that
+ two neighbors of node v are connected with each other, `C_4(v)` is
+ the probability that two neighbors of node v share a common
+ neighbor different from v. This algorithm can be applied to both
+ bipartite and unipartite networks.
+
+ References
+ ----------
+ .. [1] Pedro G. Lind, Marta C. González, and Hans J. Herrmann. 2005
+ Cycles and clustering in bipartite networks.
+ Physical Review E (72) 056127.
+ """
+ if nodes is None:
+ node_iter = G
+ else:
+ node_iter = G.nbunch_iter(nodes)
+ clustering = {}
+ for v in node_iter:
+ clustering[v] = 0.0
+ potential=0
+ for u,w in combinations(G[v], 2):
+ squares = len((set(G[u]) & set(G[w])) - set([v]))
+ clustering[v] += squares
+ degm = squares + 1.0
+ if w in G[u]:
+ degm += 1
+ potential += (len(G[u]) - degm) * (len(G[w]) - degm) + squares
+ if potential > 0:
+ clustering[v] /= potential
+ if nodes in G:
+ return list(clustering.values())[0] # return single value
+ return clustering
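+
+# Illustrative sketch (not from the upstream module): a 4-cycle has no
+# triangles, so triangle clustering is 0, yet every node closes exactly
+# one square.
+#
+#     >>> G = nx.cycle_graph(4)
+#     >>> nx.clustering(G, 0)
+#     0.0
+#     >>> nx.square_clustering(G, 0)
+#     1.0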
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/community/__init__.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/community/__init__.py
new file mode 100644
index 0000000..c3c2285
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/community/__init__.py
@@ -0,0 +1 @@
+from networkx.algorithms.community.kclique import *
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/community/kclique.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/community/kclique.py
new file mode 100644
index 0000000..dc95b58
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/community/kclique.py
@@ -0,0 +1,82 @@
+#-*- coding: utf-8 -*-
+# Copyright (C) 2011 by
+# Conrad Lee <conradlee@gmail.com>
+# Aric Hagberg <hagberg@lanl.gov>
+# All rights reserved.
+# BSD license.
+from collections import defaultdict
+import networkx as nx
+__author__ = """\n""".join(['Conrad Lee <conradlee@gmail.com>',
+ 'Aric Hagberg <aric.hagberg@gmail.com>'])
+__all__ = ['k_clique_communities']
+
+def k_clique_communities(G, k, cliques=None):
+ """Find k-clique communities in graph using the percolation method.
+
+ A k-clique community is the union of all cliques of size k that
+ can be reached through adjacent (sharing k-1 nodes) k-cliques.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ k : int
+ Size of smallest clique
+
+ cliques: list or generator
+ Precomputed cliques (use networkx.find_cliques(G))
+
+ Returns
+ -------
+ Yields sets of nodes, one for each k-clique community.
+
+ Examples
+ --------
+ >>> G = nx.complete_graph(5)
+ >>> K5 = nx.convert_node_labels_to_integers(G,first_label=2)
+ >>> G.add_edges_from(K5.edges())
+ >>> c = list(nx.k_clique_communities(G, 4))
+ >>> list(c[0])
+ [0, 1, 2, 3, 4, 5, 6]
+ >>> list(nx.k_clique_communities(G, 6))
+ []
+
+ References
+ ----------
+    .. [1] Gergely Palla, Imre Derényi, Illés Farkas, and Tamás Vicsek,
+ Uncovering the overlapping community structure of complex networks
+ in nature and society Nature 435, 814-818, 2005,
+ doi:10.1038/nature03607
+ """
+ if k < 2:
+ raise nx.NetworkXError("k=%d, k must be greater than 1."%k)
+ if cliques is None:
+ cliques = nx.find_cliques(G)
+ cliques = [frozenset(c) for c in cliques if len(c) >= k]
+
+ # First index which nodes are in which cliques
+ membership_dict = defaultdict(list)
+ for clique in cliques:
+ for node in clique:
+ membership_dict[node].append(clique)
+
+ # For each clique, see which adjacent cliques percolate
+ perc_graph = nx.Graph()
+ perc_graph.add_nodes_from(cliques)
+ for clique in cliques:
+ for adj_clique in _get_adjacent_cliques(clique, membership_dict):
+ if len(clique.intersection(adj_clique)) >= (k - 1):
+ perc_graph.add_edge(clique, adj_clique)
+
+ # Connected components of clique graph with perc edges
+ # are the percolated cliques
+ for component in nx.connected_components(perc_graph):
+        yield frozenset.union(*component)
+
+def _get_adjacent_cliques(clique, membership_dict):
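+    # Gather every clique that shares at least one node with `clique`;
+    # the caller then keeps only those overlapping in >= k-1 nodes.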
+ adjacent_cliques = set()
+ for n in clique:
+ for adj_clique in membership_dict[n]:
+ if clique != adj_clique:
+ adjacent_cliques.add(adj_clique)
+ return adjacent_cliques
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/community/tests/test_kclique.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/community/tests/test_kclique.py
new file mode 100644
index 0000000..8debca6
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/community/tests/test_kclique.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx as nx
+from itertools import combinations
+from networkx import k_clique_communities
+
+def test_overlapping_K5():
+ G = nx.Graph()
+ G.add_edges_from(combinations(range(5), 2)) # Add a five clique
+ G.add_edges_from(combinations(range(2,7), 2)) # Add another five clique
+ c = list(nx.k_clique_communities(G, 4))
+ assert_equal(c,[frozenset([0, 1, 2, 3, 4, 5, 6])])
+ c= list(nx.k_clique_communities(G, 5))
+ assert_equal(set(c),set([frozenset([0,1,2,3,4]),frozenset([2,3,4,5,6])]))
+
+def test_isolated_K5():
+ G = nx.Graph()
+ G.add_edges_from(combinations(range(0,5), 2)) # Add a five clique
+ G.add_edges_from(combinations(range(5,10), 2)) # Add another five clique
+ c= list(nx.k_clique_communities(G, 5))
+ assert_equal(set(c),set([frozenset([0,1,2,3,4]),frozenset([5,6,7,8,9])]))
+
+def test_zachary():
+ z = nx.karate_club_graph()
+ # clique percolation with k=2 is just connected components
+ zachary_k2_ground_truth = set([frozenset(z.nodes())])
+ zachary_k3_ground_truth = set([frozenset([0, 1, 2, 3, 7, 8, 12, 13, 14,
+ 15, 17, 18, 19, 20, 21, 22, 23,
+ 26, 27, 28, 29, 30, 31, 32, 33]),
+ frozenset([0, 4, 5, 6, 10, 16]),
+ frozenset([24, 25, 31])])
+ zachary_k4_ground_truth = set([frozenset([0, 1, 2, 3, 7, 13]),
+ frozenset([8, 32, 30, 33]),
+ frozenset([32, 33, 29, 23])])
+ zachary_k5_ground_truth = set([frozenset([0, 1, 2, 3, 7, 13])])
+ zachary_k6_ground_truth = set([])
+
+ assert set(k_clique_communities(z, 2)) == zachary_k2_ground_truth
+ assert set(k_clique_communities(z, 3)) == zachary_k3_ground_truth
+ assert set(k_clique_communities(z, 4)) == zachary_k4_ground_truth
+ assert set(k_clique_communities(z, 5)) == zachary_k5_ground_truth
+ assert set(k_clique_communities(z, 6)) == zachary_k6_ground_truth
+
+@raises(nx.NetworkXError)
+def test_bad_k():
+ c = list(k_clique_communities(nx.Graph(),1))
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/components/__init__.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/components/__init__.py
new file mode 100644
index 0000000..36c9391
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/components/__init__.py
@@ -0,0 +1,5 @@
+from networkx.algorithms.components.connected import *
+from networkx.algorithms.components.strongly_connected import *
+from networkx.algorithms.components.weakly_connected import *
+from networkx.algorithms.components.attracting import *
+from networkx.algorithms.components.biconnected import *
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/components/attracting.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/components/attracting.py
new file mode 100644
index 0000000..d0e75c2
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/components/attracting.py
@@ -0,0 +1,133 @@
+# -*- coding: utf-8 -*-
+"""
+Attracting components.
+"""
+# Copyright (C) 2004-2011 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+import networkx as nx
+__authors__ = "\n".join(['Christopher Ellison'])
+__all__ = ['number_attracting_components',
+ 'attracting_components',
+ 'is_attracting_component',
+ 'attracting_component_subgraphs',
+ ]
+
+def attracting_components(G):
+ """Returns a list of attracting components in `G`.
+
+ An attracting component in a directed graph `G` is a strongly connected
+ component with the property that a random walker on the graph will never
+ leave the component, once it enters the component.
+
+ The nodes in attracting components can also be thought of as recurrent
+ nodes. If a random walker enters the attractor containing the node, then
+ the node will be visited infinitely often.
+
+ Parameters
+ ----------
+ G : DiGraph, MultiDiGraph
+ The graph to be analyzed.
+
+ Returns
+ -------
+ attractors : list
+ The list of attracting components, sorted from largest attracting
+ component to smallest attracting component.
+
+ See Also
+ --------
+ number_attracting_components
+ is_attracting_component
+ attracting_component_subgraphs
+
+ """
+ scc = nx.strongly_connected_components(G)
+ cG = nx.condensation(G, scc)
+ attractors = [scc[n] for n in cG if cG.out_degree(n) == 0]
+ attractors.sort(key=len,reverse=True)
+ return attractors
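+
+# Illustrative sketch (not from the upstream module): a 2-cycle that can
+# escape to a terminal node is not attracting; the terminal node alone is.
+#
+#     >>> G = nx.DiGraph([(1, 2), (2, 1), (2, 3)])
+#     >>> nx.attracting_components(G)
+#     [[3]]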
+
+
+def number_attracting_components(G):
+ """Returns the number of attracting components in `G`.
+
+ Parameters
+ ----------
+ G : DiGraph, MultiDiGraph
+ The graph to be analyzed.
+
+ Returns
+ -------
+ n : int
+ The number of attracting components in G.
+
+ See Also
+ --------
+ attracting_components
+ is_attracting_component
+ attracting_component_subgraphs
+
+ """
+ n = len(attracting_components(G))
+ return n
+
+
+def is_attracting_component(G):
+ """Returns True if `G` consists of a single attracting component.
+
+ Parameters
+ ----------
+ G : DiGraph, MultiDiGraph
+ The graph to be analyzed.
+
+ Returns
+ -------
+ attracting : bool
+ True if `G` has a single attracting component. Otherwise, False.
+
+ See Also
+ --------
+ attracting_components
+ number_attracting_components
+ attracting_component_subgraphs
+
+ """
+ ac = attracting_components(G)
+ if len(ac[0]) == len(G):
+ attracting = True
+ else:
+ attracting = False
+ return attracting
+
+
+def attracting_component_subgraphs(G):
+ """Returns a list of attracting component subgraphs from `G`.
+
+ Parameters
+ ----------
+ G : DiGraph, MultiDiGraph
+ The graph to be analyzed.
+
+ Returns
+ -------
+ subgraphs : list
+ A list of node-induced subgraphs of the attracting components of `G`.
+
+ Notes
+ -----
+ Graph, node, and edge attributes are copied to the subgraphs.
+
+ See Also
+ --------
+ attracting_components
+ number_attracting_components
+ is_attracting_component
+
+ """
+ subgraphs = [G.subgraph(ac).copy() for ac in attracting_components(G)]
+ return subgraphs
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/components/biconnected.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/components/biconnected.py
new file mode 100644
index 0000000..0185c2c
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/components/biconnected.py
@@ -0,0 +1,417 @@
+# -*- coding: utf-8 -*-
+"""
+Biconnected components and articulation points.
+"""
+# Copyright (C) 2011 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+from itertools import chain
+import networkx as nx
+__author__ = '\n'.join(['Jordi Torrents <jtorrents@milnou.net>',
+ 'Dan Schult <dschult@colgate.edu>',
+ 'Aric Hagberg <aric.hagberg@gmail.com>'])
+__all__ = ['biconnected_components',
+ 'biconnected_component_edges',
+ 'biconnected_component_subgraphs',
+ 'is_biconnected',
+ 'articulation_points',
+ ]
+
+def is_biconnected(G):
+ """Return True if the graph is biconnected, False otherwise.
+
+ A graph is biconnected if, and only if, it cannot be disconnected by
+ removing only one node (and all edges incident on that node). If
+ removing a node increases the number of disconnected components
+ in the graph, that node is called an articulation point, or cut
+ vertex. A biconnected graph has no articulation points.
+
+ Parameters
+ ----------
+ G : NetworkX Graph
+ An undirected graph.
+
+ Returns
+ -------
+ biconnected : bool
+ True if the graph is biconnected, False otherwise.
+
+ Raises
+ ------
+ NetworkXError :
+ If the input graph is not undirected.
+
+ Examples
+ --------
+ >>> G=nx.path_graph(4)
+ >>> print(nx.is_biconnected(G))
+ False
+ >>> G.add_edge(0,3)
+ >>> print(nx.is_biconnected(G))
+ True
+
+ See Also
+ --------
+ biconnected_components,
+ articulation_points,
+ biconnected_component_edges,
+ biconnected_component_subgraphs
+
+ Notes
+ -----
+ The algorithm to find articulation points and biconnected
+ components is implemented using a non-recursive depth-first-search
+ (DFS) that keeps track of the highest level that back edges reach
+ in the DFS tree. A node `n` is an articulation point if, and only
+ if, there exists a subtree rooted at `n` such that there is no
+ back edge from any successor of `n` that links to a predecessor of
+ `n` in the DFS tree. By keeping track of all the edges traversed
+ by the DFS we can obtain the biconnected components because all
+ edges of a bicomponent will be traversed consecutively between
+ articulation points.
+
+ References
+ ----------
+ .. [1] Hopcroft, J.; Tarjan, R. (1973).
+ "Efficient algorithms for graph manipulation".
+ Communications of the ACM 16: 372–378. doi:10.1145/362248.362272
+ """
+ bcc = list(biconnected_components(G))
+ if not bcc: # No bicomponents (it could be an empty graph)
+ return False
+ return len(bcc[0]) == len(G)
+
+def biconnected_component_edges(G):
+    """Return a list of edge lists, one list for each biconnected
+    component of the input graph.
+
+ Biconnected components are maximal subgraphs such that the removal of a
+ node (and all edges incident on that node) will not disconnect the
+ subgraph. Note that nodes may be part of more than one biconnected
+ component. Those nodes are articulation points, or cut vertices. However,
+ each edge belongs to one, and only one, biconnected component.
+
+ Notice that by convention a dyad is considered a biconnected component.
+
+ Parameters
+ ----------
+ G : NetworkX Graph
+ An undirected graph.
+
+ Returns
+ -------
+    edges : list
+        List of edge lists, one for each bicomponent, ordered from
+        largest component to smallest.
+
+ Raises
+ ------
+ NetworkXError :
+ If the input graph is not undirected.
+
+ Examples
+ --------
+ >>> G = nx.barbell_graph(4,2)
+ >>> print(nx.is_biconnected(G))
+ False
+ >>> components = nx.biconnected_component_edges(G)
+ >>> G.add_edge(2,8)
+ >>> print(nx.is_biconnected(G))
+ True
+ >>> components = nx.biconnected_component_edges(G)
+
+ See Also
+ --------
+ is_biconnected,
+ biconnected_components,
+ articulation_points,
+ biconnected_component_subgraphs
+
+ Notes
+ -----
+ The algorithm to find articulation points and biconnected
+ components is implemented using a non-recursive depth-first-search
+ (DFS) that keeps track of the highest level that back edges reach
+ in the DFS tree. A node `n` is an articulation point if, and only
+ if, there exists a subtree rooted at `n` such that there is no
+ back edge from any successor of `n` that links to a predecessor of
+ `n` in the DFS tree. By keeping track of all the edges traversed
+ by the DFS we can obtain the biconnected components because all
+ edges of a bicomponent will be traversed consecutively between
+ articulation points.
+
+ References
+ ----------
+ .. [1] Hopcroft, J.; Tarjan, R. (1973).
+ "Efficient algorithms for graph manipulation".
+ Communications of the ACM 16: 372–378. doi:10.1145/362248.362272
+ """
+ return sorted(_biconnected_dfs(G,components=True), key=len, reverse=True)
+
+def biconnected_components(G):
+    """Return a list of sets of nodes, one set for each biconnected
+    component of the graph.
+
+ Biconnected components are maximal subgraphs such that the removal of a
+ node (and all edges incident on that node) will not disconnect the
+ subgraph. Note that nodes may be part of more than one biconnected
+ component. Those nodes are articulation points, or cut vertices. The
+ removal of articulation points will increase the number of connected
+ components of the graph.
+
+ Notice that by convention a dyad is considered a biconnected component.
+
+ Parameters
+ ----------
+ G : NetworkX Graph
+ An undirected graph.
+
+ Returns
+ -------
+    nodes : list
+        List of sets of nodes, one set for each biconnected component,
+        ordered from largest component to smallest.
+
+ Raises
+ ------
+ NetworkXError :
+ If the input graph is not undirected.
+
+ Examples
+ --------
+ >>> G = nx.barbell_graph(4,2)
+ >>> print(nx.is_biconnected(G))
+ False
+ >>> components = nx.biconnected_components(G)
+ >>> G.add_edge(2,8)
+ >>> print(nx.is_biconnected(G))
+ True
+ >>> components = nx.biconnected_components(G)
+
+ See Also
+ --------
+ is_biconnected,
+ articulation_points,
+ biconnected_component_edges,
+ biconnected_component_subgraphs
+
+ Notes
+ -----
+ The algorithm to find articulation points and biconnected
+ components is implemented using a non-recursive depth-first-search
+ (DFS) that keeps track of the highest level that back edges reach
+ in the DFS tree. A node `n` is an articulation point if, and only
+ if, there exists a subtree rooted at `n` such that there is no
+ back edge from any successor of `n` that links to a predecessor of
+ `n` in the DFS tree. By keeping track of all the edges traversed
+ by the DFS we can obtain the biconnected components because all
+ edges of a bicomponent will be traversed consecutively between
+ articulation points.
+
+ References
+ ----------
+ .. [1] Hopcroft, J.; Tarjan, R. (1973).
+ "Efficient algorithms for graph manipulation".
+ Communications of the ACM 16: 372–378. doi:10.1145/362248.362272
+ """
+ bicomponents = (set(chain.from_iterable(comp))
+ for comp in _biconnected_dfs(G,components=True))
+ return sorted(bicomponents, key=len, reverse=True)
+
+def biconnected_component_subgraphs(G):
+ """Return a generator of graphs, one graph for each biconnected component
+ of the input graph.
+
+ Biconnected components are maximal subgraphs such that the removal of a
+ node (and all edges incident on that node) will not disconnect the
+ subgraph. Note that nodes may be part of more than one biconnected
+ component. Those nodes are articulation points, or cut vertices. The
+ removal of articulation points will increase the number of connected
+ components of the graph.
+
+ Notice that by convention a dyad is considered a biconnected component.
+
+ Parameters
+ ----------
+ G : NetworkX Graph
+ An undirected graph.
+
+ Returns
+ -------
+ graphs : generator
+ Generator of graphs, one graph for each biconnected component.
+
+ Raises
+ ------
+ NetworkXError :
+ If the input graph is not undirected.
+
+ Examples
+ --------
+ >>> G = nx.barbell_graph(4,2)
+ >>> print(nx.is_biconnected(G))
+ False
+ >>> subgraphs = nx.biconnected_component_subgraphs(G)
+
+ See Also
+ --------
+ is_biconnected,
+ articulation_points,
+ biconnected_component_edges,
+ biconnected_components
+
+ Notes
+ -----
+ The algorithm to find articulation points and biconnected
+ components is implemented using a non-recursive depth-first-search
+ (DFS) that keeps track of the highest level that back edges reach
+ in the DFS tree. A node `n` is an articulation point if, and only
+ if, there exists a subtree rooted at `n` such that there is no
+ back edge from any successor of `n` that links to a predecessor of
+ `n` in the DFS tree. By keeping track of all the edges traversed
+ by the DFS we can obtain the biconnected components because all
+ edges of a bicomponent will be traversed consecutively between
+ articulation points.
+
+ Graph, node, and edge attributes are copied to the subgraphs.
+
+ References
+ ----------
+ .. [1] Hopcroft, J.; Tarjan, R. (1973).
+ "Efficient algorithms for graph manipulation".
+ Communications of the ACM 16: 372–378. doi:10.1145/362248.362272
+ """
+ def edge_subgraph(G,edges):
+ # create new graph and copy subgraph into it
+ H = G.__class__()
+ for u,v in edges:
+ H.add_edge(u,v,attr_dict=G[u][v])
+ for n in H:
+ H.node[n]=G.node[n].copy()
+ H.graph=G.graph.copy()
+ return H
+ return (edge_subgraph(G,edges) for edges in
+ sorted(_biconnected_dfs(G,components=True), key=len, reverse=True))
+
+def articulation_points(G):
+ """Return a generator of articulation points, or cut vertices, of a graph.
+
+ An articulation point or cut vertex is any node whose removal (along with
+ all its incident edges) increases the number of connected components of
+ a graph. An undirected connected graph without articulation points is
+ biconnected. Articulation points belong to more than one biconnected
+ component of a graph.
+
+ Notice that by convention a dyad is considered a biconnected component.
+
+ Parameters
+ ----------
+ G : NetworkX Graph
+ An undirected graph.
+
+ Returns
+ -------
+ articulation points : generator
+ generator of nodes
+
+ Raises
+ ------
+ NetworkXError :
+ If the input graph is not undirected.
+
+ Examples
+ --------
+ >>> G = nx.barbell_graph(4,2)
+ >>> print(nx.is_biconnected(G))
+ False
+ >>> list(nx.articulation_points(G))
+ [6, 5, 4, 3]
+ >>> G.add_edge(2,8)
+ >>> print(nx.is_biconnected(G))
+ True
+ >>> list(nx.articulation_points(G))
+ []
+
+ See Also
+ --------
+ is_biconnected,
+ biconnected_components,
+ biconnected_component_edges,
+ biconnected_component_subgraphs
+
+ Notes
+ -----
+ The algorithm to find articulation points and biconnected
+ components is implemented using a non-recursive depth-first-search
+ (DFS) that keeps track of the highest level that back edges reach
+ in the DFS tree. A node `n` is an articulation point if, and only
+ if, there exists a subtree rooted at `n` such that there is no
+ back edge from any successor of `n` that links to a predecessor of
+ `n` in the DFS tree. By keeping track of all the edges traversed
+ by the DFS we can obtain the biconnected components because all
+ edges of a bicomponent will be traversed consecutively between
+ articulation points.
+
+ References
+ ----------
+ .. [1] Hopcroft, J.; Tarjan, R. (1973).
+ "Efficient algorithms for graph manipulation".
+ Communications of the ACM 16: 372–378. doi:10.1145/362248.362272
+ """
+ return _biconnected_dfs(G,components=False)
+
+def _biconnected_dfs(G, components=True):
+ # depth-first search algorithm to generate articulation points
+ # and biconnected components
+ if G.is_directed():
+ raise nx.NetworkXError('Not allowed for directed graph G. '
+ 'Use UG=G.to_undirected() to create an '
+ 'undirected graph.')
+ visited = set()
+ for start in G:
+ if start in visited:
+ continue
+ discovery = {start:0} # "time" of first discovery of node during search
+ low = {start:0}
+ root_children = 0
+ visited.add(start)
+ edge_stack = []
+ stack = [(start, start, iter(G[start]))]
+ while stack:
+ grandparent, parent, children = stack[-1]
+ try:
+ child = next(children)
+ if grandparent == child:
+ continue
+ if child in visited:
+ if discovery[child] <= discovery[parent]: # back edge
+ low[parent] = min(low[parent],discovery[child])
+ if components:
+ edge_stack.append((parent,child))
+ else:
+ low[child] = discovery[child] = len(discovery)
+ visited.add(child)
+ stack.append((parent, child, iter(G[child])))
+ if components:
+ edge_stack.append((parent,child))
+ except StopIteration:
+ stack.pop()
+ if len(stack) > 1:
+ if low[parent] >= discovery[grandparent]:
+ if components:
+ ind = edge_stack.index((grandparent,parent))
+ yield edge_stack[ind:]
+ edge_stack=edge_stack[:ind]
+ else:
+ yield grandparent
+ low[grandparent] = min(low[parent], low[grandparent])
+ elif stack: # length 1 so grandparent is root
+ root_children += 1
+ if components:
+ ind = edge_stack.index((grandparent,parent))
+ yield edge_stack[ind:]
+ if not components:
+ # root node is articulation point if it has more than 1 child
+ if root_children > 1:
+ yield start
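+
+# Illustrative sketch (not from the upstream module): on a path graph the
+# middle node is the only articulation point, and each edge forms its own
+# bicomponent (component order may depend on traversal).
+#
+#     >>> G = nx.path_graph(3)
+#     >>> list(articulation_points(G))
+#     [1]
+#     >>> [sorted(c) for c in biconnected_components(G)]
+#     [[1, 2], [0, 1]]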
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/components/connected.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/components/connected.py
new file mode 100644
index 0000000..088a99e
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/components/connected.py
@@ -0,0 +1,192 @@
+# -*- coding: utf-8 -*-
+"""
+Connected components.
+"""
+__authors__ = "\n".join(['Eben Kenah',
+                         'Aric Hagberg (hagberg@lanl.gov)',
+                         'Christopher Ellison'])
+# Copyright (C) 2004-2010 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+
+__all__ = ['number_connected_components',
+ 'connected_components',
+ 'connected_component_subgraphs',
+ 'is_connected',
+ 'node_connected_component',
+ ]
+
+import networkx as nx
+
+def connected_components(G):
+ """Return nodes in connected components of graph.
+
+ Parameters
+ ----------
+ G : NetworkX Graph
+ An undirected graph.
+
+ Returns
+ -------
+ comp : list of lists
+ A list of nodes for each component of G.
+
+ See Also
+ --------
+ strongly_connected_components
+
+ Notes
+ -----
+ The list is ordered from largest connected component to smallest.
+ For undirected graphs only.
+ """
+ if G.is_directed():
+ raise nx.NetworkXError("""Not allowed for directed graph G.
+ Use UG=G.to_undirected() to create an undirected graph.""")
+ seen={}
+ components=[]
+ for v in G:
+ if v not in seen:
+ c=nx.single_source_shortest_path_length(G,v)
+ components.append(list(c.keys()))
+ seen.update(c)
+ components.sort(key=len,reverse=True)
+ return components
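+
+# Illustrative sketch (not from the upstream module): components come back
+# as lists of nodes, largest component first.
+#
+#     >>> G = nx.path_graph(4)
+#     >>> G.add_edge(10, 11)
+#     >>> [sorted(c) for c in connected_components(G)]
+#     [[0, 1, 2, 3], [10, 11]]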
+
+
+def number_connected_components(G):
+ """Return number of connected components in graph.
+
+ Parameters
+ ----------
+ G : NetworkX Graph
+ An undirected graph.
+
+ Returns
+ -------
+ n : integer
+ Number of connected components
+
+ See Also
+ --------
+ connected_components
+
+ Notes
+ -----
+ For undirected graphs only.
+ """
+ return len(connected_components(G))
+
+
+def is_connected(G):
+ """Test graph connectivity.
+
+ Parameters
+ ----------
+ G : NetworkX Graph
+ An undirected graph.
+
+ Returns
+ -------
+ connected : bool
+ True if the graph is connected, false otherwise.
+
+ Examples
+ --------
+ >>> G=nx.path_graph(4)
+ >>> print(nx.is_connected(G))
+ True
+
+ See Also
+ --------
+ connected_components
+
+ Notes
+ -----
+ For undirected graphs only.
+ """
+ if G.is_directed():
+ raise nx.NetworkXError(\
+ """Not allowed for directed graph G.
+Use UG=G.to_undirected() to create an undirected graph.""")
+
+ if len(G)==0:
+ raise nx.NetworkXPointlessConcept(
+ """Connectivity is undefined for the null graph.""")
+
+ return len(nx.single_source_shortest_path_length(G,
+ next(G.nodes_iter())))==len(G)
+
+
+def connected_component_subgraphs(G):
+ """Return connected components as subgraphs.
+
+ Parameters
+ ----------
+ G : NetworkX Graph
+ An undirected graph.
+
+ Returns
+ -------
+ glist : list
+ A list of graphs, one for each connected component of G.
+
+ Examples
+ --------
+ Get largest connected component as subgraph
+
+ >>> G=nx.path_graph(4)
+ >>> G.add_edge(5,6)
+ >>> H=nx.connected_component_subgraphs(G)[0]
+
+ See Also
+ --------
+ connected_components
+
+ Notes
+ -----
+ The list is ordered from largest connected component to smallest.
+ For undirected graphs only.
+
+ Graph, node, and edge attributes are copied to the subgraphs.
+ """
+ cc=connected_components(G)
+ graph_list=[]
+ for c in cc:
+ graph_list.append(G.subgraph(c).copy())
+ return graph_list
+
+
+def node_connected_component(G,n):
+    """Return the nodes of the connected component of G containing node n.
+
+ Parameters
+ ----------
+ G : NetworkX Graph
+ An undirected graph.
+
+ n : node label
+ A node in G
+
+ Returns
+ -------
+    comp : list
+        A list of nodes in the component of G containing node n.
+
+ See Also
+ --------
+ connected_components
+
+ Notes
+ -----
+ For undirected graphs only.
+ """
+ if G.is_directed():
+ raise nx.NetworkXError("""Not allowed for directed graph G.
+ Use UG=G.to_undirected() to create an undirected graph.""")
+ return list(nx.single_source_shortest_path_length(G,n).keys())
+
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/components/strongly_connected.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/components/strongly_connected.py
new file mode 100644
index 0000000..fbdec13
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/components/strongly_connected.py
@@ -0,0 +1,359 @@
+# -*- coding: utf-8 -*-
+"""
+Strongly connected components.
+"""
+# Copyright (C) 2004-2011 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+import networkx as nx
+__authors__ = "\n".join(['Eben Kenah',
+                         'Aric Hagberg (hagberg@lanl.gov)',
+                         'Christopher Ellison',
+                         'Ben Edwards (bedwards@cs.unm.edu)'])
+
+__all__ = ['number_strongly_connected_components',
+ 'strongly_connected_components',
+ 'strongly_connected_component_subgraphs',
+ 'is_strongly_connected',
+ 'strongly_connected_components_recursive',
+ 'kosaraju_strongly_connected_components',
+ 'condensation']
+
+def strongly_connected_components(G):
+ """Return nodes in strongly connected components of graph.
+
+ Parameters
+ ----------
+ G : NetworkX Graph
+       A directed graph.
+
+ Returns
+ -------
+ comp : list of lists
+ A list of nodes for each component of G.
+ The list is ordered from largest connected component to smallest.
+
+ Raises
+ ------
+ NetworkXError: If G is undirected.
+
+ See Also
+ --------
+ connected_components, weakly_connected_components
+
+ Notes
+ -----
+ Uses Tarjan's algorithm with Nuutila's modifications.
+ Nonrecursive version of algorithm.
+
+ References
+ ----------
+    .. [1] Depth-first search and linear graph algorithms, R. Tarjan
+       SIAM Journal on Computing 1(2):146-160, (1972).
+
+    .. [2] On finding the strongly connected components in a directed graph.
+       E. Nuutila and E. Soisalon-Soinen
+       Information Processing Letters 49(1): 9-14, (1994).
+ """
+ if not G.is_directed():
+ raise nx.NetworkXError("""Not allowed for undirected graph G.
+ Use connected_components() """)
+ preorder={}
+ lowlink={}
+ scc_found={}
+ scc_queue = []
+ scc_list=[]
+ i=0 # Preorder counter
+ for source in G:
+ if source not in scc_found:
+ queue=[source]
+ while queue:
+ v=queue[-1]
+ if v not in preorder:
+ i=i+1
+ preorder[v]=i
+ done=1
+ v_nbrs=G[v]
+ for w in v_nbrs:
+ if w not in preorder:
+ queue.append(w)
+ done=0
+ break
+ if done==1:
+ lowlink[v]=preorder[v]
+ for w in v_nbrs:
+ if w not in scc_found:
+ if preorder[w]>preorder[v]:
+ lowlink[v]=min([lowlink[v],lowlink[w]])
+ else:
+ lowlink[v]=min([lowlink[v],preorder[w]])
+ queue.pop()
+ if lowlink[v]==preorder[v]:
+ scc_found[v]=True
+ scc=[v]
+ while scc_queue and preorder[scc_queue[-1]]>preorder[v]:
+ k=scc_queue.pop()
+ scc_found[k]=True
+ scc.append(k)
+ scc_list.append(scc)
+ else:
+ scc_queue.append(v)
+ scc_list.sort(key=len,reverse=True)
+ return scc_list
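+
+# Illustrative sketch (not from the upstream module): a directed 3-cycle
+# with a tail collapses to one SCC plus a singleton, largest first.
+#
+#     >>> G = nx.DiGraph([(1, 2), (2, 3), (3, 1), (3, 4)])
+#     >>> [sorted(c) for c in strongly_connected_components(G)]
+#     [[1, 2, 3], [4]]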
+
+
+def kosaraju_strongly_connected_components(G,source=None):
+ """Return nodes in strongly connected components of graph.
+
+ Parameters
+ ----------
+ G : NetworkX Graph
+       A directed graph.
+
+ Returns
+ -------
+ comp : list of lists
+ A list of nodes for each component of G.
+ The list is ordered from largest connected component to smallest.
+
+ Raises
+ ------
+ NetworkXError: If G is undirected
+
+ See Also
+ --------
+ connected_components
+
+ Notes
+ -----
+ Uses Kosaraju's algorithm.
+ """
+ if not G.is_directed():
+ raise nx.NetworkXError("""Not allowed for undirected graph G.
+ Use connected_components() """)
+ components=[]
+ G=G.reverse(copy=False)
+ post=list(nx.dfs_postorder_nodes(G,source=source))
+ G=G.reverse(copy=False)
+ seen={}
+ while post:
+ r=post.pop()
+ if r in seen:
+ continue
+ c=nx.dfs_preorder_nodes(G,r)
+ new=[v for v in c if v not in seen]
+ seen.update([(u,True) for u in new])
+ components.append(new)
+ components.sort(key=len,reverse=True)
+ return components
+
+
+def strongly_connected_components_recursive(G):
+ """Return nodes in strongly connected components of graph.
+
+ Recursive version of algorithm.
+
+ Parameters
+ ----------
+ G : NetworkX Graph
+       A directed graph.
+
+ Returns
+ -------
+ comp : list of lists
+ A list of nodes for each component of G.
+ The list is ordered from largest connected component to smallest.
+
+ Raises
+ ------
+ NetworkXError : If G is undirected
+
+ See Also
+ --------
+ connected_components
+
+ Notes
+ -----
+ Uses Tarjan's algorithm with Nuutila's modifications.
+
+ References
+ ----------
+    .. [1] Depth-first search and linear graph algorithms, R. Tarjan
+       SIAM Journal on Computing 1(2):146-160, (1972).
+
+    .. [2] On finding the strongly connected components in a directed graph.
+       E. Nuutila and E. Soisalon-Soinen
+       Information Processing Letters 49(1): 9-14, (1994).
+ """
+ def visit(v,cnt):
+ root[v]=cnt
+ visited[v]=cnt
+ cnt+=1
+ stack.append(v)
+ for w in G[v]:
+ if w not in visited: visit(w,cnt)
+ if w not in component:
+ root[v]=min(root[v],root[w])
+ if root[v]==visited[v]:
+ component[v]=root[v]
+ tmpc=[v] # hold nodes in this component
+ while stack[-1]!=v:
+ w=stack.pop()
+ component[w]=root[v]
+ tmpc.append(w)
+ stack.remove(v)
+ scc.append(tmpc) # add to scc list
+
+ if not G.is_directed():
+ raise nx.NetworkXError("""Not allowed for undirected graph G.
+ Use connected_components() """)
+
+ scc=[]
+ visited={}
+ component={}
+ root={}
+ cnt=0
+ stack=[]
+ for source in G:
+ if source not in visited:
+ visit(source,cnt)
+
+ scc.sort(key=len,reverse=True)
+ return scc
+
+
+def strongly_connected_component_subgraphs(G):
+ """Return strongly connected components as subgraphs.
+
+ Parameters
+ ----------
+ G : NetworkX Graph
+ A graph.
+
+ Returns
+ -------
+ glist : list
+ A list of graphs, one for each strongly connected component of G.
+
+ See Also
+ --------
+ connected_component_subgraphs
+
+ Notes
+ -----
+ The list is ordered from largest strongly connected component to smallest.
+
+ Graph, node, and edge attributes are copied to the subgraphs.
+ """
+ cc=strongly_connected_components(G)
+ graph_list=[]
+ for c in cc:
+ graph_list.append(G.subgraph(c).copy())
+ return graph_list
+
+
+def number_strongly_connected_components(G):
+ """Return number of strongly connected components in graph.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ A directed graph.
+
+ Returns
+ -------
+ n : integer
+ Number of strongly connected components
+
+ See Also
+ --------
+ connected_components
+
+ Notes
+ -----
+ For directed graphs only.
+ """
+ return len(strongly_connected_components(G))
+
+
+def is_strongly_connected(G):
+ """Test directed graph for strong connectivity.
+
+ Parameters
+ ----------
+ G : NetworkX Graph
+ A directed graph.
+
+ Returns
+ -------
+ connected : bool
+ True if the graph is strongly connected, False otherwise.
+
+ See Also
+ --------
+ strongly_connected_components
+
+ Notes
+ -----
+ For directed graphs only.
+ """
+ if not G.is_directed():
+ raise nx.NetworkXError("""Not allowed for undirected graph G.
+ See is_connected() for connectivity test.""")
+
+ if len(G)==0:
+ raise nx.NetworkXPointlessConcept(
+ """Connectivity is undefined for the null graph.""")
+
+ return len(strongly_connected_components(G)[0])==len(G)
+
+def condensation(G, scc=None):
+ """Returns the condensation of G.
+
+ The condensation of G is the graph with each of the strongly connected
+ components contracted into a single node.
+
+ Parameters
+ ----------
+ G : NetworkX DiGraph
+ A directed graph.
+
+ scc: list (optional, default=None)
+ A list of strongly connected components. If provided, the elements in
+ `scc` must partition the nodes in `G`. If not provided, it will be
+ calculated as scc=nx.strongly_connected_components(G).
+
+ Returns
+ -------
+ C : NetworkX DiGraph
+ The condensation of G. The node labels are integers corresponding
+ to the index of the component in the list of strongly connected
+ components.
+
+ Raises
+ ------
+ NetworkXError: If G is not directed
+
+ Notes
+ -----
+ After contracting all strongly connected components to a single node,
+ the resulting graph is a directed acyclic graph.
+ """
+ if not G.is_directed():
+ raise nx.NetworkXError("""Not allowed for undirected graph G.
+ See is_connected() for connectivity test.""")
+ if scc is None:
+ scc = nx.strongly_connected_components(G)
+ mapping = {}
+ C = nx.DiGraph()
+ for i,component in enumerate(scc):
+ for n in component:
+ mapping[n] = i
+ C.add_nodes_from(range(len(scc)))
+ for u,v in G.edges():
+ if mapping[u] != mapping[v]:
+ C.add_edge(mapping[u],mapping[v])
+ return C
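+
+# Illustrative sketch (not from the upstream module), continuing the
+# cycle-with-tail example: the condensation is a two-node DAG with a
+# single edge from the cycle's component (index 0) to the tail's (index 1).
+#
+#     >>> G = nx.DiGraph([(1, 2), (2, 3), (3, 1), (3, 4)])
+#     >>> C = nx.condensation(G)
+#     >>> sorted(C.nodes()), C.edges()
+#     ([0, 1], [(0, 1)])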
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/components/tests/test_attracting.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/components/tests/test_attracting.py
new file mode 100644
index 0000000..c2108dd
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/components/tests/test_attracting.py
@@ -0,0 +1,64 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx as nx
+
+
+class TestAttractingComponents(object):
+ def setUp(self):
+ self.G1 = nx.DiGraph()
+ self.G1.add_edges_from([(5,11),(11,2),(11,9),(11,10),
+ (7,11),(7,8),(8,9),(3,8),(3,10)])
+ self.G2 = nx.DiGraph()
+ self.G2.add_edges_from([(0,1),(0,2),(1,1),(1,2),(2,1)])
+
+ self.G3 = nx.DiGraph()
+ self.G3.add_edges_from([(0,1),(1,2),(2,1),(0,3),(3,4),(4,3)])
+
+ def test_attracting_components(self):
+ ac = nx.attracting_components(self.G1)
+ assert_true([2] in ac)
+ assert_true([9] in ac)
+ assert_true([10] in ac)
+
+ ac = nx.attracting_components(self.G2)
+ ac = [tuple(sorted(x)) for x in ac]
+ assert_true(ac == [(1,2)])
+
+ ac = nx.attracting_components(self.G3)
+ ac = [tuple(sorted(x)) for x in ac]
+ assert_true((1,2) in ac)
+ assert_true((3,4) in ac)
+ assert_equal(len(ac), 2)
+
+    def test_number_attracting_components(self):
+ assert_equal(len(nx.attracting_components(self.G1)), 3)
+ assert_equal(len(nx.attracting_components(self.G2)), 1)
+ assert_equal(len(nx.attracting_components(self.G3)), 2)
+
+ def test_is_attracting_component(self):
+ assert_false(nx.is_attracting_component(self.G1))
+ assert_false(nx.is_attracting_component(self.G2))
+ assert_false(nx.is_attracting_component(self.G3))
+ g2 = self.G3.subgraph([1,2])
+ assert_true(nx.is_attracting_component(g2))
+
+ def test_attracting_component_subgraphs(self):
+ subgraphs = nx.attracting_component_subgraphs(self.G1)
+ for subgraph in subgraphs:
+ assert_equal(len(subgraph), 1)
+
+ self.G2.add_edge(1,2,eattr='red') # test attrs copied to subgraphs
+ self.G2.node[2]['nattr']='blue'
+ self.G2.graph['gattr']='green'
+ subgraphs = nx.attracting_component_subgraphs(self.G2)
+ assert_equal(len(subgraphs), 1)
+ SG2=subgraphs[0]
+ assert_true(1 in SG2)
+ assert_true(2 in SG2)
+ assert_equal(SG2[1][2]['eattr'],'red')
+ assert_equal(SG2.node[2]['nattr'],'blue')
+ assert_equal(SG2.graph['gattr'],'green')
+ SG2.add_edge(1,2,eattr='blue')
+ assert_equal(SG2[1][2]['eattr'],'blue')
+ assert_equal(self.G2[1][2]['eattr'],'red')
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/components/tests/test_biconnected.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/components/tests/test_biconnected.py
new file mode 100644
index 0000000..85f967a
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/components/tests/test_biconnected.py
@@ -0,0 +1,191 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx as nx
+from networkx.algorithms.components import biconnected
+
+def assert_components_equal(x,y):
+ sx = set((frozenset([frozenset(e) for e in c]) for c in x))
+ sy = set((frozenset([frozenset(e) for e in c]) for c in y))
+ assert_equal(sx,sy)
+
+def test_barbell():
+ G=nx.barbell_graph(8,4)
+ G.add_path([7,20,21,22])
+ G.add_cycle([22,23,24,25])
+ pts=set(biconnected.articulation_points(G))
+ assert_equal(pts,set([7,8,9,10,11,12,20,21,22]))
+
+ answer = [set([12, 13, 14, 15, 16, 17, 18, 19]),
+ set([0, 1, 2, 3, 4, 5, 6, 7]),
+ set([22, 23, 24, 25]),
+ set([11, 12]),
+ set([10, 11]),
+ set([9, 10]),
+ set([8, 9]),
+ set([7, 8]),
+ set([21, 22]),
+ set([20, 21]),
+ set([7, 20])]
+ bcc=list(biconnected.biconnected_components(G))
+ bcc.sort(key=len, reverse=True)
+ assert_equal(bcc,answer)
+
+ G.add_edge(2,17)
+ pts=set(biconnected.articulation_points(G))
+ assert_equal(pts,set([7,20,21,22]))
+
+def test_articulation_points_cycle():
+ G=nx.cycle_graph(3)
+ G.add_cycle([1,3,4])
+ pts=set(biconnected.articulation_points(G))
+ assert_equal(pts,set([1]))
+
+def test_is_biconnected():
+ G=nx.cycle_graph(3)
+ assert_true(biconnected.is_biconnected(G))
+ G.add_cycle([1,3,4])
+ assert_false(biconnected.is_biconnected(G))
+
+def test_empty_is_biconnected():
+ G=nx.empty_graph(5)
+ assert_false(biconnected.is_biconnected(G))
+ G.add_edge(0,1)
+ assert_false(biconnected.is_biconnected(G))
+
+def test_biconnected_components_cycle():
+ G=nx.cycle_graph(3)
+ G.add_cycle([1,3,4])
+ pts = set(map(frozenset,biconnected.biconnected_components(G)))
+ assert_equal(pts,set([frozenset([0,1,2]),frozenset([1,3,4])]))
+
+def test_biconnected_component_subgraphs_cycle():
+ G=nx.cycle_graph(3)
+ G.add_cycle([1,3,4,5])
+ G.add_edge(1,3,eattr='red') # test copying of edge data
+ G.node[1]['nattr']='blue'
+ G.graph['gattr']='green'
+ Gc = set(biconnected.biconnected_component_subgraphs(G))
+ assert_equal(len(Gc),2)
+ g1,g2=Gc
+ if 0 in g1:
+ assert_true(nx.is_isomorphic(g1,nx.Graph([(0,1),(0,2),(1,2)])))
+ assert_true(nx.is_isomorphic(g2,nx.Graph([(1,3),(1,5),(3,4),(4,5)])))
+ assert_equal(g2[1][3]['eattr'],'red')
+ assert_equal(g2.node[1]['nattr'],'blue')
+ assert_equal(g2.graph['gattr'],'green')
+ g2[1][3]['eattr']='blue'
+ assert_equal(g2[1][3]['eattr'],'blue')
+ assert_equal(G[1][3]['eattr'],'red')
+ else:
+ assert_true(nx.is_isomorphic(g1,nx.Graph([(1,3),(1,5),(3,4),(4,5)])))
+ assert_true(nx.is_isomorphic(g2,nx.Graph([(0,1),(0,2),(1,2)])))
+ assert_equal(g1[1][3]['eattr'],'red')
+ assert_equal(g1.node[1]['nattr'],'blue')
+ assert_equal(g1.graph['gattr'],'green')
+ g1[1][3]['eattr']='blue'
+ assert_equal(g1[1][3]['eattr'],'blue')
+ assert_equal(G[1][3]['eattr'],'red')
+
+
+def test_biconnected_components1():
+ # graph example from
+ # http://www.ibluemojo.com/school/articul_algorithm.html
+ edges=[(0,1),
+ (0,5),
+ (0,6),
+ (0,14),
+ (1,5),
+ (1,6),
+ (1,14),
+ (2,4),
+ (2,10),
+ (3,4),
+ (3,15),
+ (4,6),
+ (4,7),
+ (4,10),
+ (5,14),
+ (6,14),
+ (7,9),
+ (8,9),
+ (8,12),
+ (8,13),
+ (10,15),
+ (11,12),
+ (11,13),
+ (12,13)]
+ G=nx.Graph(edges)
+ pts = set(biconnected.articulation_points(G))
+ assert_equal(pts,set([4,6,7,8,9]))
+ comps = list(biconnected.biconnected_component_edges(G))
+ answer = [
+ [(3,4),(15,3),(10,15),(10,4),(2,10),(4,2)],
+ [(13,12),(13,8),(11,13),(12,11),(8,12)],
+ [(9,8)],
+ [(7,9)],
+ [(4,7)],
+ [(6,4)],
+ [(14,0),(5,1),(5,0),(14,5),(14,1),(6,14),(6,0),(1,6),(0,1)],
+ ]
+ assert_components_equal(comps,answer)
+
+def test_biconnected_components2():
+ G=nx.Graph()
+ G.add_cycle('ABC')
+ G.add_cycle('CDE')
+ G.add_cycle('FIJHG')
+ G.add_cycle('GIJ')
+ G.add_edge('E','G')
+ comps = list(biconnected.biconnected_component_edges(G))
+ answer = [
+ [tuple('GF'),tuple('FI'),tuple('IG'),tuple('IJ'),tuple('JG'),tuple('JH'),tuple('HG')],
+ [tuple('EG')],
+ [tuple('CD'),tuple('DE'),tuple('CE')],
+ [tuple('AB'),tuple('BC'),tuple('AC')]
+ ]
+ assert_components_equal(comps,answer)
+
+def test_biconnected_davis():
+ D = nx.davis_southern_women_graph()
+ bcc = list(biconnected.biconnected_components(D))[0]
+ assert_true(set(D) == bcc) # All nodes in a giant bicomponent
+ # So no articulation points
+ assert_equal(list(biconnected.articulation_points(D)),[])
+
+def test_biconnected_karate():
+ K = nx.karate_club_graph()
+ answer = [set([0, 1, 2, 3, 7, 8, 9, 12, 13, 14, 15, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33]),
+ set([0, 4, 5, 6, 10, 16]),
+ set([0, 11])]
+ bcc = list(biconnected.biconnected_components(K))
+ bcc.sort(key=len, reverse=True)
+ assert_true(list(biconnected.biconnected_components(K)) == answer)
+ assert_equal(list(biconnected.articulation_points(K)),[0])
+
+def test_biconnected_eppstein():
+ # tests from http://www.ics.uci.edu/~eppstein/PADS/Biconnectivity.py
+ G1 = nx.Graph({
+ 0: [1,2,5],
+ 1: [0,5],
+ 2: [0,3,4],
+ 3: [2,4,5,6],
+ 4: [2,3,5,6],
+ 5: [0,1,3,4],
+ 6: [3,4]})
+ G2 = nx.Graph({
+ 0: [2,5],
+ 1: [3,8],
+ 2: [0,3,5],
+ 3: [1,2,6,8],
+ 4: [7],
+ 5: [0,2],
+ 6: [3,8],
+ 7: [4],
+ 8: [1,3,6]})
+ assert_true(biconnected.is_biconnected(G1))
+ assert_false(biconnected.is_biconnected(G2))
+ answer_G2 = [set([1, 3, 6, 8]), set([0, 2, 5]), set([2, 3]), set([4, 7])]
+ bcc = list(biconnected.biconnected_components(G2))
+ bcc.sort(key=len, reverse=True)
+ assert_equal(bcc, answer_G2)
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/components/tests/test_connected.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/components/tests/test_connected.py
new file mode 100644
index 0000000..ae0247b
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/components/tests/test_connected.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx as nx
+from networkx import convert_node_labels_to_integers as cnlti
+from networkx import NetworkXError
+
+class TestConnected:
+
+ def setUp(self):
+ G1=cnlti(nx.grid_2d_graph(2,2),first_label=0,ordering="sorted")
+ G2=cnlti(nx.lollipop_graph(3,3),first_label=4,ordering="sorted")
+ G3=cnlti(nx.house_graph(),first_label=10,ordering="sorted")
+ self.G=nx.union(G1,G2)
+ self.G=nx.union(self.G,G3)
+ self.DG=nx.DiGraph([(1,2),(1,3),(2,3)])
+ self.grid=cnlti(nx.grid_2d_graph(4,4),first_label=1)
+
+ def test_connected_components(self):
+ cc=nx.connected_components
+ G=self.G
+ C=[[0, 1, 2, 3], [4, 5, 6, 7, 8, 9], [10, 11, 12, 13, 14]]
+ assert_equal(sorted([sorted(g) for g in cc(G)]),sorted(C))
+
+ def test_number_connected_components(self):
+ ncc=nx.number_connected_components
+ assert_equal(ncc(self.G),3)
+
+ def test_number_connected_components2(self):
+ ncc=nx.number_connected_components
+ assert_equal(ncc(self.grid),1)
+
+ def test_connected_components2(self):
+ cc=nx.connected_components
+ G=self.grid
+ C=[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]]
+ assert_equal(sorted([sorted(g) for g in cc(G)]),sorted(C))
+
+ def test_node_connected_components(self):
+ ncc=nx.node_connected_component
+ G=self.grid
+ C=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
+ assert_equal(sorted(ncc(G,1)),sorted(C))
+
+ def test_connected_component_subgraphs(self):
+ G=self.grid
+ G.add_edge(1,2,eattr='red') # test attributes copied to subgraphs
+ G.node[1]['nattr']='blue'
+ G.graph['gattr']='green'
+ ccs=nx.connected_component_subgraphs(G)
+ assert_equal(len(ccs),1)
+ sg=ccs[0]
+ assert_equal(sorted(sg.nodes()),list(range(1,17)))
+ assert_equal(sg[1][2]['eattr'],'red')
+ assert_equal(sg.node[1]['nattr'],'blue')
+ assert_equal(sg.graph['gattr'],'green')
+ sg[1][2]['eattr']='blue'
+ assert_equal(G[1][2]['eattr'],'red')
+ assert_equal(sg[1][2]['eattr'],'blue')
+
+
+ def test_is_connected(self):
+ assert_true(nx.is_connected(self.grid))
+ G=nx.Graph()
+ G.add_nodes_from([1,2])
+ assert_false(nx.is_connected(G))
+
+ def test_connected_raise(self):
+ assert_raises(NetworkXError,nx.connected_components,self.DG)
+ assert_raises(NetworkXError,nx.number_connected_components,self.DG)
+ assert_raises(NetworkXError,nx.connected_component_subgraphs,self.DG)
+ assert_raises(NetworkXError,nx.node_connected_component,self.DG,1)
+ assert_raises(NetworkXError,nx.is_connected,self.DG)
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/components/tests/test_strongly_connected.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/components/tests/test_strongly_connected.py
new file mode 100644
index 0000000..d2569a9
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/components/tests/test_strongly_connected.py
@@ -0,0 +1,138 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx as nx
+from networkx import NetworkXError
+
+class TestStronglyConnected:
+
+ def setUp(self):
+ self.gc=[]
+ G=nx.DiGraph()
+ G.add_edges_from([(1,2),(2,3),(2,8),(3,4),(3,7),
+ (4,5),(5,3),(5,6),(7,4),(7,6),(8,1),(8,7)])
+ C=[[3, 4, 5, 7], [1, 2, 8], [6]]
+ self.gc.append((G,C))
+
+ G= nx.DiGraph()
+ G.add_edges_from([(1,2),(1,3),(1,4),(4,2),(3,4),(2,3)])
+ C = [[2, 3, 4],[1]]
+ self.gc.append((G,C))
+
+ G = nx.DiGraph()
+ G.add_edges_from([(1,2),(2,3),(3,2),(2,1)])
+ C = [[1, 2, 3]]
+ self.gc.append((G,C))
+
+ # Eppstein's tests
+ G = nx.DiGraph({ 0:[1],1:[2,3],2:[4,5],3:[4,5],4:[6],5:[],6:[]})
+ C = [[0],[1],[2],[3],[4],[5],[6]]
+ self.gc.append((G,C))
+
+ G = nx.DiGraph({0:[1],1:[2,3,4],2:[0,3],3:[4],4:[3]})
+ C = [[0,1,2],[3,4]]
+ self.gc.append((G,C))
+
+
+ def test_tarjan(self):
+ scc=nx.strongly_connected_components
+ for G,C in self.gc:
+ assert_equal(sorted([sorted(g) for g in scc(G)]),sorted(C))
+
+
+ def test_tarjan_recursive(self):
+ scc=nx.strongly_connected_components_recursive
+ for G,C in self.gc:
+ assert_equal(sorted([sorted(g) for g in scc(G)]),sorted(C))
+
+
+ def test_kosaraju(self):
+ scc=nx.kosaraju_strongly_connected_components
+ for G,C in self.gc:
+ assert_equal(sorted([sorted(g) for g in scc(G)]),sorted(C))
+
+ def test_number_strongly_connected_components(self):
+ ncc=nx.number_strongly_connected_components
+ for G,C in self.gc:
+ assert_equal(ncc(G),len(C))
+
+ def test_is_strongly_connected(self):
+ for G,C in self.gc:
+ if len(C)==1:
+ assert_true(nx.is_strongly_connected(G))
+ else:
+ assert_false(nx.is_strongly_connected(G))
+
+
+ def test_strongly_connected_component_subgraphs(self):
+ scc=nx.strongly_connected_component_subgraphs
+ for G,C in self.gc:
+ assert_equal(sorted([sorted(g.nodes()) for g in scc(G)]),sorted(C))
+ G,C=self.gc[0]
+ G.add_edge(1,2,eattr='red')
+ G.node[1]['nattr']='blue'
+ G.graph['gattr']='green'
+ sgs=scc(G)[1]
+ assert_equal(sgs[1][2]['eattr'],'red')
+ assert_equal(sgs.node[1]['nattr'],'blue')
+ assert_equal(sgs.graph['gattr'],'green')
+ sgs[1][2]['eattr']='blue'
+ assert_equal(G[1][2]['eattr'],'red')
+ assert_equal(sgs[1][2]['eattr'],'blue')
+
+ def test_contract_scc1(self):
+ G = nx.DiGraph()
+ G.add_edges_from([(1,2),(2,3),(2,11),(2,12),(3,4),(4,3),(4,5),
+ (5,6),(6,5),(6,7),(7,8),(7,9),(7,10),(8,9),
+ (9,7),(10,6),(11,2),(11,4),(11,6),(12,6),(12,11)])
+ scc = nx.strongly_connected_components(G)
+ cG = nx.condensation(G, scc)
+ # DAG
+ assert_true(nx.is_directed_acyclic_graph(cG))
+        # number of nodes
+ assert_equal(sorted(cG.nodes()),[0,1,2,3])
+        # number of edges
+ mapping={}
+ for i,component in enumerate(scc):
+ for n in component:
+ mapping[n] = i
+ edge=(mapping[2],mapping[3])
+ assert_true(cG.has_edge(*edge))
+ edge=(mapping[2],mapping[5])
+ assert_true(cG.has_edge(*edge))
+ edge=(mapping[3],mapping[5])
+ assert_true(cG.has_edge(*edge))
+
+ def test_contract_scc_isolate(self):
+ # Bug found and fixed in [1687].
+ G = nx.DiGraph()
+ G.add_edge(1,2)
+ G.add_edge(2,1)
+ scc = nx.strongly_connected_components(G)
+ cG = nx.condensation(G, scc)
+ assert_equal(cG.nodes(),[0])
+ assert_equal(cG.edges(),[])
+
+ def test_contract_scc_edge(self):
+ G = nx.DiGraph()
+ G.add_edge(1,2)
+ G.add_edge(2,1)
+ G.add_edge(2,3)
+ G.add_edge(3,4)
+ G.add_edge(4,3)
+ scc = nx.strongly_connected_components(G)
+ cG = nx.condensation(G, scc)
+ assert_equal(cG.nodes(),[0,1])
+ if 1 in scc[0]:
+ edge = (0,1)
+ else:
+ edge = (1,0)
+ assert_equal(cG.edges(),[edge])
+
+ def test_connected_raise(self):
+ G=nx.Graph()
+ assert_raises(NetworkXError,nx.strongly_connected_components,G)
+ assert_raises(NetworkXError,nx.kosaraju_strongly_connected_components,G)
+ assert_raises(NetworkXError,nx.strongly_connected_components_recursive,G)
+ assert_raises(NetworkXError,nx.strongly_connected_component_subgraphs,G)
+ assert_raises(NetworkXError,nx.is_strongly_connected,G)
+ assert_raises(NetworkXError,nx.condensation,G)
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/components/tests/test_weakly_connected.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/components/tests/test_weakly_connected.py
new file mode 100644
index 0000000..d05e8ed
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/components/tests/test_weakly_connected.py
@@ -0,0 +1,88 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx as nx
+from networkx import NetworkXError
+
+class TestWeaklyConnected:
+
+ def setUp(self):
+ self.gc=[]
+ G=nx.DiGraph()
+ G.add_edges_from([(1,2),(2,3),(2,8),(3,4),(3,7),
+ (4,5),(5,3),(5,6),(7,4),(7,6),(8,1),(8,7)])
+ C=[[3, 4, 5, 7], [1, 2, 8], [6]]
+ self.gc.append((G,C))
+
+ G= nx.DiGraph()
+ G.add_edges_from([(1,2),(1,3),(1,4),(4,2),(3,4),(2,3)])
+ C = [[2, 3, 4],[1]]
+ self.gc.append((G,C))
+
+ G = nx.DiGraph()
+ G.add_edges_from([(1,2),(2,3),(3,2),(2,1)])
+ C = [[1, 2, 3]]
+ self.gc.append((G,C))
+
+ # Eppstein's tests
+ G = nx.DiGraph({ 0:[1],1:[2,3],2:[4,5],3:[4,5],4:[6],5:[],6:[]})
+ C = [[0],[1],[2],[3],[4],[5],[6]]
+ self.gc.append((G,C))
+
+ G = nx.DiGraph({0:[1],1:[2,3,4],2:[0,3],3:[4],4:[3]})
+ C = [[0,1,2],[3,4]]
+ self.gc.append((G,C))
+
+
+ def test_weakly_connected_components(self):
+ wcc=nx.weakly_connected_components
+ cc=nx.connected_components
+ for G,C in self.gc:
+ U=G.to_undirected()
+ w=sorted([sorted(g) for g in wcc(G)])
+ c=sorted([sorted(g) for g in cc(U)])
+ assert_equal(w,c)
+
+ def test_number_weakly_connected_components(self):
+ wcc=nx.number_weakly_connected_components
+ cc=nx.number_connected_components
+ for G,C in self.gc:
+ U=G.to_undirected()
+ w=wcc(G)
+ c=cc(U)
+ assert_equal(w,c)
+
+ def test_weakly_connected_component_subgraphs(self):
+ wcc=nx.weakly_connected_component_subgraphs
+ cc=nx.connected_component_subgraphs
+ for G,C in self.gc:
+ U=G.to_undirected()
+ w=sorted([sorted(g.nodes()) for g in wcc(G)])
+ c=sorted([sorted(g.nodes()) for g in cc(U)])
+ assert_equal(w,c)
+ G,C=self.gc[0]
+ G.add_edge(1,2,eattr='red')
+ G.node[1]['nattr']='blue'
+ G.graph['gattr']='green'
+ sgs=wcc(G)[0]
+ assert_equal(sgs[1][2]['eattr'],'red')
+ assert_equal(sgs.node[1]['nattr'],'blue')
+ assert_equal(sgs.graph['gattr'],'green')
+ sgs[1][2]['eattr']='blue'
+ assert_equal(G[1][2]['eattr'],'red')
+ assert_equal(sgs[1][2]['eattr'],'blue')
+
+ def test_is_weakly_connected(self):
+ wcc=nx.is_weakly_connected
+ cc=nx.is_connected
+ for G,C in self.gc:
+ U=G.to_undirected()
+ assert_equal(wcc(G),cc(U))
+
+
+ def test_connected_raise(self):
+ G=nx.Graph()
+ assert_raises(NetworkXError,nx.weakly_connected_components,G)
+ assert_raises(NetworkXError,nx.number_weakly_connected_components,G)
+ assert_raises(NetworkXError,nx.weakly_connected_component_subgraphs,G)
+ assert_raises(NetworkXError,nx.is_weakly_connected,G)
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/components/weakly_connected.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/components/weakly_connected.py
new file mode 100644
index 0000000..410a6c2
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/components/weakly_connected.py
@@ -0,0 +1,126 @@
+# -*- coding: utf-8 -*-
+"""
+Weakly connected components.
+"""
+__authors__ = "\n".join(['Aric Hagberg (hagberg@lanl.gov)',
+                         'Christopher Ellison'])
+# Copyright (C) 2004-2010 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+
+__all__ = ['number_weakly_connected_components',
+ 'weakly_connected_components',
+ 'weakly_connected_component_subgraphs',
+ 'is_weakly_connected'
+ ]
+
+import networkx as nx
+
+def weakly_connected_components(G):
+ """Return weakly connected components of G.
+ """
+ if not G.is_directed():
+ raise nx.NetworkXError("""Not allowed for undirected graph G.
+ Use connected_components() """)
+ seen={}
+ components=[]
+ for v in G:
+ if v not in seen:
+ c=_single_source_shortest_unipath_length(G,v)
+ components.append(list(c.keys()))
+ seen.update(c)
+ components.sort(key=len,reverse=True)
+ return components
+
+
+def number_weakly_connected_components(G):
+    """Return the number of weakly connected components in G.
+ For directed graphs only.
+ """
+ return len(weakly_connected_components(G))
+
+def weakly_connected_component_subgraphs(G):
+ """Return weakly connected components as subgraphs.
+
+ Graph, node, and edge attributes are copied to the subgraphs.
+ """
+ wcc=weakly_connected_components(G)
+ graph_list=[]
+ for c in wcc:
+ graph_list.append(G.subgraph(c).copy())
+ return graph_list
+
+def is_weakly_connected(G):
+ """Test directed graph for weak connectivity.
+
+ Parameters
+ ----------
+ G : NetworkX Graph
+ A directed graph.
+
+ Returns
+ -------
+ connected : bool
+ True if the graph is weakly connected, False otherwise.
+
+ See Also
+ --------
+ strongly_connected_components
+
+ Notes
+ -----
+ For directed graphs only.
+ """
+ if not G.is_directed():
+ raise nx.NetworkXError("""Not allowed for undirected graph G.
+ See is_connected() for connectivity test.""")
+
+ if len(G)==0:
+ raise nx.NetworkXPointlessConcept(
+ """Connectivity is undefined for the null graph.""")
+
+ return len(weakly_connected_components(G)[0])==len(G)
+
+def _single_source_shortest_unipath_length(G,source,cutoff=None):
+ """Compute the shortest path lengths from source to all reachable nodes.
+
+ The direction of the edge between nodes is ignored.
+
+ For directed graphs only.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ source : node
+ Starting node for path
+
+ cutoff : integer, optional
+ Depth to stop the search. Only paths of length <= cutoff are returned.
+
+ Returns
+ -------
+ lengths : dictionary
+ Dictionary of shortest path lengths keyed by target.
+ """
+ # namespace speedups
+ Gsucc = G.succ
+ Gpred = G.pred
+
+ seen={} # level (number of hops) when seen in BFS
+ level=0 # the current level
+ nextlevel = set([source]) # set of nodes to check at next level
+ while nextlevel:
+ thislevel=nextlevel # advance to next level
+ nextlevel = set() # and start a new list (fringe)
+ for v in thislevel:
+ if v not in seen:
+ seen[v]=level # set the level of vertex v
+ nextlevel.update(Gsucc[v]) # add successors of v
+ nextlevel.update(Gpred[v]) # add predecessors of v
+ if (cutoff is not None and cutoff <= level): break
+ level=level+1
+ return seen # return all path lengths as dictionary
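+
+# Usage sketch (illustrative, not part of the upstream module): the public
+# functions above on a digraph with two weak components, {1,2,3} and {4,5}.
+def _demo_weakly_connected():
+    G = nx.DiGraph([(1, 2), (2, 3), (4, 5)])
+    assert number_weakly_connected_components(G) == 2
+    assert not is_weakly_connected(G)
+    return weakly_connected_components(G)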
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/connectivity/__init__.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/connectivity/__init__.py
new file mode 100644
index 0000000..d14c33e
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/connectivity/__init__.py
@@ -0,0 +1,4 @@
+"""Flow based connectivity and cut algorithms
+"""
+from networkx.algorithms.connectivity.connectivity import *
+from networkx.algorithms.connectivity.cuts import *
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/connectivity/connectivity.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/connectivity/connectivity.py
new file mode 100644
index 0000000..bf6f272
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/connectivity/connectivity.py
@@ -0,0 +1,607 @@
+# -*- coding: utf-8 -*-
+"""
+Flow based connectivity algorithms
+"""
+import itertools
+import networkx as nx
+
+__author__ = '\n'.join(['Jordi Torrents <jtorrents@milnou.net>'])
+
+__all__ = [ 'average_node_connectivity',
+ 'local_node_connectivity',
+ 'node_connectivity',
+ 'local_edge_connectivity',
+ 'edge_connectivity',
+ 'all_pairs_node_connectivity_matrix',
+ 'dominating_set',
+ ]
+
+def average_node_connectivity(G):
+ r"""Returns the average connectivity of a graph G.
+
+ The average connectivity `\bar{\kappa}` of a graph G is the average
+ of local node connectivity over all pairs of nodes of G [1]_ .
+
+ .. math::
+
+ \bar{\kappa}(G) = \frac{\sum_{u,v} \kappa_{G}(u,v)}{{n \choose 2}}
+
+ Parameters
+ ----------
+
+ G : NetworkX graph
+ Undirected graph
+
+ Returns
+ -------
+ K : float
+ Average node connectivity
+
+ See also
+ --------
+ local_node_connectivity
+ node_connectivity
+ local_edge_connectivity
+ edge_connectivity
+ max_flow
+ ford_fulkerson
+
+ References
+ ----------
+ .. [1] Beineke, L., O. Oellermann, and R. Pippert (2002). The average
+ connectivity of a graph. Discrete mathematics 252(1-3), 31-45.
+ http://www.sciencedirect.com/science/article/pii/S0012365X01001807
+
+ """
+ if G.is_directed():
+ iter_func = itertools.permutations
+ else:
+ iter_func = itertools.combinations
+
+ H, mapping = _aux_digraph_node_connectivity(G)
+ num = 0.
+ den = 0.
+ for u,v in iter_func(G, 2):
+ den += 1
+ num += local_node_connectivity(G, u, v, aux_digraph=H, mapping=mapping)
+
+ if den == 0: # Null Graph
+ return 0
+ return num/den
+
+def _aux_digraph_node_connectivity(G):
+ r""" Creates a directed graph D from an undirected graph G to compute flow
+ based node connectivity.
+
+ For an undirected graph G having `n` nodes and `m` edges we derive a
+ directed graph D with 2n nodes and 2m+n arcs by replacing each
+ original node `v` with two nodes `vA`,`vB` linked by an (internal)
+ arc in D. Then for each edge (u,v) in G we add two arcs (uB,vA)
+ and (vB,uA) in D. Finally we set the attribute capacity = 1 for each
+ arc in D [1].
+
+ For a directed graph having `n` nodes and `m` arcs we derive a
+ directed graph D with 2n nodes and m+n arcs by replacing each
+ original node `v` with two nodes `vA`,`vB` linked by an (internal)
+ arc `(vA,vB)` in D. Then for each arc (u,v) in G we add one arc (uB,vA)
+ in D. Finally we set the attribute capacity = 1 for each arc in D.
+
+ References
+ ----------
+ .. [1] Kammer, Frank and Hanjo Taubig. Graph Connectivity. in Brandes and
+ Erlebach, 'Network Analysis: Methodological Foundations', Lecture
+ Notes in Computer Science, Volume 3418, Springer-Verlag, 2005.
+ http://www.informatik.uni-augsburg.de/thi/personen/kammer/Graph_Connectivity.pdf
+
+ """
+ directed = G.is_directed()
+
+ mapping = {}
+ D = nx.DiGraph()
+ for i,node in enumerate(G):
+ mapping[node] = i
+ D.add_node('%dA' % i,id=node)
+ D.add_node('%dB' % i,id=node)
+ D.add_edge('%dA' % i, '%dB' % i, capacity=1)
+
+ edges = []
+ for (source, target) in G.edges():
+ edges.append(('%sB' % mapping[source], '%sA' % mapping[target]))
+ if not directed:
+ edges.append(('%sB' % mapping[target], '%sA' % mapping[source]))
+
+ D.add_edges_from(edges, capacity=1)
+ return D, mapping
+
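+# Worked sketch (illustrative, not part of the upstream module): the
+# node-splitting construction on a single undirected edge. Each original
+# node v becomes an internal arc vA -> vB with capacity 1, and the edge
+# (u, v) becomes the arcs (uB, vA) and (vB, uA).
+def _demo_aux_node_digraph():
+    G = nx.Graph([(0, 1)])
+    D, mapping = _aux_digraph_node_connectivity(G)
+    # expect [('0A','0B'), ('0B','1A'), ('1A','1B'), ('1B','0A')]
+    return sorted(D.edges())
+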
+def local_node_connectivity(G, s, t, aux_digraph=None, mapping=None):
+ r"""Computes local node connectivity for nodes s and t.
+
+ Local node connectivity for two non adjacent nodes s and t is the
+ minimum number of nodes that must be removed (along with their incident
+ edges) to disconnect them.
+
+ This is a flow based implementation of node connectivity. We compute the
+    maximum flow on an auxiliary digraph built from the original input
+ graph (see below for details). This is equal to the local node
+ connectivity because the value of a maximum s-t-flow is equal to the
+ capacity of a minimum s-t-cut (Ford and Fulkerson theorem) [1]_ .
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ Undirected graph
+
+ s : node
+ Source node
+
+ t : node
+ Target node
+
+ aux_digraph : NetworkX DiGraph (default=None)
+ Auxiliary digraph to compute flow based node connectivity. If None
+        the auxiliary digraph is built.
+
+ mapping : dict (default=None)
+ Dictionary with a mapping of node names in G and in the auxiliary digraph.
+
+ Returns
+ -------
+ K : integer
+ local node connectivity for nodes s and t
+
+ Examples
+ --------
+ >>> # Platonic icosahedral graph has node connectivity 5
+ >>> # for each non adjacent node pair
+ >>> G = nx.icosahedral_graph()
+ >>> nx.local_node_connectivity(G,0,6)
+ 5
+
+ Notes
+ -----
+ This is a flow based implementation of node connectivity. We compute the
+ maximum flow using the Ford and Fulkerson algorithm on an auxiliary digraph
+    built from the original input graph:
+
+ For an undirected graph G having `n` nodes and `m` edges we derive a
+ directed graph D with 2n nodes and 2m+n arcs by replacing each
+ original node `v` with two nodes `v_A`, `v_B` linked by an (internal)
+ arc in `D`. Then for each edge (`u`, `v`) in G we add two arcs
+ (`u_B`, `v_A`) and (`v_B`, `u_A`) in `D`. Finally we set the attribute
+ capacity = 1 for each arc in `D` [1]_ .
+
+ For a directed graph G having `n` nodes and `m` arcs we derive a
+ directed graph `D` with `2n` nodes and `m+n` arcs by replacing each
+ original node `v` with two nodes `v_A`, `v_B` linked by an (internal)
+ arc `(v_A, v_B)` in D. Then for each arc `(u,v)` in G we add one arc
+ `(u_B,v_A)` in `D`. Finally we set the attribute capacity = 1 for
+ each arc in `D`.
+
+ This is equal to the local node connectivity because the value of
+ a maximum s-t-flow is equal to the capacity of a minimum s-t-cut (Ford
+ and Fulkerson theorem).
+
+ See also
+ --------
+ node_connectivity
+ all_pairs_node_connectivity_matrix
+ local_edge_connectivity
+ edge_connectivity
+ max_flow
+ ford_fulkerson
+
+ References
+ ----------
+ .. [1] Kammer, Frank and Hanjo Taubig. Graph Connectivity. in Brandes and
+ Erlebach, 'Network Analysis: Methodological Foundations', Lecture
+ Notes in Computer Science, Volume 3418, Springer-Verlag, 2005.
+ http://www.informatik.uni-augsburg.de/thi/personen/kammer/Graph_Connectivity.pdf
+
+ """
+ if aux_digraph is None or mapping is None:
+ H, mapping = _aux_digraph_node_connectivity(G)
+ else:
+ H = aux_digraph
+ return nx.max_flow(H,'%sB' % mapping[s], '%sA' % mapping[t])
+
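+# Performance sketch (illustrative, not part of the upstream module): when
+# querying many pairs, build the auxiliary digraph once and pass it to
+# every call, rather than letting local_node_connectivity rebuild it.
+def _demo_reuse_aux_digraph(G, pairs):
+    H, mapping = _aux_digraph_node_connectivity(G)
+    return [local_node_connectivity(G, s, t, aux_digraph=H, mapping=mapping)
+            for s, t in pairs]
+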
+def node_connectivity(G, s=None, t=None):
+ r"""Returns node connectivity for a graph or digraph G.
+
+ Node connectivity is equal to the minimum number of nodes that
+ must be removed to disconnect G or render it trivial. If source
+ and target nodes are provided, this function returns the local node
+ connectivity: the minimum number of nodes that must be removed to break
+ all paths from source to target in G.
+
+    This is a flow based implementation. The algorithm is based on
+    solving a number of max-flow problems (i.e., local st-node connectivity,
+ see local_node_connectivity) to determine the capacity of the
+ minimum cut on an auxiliary directed network that corresponds to the
+ minimum node cut of G. It handles both directed and undirected graphs.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ Undirected graph
+
+ s : node
+ Source node. Optional (default=None)
+
+ t : node
+ Target node. Optional (default=None)
+
+ Returns
+ -------
+ K : integer
+ Node connectivity of G, or local node connectivity if source
+ and target were provided
+
+ Examples
+ --------
+ >>> # Platonic icosahedral graph is 5-node-connected
+ >>> G = nx.icosahedral_graph()
+ >>> nx.node_connectivity(G)
+ 5
+ >>> nx.node_connectivity(G, 3, 7)
+ 5
+
+ Notes
+ -----
+ This is a flow based implementation of node connectivity. The
+    algorithm works by solving `O(n-\delta-1+\delta(\delta-1)/2)` max-flow
+    problems on an auxiliary digraph, where `\delta` is the minimum degree
+    of G. For details about the auxiliary digraph and the computation of
+ local node connectivity see local_node_connectivity.
+
+ This implementation is based on algorithm 11 in [1]_. We use the Ford
+ and Fulkerson algorithm to compute max flow (see ford_fulkerson).
+
+ See also
+ --------
+ local_node_connectivity
+ all_pairs_node_connectivity_matrix
+ local_edge_connectivity
+ edge_connectivity
+ max_flow
+ ford_fulkerson
+
+ References
+ ----------
+ .. [1] Abdol-Hossein Esfahanian. Connectivity Algorithms.
+ http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf
+
+ """
+ # Local node connectivity
+ if s is not None and t is not None:
+ if s not in G:
+ raise nx.NetworkXError('node %s not in graph' % s)
+ if t not in G:
+ raise nx.NetworkXError('node %s not in graph' % t)
+ return local_node_connectivity(G, s, t)
+ # Global node connectivity
+ if G.is_directed():
+ if not nx.is_weakly_connected(G):
+ return 0
+ iter_func = itertools.permutations
+        # For directed graphs we must consider both predecessors
+        # and successors as neighbors
+ def neighbors(v):
+ return itertools.chain.from_iterable([G.predecessors_iter(v),
+ G.successors_iter(v)])
+ else:
+ if not nx.is_connected(G):
+ return 0
+ iter_func = itertools.combinations
+ neighbors = G.neighbors_iter
+ # Initial guess \kappa = n - 1
+ K = G.order()-1
+ deg = G.degree()
+ min_deg = min(deg.values())
+ v = next(n for n,d in deg.items() if d==min_deg)
+ # Reuse the auxiliary digraph
+ H, mapping = _aux_digraph_node_connectivity(G)
+    # compute local node connectivity with all non-neighbor nodes of v
+ for w in set(G) - set(neighbors(v)) - set([v]):
+ K = min(K, local_node_connectivity(G, v, w,
+ aux_digraph=H, mapping=mapping))
+ # Same for non adjacent pairs of neighbors of v
+ for x,y in iter_func(neighbors(v), 2):
+ if y in G[x]: continue
+ K = min(K, local_node_connectivity(G, x, y,
+ aux_digraph=H, mapping=mapping))
+ return K
+
+def all_pairs_node_connectivity_matrix(G):
+ """Return a numpy 2d ndarray with node connectivity between all pairs
+ of nodes.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ Undirected graph
+
+ Returns
+ -------
+ K : 2d numpy ndarray
+ node connectivity between all pairs of nodes.
+
+ See also
+ --------
+ local_node_connectivity
+ node_connectivity
+ local_edge_connectivity
+ edge_connectivity
+ max_flow
+ ford_fulkerson
+
+ """
+ try:
+ import numpy
+ except ImportError:
+        raise ImportError(
+            "all_pairs_node_connectivity_matrix() requires NumPy")
+
+ n = G.order()
+ M = numpy.zeros((n, n), dtype=int)
+ # Create auxiliary Digraph
+ D, mapping = _aux_digraph_node_connectivity(G)
+
+ if G.is_directed():
+ for u, v in itertools.permutations(G, 2):
+ K = local_node_connectivity(G, u, v, aux_digraph=D, mapping=mapping)
+ M[mapping[u],mapping[v]] = K
+ else:
+ for u, v in itertools.combinations(G, 2):
+ K = local_node_connectivity(G, u, v, aux_digraph=D, mapping=mapping)
+ M[mapping[u],mapping[v]] = M[mapping[v],mapping[u]] = K
+
+ return M
+
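+# Usage sketch (illustrative, not part of the upstream module): on a
+# 4-cycle every pair of distinct nodes has node connectivity 2, so every
+# off-diagonal entry of the matrix is 2. Requires NumPy, as the function
+# itself does.
+def _demo_all_pairs_matrix():
+    M = all_pairs_node_connectivity_matrix(nx.cycle_graph(4))
+    assert int(M.max()) == 2 and int(M.min()) == 0  # zeros on the diagonal
+    return M
+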
+def _aux_digraph_edge_connectivity(G):
+ """Auxiliary digraph for computing flow based edge connectivity
+
+ If the input graph is undirected, we replace each edge (u,v) with
+ two reciprocal arcs (u,v) and (v,u) and then we set the attribute
+ 'capacity' for each arc to 1. If the input graph is directed we simply
+ add the 'capacity' attribute. Part of algorithm 1 in [1]_ .
+
+ References
+ ----------
+ .. [1] Abdol-Hossein Esfahanian. Connectivity Algorithms. (this is a
+ chapter, look for the reference of the book).
+ http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf
+ """
+ if G.is_directed():
+ if nx.get_edge_attributes(G, 'capacity'):
+ return G
+ D = G.copy()
+ capacity = dict((e,1) for e in D.edges())
+ nx.set_edge_attributes(D, 'capacity', capacity)
+ return D
+ else:
+ D = G.to_directed()
+ capacity = dict((e,1) for e in D.edges())
+ nx.set_edge_attributes(D, 'capacity', capacity)
+ return D
+
+def local_edge_connectivity(G, u, v, aux_digraph=None):
+    r"""Returns local edge connectivity for nodes u and v in G.
+
+    Local edge connectivity for two nodes u and v is the minimum number
+    of edges that must be removed to disconnect them.
+
+ This is a flow based implementation of edge connectivity. We compute the
+    maximum flow on an auxiliary digraph built from the original
+ network (see below for details). This is equal to the local edge
+ connectivity because the value of a maximum s-t-flow is equal to the
+ capacity of a minimum s-t-cut (Ford and Fulkerson theorem) [1]_ .
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ Undirected or directed graph
+
+    u : node
+        Source node
+
+    v : node
+        Target node
+
+ aux_digraph : NetworkX DiGraph (default=None)
+ Auxiliary digraph to compute flow based edge connectivity. If None
+        the auxiliary digraph is built.
+
+ Returns
+ -------
+ K : integer
+        local edge connectivity for nodes u and v
+
+ Examples
+ --------
+ >>> # Platonic icosahedral graph has edge connectivity 5
+ >>> # for each non adjacent node pair
+ >>> G = nx.icosahedral_graph()
+ >>> nx.local_edge_connectivity(G,0,6)
+ 5
+
+ Notes
+ -----
+ This is a flow based implementation of edge connectivity. We compute the
+ maximum flow using the Ford and Fulkerson algorithm on an auxiliary digraph
+    built from the original graph:
+
+ If the input graph is undirected, we replace each edge (u,v) with
+ two reciprocal arcs `(u,v)` and `(v,u)` and then we set the attribute
+ 'capacity' for each arc to 1. If the input graph is directed we simply
+ add the 'capacity' attribute. This is an implementation of algorithm 1
+ in [1]_.
+
+ The maximum flow in the auxiliary network is equal to the local edge
+ connectivity because the value of a maximum s-t-flow is equal to the
+ capacity of a minimum s-t-cut (Ford and Fulkerson theorem).
+
+ See also
+ --------
+ local_node_connectivity
+ node_connectivity
+ edge_connectivity
+ max_flow
+ ford_fulkerson
+
+ References
+ ----------
+ .. [1] Abdol-Hossein Esfahanian. Connectivity Algorithms.
+ http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf
+
+ """
+ if aux_digraph is None:
+ H = _aux_digraph_edge_connectivity(G)
+ else:
+ H = aux_digraph
+ return nx.max_flow(H, u, v)
+
+def edge_connectivity(G, s=None, t=None):
+ r"""Returns the edge connectivity of the graph or digraph G.
+
+ The edge connectivity is equal to the minimum number of edges that
+ must be removed to disconnect G or render it trivial. If source
+ and target nodes are provided, this function returns the local edge
+ connectivity: the minimum number of edges that must be removed to
+ break all paths from source to target in G.
+
+    This is a flow based implementation. The algorithm is based on solving
+    a number of max-flow problems (i.e., local st-edge connectivity, see
+ local_edge_connectivity) to determine the capacity of the minimum
+ cut on an auxiliary directed network that corresponds to the minimum
+ edge cut of G. It handles both directed and undirected graphs.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ Undirected or directed graph
+
+ s : node
+ Source node. Optional (default=None)
+
+ t : node
+ Target node. Optional (default=None)
+
+ Returns
+ -------
+ K : integer
+ Edge connectivity for G, or local edge connectivity if source
+ and target were provided
+
+ Examples
+ --------
+ >>> # Platonic icosahedral graph is 5-edge-connected
+ >>> G = nx.icosahedral_graph()
+ >>> nx.edge_connectivity(G)
+ 5
+
+ Notes
+ -----
+ This is a flow based implementation of global edge connectivity. For
+ undirected graphs the algorithm works by finding a 'small' dominating
+ set of nodes of G (see algorithm 7 in [1]_ ) and computing local max flow
+ (see local_edge_connectivity) between an arbitrary node in the dominating
+ set and the rest of nodes in it. This is an implementation of
+ algorithm 6 in [1]_ .
+
+ For directed graphs, the algorithm does n calls to the max flow function.
+ This is an implementation of algorithm 8 in [1]_ . We use the Ford and
+ Fulkerson algorithm to compute max flow (see ford_fulkerson).
+
+ See also
+ --------
+ local_node_connectivity
+ node_connectivity
+ local_edge_connectivity
+ max_flow
+ ford_fulkerson
+
+ References
+ ----------
+ .. [1] Abdol-Hossein Esfahanian. Connectivity Algorithms.
+ http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf
+
+ """
+ # Local edge connectivity
+ if s is not None and t is not None:
+ if s not in G:
+ raise nx.NetworkXError('node %s not in graph' % s)
+ if t not in G:
+ raise nx.NetworkXError('node %s not in graph' % t)
+ return local_edge_connectivity(G, s, t)
+ # Global edge connectivity
+ if G.is_directed():
+ # Algorithm 8 in [1]
+ if not nx.is_weakly_connected(G):
+ return 0
+ # initial value for lambda is min degree (\delta(G))
+ L = min(G.degree().values())
+ # reuse auxiliary digraph
+ H = _aux_digraph_edge_connectivity(G)
+ nodes = G.nodes()
+ n = len(nodes)
+ for i in range(n):
+ try:
+ L = min(L, local_edge_connectivity(G, nodes[i],
+ nodes[i+1], aux_digraph=H))
+ except IndexError: # last node!
+ L = min(L, local_edge_connectivity(G, nodes[i],
+ nodes[0], aux_digraph=H))
+ return L
+ else: # undirected
+ # Algorithm 6 in [1]
+ if not nx.is_connected(G):
+ return 0
+ # initial value for lambda is min degree (\delta(G))
+ L = min(G.degree().values())
+ # reuse auxiliary digraph
+ H = _aux_digraph_edge_connectivity(G)
+ # A dominating set is \lambda-covering
+ # We need a dominating set with at least two nodes
+ for node in G:
+ D = dominating_set(G, start_with=node)
+ v = D.pop()
+ if D: break
+ else:
+        # in complete graphs the dominating set is always a single node,
+ # thus we return min degree
+ return L
+ for w in D:
+ L = min(L, local_edge_connectivity(G, v, w, aux_digraph=H))
+ return L
+
+def dominating_set(G, start_with=None):
+ # Algorithm 7 in [1]
+ all_nodes = set(G)
+ if start_with is None:
+ v = set(G).pop() # pick a node
+ else:
+ if start_with not in G:
+ raise nx.NetworkXError('node %s not in G' % start_with)
+ v = start_with
+ D = set([v])
+ ND = set([nbr for nbr in G[v]])
+ other = all_nodes - ND - D
+ while other:
+ w = other.pop()
+ D.add(w)
+ ND.update([nbr for nbr in G[w] if nbr not in D])
+ other = all_nodes - ND - D
+ return D
+
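+# Quick sketch (illustrative, not part of the upstream module): every node
+# of G is in the returned set D or adjacent to a member of D, which is
+# exactly what is_dominating_set below checks.
+def _demo_dominating_set():
+    G = nx.petersen_graph()
+    D = dominating_set(G)
+    assert is_dominating_set(G, D)
+    return D
+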
+def is_dominating_set(G, nbunch):
+ # Proposed by Dan on the mailing list
+ allnodes=set(G)
+ testset=set(n for n in nbunch if n in G)
+ nbrs=set()
+ for n in testset:
+ nbrs.update(G[n])
+    if allnodes - testset - nbrs: # some nodes not dominated--not a dominating set
+ return False
+ else:
+ return True
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/connectivity/cuts.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/connectivity/cuts.py
new file mode 100644
index 0000000..a55bc0d
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/connectivity/cuts.py
@@ -0,0 +1,382 @@
+# -*- coding: utf-8 -*-
+"""
+Flow based cut algorithms
+"""
+# http://www.informatik.uni-augsburg.de/thi/personen/kammer/Graph_Connectivity.pdf
+# http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf
+import itertools
+from operator import itemgetter
+import networkx as nx
+from networkx.algorithms.connectivity.connectivity import \
+ _aux_digraph_node_connectivity, _aux_digraph_edge_connectivity, \
+ dominating_set, node_connectivity
+
+__author__ = '\n'.join(['Jordi Torrents <jtorrents@milnou.net>'])
+
+__all__ = [ 'minimum_st_node_cut',
+ 'minimum_node_cut',
+ 'minimum_st_edge_cut',
+ 'minimum_edge_cut',
+ ]
+
+def minimum_st_edge_cut(G, s, t, capacity='capacity'):
+ """Returns the edges of the cut-set of a minimum (s, t)-cut.
+
+ We use the max-flow min-cut theorem, i.e., the capacity of a minimum
+ capacity cut is equal to the flow value of a maximum flow.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ Edges of the graph are expected to have an attribute called
+ 'capacity'. If this attribute is not present, the edge is
+ considered to have infinite capacity.
+
+ s : node
+ Source node for the flow.
+
+ t : node
+ Sink node for the flow.
+
+ capacity: string
+ Edges of the graph G are expected to have an attribute capacity
+ that indicates how much flow the edge can support. If this
+ attribute is not present, the edge is considered to have
+ infinite capacity. Default value: 'capacity'.
+
+ Returns
+ -------
+ cutset : set
+ Set of edges that, if removed from the graph, will disconnect it
+
+ Raises
+ ------
+ NetworkXUnbounded
+        If the graph has a path of infinite capacity, all cuts have
+        infinite capacity and the function raises NetworkXUnbounded.
+
+ Examples
+ --------
+ >>> G = nx.DiGraph()
+ >>> G.add_edge('x','a', capacity = 3.0)
+ >>> G.add_edge('x','b', capacity = 1.0)
+ >>> G.add_edge('a','c', capacity = 3.0)
+ >>> G.add_edge('b','c', capacity = 5.0)
+ >>> G.add_edge('b','d', capacity = 4.0)
+ >>> G.add_edge('d','e', capacity = 2.0)
+ >>> G.add_edge('c','y', capacity = 2.0)
+ >>> G.add_edge('e','y', capacity = 3.0)
+ >>> sorted(nx.minimum_edge_cut(G, 'x', 'y'))
+ [('c', 'y'), ('x', 'b')]
+ >>> nx.min_cut(G, 'x', 'y')
+ 3.0
+ """
+ try:
+ flow, H = nx.ford_fulkerson_flow_and_auxiliary(G, s, t, capacity=capacity)
+ cutset = set()
+ # Compute reachable nodes from source in the residual network
+ reachable = set(nx.single_source_shortest_path(H,s))
+ # And unreachable nodes
+ others = set(H) - reachable # - set([s])
+ # Any edge in the original network linking these two partitions
+ # is part of the edge cutset
+ for u, nbrs in ((n, G[n]) for n in reachable):
+ cutset.update((u,v) for v in nbrs if v in others)
+ return cutset
+ except nx.NetworkXUnbounded:
+        # Re-raise with an explicit message rather than letting
+        # ford_fulkerson propagate nx.NetworkXUnbounded bare.
+ raise nx.NetworkXUnbounded("Infinite capacity path, no minimum cut.")
+
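+# Sketch (illustrative, not part of the upstream module): the cutset is
+# the set of saturated edges crossing from the nodes reachable from s in
+# the residual network to the unreachable side; removing them cuts s from t.
+def _demo_st_edge_cut():
+    G = nx.DiGraph()
+    G.add_edge('s', 'm', capacity=2.0)
+    G.add_edge('m', 't', capacity=1.0)
+    assert minimum_st_edge_cut(G, 's', 't') == set([('m', 't')])
+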
+def minimum_st_node_cut(G, s, t, aux_digraph=None, mapping=None):
+ r"""Returns a set of nodes of minimum cardinality that disconnect source
+ from target in G.
+
+ This function returns the set of nodes of minimum cardinality that,
+ if removed, would destroy all paths among source and target in G.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ s : node
+ Source node.
+
+ t : node
+ Target node.
+
+ Returns
+ -------
+ cutset : set
+ Set of nodes that, if removed, would destroy all paths between
+ source and target in G.
+
+ Examples
+ --------
+ >>> # Platonic icosahedral graph has node connectivity 5
+ >>> G = nx.icosahedral_graph()
+ >>> len(nx.minimum_node_cut(G, 0, 6))
+ 5
+
+ Notes
+ -----
+ This is a flow based implementation of minimum node cut. The algorithm
+    is based on solving a number of max-flow problems (i.e., local st-node
+ connectivity, see local_node_connectivity) to determine the capacity
+ of the minimum cut on an auxiliary directed network that corresponds
+ to the minimum node cut of G. It handles both directed and undirected
+ graphs.
+
+ This implementation is based on algorithm 11 in [1]_. We use the Ford
+ and Fulkerson algorithm to compute max flow (see ford_fulkerson).
+
+ See also
+ --------
+ node_connectivity
+ edge_connectivity
+ minimum_edge_cut
+ max_flow
+ ford_fulkerson
+
+ References
+ ----------
+ .. [1] Abdol-Hossein Esfahanian. Connectivity Algorithms.
+ http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf
+
+ """
+ if aux_digraph is None or mapping is None:
+ H, mapping = _aux_digraph_node_connectivity(G)
+ else:
+ H = aux_digraph
+ edge_cut = minimum_st_edge_cut(H, '%sB' % mapping[s], '%sA' % mapping[t])
+ # Each node in the original graph maps to two nodes of the auxiliary graph
+ node_cut = set(H.node[node]['id'] for edge in edge_cut for node in edge)
+ return node_cut - set([s,t])
+
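+# Sketch (illustrative, not part of the upstream module): node cuts are
+# recovered from edge cuts in the auxiliary digraph via the 'id' node
+# attribute; on the icosahedral graph a non-adjacent pair is separated by
+# five nodes.
+def _demo_st_node_cut():
+    G = nx.icosahedral_graph()
+    assert len(minimum_st_node_cut(G, 0, 6)) == 5
+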
+def minimum_node_cut(G, s=None, t=None):
+ r"""Returns a set of nodes of minimum cardinality that disconnects G.
+
+ If source and target nodes are provided, this function returns the
+ set of nodes of minimum cardinality that, if removed, would destroy
+ all paths among source and target in G. If not, it returns a set
+ of nodes of minimum cardinality that disconnects G.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ s : node
+ Source node. Optional (default=None)
+
+ t : node
+ Target node. Optional (default=None)
+
+ Returns
+ -------
+ cutset : set
+ Set of nodes that, if removed, would disconnect G. If source
+        and target nodes are provided, the set contains the nodes that,
+        if removed, would destroy all paths between source and target.
+
+ Examples
+ --------
+ >>> # Platonic icosahedral graph has node connectivity 5
+ >>> G = nx.icosahedral_graph()
+ >>> len(nx.minimum_node_cut(G))
+ 5
+ >>> # this is the minimum over any pair of non adjacent nodes
+ >>> from itertools import combinations
+ >>> for u,v in combinations(G, 2):
+ ... if v not in G[u]:
+ ... assert(len(nx.minimum_node_cut(G,u,v)) == 5)
+ ...
+
+ Notes
+ -----
+ This is a flow based implementation of minimum node cut. The algorithm
+    is based on solving a number of max-flow problems (i.e., local st-node
+ connectivity, see local_node_connectivity) to determine the capacity
+ of the minimum cut on an auxiliary directed network that corresponds
+ to the minimum node cut of G. It handles both directed and undirected
+ graphs.
+
+ This implementation is based on algorithm 11 in [1]_. We use the Ford
+ and Fulkerson algorithm to compute max flow (see ford_fulkerson).
+
+ See also
+ --------
+ node_connectivity
+ edge_connectivity
+ minimum_edge_cut
+ max_flow
+ ford_fulkerson
+
+ References
+ ----------
+ .. [1] Abdol-Hossein Esfahanian. Connectivity Algorithms.
+ http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf
+
+ """
+ # Local minimum node cut
+ if s is not None and t is not None:
+ if s not in G:
+ raise nx.NetworkXError('node %s not in graph' % s)
+ if t not in G:
+ raise nx.NetworkXError('node %s not in graph' % t)
+ return minimum_st_node_cut(G, s, t)
+ # Global minimum node cut
+    # Analogous to algorithm 11 for global node connectivity in [1]
+ if G.is_directed():
+ if not nx.is_weakly_connected(G):
+ raise nx.NetworkXError('Input graph is not connected')
+ iter_func = itertools.permutations
+ def neighbors(v):
+ return itertools.chain.from_iterable([G.predecessors_iter(v),
+ G.successors_iter(v)])
+ else:
+ if not nx.is_connected(G):
+ raise nx.NetworkXError('Input graph is not connected')
+ iter_func = itertools.combinations
+ neighbors = G.neighbors_iter
+ # Choose a node with minimum degree
+ deg = G.degree()
+ min_deg = min(deg.values())
+ v = next(n for n,d in deg.items() if d == min_deg)
+ # Initial node cutset is all neighbors of the node with minimum degree
+ min_cut = set(G[v])
+ # Reuse the auxiliary digraph
+ H, mapping = _aux_digraph_node_connectivity(G)
+ # compute st node cuts between v and all its non-neighbors nodes in G
+ # and store the minimum
+ for w in set(G) - set(neighbors(v)) - set([v]):
+ this_cut = minimum_st_node_cut(G, v, w, aux_digraph=H, mapping=mapping)
+ if len(min_cut) >= len(this_cut):
+ min_cut = this_cut
+ # Same for non adjacent pairs of neighbors of v
+ for x,y in iter_func(neighbors(v),2):
+ if y in G[x]: continue
+ this_cut = minimum_st_node_cut(G, x, y, aux_digraph=H, mapping=mapping)
+ if len(min_cut) >= len(this_cut):
+ min_cut = this_cut
+ return min_cut
+
+def minimum_edge_cut(G, s=None, t=None):
+ r"""Returns a set of edges of minimum cardinality that disconnects G.
+
+ If source and target nodes are provided, this function returns the
+ set of edges of minimum cardinality that, if removed, would break
+ all paths among source and target in G. If not, it returns a set of
+ edges of minimum cardinality that disconnects G.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ s : node
+ Source node. Optional (default=None)
+
+ t : node
+ Target node. Optional (default=None)
+
+ Returns
+ -------
+ cutset : set
+ Set of edges that, if removed, would disconnect G. If source
+        and target nodes are provided, the set contains the edges that,
+        if removed, would destroy all paths between source and target.
+
+ Examples
+ --------
+ >>> # Platonic icosahedral graph has edge connectivity 5
+ >>> G = nx.icosahedral_graph()
+ >>> len(nx.minimum_edge_cut(G))
+ 5
+ >>> # this is the minimum over any pair of nodes
+ >>> from itertools import combinations
+ >>> for u,v in combinations(G, 2):
+ ... assert(len(nx.minimum_edge_cut(G,u,v)) == 5)
+ ...
+
+ Notes
+ -----
+ This is a flow based implementation of minimum edge cut. For
+ undirected graphs the algorithm works by finding a 'small' dominating
+ set of nodes of G (see algorithm 7 in [1]_) and computing the maximum
+ flow between an arbitrary node in the dominating set and the rest of
+ nodes in it. This is an implementation of algorithm 6 in [1]_.
+
+ For directed graphs, the algorithm does n calls to the max flow function.
+ This is an implementation of algorithm 8 in [1]_. We use the Ford and
+ Fulkerson algorithm to compute max flow (see ford_fulkerson).
+
+ See also
+ --------
+ node_connectivity
+ edge_connectivity
+ minimum_node_cut
+ max_flow
+ ford_fulkerson
+
+ References
+ ----------
+ .. [1] Abdol-Hossein Esfahanian. Connectivity Algorithms.
+ http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf
+
+ """
+ # reuse auxiliary digraph
+ H = _aux_digraph_edge_connectivity(G)
+ # Local minimum edge cut if s and t are not None
+ if s is not None and t is not None:
+ if s not in G:
+ raise nx.NetworkXError('node %s not in graph' % s)
+ if t not in G:
+ raise nx.NetworkXError('node %s not in graph' % t)
+ return minimum_st_edge_cut(H, s, t)
+ # Global minimum edge cut
+    # Analogous to the algorithm for global edge connectivity
+ if G.is_directed():
+ # Based on algorithm 8 in [1]
+ if not nx.is_weakly_connected(G):
+ raise nx.NetworkXError('Input graph is not connected')
+ # Initial cutset is all edges of a node with minimum degree
+ deg = G.degree()
+ min_deg = min(deg.values())
+ node = next(n for n,d in deg.items() if d==min_deg)
+ min_cut = G.edges(node)
+ nodes = G.nodes()
+ n = len(nodes)
+ for i in range(n):
+ try:
+ this_cut = minimum_st_edge_cut(H, nodes[i], nodes[i+1])
+ if len(this_cut) <= len(min_cut):
+ min_cut = this_cut
+ except IndexError: # Last node!
+ this_cut = minimum_st_edge_cut(H, nodes[i], nodes[0])
+ if len(this_cut) <= len(min_cut):
+ min_cut = this_cut
+ return min_cut
+ else: # undirected
+ # Based on algorithm 6 in [1]
+ if not nx.is_connected(G):
+ raise nx.NetworkXError('Input graph is not connected')
+ # Initial cutset is all edges of a node with minimum degree
+ deg = G.degree()
+ min_deg = min(deg.values())
+ node = next(n for n,d in deg.items() if d==min_deg)
+ min_cut = G.edges(node)
+ # A dominating set is \lambda-covering
+ # We need a dominating set with at least two nodes
+ for node in G:
+ D = dominating_set(G, start_with=node)
+ v = D.pop()
+ if D: break
+ else:
+            # in complete graphs the dominating set is always a single node;
+ # thus we return min_cut, which now contains the edges of a node
+ # with minimum degree
+ return min_cut
+ for w in D:
+ this_cut = minimum_st_edge_cut(H, v, w)
+ if len(this_cut) <= len(min_cut):
+ min_cut = this_cut
+ return min_cut
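+
+# Sketch (illustrative, not part of the upstream module): a global minimum
+# edge cut on the Petersen graph has three edges, and removing them
+# disconnects the graph.
+def _demo_minimum_edge_cut():
+    G = nx.petersen_graph()
+    cut = minimum_edge_cut(G)
+    assert len(cut) == 3
+    G.remove_edges_from(cut)
+    assert not nx.is_connected(G)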
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/connectivity/tests/test_connectivity.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/connectivity/tests/test_connectivity.py
new file mode 100644
index 0000000..2745504
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/connectivity/tests/test_connectivity.py
@@ -0,0 +1,145 @@
+from nose.tools import assert_equal, assert_true, assert_false
+import networkx as nx
+
+# helper functions for tests
+def _generate_no_biconnected(max_attempts=50):
+ attempts = 0
+ while True:
+ G = nx.fast_gnp_random_graph(100,0.0575)
+ if nx.is_connected(G) and not nx.is_biconnected(G):
+ attempts = 0
+ yield G
+ else:
+ if attempts >= max_attempts:
+ msg = "Tried %d times: no suitable Graph."
+ raise Exception(msg % max_attempts)
+ else:
+ attempts += 1
+
+def is_dominating_set(G, nbunch):
+ # Proposed by Dan on the mailing list
+ allnodes=set(G)
+ testset=set(n for n in nbunch if n in G)
+ nbrs=set()
+ for n in testset:
+ nbrs.update(G[n])
+    if allnodes - testset - nbrs: # some nodes not dominated--not a dominating set
+ return False
+ else:
+ return True
+
+# Tests for node and edge connectivity
+def test_average_connectivity():
+ # figure 1 from:
+ # Beineke, L., O. Oellermann, and R. Pippert (2002). The average
+ # connectivity of a graph. Discrete mathematics 252(1-3), 31-45
+ # http://www.sciencedirect.com/science/article/pii/S0012365X01001807
+ G1 = nx.path_graph(3)
+ G1.add_edges_from([(1,3),(1,4)])
+ assert_equal(nx.average_node_connectivity(G1),1)
+ G2 = nx.path_graph(3)
+ G2.add_edges_from([(1,3),(1,4),(0,3),(0,4),(3,4)])
+ assert_equal(nx.average_node_connectivity(G2),2.2)
+ G3 = nx.Graph()
+ assert_equal(nx.average_node_connectivity(G3),0)
+
+def test_articulation_points():
+ Ggen = _generate_no_biconnected()
+ for i in range(5):
+ G = next(Ggen)
+ assert_equal(nx.node_connectivity(G), 1)
+
+def test_brandes_erlebach():
+ # Figure 1 chapter 7: Connectivity
+ # http://www.informatik.uni-augsburg.de/thi/personen/kammer/Graph_Connectivity.pdf
+ G = nx.Graph()
+ G.add_edges_from([(1,2),(1,3),(1,4),(1,5),(2,3),(2,6),(3,4),
+ (3,6),(4,6),(4,7),(5,7),(6,8),(6,9),(7,8),
+ (7,10),(8,11),(9,10),(9,11),(10,11)])
+ assert_equal(3,nx.local_edge_connectivity(G,1,11))
+ assert_equal(3,nx.edge_connectivity(G,1,11))
+ assert_equal(2,nx.local_node_connectivity(G,1,11))
+ assert_equal(2,nx.node_connectivity(G,1,11))
+ assert_equal(2,nx.edge_connectivity(G)) # node 5 has degree 2
+ assert_equal(2,nx.node_connectivity(G))
+
+def test_white_harary_1():
+ # Figure 1b white and harary (2001)
+ # # http://eclectic.ss.uci.edu/~drwhite/sm-w23.PDF
+ # A graph with high adhesion (edge connectivity) and low cohesion
+ # (vertex connectivity)
+ G = nx.disjoint_union(nx.complete_graph(4), nx.complete_graph(4))
+ G.remove_node(7)
+ for i in range(4,7):
+ G.add_edge(0,i)
+ G = nx.disjoint_union(G, nx.complete_graph(4))
+ G.remove_node(G.order()-1)
+ for i in range(7,10):
+ G.add_edge(0,i)
+ assert_equal(1, nx.node_connectivity(G))
+ assert_equal(3, nx.edge_connectivity(G))
+
+def test_white_harary_2():
+ # Figure 8 white and harary (2001)
+ # # http://eclectic.ss.uci.edu/~drwhite/sm-w23.PDF
+ G = nx.disjoint_union(nx.complete_graph(4), nx.complete_graph(4))
+ G.add_edge(0,4)
+ # kappa <= lambda <= delta
+ assert_equal(3, min(nx.core_number(G).values()))
+ assert_equal(1, nx.node_connectivity(G))
+ assert_equal(1, nx.edge_connectivity(G))
+
+def test_complete_graphs():
+ for n in range(5, 25, 5):
+ G = nx.complete_graph(n)
+ assert_equal(n-1, nx.node_connectivity(G))
+ assert_equal(n-1, nx.node_connectivity(G.to_directed()))
+ assert_equal(n-1, nx.edge_connectivity(G))
+ assert_equal(n-1, nx.edge_connectivity(G.to_directed()))
+
+def test_empty_graphs():
+ for k in range(5, 25, 5):
+ G = nx.empty_graph(k)
+ assert_equal(0, nx.node_connectivity(G))
+ assert_equal(0, nx.edge_connectivity(G))
+
+def test_petersen():
+ G = nx.petersen_graph()
+ assert_equal(3, nx.node_connectivity(G))
+ assert_equal(3, nx.edge_connectivity(G))
+
+def test_tutte():
+ G = nx.tutte_graph()
+ assert_equal(3, nx.node_connectivity(G))
+ assert_equal(3, nx.edge_connectivity(G))
+
+def test_dodecahedral():
+ G = nx.dodecahedral_graph()
+ assert_equal(3, nx.node_connectivity(G))
+ assert_equal(3, nx.edge_connectivity(G))
+
+def test_octahedral():
+ G=nx.octahedral_graph()
+ assert_equal(4, nx.node_connectivity(G))
+ assert_equal(4, nx.edge_connectivity(G))
+
+def test_icosahedral():
+ G=nx.icosahedral_graph()
+ assert_equal(5, nx.node_connectivity(G))
+ assert_equal(5, nx.edge_connectivity(G))
+
+def test_directed_edge_connectivity():
+ G = nx.cycle_graph(10,create_using=nx.DiGraph()) # only one direction
+ D = nx.cycle_graph(10).to_directed() # 2 reciprocal edges
+ assert_equal(1, nx.edge_connectivity(G))
+ assert_equal(1, nx.local_edge_connectivity(G,1,4))
+ assert_equal(1, nx.edge_connectivity(G,1,4))
+ assert_equal(2, nx.edge_connectivity(D))
+ assert_equal(2, nx.local_edge_connectivity(D,1,4))
+ assert_equal(2, nx.edge_connectivity(D,1,4))
+
+def test_dominating_set():
+ for i in range(5):
+ G = nx.gnp_random_graph(100,0.1)
+ D = nx.dominating_set(G)
+ assert_true(is_dominating_set(G,D))
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/connectivity/tests/test_cuts.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/connectivity/tests/test_cuts.py
new file mode 100644
index 0000000..0570494
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/connectivity/tests/test_cuts.py
@@ -0,0 +1,157 @@
+from nose.tools import assert_equal, assert_true, assert_false, assert_raises
+import networkx as nx
+
+# Tests for node and edge cutsets
+def _generate_no_biconnected(max_attempts=50):
+ attempts = 0
+ while True:
+ G = nx.fast_gnp_random_graph(100,0.0575)
+ if nx.is_connected(G) and not nx.is_biconnected(G):
+ attempts = 0
+ yield G
+ else:
+ if attempts >= max_attempts:
+                msg = "Tried %d times: no suitable Graph."
+                raise Exception(msg % max_attempts)
+ else:
+ attempts += 1
+
+def test_articulation_points():
+ Ggen = _generate_no_biconnected()
+ for i in range(5):
+ G = next(Ggen)
+ cut = nx.minimum_node_cut(G)
+ assert_true(len(cut) == 1)
+ assert_true(cut.pop() in set(nx.articulation_points(G)))
+
+def test_brandes_erlebach_book():
+ # Figure 1 chapter 7: Connectivity
+ # http://www.informatik.uni-augsburg.de/thi/personen/kammer/Graph_Connectivity.pdf
+ G = nx.Graph()
+ G.add_edges_from([(1,2),(1,3),(1,4),(1,5),(2,3),(2,6),(3,4),
+ (3,6),(4,6),(4,7),(5,7),(6,8),(6,9),(7,8),
+ (7,10),(8,11),(9,10),(9,11),(10,11)])
+ # edge cutsets
+ assert_equal(3, len(nx.minimum_edge_cut(G,1,11)))
+ edge_cut = nx.minimum_edge_cut(G)
+ assert_equal(2, len(edge_cut)) # Node 5 has only two edges
+ H = G.copy()
+ H.remove_edges_from(edge_cut)
+ assert_false(nx.is_connected(H))
+ # node cuts
+ assert_equal(set([6,7]), nx.minimum_st_node_cut(G,1,11))
+ assert_equal(set([6,7]), nx.minimum_node_cut(G,1,11))
+ node_cut = nx.minimum_node_cut(G)
+ assert_equal(2,len(node_cut))
+ H = G.copy()
+ H.remove_nodes_from(node_cut)
+ assert_false(nx.is_connected(H))
+
+def test_white_harary_paper():
+ # Figure 1b white and harary (2001)
+ # http://eclectic.ss.uci.edu/~drwhite/sm-w23.PDF
+ # A graph with high adhesion (edge connectivity) and low cohesion
+ # (node connectivity)
+ G = nx.disjoint_union(nx.complete_graph(4), nx.complete_graph(4))
+ G.remove_node(7)
+ for i in range(4,7):
+ G.add_edge(0,i)
+ G = nx.disjoint_union(G, nx.complete_graph(4))
+ G.remove_node(G.order()-1)
+ for i in range(7,10):
+ G.add_edge(0,i)
+ # edge cuts
+ edge_cut = nx.minimum_edge_cut(G)
+ assert_equal(3, len(edge_cut))
+ H = G.copy()
+ H.remove_edges_from(edge_cut)
+ assert_false(nx.is_connected(H))
+ # node cuts
+ node_cut = nx.minimum_node_cut(G)
+ assert_equal(set([0]), node_cut)
+ H = G.copy()
+ H.remove_nodes_from(node_cut)
+ assert_false(nx.is_connected(H))
+
+def test_petersen_cutset():
+ G = nx.petersen_graph()
+ # edge cuts
+ edge_cut = nx.minimum_edge_cut(G)
+ assert_equal(3, len(edge_cut))
+ H = G.copy()
+ H.remove_edges_from(edge_cut)
+ assert_false(nx.is_connected(H))
+ # node cuts
+ node_cut = nx.minimum_node_cut(G)
+ assert_equal(3,len(node_cut))
+ H = G.copy()
+ H.remove_nodes_from(node_cut)
+ assert_false(nx.is_connected(H))
+
+def test_octahedral_cutset():
+ G=nx.octahedral_graph()
+ # edge cuts
+ edge_cut = nx.minimum_edge_cut(G)
+ assert_equal(4, len(edge_cut))
+ H = G.copy()
+ H.remove_edges_from(edge_cut)
+ assert_false(nx.is_connected(H))
+ # node cuts
+ node_cut = nx.minimum_node_cut(G)
+ assert_equal(4,len(node_cut))
+ H = G.copy()
+ H.remove_nodes_from(node_cut)
+ assert_false(nx.is_connected(H))
+
+def test_icosahedral_cutset():
+ G=nx.icosahedral_graph()
+ # edge cuts
+ edge_cut = nx.minimum_edge_cut(G)
+ assert_equal(5, len(edge_cut))
+ H = G.copy()
+ H.remove_edges_from(edge_cut)
+ assert_false(nx.is_connected(H))
+ # node cuts
+ node_cut = nx.minimum_node_cut(G)
+ assert_equal(5,len(node_cut))
+ H = G.copy()
+ H.remove_nodes_from(node_cut)
+ assert_false(nx.is_connected(H))
+
+def test_node_cutset_exception():
+ G=nx.Graph()
+ G.add_edges_from([(1,2),(3,4)])
+ assert_raises(nx.NetworkXError, nx.minimum_node_cut,G)
+
+def test_node_cutset_random_graphs():
+ for i in range(5):
+ G = nx.fast_gnp_random_graph(50,0.2)
+ if not nx.is_connected(G):
+ ccs = iter(nx.connected_components(G))
+ start = next(ccs)[0]
+ G.add_edges_from( (start,c[0]) for c in ccs )
+ cutset = nx.minimum_node_cut(G)
+ assert_equal(nx.node_connectivity(G), len(cutset))
+ G.remove_nodes_from(cutset)
+ assert_false(nx.is_connected(G))
+
+def test_edge_cutset_random_graphs():
+ for i in range(5):
+ G = nx.fast_gnp_random_graph(50,0.2)
+ if not nx.is_connected(G):
+ ccs = iter(nx.connected_components(G))
+ start = next(ccs)[0]
+ G.add_edges_from( (start,c[0]) for c in ccs )
+ cutset = nx.minimum_edge_cut(G)
+ assert_equal(nx.edge_connectivity(G), len(cutset))
+ G.remove_edges_from(cutset)
+ assert_false(nx.is_connected(G))
+
+# Test empty graphs
+def test_empty_graphs():
+ G = nx.Graph()
+ D = nx.DiGraph()
+ assert_raises(nx.NetworkXPointlessConcept, nx.minimum_node_cut, G)
+ assert_raises(nx.NetworkXPointlessConcept, nx.minimum_node_cut, D)
+ assert_raises(nx.NetworkXPointlessConcept, nx.minimum_edge_cut, G)
+ assert_raises(nx.NetworkXPointlessConcept, nx.minimum_edge_cut, D)
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/core.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/core.py
new file mode 100644
index 0000000..4daba25
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/core.py
@@ -0,0 +1,324 @@
+"""
+Find the k-cores of a graph.
+
+The k-core is found by recursively pruning nodes with degrees less than k.
+
+See the following reference for details:
+
+An O(m) Algorithm for Cores Decomposition of Networks
+Vladimir Batagelj and Matjaz Zaversnik, 2003.
+http://arxiv.org/abs/cs.DS/0310049
+
+"""
+__author__ = "\n".join(['Dan Schult (dschult@colgate.edu)',
+ 'Jason Grout (jason-sage@creativetrax.com)',
+ 'Aric Hagberg (hagberg@lanl.gov)'])
+
+# Copyright (C) 2004-2010 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+__all__ = ['core_number','k_core','k_shell','k_crust','k_corona','find_cores']
+
+import networkx as nx
+
+def core_number(G):
+ """Return the core number for each vertex.
+
+ A k-core is a maximal subgraph that contains nodes of degree k or more.
+
+ The core number of a node is the largest value k of a k-core containing
+ that node.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ A graph or directed graph
+
+ Returns
+ -------
+ core_number : dictionary
+ A dictionary keyed by node to the core number.
+
+ Raises
+ ------
+ NetworkXError
+ The k-core is not defined for graphs with self loops or parallel edges.
+
+ Notes
+ -----
+ Not implemented for graphs with parallel edges or self loops.
+
+ For directed graphs the node degree is defined to be the
+ in-degree + out-degree.
+
+ References
+ ----------
+ .. [1] An O(m) Algorithm for Cores Decomposition of Networks
+ Vladimir Batagelj and Matjaz Zaversnik, 2003.
+ http://arxiv.org/abs/cs.DS/0310049
+ """
+ if G.is_multigraph():
+ raise nx.NetworkXError(
+ 'MultiGraph and MultiDiGraph types not supported.')
+
+ if G.number_of_selfloops()>0:
+ raise nx.NetworkXError(
+ 'Input graph has self loops; the core number is not defined.',
+ 'Consider using G.remove_edges_from(G.selfloop_edges()).')
+
+ if G.is_directed():
+ import itertools
+ def neighbors(v):
+ return itertools.chain.from_iterable([G.predecessors_iter(v),
+ G.successors_iter(v)])
+ else:
+ neighbors=G.neighbors_iter
+ degrees=G.degree()
+ # sort nodes by degree
+ nodes=sorted(degrees,key=degrees.get)
+ bin_boundaries=[0]
+ curr_degree=0
+ for i,v in enumerate(nodes):
+ if degrees[v]>curr_degree:
+ bin_boundaries.extend([i]*(degrees[v]-curr_degree))
+ curr_degree=degrees[v]
+ node_pos = dict((v,pos) for pos,v in enumerate(nodes))
+    # initial guess for core number is degree
+ core=degrees
+ nbrs=dict((v,set(neighbors(v))) for v in G)
+ for v in nodes:
+ for u in nbrs[v]:
+ if core[u] > core[v]:
+ nbrs[u].remove(v)
+ pos=node_pos[u]
+ bin_start=bin_boundaries[core[u]]
+ node_pos[u]=bin_start
+ node_pos[nodes[bin_start]]=pos
+ nodes[bin_start],nodes[pos]=nodes[pos],nodes[bin_start]
+ bin_boundaries[core[u]]+=1
+ core[u]-=1
+ return core
+
+find_cores=core_number
+
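+# Usage sketch (illustrative, not part of the upstream module): in a
+# triangle with one pendant node, the triangle nodes have core number 2
+# and the pendant node has core number 1.
+def _demo_core_number():
+    G = nx.Graph([(0, 1), (1, 2), (2, 0), (2, 3)])
+    assert core_number(G) == {0: 2, 1: 2, 2: 2, 3: 1}
+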
+def k_core(G,k=None,core_number=None):
+ """Return the k-core of G.
+
+ A k-core is a maximal subgraph that contains nodes of degree k or more.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ A graph or directed graph
+ k : int, optional
+ The order of the core. If not specified return the main core.
+ core_number : dictionary, optional
+ Precomputed core numbers for the graph G.
+
+ Returns
+ -------
+ G : NetworkX graph
+ The k-core subgraph
+
+ Raises
+ ------
+ NetworkXError
+ The k-core is not defined for graphs with self loops or parallel edges.
+
+ Notes
+ -----
+    The main core is the core with the largest core number.
+
+ Not implemented for graphs with parallel edges or self loops.
+
+ For directed graphs the node degree is defined to be the
+ in-degree + out-degree.
+
+ Graph, node, and edge attributes are copied to the subgraph.
+
+ See Also
+ --------
+ core_number
+
+ References
+ ----------
+ .. [1] An O(m) Algorithm for Cores Decomposition of Networks
+ Vladimir Batagelj and Matjaz Zaversnik, 2003.
+ http://arxiv.org/abs/cs.DS/0310049
+ """
+ if core_number is None:
+ core_number=nx.core_number(G)
+ if k is None:
+ k=max(core_number.values()) # max core
+ nodes=(n for n in core_number if core_number[n]>=k)
+ return G.subgraph(nodes).copy()
+
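+# Sketch (illustrative, not part of the upstream module): for the same
+# triangle-with-pendant graph, the 2-core (also the main core) is the
+# triangle itself.
+def _demo_k_core():
+    G = nx.Graph([(0, 1), (1, 2), (2, 0), (2, 3)])
+    assert sorted(k_core(G, k=2).nodes()) == [0, 1, 2]
+    assert sorted(k_core(G).nodes()) == [0, 1, 2]  # main core: k defaults to 2 here
+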
+def k_shell(G,k=None,core_number=None):
+ """Return the k-shell of G.
+
+    The k-shell is the subgraph induced by the nodes with core number
+    exactly k.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ A graph or directed graph.
+ k : int, optional
+ The order of the shell. If not specified return the main shell.
+ core_number : dictionary, optional
+ Precomputed core numbers for the graph G.
+
+
+ Returns
+ -------
+ G : NetworkX graph
+ The k-shell subgraph
+
+ Raises
+ ------
+ NetworkXError
+ The k-shell is not defined for graphs with self loops or parallel edges.
+
+ Notes
+ -----
+    This is similar to k_corona, but k_corona considers only the
+    neighbors that are themselves in the k-core.
+
+ Not implemented for graphs with parallel edges or self loops.
+
+ For directed graphs the node degree is defined to be the
+ in-degree + out-degree.
+
+ Graph, node, and edge attributes are copied to the subgraph.
+
+ See Also
+ --------
+ core_number
+ k_corona
+
+    References
+    ----------
+ .. [1] A model of Internet topology using k-shell decomposition
+ Shai Carmi, Shlomo Havlin, Scott Kirkpatrick, Yuval Shavitt,
+ and Eran Shir, PNAS July 3, 2007 vol. 104 no. 27 11150-11154
+ http://www.pnas.org/content/104/27/11150.full
+ """
+ if core_number is None:
+ core_number=nx.core_number(G)
+ if k is None:
+ k=max(core_number.values()) # max core
+ nodes=(n for n in core_number if core_number[n]==k)
+ return G.subgraph(nodes).copy()
+
+def k_crust(G,k=None,core_number=None):
+ """Return the k-crust of G.
+
+ The k-crust is the graph G with the k-core removed.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ A graph or directed graph.
+ k : int, optional
+ The order of the shell. If not specified return the main crust.
+ core_number : dictionary, optional
+ Precomputed core numbers for the graph G.
+
+ Returns
+ -------
+ G : NetworkX graph
+ The k-crust subgraph
+
+ Raises
+ ------
+ NetworkXError
+ The k-crust is not defined for graphs with self loops or parallel edges.
+
+ Notes
+ -----
+ This definition of k-crust is different than the definition in [1]_.
+ The k-crust in [1]_ is equivalent to the k+1 crust of this algorithm.
+
+ Not implemented for graphs with parallel edges or self loops.
+
+ For directed graphs the node degree is defined to be the
+ in-degree + out-degree.
+
+ Graph, node, and edge attributes are copied to the subgraph.
+
+ See Also
+ --------
+ core_number
+
+ References
+ ----------
+ .. [1] A model of Internet topology using k-shell decomposition
+ Shai Carmi, Shlomo Havlin, Scott Kirkpatrick, Yuval Shavitt,
+ and Eran Shir, PNAS July 3, 2007 vol. 104 no. 27 11150-11154
+ http://www.pnas.org/content/104/27/11150.full
+ """
+ if core_number is None:
+ core_number=nx.core_number(G)
+ if k is None:
+ k=max(core_number.values())-1
+ nodes=(n for n in core_number if core_number[n]<=k)
+ return G.subgraph(nodes).copy()
+
+
+def k_corona(G, k, core_number=None):
+ """Return the k-crust of G.
+
+ The k-corona is the subset of vertices in the k-core which have
+ exactly k neighbours in the k-core.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ A graph or directed graph
+ k : int
+ The order of the corona.
+ core_number : dictionary, optional
+ Precomputed core numbers for the graph G.
+
+ Returns
+ -------
+ G : NetworkX graph
+ The k-corona subgraph
+
+ Raises
+ ------
+ NetworkXError
+ The k-corona is not defined for graphs with self loops or
+ parallel edges.
+
+ Notes
+ -----
+ Not implemented for graphs with parallel edges or self loops.
+
+ For directed graphs the node degree is defined to be the
+ in-degree + out-degree.
+
+ Graph, node, and edge attributes are copied to the subgraph.
+
+ See Also
+ --------
+ core_number
+
+ References
+ ----------
+ .. [1] k -core (bootstrap) percolation on complex networks:
+ Critical phenomena and nonlocal effects,
+ A. V. Goltsev, S. N. Dorogovtsev, and J. F. F. Mendes,
+ Phys. Rev. E 73, 056101 (2006)
+ http://link.aps.org/doi/10.1103/PhysRevE.73.056101
+ """
+
+ if core_number is None:
+ core_number = nx.core_number(G)
+ nodes = (n for n in core_number
+ if core_number[n] >= k
+ and len([v for v in G[n] if core_number[v] >= k]) == k)
+ return G.subgraph(nodes).copy()
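+
+# Illustrative sketch (added commentary, not part of the upstream module):
+# the k-crust is the complement of the (k+1)-core, so for any k the two
+# subgraphs cover every node exactly once. On the triangle-plus-pendant
+# graph from the core_number sketch above:
+#
+#     >>> G = nx.Graph([(1, 2), (2, 3), (1, 3), (3, 4)])
+#     >>> set(k_crust(G, 1)) | set(k_core(G, 2)) == set(G)
+#     True
+#     >>> set(k_crust(G, 1)) & set(k_core(G, 2))
+#     set([])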
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/cycles.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/cycles.py
new file mode 100644
index 0000000..4955538
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/cycles.py
@@ -0,0 +1,317 @@
+"""
+========================
+Cycle finding algorithms
+========================
+"""
+# Copyright (C) 2010-2012 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+import networkx as nx
+from networkx.utils import *
+from collections import defaultdict
+
+__all__ = ['cycle_basis','simple_cycles','recursive_simple_cycles']
+__author__ = "\n".join(['Jon Olav Vik <jonovik@gmail.com>',
+ 'Dan Schult <dschult@colgate.edu>',
+ 'Aric Hagberg <hagberg@lanl.gov>'])
+
+@not_implemented_for('directed')
+@not_implemented_for('multigraph')
+def cycle_basis(G,root=None):
+ """ Returns a list of cycles which form a basis for cycles of G.
+
+ A basis for cycles of a network is a minimal collection of
+ cycles such that any cycle in the network can be written
+ as a sum of cycles in the basis. Here summation of cycles
+ is defined as "exclusive or" of the edges. Cycle bases are
+ useful, e.g. when deriving equations for electric circuits
+ using Kirchhoff's Laws.
+
+ Parameters
+ ----------
+ G : NetworkX Graph
+ root : node, optional
+ Specify starting node for basis.
+
+ Returns
+ -------
+ A list of cycle lists. Each cycle list is a list of nodes
+ which forms a cycle (loop) in G.
+
+ Examples
+ --------
+ >>> G=nx.Graph()
+ >>> G.add_cycle([0,1,2,3])
+ >>> G.add_cycle([0,3,4,5])
+ >>> print(nx.cycle_basis(G,0))
+ [[3, 4, 5, 0], [1, 2, 3, 0]]
+
+ Notes
+ -----
+ This is adapted from algorithm CACM 491 [1]_.
+
+ References
+ ----------
+ .. [1] Paton, K. An algorithm for finding a fundamental set of
+ cycles of a graph. Comm. ACM 12, 9 (Sept 1969), 514-518.
+
+ See Also
+ --------
+ simple_cycles
+ """
+ gnodes=set(G.nodes())
+ cycles=[]
+ while gnodes: # loop over connected components
+ if root is None:
+ root=gnodes.pop()
+ stack=[root]
+ pred={root:root}
+ used={root:set()}
+ while stack: # walk the spanning tree finding cycles
+ z=stack.pop() # use last-in so cycles easier to find
+ zused=used[z]
+ for nbr in G[z]:
+ if nbr not in used: # new node
+ pred[nbr]=z
+ stack.append(nbr)
+ used[nbr]=set([z])
+ elif nbr == z: # self loops
+ cycles.append([z])
+ elif nbr not in zused:# found a cycle
+ pn=used[nbr]
+ cycle=[nbr,z]
+ p=pred[z]
+ while p not in pn:
+ cycle.append(p)
+ p=pred[p]
+ cycle.append(p)
+ cycles.append(cycle)
+ used[nbr].add(z)
+ gnodes-=set(pred)
+ root=None
+ return cycles
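+
+# Illustrative sketch (added commentary, not part of the upstream module):
+# the basis always has m - n + c members for a graph with m edges, n nodes
+# and c connected components (the dimension of the cycle space). For the
+# docstring example with two squares sharing the edge (0, 3):
+#
+#     >>> G = nx.Graph()
+#     >>> G.add_cycle([0, 1, 2, 3])
+#     >>> G.add_cycle([0, 3, 4, 5])
+#     >>> len(cycle_basis(G)) == G.number_of_edges() - len(G) + 1
+#     True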
+
+
+@not_implemented_for('undirected')
+def simple_cycles(G):
+ """Find simple cycles (elementary circuits) of a directed graph.
+
+ A simple cycle, or elementary circuit, is a closed path where no
+ node appears twice, except that the first and last node are the same.
+ Two elementary circuits are distinct if they are not cyclic permutations
+ of each other.
+
+ This is a nonrecursive, iterator/generator version of Johnson's
+ algorithm [1]_. There may be better algorithms for some cases [2]_ [3]_.
+
+ Parameters
+ ----------
+ G : NetworkX DiGraph
+ A directed graph
+
+ Returns
+ -------
+ cycle_generator: generator
+ A generator that produces elementary cycles of the graph. Each cycle is
+ a list of distinct nodes; the starting node is not repeated at the end
+ of the list (see the examples below).
+
+ Examples
+ --------
+ >>> G = nx.DiGraph([(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)])
+ >>> list(nx.simple_cycles(G))
+ [[2], [2, 1], [2, 0], [2, 0, 1], [0]]
+
+ Notes
+ -----
+ The implementation follows pp. 79-80 in [1]_.
+
+ The time complexity is O((n+e)(c+1)) for n nodes, e edges and c
+ elementary circuits.
+
+ To filter the cycles so that they don't include certain nodes or edges,
+ copy your graph and eliminate those nodes or edges before calling.
+ >>> copyG = G.copy()
+ >>> copyG.remove_nodes_from([1])
+ >>> copyG.remove_edges_from([(0,1)])
+ >>> list(nx.simple_cycles(copyG))
+ [[2], [2, 0], [0]]
+
+ References
+ ----------
+ .. [1] Finding all the elementary circuits of a directed graph.
+ D. B. Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975.
+ http://dx.doi.org/10.1137/0204007
+
+ .. [2] Enumerating the cycles of a digraph: a new preprocessing strategy.
+ G. Loizou and P. Thanish, Information Sciences, v. 27, 163-182, 1982.
+
+ .. [3] A search strategy for the elementary cycles of a directed graph.
+ J.L. Szwarcfiter and P.E. Lauer, BIT NUMERICAL MATHEMATICS,
+ v. 16, no. 2, 192-204, 1976.
+
+ See Also
+ --------
+ cycle_basis
+ """
+ def _unblock(thisnode,blocked,B):
+ stack=set([thisnode])
+ while stack:
+ node=stack.pop()
+ if node in blocked:
+ blocked.remove(node)
+ stack.update(B[node])
+ B[node].clear()
+
+ # Johnson's algorithm requires some ordering of the nodes.
+ # We assign the arbitrary ordering given by the strongly connected components.
+ # There is no need to track the ordering as each node is removed as it is processed.
+ subG=G.copy() # save the actual graph so we can mutate it here
+ sccs = nx.strongly_connected_components(subG)
+ while sccs:
+ scc=sccs.pop()
+ # order of scc determines ordering of nodes
+ startnode = scc.pop()
+ # Processing node runs "circuit" routine from recursive version
+ path=[startnode]
+ blocked = set() # vertex: blocked from search?
+ closed = set() # nodes involved in a cycle
+ blocked.add(startnode)
+ B=defaultdict(set) # graph portions that yield no elementary circuit
+ stack=[ (startnode,list(subG[startnode])) ] # subG gives component nbrs
+ while stack:
+ thisnode,nbrs = stack[-1]
+ if nbrs:
+ nextnode = nbrs.pop()
+# print thisnode,nbrs,":",nextnode,blocked,B,path,stack,startnode
+# f=raw_input("pause")
+ if nextnode == startnode:
+ yield path[:]
+ closed.update(path)
+# print "Found a cycle",path,closed
+ elif nextnode not in blocked:
+ path.append(nextnode)
+ stack.append( (nextnode,list(subG[nextnode])) )
+ blocked.add(nextnode)
+ continue
+ # done with nextnode... look for more neighbors
+ if not nbrs: # no more nbrs
+ if thisnode in closed:
+ _unblock(thisnode,blocked,B)
+ else:
+ for nbr in G[thisnode]:
+ if thisnode not in B[nbr]:
+ B[nbr].add(thisnode)
+ stack.pop()
+# assert path[-1]==thisnode
+ path.pop()
+ # done processing this node
+ subG.remove_node(startnode)
+ H=subG.subgraph(scc) # make smaller to avoid work in SCC routine
+ sccs.extend(nx.strongly_connected_components(H))
+
+
+@not_implemented_for('undirected')
+def recursive_simple_cycles(G):
+ """Find simple cycles (elementary circuits) of a directed graph.
+
+ A simple cycle, or elementary circuit, is a closed path where no
+ node appears twice, except that the first and last node are the same.
+ Two elementary circuits are distinct if they are not cyclic permutations
+ of each other.
+
+ This version uses a recursive algorithm to build a list of cycles.
+ You should probably use the iterator version called simple_cycles().
+ Warning: This recursive version uses lots of RAM!
+
+ Parameters
+ ----------
+ G : NetworkX DiGraph
+ A directed graph
+
+ Returns
+ -------
+ A list of circuits, where each circuit is a list of distinct nodes; the
+ starting node is not repeated at the end of the list.
+
+ Examples
+ --------
+ >>> G = nx.DiGraph([(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)])
+ >>> nx.recursive_simple_cycles(G)
+ [[0], [0, 1, 2], [0, 2], [1, 2], [2]]
+
+ See Also
+ --------
+ simple_cycles, cycle_basis (for undirected graphs)
+
+ Notes
+ -----
+ The implementation follows pp. 79-80 in [1]_.
+
+ The time complexity is O((n+e)(c+1)) for n nodes, e edges and c
+ elementary circuits.
+
+ References
+ ----------
+ .. [1] Finding all the elementary circuits of a directed graph.
+ D. B. Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975.
+ http://dx.doi.org/10.1137/0204007
+
+ """
+ # Jon Olav Vik, 2010-08-09
+ def _unblock(thisnode):
+ """Recursively unblock and remove nodes from B[thisnode]."""
+ if blocked[thisnode]:
+ blocked[thisnode] = False
+ while B[thisnode]:
+ _unblock(B[thisnode].pop())
+
+ def circuit(thisnode, startnode, component):
+ closed = False # set to True if elementary path is closed
+ path.append(thisnode)
+ blocked[thisnode] = True
+ for nextnode in component[thisnode]: # direct successors of thisnode
+ if nextnode == startnode:
+ result.append(path[:])
+ closed = True
+ elif not blocked[nextnode]:
+ if circuit(nextnode, startnode, component):
+ closed = True
+ if closed:
+ _unblock(thisnode)
+ else:
+ for nextnode in component[thisnode]:
+ if thisnode not in B[nextnode]: # TODO: use set for speedup?
+ B[nextnode].append(thisnode)
+ path.pop() # remove thisnode from path
+ return closed
+
+ path = [] # stack of nodes in current path
+ blocked = defaultdict(bool) # vertex: blocked from search?
+ B = defaultdict(list) # graph portions that yield no elementary circuit
+ result = [] # list to accumulate the circuits found
+ # Johnson's algorithm requires some ordering of the nodes.
+ # They might not be sortable so we assign an arbitrary ordering.
+ ordering=dict(zip(G,range(len(G))))
+ for s in ordering:
+ # Build the subgraph induced by s and following nodes in the ordering
+ subgraph = G.subgraph(node for node in G
+ if ordering[node] >= ordering[s])
+ # Find the strongly connected component in the subgraph
+ # that contains the least node according to the ordering
+ strongcomp = nx.strongly_connected_components(subgraph)
+ mincomp=min(strongcomp,
+ key=lambda nodes: min(ordering[n] for n in nodes))
+ component = G.subgraph(mincomp)
+ if component:
+ # smallest node in the component according to the ordering
+ startnode = min(component,key=ordering.__getitem__)
+ for node in component:
+ blocked[node] = False
+ B[node][:] = []
+ dummy=circuit(startnode, startnode, component)
+ return result
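+
+# Illustrative sketch (added commentary, not part of the upstream module):
+# on the docstring example both cycle finders enumerate the same elementary
+# circuits, up to rotation and enumeration order, so canonicalizing each
+# cycle by sorting its nodes makes the outputs comparable.
+#
+#     >>> G = nx.DiGraph([(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)])
+#     >>> (sorted(sorted(c) for c in simple_cycles(G))
+#     ...  == sorted(sorted(c) for c in recursive_simple_cycles(G)))
+#     True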
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/dag.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/dag.py
new file mode 100644
index 0000000..4d376e0
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/dag.py
@@ -0,0 +1,275 @@
+# -*- coding: utf-8 -*-
+"""Algorithms for directed acyclic graphs (DAGs)."""
+from fractions import gcd
+import networkx as nx
+# Copyright (C) 2006-2011 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+__author__ = """\n""".join(['Aric Hagberg <aric.hagberg@gmail.com>',
+ 'Dan Schult (dschult@colgate.edu)',
+ 'Ben Edwards (bedwards@cs.unm.edu)'])
+__all__ = ['descendants',
+ 'ancestors',
+ 'topological_sort',
+ 'topological_sort_recursive',
+ 'is_directed_acyclic_graph',
+ 'is_aperiodic']
+
+def descendants(G, source):
+ """Return all nodes reachable from `source` in G.
+
+ Parameters
+ ----------
+ G : NetworkX DiGraph
+ source : node in G
+
+ Returns
+ -------
+ des : set()
+ The descendants of source in G
+ """
+ if not G.has_node(source):
+ raise nx.NetworkXError("The node %s is not in the graph." % source)
+ des = set(nx.shortest_path_length(G, source=source).keys()) - set([source])
+ return des
+
+def ancestors(G, source):
+ """Return all nodes having a path to `source` in G.
+
+ Parameters
+ ----------
+ G : NetworkX DiGraph
+ source : node in G
+
+ Returns
+ -------
+ ancestors : set()
+ The ancestors of source in G
+ """
+ if not G.has_node(source):
+ raise nx.NetworkXError("The node %s is not in the graph." % source)
+ anc = set(nx.shortest_path_length(G, target=source).keys()) - set([source])
+ return anc
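+
+# Illustrative sketch (added commentary, not part of the upstream module):
+# on the chain DAG 0 -> 1 -> 2 the middle node has exactly one descendant
+# and one ancestor.
+#
+#     >>> G = nx.DiGraph([(0, 1), (1, 2)])
+#     >>> descendants(G, 1)
+#     set([2])
+#     >>> ancestors(G, 1)
+#     set([0])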
+
+def is_directed_acyclic_graph(G):
+ """Return True if the graph G is a directed acyclic graph (DAG) or
+ False if not.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ A graph
+
+ Returns
+ -------
+ is_dag : bool
+ True if G is a DAG, false otherwise
+ """
+ if not G.is_directed():
+ return False
+ try:
+ topological_sort(G)
+ return True
+ except nx.NetworkXUnfeasible:
+ return False
+
+def topological_sort(G,nbunch=None):
+ """Return a list of nodes in topological sort order.
+
+ A topological sort is a nonunique permutation of the nodes
+ such that an edge from u to v implies that u appears before v in the
+ topological sort order.
+
+ Parameters
+ ----------
+ G : NetworkX digraph
+ A directed graph
+
+ nbunch : container of nodes (optional)
+ Explore graph in specified order given in nbunch
+
+ Raises
+ ------
+ NetworkXError
+ Topological sort is defined for directed graphs only. If the
+ graph G is undirected, a NetworkXError is raised.
+
+ NetworkXUnfeasible
+ If G is not a directed acyclic graph (DAG) no topological sort
+ exists and a NetworkXUnfeasible exception is raised.
+
+ Notes
+ -----
+ This algorithm is based on a description and proof in
+ The Algorithm Design Manual [1]_ .
+
+ See also
+ --------
+ is_directed_acyclic_graph
+
+ References
+ ----------
+ .. [1] Skiena, S. S. The Algorithm Design Manual (Springer-Verlag, 1998).
+ http://www.amazon.com/exec/obidos/ASIN/0387948600/ref=ase_thealgorithmrepo/
+ """
+ if not G.is_directed():
+ raise nx.NetworkXError(
+ "Topological sort not defined on undirected graphs.")
+
+ # nonrecursive version
+ seen = set()
+ order = []
+ explored = set()
+
+ if nbunch is None:
+ nbunch = G.nodes_iter()
+ for v in nbunch: # process all vertices in G
+ if v in explored:
+ continue
+ fringe = [v] # nodes yet to look at
+ while fringe:
+ w = fringe[-1] # depth first search
+ if w in explored: # already looked down this branch
+ fringe.pop()
+ continue
+ seen.add(w) # mark as seen
+ # Check successors for cycles and for new nodes
+ new_nodes = []
+ for n in G[w]:
+ if n not in explored:
+ if n in seen: #CYCLE !!
+ raise nx.NetworkXUnfeasible("Graph contains a cycle.")
+ new_nodes.append(n)
+ if new_nodes: # Add new_nodes to fringe
+ fringe.extend(new_nodes)
+ else: # No new nodes so w is fully explored
+ explored.add(w)
+ order.append(w)
+ fringe.pop() # done considering this node
+ return list(reversed(order))
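+
+# Illustrative sketch (added commentary, not part of the upstream module):
+# in a valid topological order every edge (u, v) points forward, which can
+# be checked directly on a small diamond-shaped DAG.
+#
+#     >>> G = nx.DiGraph([(1, 2), (1, 3), (2, 4), (3, 4)])
+#     >>> order = topological_sort(G)
+#     >>> all(order.index(u) < order.index(v) for u, v in G.edges_iter())
+#     True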
+
+def topological_sort_recursive(G,nbunch=None):
+ """Return a list of nodes in topological sort order.
+
+ A topological sort is a nonunique permutation of the nodes such
+ that an edge from u to v implies that u appears before v in the
+ topological sort order.
+
+ Parameters
+ ----------
+ G : NetworkX digraph
+
+ nbunch : container of nodes (optional)
+ Explore graph in specified order given in nbunch
+
+ Raises
+ ------
+ NetworkXError
+ Topological sort is defined for directed graphs only. If the
+ graph G is undirected, a NetworkXError is raised.
+
+ NetworkXUnfeasible
+ If G is not a directed acyclic graph (DAG) no topological sort
+ exists and a NetworkXUnfeasible exception is raised.
+
+ Notes
+ -----
+ This is a recursive version of topological sort.
+
+ See also
+ --------
+ topological_sort
+ is_directed_acyclic_graph
+
+ """
+ if not G.is_directed():
+ raise nx.NetworkXError(
+ "Topological sort not defined on undirected graphs.")
+
+ def _dfs(v):
+ ancestors.add(v)
+
+ for w in G[v]:
+ if w in ancestors:
+ raise nx.NetworkXUnfeasible("Graph contains a cycle.")
+
+ if w not in explored:
+ _dfs(w)
+
+ ancestors.remove(v)
+ explored.add(v)
+ order.append(v)
+
+ ancestors = set()
+ explored = set()
+ order = []
+
+ if nbunch is None:
+ nbunch = G.nodes_iter()
+
+ for v in nbunch:
+ if v not in explored:
+ _dfs(v)
+
+ return list(reversed(order))
+
+def is_aperiodic(G):
+ """Return True if G is aperiodic.
+
+ A directed graph is aperiodic if there is no integer k > 1 that
+ divides the length of every cycle in the graph.
+
+ Parameters
+ ----------
+ G : NetworkX DiGraph
+ Graph
+
+ Returns
+ -------
+ aperiodic : boolean
+ True if the graph is aperiodic False otherwise
+
+ Raises
+ ------
+ NetworkXError
+ If G is not directed
+
+ Notes
+ -----
+ This uses the method outlined in [1]_, which runs in O(m) time
+ given m edges in G. Note that a graph is not aperiodic if it is
+ acyclic, as every integer trivially divides the length of every cycle
+ in a graph with no cycles.
+
+ References
+ ----------
+ .. [1] Jarvis, J. P.; Shier, D. R. (1996),
+ Graph-theoretic analysis of finite Markov chains,
+ in Shier, D. R.; Wallenius, K. T., Applied Mathematical Modeling:
+ A Multidisciplinary Approach, CRC Press.
+ """
+ if not G.is_directed():
+ raise nx.NetworkXError("is_aperiodic not defined for undirected graphs")
+
+ s = next(G.nodes_iter())
+ levels = {s:0}
+ this_level = [s]
+ g = 0
+ l = 1
+ while this_level:
+ next_level = []
+ for u in this_level:
+ for v in G[u]:
+ if v in levels: # Non-Tree Edge
+ g = gcd(g, levels[u]-levels[v] + 1)
+ else: # Tree Edge
+ next_level.append(v)
+ levels[v] = l
+ this_level = next_level
+ l += 1
+ if len(levels)==len(G): #All nodes in tree
+ return g==1
+ else:
+ return g==1 and nx.is_aperiodic(G.subgraph(set(G)-set(levels)))
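+
+# Illustrative sketch (added commentary, not part of the upstream module):
+# a directed triangle only has cycles of length 3, so it is periodic, but
+# adding the edge (1, 3) creates the length-2 cycle 1 -> 3 -> 1 and drives
+# the gcd of the cycle lengths down to 1.
+#
+#     >>> G = nx.DiGraph([(1, 2), (2, 3), (3, 1)])
+#     >>> is_aperiodic(G)
+#     False
+#     >>> G.add_edge(1, 3)
+#     >>> is_aperiodic(G)
+#     True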
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/distance_measures.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/distance_measures.py
new file mode 100644
index 0000000..33df686
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/distance_measures.py
@@ -0,0 +1,170 @@
+# -*- coding: utf-8 -*-
+"""
+Graph diameter, radius, eccentricity and other properties.
+"""
+__author__ = "\n".join(['Aric Hagberg (hagberg@lanl.gov)',
+ 'Dan Schult(dschult@colgate.edu)'])
+# Copyright (C) 2004-2010 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+
+__all__ = ['eccentricity', 'diameter', 'radius', 'periphery', 'center']
+
+import networkx
+
+def eccentricity(G, v=None, sp=None):
+ """Return the eccentricity of nodes in G.
+
+ The eccentricity of a node v is the maximum distance from v to
+ all other nodes in G.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ A graph
+
+ v : node, optional
+ Return value of specified node
+
+ sp : dict of dicts, optional
+ All pairs shortest path lengths as a dictionary of dictionaries
+
+ Returns
+ -------
+ ecc : dictionary
+ A dictionary of eccentricity values keyed by node.
+ """
+ order=G.order()
+
+ e={}
+ for n in G.nbunch_iter(v):
+ if sp is None:
+ length=networkx.single_source_shortest_path_length(G,n)
+ L = len(length)
+ else:
+ try:
+ length=sp[n]
+ L = len(length)
+ except TypeError:
+ raise networkx.NetworkXError('Format of "sp" is invalid.')
+ if L != order:
+ msg = "Graph not connected: infinite path length"
+ raise networkx.NetworkXError(msg)
+
+ e[n]=max(length.values())
+
+ if v in G:
+ return e[v] # return single value
+ else:
+ return e
+
+
+def diameter(G, e=None):
+ """Return the diameter of the graph G.
+
+ The diameter is the maximum eccentricity.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ A graph
+
+ e : eccentricity dictionary, optional
+ A precomputed dictionary of eccentricities.
+
+ Returns
+ -------
+ d : integer
+ Diameter of graph
+
+ See Also
+ --------
+ eccentricity
+ """
+ if e is None:
+ e=eccentricity(G)
+ return max(e.values())
+
+def periphery(G, e=None):
+ """Return the periphery of the graph G.
+
+ The periphery is the set of nodes with eccentricity equal to the diameter.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ A graph
+
+ e : eccentricity dictionary, optional
+ A precomputed dictionary of eccentricities.
+
+ Returns
+ -------
+ p : list
+ List of nodes in periphery
+ """
+ if e is None:
+ e=eccentricity(G)
+ diameter=max(e.values())
+ p=[v for v in e if e[v]==diameter]
+ return p
+
+
+def radius(G, e=None):
+ """Return the radius of the graph G.
+
+ The radius is the minimum eccentricity.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ A graph
+
+ e : eccentricity dictionary, optional
+ A precomputed dictionary of eccentricities.
+
+ Returns
+ -------
+ r : integer
+ Radius of graph
+ """
+ if e is None:
+ e=eccentricity(G)
+ return min(e.values())
+
+def center(G, e=None):
+ """Return the center of the graph G.
+
+ The center is the set of nodes with eccentricity equal to radius.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ A graph
+
+ e : eccentricity dictionary, optional
+ A precomputed dictionary of eccentricities.
+
+ Returns
+ -------
+ c : list
+ List of nodes in center
+ """
+ if e is None:
+ e=eccentricity(G)
+ # order the nodes by path length
+ radius=min(e.values())
+ p=[v for v in e if e[v]==radius]
+ return p
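+
+# Illustrative sketch (added commentary, not part of the upstream module):
+# on the path graph 0 - 1 - 2 - 3 the end points have eccentricity 3 and
+# the middle nodes eccentricity 2, giving diameter 3, radius 2, center
+# [1, 2] and periphery [0, 3].
+#
+#     >>> G = networkx.path_graph(4)
+#     >>> diameter(G), radius(G)
+#     (3, 2)
+#     >>> sorted(center(G)), sorted(periphery(G))
+#     ([1, 2], [0, 3])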
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/distance_regular.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/distance_regular.py
new file mode 100644
index 0000000..3fbcdbc
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/distance_regular.py
@@ -0,0 +1,179 @@
+"""
+=======================
+Distance-regular graphs
+=======================
+"""
+# Copyright (C) 2011 by
+# Dheeraj M R <dheerajrav@gmail.com>
+# Aric Hagberg <aric.hagberg@gmail.com>
+# All rights reserved.
+# BSD license.
+import networkx as nx
+__author__ = """\n""".join(['Dheeraj M R <dheerajrav@gmail.com>',
+ 'Aric Hagberg <aric.hagberg@gmail.com>'])
+
+__all__ = ['is_distance_regular','intersection_array','global_parameters']
+
+def is_distance_regular(G):
+ """Returns True if the graph is distance regular, False otherwise.
+
+ A connected graph G is distance-regular if for any nodes x,y
+ and any integers i,j=0,1,...,d (where d is the graph
+ diameter), the number of vertices at distance i from x and
+ distance j from y depends only on i,j and the graph distance
+ between x and y, independently of the choice of x and y.
+
+ Parameters
+ ----------
+ G : NetworkX graph (undirected)
+
+ Returns
+ -------
+ bool
+ True if the graph is Distance Regular, False otherwise
+
+ Examples
+ --------
+ >>> G=nx.hypercube_graph(6)
+ >>> nx.is_distance_regular(G)
+ True
+
+ See Also
+ --------
+ intersection_array, global_parameters
+
+ Notes
+ -----
+ For undirected and simple graphs only
+
+ References
+ ----------
+ .. [1] Brouwer, A. E.; Cohen, A. M.; and Neumaier, A.
+ Distance-Regular Graphs. New York: Springer-Verlag, 1989.
+ .. [2] Weisstein, Eric W. "Distance-Regular Graph."
+ http://mathworld.wolfram.com/Distance-RegularGraph.html
+
+ """
+ try:
+ a=intersection_array(G)
+ return True
+ except nx.NetworkXError:
+ return False
+
+def global_parameters(b,c):
+ """Return global parameters for a given intersection array.
+
+ Given a distance-regular graph G with integers b_i, c_i,i = 0,....,d
+ such that for any 2 vertices x,y in G at a distance i=d(x,y), there
+ are exactly c_i neighbors of y at a distance of i-1 from x and b_i
+ neighbors of y at a distance of i+1 from x.
+
+ Thus, a distance regular graph has the global parameters,
+ [[c_0,a_0,b_0],[c_1,a_1,b_1],......,[c_d,a_d,b_d]] for the
+ intersection array [b_0,b_1,.....b_{d-1};c_1,c_2,.....c_d]
+ where a_i + b_i + c_i = k, with k the degree of every vertex.
+
+ Parameters
+ ----------
+ b,c: tuple of lists
+
+ Returns
+ -------
+ p : list of three-tuples
+
+ Examples
+ --------
+ >>> G=nx.dodecahedral_graph()
+ >>> b,c=nx.intersection_array(G)
+ >>> list(nx.global_parameters(b,c))
+ [(0, 0, 3), (1, 0, 2), (1, 1, 1), (1, 1, 1), (2, 0, 1), (3, 0, 0)]
+
+ References
+ ----------
+ .. [1] Weisstein, Eric W. "Global Parameters."
+ From MathWorld--A Wolfram Web Resource.
+ http://mathworld.wolfram.com/GlobalParameters.html
+
+ See Also
+ --------
+ intersection_array
+ """
+ d=len(b)
+ ba=b[:]
+ ca=c[:]
+ ba.append(0)
+ ca.insert(0,0)
+ k = ba[0]
+ aa = [k-x-y for x,y in zip(ba,ca)]
+ return zip(*[ca,aa,ba])
+
+
+def intersection_array(G):
+ """Returns the intersection array of a distance-regular graph.
+
+ Given a distance-regular graph G with integers b_i, c_i,i = 0,....,d
+ such that for any 2 vertices x,y in G at a distance i=d(x,y), there
+ are exactly c_i neighbors of y at a distance of i-1 from x and b_i
+ neighbors of y at a distance of i+1 from x.
+
+ A distance regular graph's intersection array is given by,
+ [b_0,b_1,.....b_{d-1};c_1,c_2,.....c_d]
+
+ Parameters
+ ----------
+ G : NetworkX graph (undirected)
+
+ Returns
+ -------
+ b,c: tuple of lists
+
+ Examples
+ --------
+ >>> G=nx.icosahedral_graph()
+ >>> nx.intersection_array(G)
+ ([5, 2, 1], [1, 2, 5])
+
+ References
+ ----------
+ .. [1] Weisstein, Eric W. "Intersection Array."
+ From MathWorld--A Wolfram Web Resource.
+ http://mathworld.wolfram.com/IntersectionArray.html
+
+
+ See Also
+ --------
+ global_parameters
+ """
+ if G.is_multigraph() or G.is_directed():
+ raise nx.NetworkXError('Not implemented for directed '
+ 'or multiedge graphs.')
+ # test for regular graph (all degrees must be equal)
+ degree = G.degree_iter()
+ (_,k) = next(degree)
+ for _,knext in degree:
+ if knext != k:
+ raise nx.NetworkXError('Graph is not distance regular.')
+ k = knext
+ path_length = nx.all_pairs_shortest_path_length(G)
+ diameter = max([max(path_length[n].values()) for n in path_length])
+ bint = {} # 'b' intersection array
+ cint = {} # 'c' intersection array
+ for u in G:
+ for v in G:
+ try:
+ i = path_length[u][v]
+ except KeyError: # graph must be connected
+ raise nx.NetworkXError('Graph is not distance regular.')
+ # number of neighbors of v at a distance of i-1 from u
+ c = len([n for n in G[v] if path_length[n][u]==i-1])
+ # number of neighbors of v at a distance of i+1 from u
+ b = len([n for n in G[v] if path_length[n][u]==i+1])
+ # b,c are independent of u and v
+ if cint.get(i,c) != c or bint.get(i,b) != b:
+ raise nx.NetworkXError('Graph is not distance regular')
+ bint[i] = b
+ cint[i] = c
+ return ([bint.get(i,0) for i in range(diameter)],
+ [cint.get(i+1,0) for i in range(diameter)])
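+
+# Illustrative sketch (added commentary, not part of the upstream module):
+# every column (c_i, a_i, b_i) of the global parameters sums to the common
+# degree k = b_0, since a_i is defined as k - b_i - c_i.
+#
+#     >>> G = nx.icosahedral_graph()
+#     >>> b, c = intersection_array(G)
+#     >>> all(sum(col) == b[0] for col in global_parameters(b, c))
+#     True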
+
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/euler.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/euler.py
new file mode 100644
index 0000000..4e834c7
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/euler.py
@@ -0,0 +1,135 @@
+# -*- coding: utf-8 -*-
+"""
+Eulerian circuits and graphs.
+"""
+import networkx as nx
+
+__author__ = """\n""".join(['Nima Mohammadi (nima.irt[AT]gmail.com)',
+ 'Aric Hagberg <hagberg@lanl.gov>'])
+# Copyright (C) 2010 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+
+__all__ = ['is_eulerian', 'eulerian_circuit']
+
+def is_eulerian(G):
+ """Return True if G is an Eulerian graph, False otherwise.
+
+ An Eulerian graph is a graph with an Eulerian circuit.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX Graph
+
+ Examples
+ --------
+ >>> nx.is_eulerian(nx.DiGraph({0:[3], 1:[2], 2:[3], 3:[0, 1]}))
+ True
+ >>> nx.is_eulerian(nx.complete_graph(5))
+ True
+ >>> nx.is_eulerian(nx.petersen_graph())
+ False
+
+ Notes
+ -----
+ This implementation requires the graph to be connected
+ (or strongly connected for directed graphs).
+ """
+ if G.is_directed():
+ # Every node must have equal in degree and out degree
+ for n in G.nodes_iter():
+ if G.in_degree(n) != G.out_degree(n):
+ return False
+ # Must be strongly connected
+ if not nx.is_strongly_connected(G):
+ return False
+ else:
+ # An undirected Eulerian graph has no vertices of odd degrees
+ for v,d in G.degree_iter():
+ if d % 2 != 0:
+ return False
+ # Must be connected
+ if not nx.is_connected(G):
+ return False
+ return True
+
+
+def eulerian_circuit(G, source=None):
+ """Return the edges of an Eulerian circuit in G.
+
+ An Eulerian circuit is a path that crosses every edge in G exactly once
+ and finishes at the starting node.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX Graph
+ source : node, optional
+ Starting node for circuit.
+
+ Returns
+ -------
+ edges : generator
+ A generator that produces edges in the Eulerian circuit.
+
+ Raises
+ ------
+ NetworkXError
+ If the graph is not Eulerian.
+
+ See Also
+ --------
+ is_eulerian
+
+ Notes
+ -----
+ Uses Fleury's algorithm [1]_, [2]_.
+
+ References
+ ----------
+ .. [1] Fleury, "Deux problemes de geometrie de situation",
+ Journal de mathematiques elementaires (1883), 257-261.
+ .. [2] http://en.wikipedia.org/wiki/Eulerian_path
+
+ Examples
+ --------
+ >>> G=nx.complete_graph(3)
+ >>> list(nx.eulerian_circuit(G))
+ [(0, 1), (1, 2), (2, 0)]
+ >>> list(nx.eulerian_circuit(G,source=1))
+ [(1, 0), (0, 2), (2, 1)]
+ >>> [u for u,v in nx.eulerian_circuit(G)] # nodes in circuit
+ [0, 1, 2]
+ """
+ if not is_eulerian(G):
+ raise nx.NetworkXError("G is not Eulerian.")
+
+ g = G.__class__(G) # copy graph structure (not attributes)
+
+ # set starting node
+ if source is None:
+ v = next(g.nodes_iter())
+ else:
+ v = source
+
+ while g.size() > 0:
+ n = v
+ # sort nbrs here to provide stable ordering of alternate cycles
+ nbrs = sorted([v for u,v in g.edges(n)])
+ for v in nbrs:
+ g.remove_edge(n,v)
+ bridge = not nx.is_connected(g.to_undirected())
+ if bridge:
+ g.add_edge(n,v) # add this edge back and try another
+ else:
+ break # this edge is good, break the for loop
+ if bridge:
+ g.remove_edge(n,v)
+ g.remove_node(n)
+ yield (n,v)
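+
+# Illustrative sketch (added commentary, not part of the upstream module):
+# an Eulerian circuit crosses every edge exactly once, so it always yields
+# exactly G.size() edges.
+#
+#     >>> G = nx.complete_graph(5)
+#     >>> len(list(eulerian_circuit(G))) == G.size()
+#     True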
+
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/flow/__init__.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/flow/__init__.py
new file mode 100644
index 0000000..438ab5f
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/flow/__init__.py
@@ -0,0 +1,3 @@
+from networkx.algorithms.flow.maxflow import *
+from networkx.algorithms.flow.mincost import *
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/flow/maxflow.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/flow/maxflow.py
new file mode 100644
index 0000000..ee71528
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/flow/maxflow.py
@@ -0,0 +1,477 @@
+# -*- coding: utf-8 -*-
+"""
+Maximum flow (and minimum cut) algorithms on capacitated graphs.
+"""
+
+__author__ = """Loïc Séguin-C. <loicseguin@gmail.com>"""
+# Copyright (C) 2010 Loïc Séguin-C. <loicseguin@gmail.com>
+# All rights reserved.
+# BSD license.
+
+import networkx as nx
+
+__all__ = ['ford_fulkerson',
+ 'ford_fulkerson_flow',
+ 'ford_fulkerson_flow_and_auxiliary',
+ 'max_flow',
+ 'min_cut']
+
+def ford_fulkerson_flow_and_auxiliary(G, s, t, capacity='capacity'):
+ """Find a maximum single-commodity flow using the Ford-Fulkerson
+ algorithm.
+
+ This function returns both the value of the maximum flow and the
+ auxiliary network resulting after finding the maximum flow, which
+ is also named residual network in the literature. The
+ auxiliary network has edges with capacity equal to the capacity
+ of the edge in the original network minus the flow that went
+ throught that edge. Notice that it can happen that a flow
+ from v to u is allowed in the auxiliary network, though disallowed
+ in the original network. A dictionary with infinite capacity edges
+ can be found as an attribute of the auxiliary network.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ Edges of the graph are expected to have an attribute called
+ 'capacity'. If this attribute is not present, the edge is
+ considered to have infinite capacity.
+
+ s : node
+ Source node for the flow.
+
+ t : node
+ Sink node for the flow.
+
+ capacity: string
+ Edges of the graph G are expected to have an attribute capacity
+ that indicates how much flow the edge can support. If this
+ attribute is not present, the edge is considered to have
+ infinite capacity. Default value: 'capacity'.
+
+ Returns
+ -------
+ flow_value : integer, float
+ Value of the maximum flow, i.e., net outflow from the source.
+
+ auxiliary : DiGraph
+ Residual/auxiliary network after finding the maximum flow.
+ A dictionary with infinite capacity edges can be found as
+ an attribute of this network: auxiliary.graph['inf_capacity_flows']
+
+ Raises
+ ------
+ NetworkXError
+ The algorithm does not support MultiGraph and MultiDiGraph. If
+ the input graph is an instance of one of these two classes, a
+ NetworkXError is raised.
+
+ NetworkXUnbounded
+ If the graph has a path of infinite capacity, the value of a
+ feasible flow on the graph is unbounded above and the function
+ raises a NetworkXUnbounded.
+
+ Notes
+ -----
+ This algorithm uses Edmonds-Karp-Dinitz path selection rule which
+ guarantees a running time of `O(nm^2)` for `n` nodes and `m` edges.
+
+ Examples
+ --------
+ >>> import networkx as nx
+ >>> G = nx.DiGraph()
+ >>> G.add_edge('x','a', capacity=3.0)
+ >>> G.add_edge('x','b', capacity=1.0)
+ >>> G.add_edge('a','c', capacity=3.0)
+ >>> G.add_edge('b','c', capacity=5.0)
+ >>> G.add_edge('b','d', capacity=4.0)
+ >>> G.add_edge('d','e', capacity=2.0)
+ >>> G.add_edge('c','y', capacity=2.0)
+ >>> G.add_edge('e','y', capacity=3.0)
+ >>> flow, auxiliary = nx.ford_fulkerson_flow_and_auxiliary(G, 'x', 'y')
+ >>> flow
+ 3.0
+ >>> # A dictionary with infinite capacity flows can be found as an
+ >>> # attribute of the auxiliary network
+ >>> inf_capacity_flows = auxiliary.graph['inf_capacity_flows']
+
+ """
+ if G.is_multigraph():
+ raise nx.NetworkXError(
+ 'MultiGraph and MultiDiGraph not supported (yet).')
+
+ if s not in G:
+ raise nx.NetworkXError('node %s not in graph' % str(s))
+ if t not in G:
+ raise nx.NetworkXError('node %s not in graph' % str(t))
+
+ auxiliary = _create_auxiliary_digraph(G, capacity=capacity)
+ inf_capacity_flows = auxiliary.graph['inf_capacity_flows']
+
+ flow_value = 0 # Initial feasible flow.
+
+ # As long as there is an (s, t)-path in the auxiliary digraph, find
+ # the shortest (with respect to the number of arcs) such path and
+ # augment the flow on this path.
+ while True:
+ try:
+ path_nodes = nx.bidirectional_shortest_path(auxiliary, s, t)
+ except nx.NetworkXNoPath:
+ break
+
+ # Get the list of edges in the shortest path.
+ path_edges = list(zip(path_nodes[:-1], path_nodes[1:]))
+
+ # Find the minimum capacity of an edge in the path.
+ try:
+ path_capacity = min([auxiliary[u][v][capacity]
+ for u, v in path_edges
+ if capacity in auxiliary[u][v]])
+ except ValueError:
+ # path of infinite capacity implies no max flow
+ raise nx.NetworkXUnbounded(
+ "Infinite capacity path, flow unbounded above.")
+
+ flow_value += path_capacity
+
+ # Augment the flow along the path.
+ for u, v in path_edges:
+ edge_attr = auxiliary[u][v]
+ if capacity in edge_attr:
+ edge_attr[capacity] -= path_capacity
+ if edge_attr[capacity] == 0:
+ auxiliary.remove_edge(u, v)
+ else:
+ inf_capacity_flows[(u, v)] += path_capacity
+
+ if auxiliary.has_edge(v, u):
+ if capacity in auxiliary[v][u]:
+ auxiliary[v][u][capacity] += path_capacity
+ else:
+ auxiliary.add_edge(v, u, {capacity: path_capacity})
+
+ auxiliary.graph['inf_capacity_flows'] = inf_capacity_flows
+ return flow_value, auxiliary
+
+def _create_auxiliary_digraph(G, capacity='capacity'):
+ """Initialize an auxiliary digraph and dict of infinite capacity
+ edges for a given graph G.
+ Ignore edges with capacity <= 0.
+ """
+ auxiliary = nx.DiGraph()
+ auxiliary.add_nodes_from(G)
+ inf_capacity_flows = {}
+ if nx.is_directed(G):
+ for edge in G.edges(data = True):
+ if capacity in edge[2]:
+ if edge[2][capacity] > 0:
+ auxiliary.add_edge(*edge)
+ else:
+ auxiliary.add_edge(*edge)
+ inf_capacity_flows[(edge[0], edge[1])] = 0
+ else:
+ for edge in G.edges(data = True):
+ if capacity in edge[2]:
+ if edge[2][capacity] > 0:
+ auxiliary.add_edge(*edge)
+ auxiliary.add_edge(edge[1], edge[0], edge[2])
+ else:
+ auxiliary.add_edge(*edge)
+ auxiliary.add_edge(edge[1], edge[0], edge[2])
+ inf_capacity_flows[(edge[0], edge[1])] = 0
+ inf_capacity_flows[(edge[1], edge[0])] = 0
+
+ auxiliary.graph['inf_capacity_flows'] = inf_capacity_flows
+ return auxiliary
+
+
+def _create_flow_dict(G, H, capacity='capacity'):
+ """Creates the flow dict of dicts on G corresponding to the
+ auxiliary digraph H and infinite capacity edges flows
+ inf_capacity_flows.
+ """
+ inf_capacity_flows = H.graph['inf_capacity_flows']
+ flow = dict([(u, {}) for u in G])
+
+ if G.is_directed():
+ for u, v in G.edges_iter():
+ if H.has_edge(u, v):
+ if capacity in G[u][v]:
+ flow[u][v] = max(0, G[u][v][capacity] - H[u][v][capacity])
+ elif G.has_edge(v, u) and not capacity in G[v][u]:
+ flow[u][v] = max(0, inf_capacity_flows[(u, v)] -
+ inf_capacity_flows[(v, u)])
+ else:
+ flow[u][v] = max(0, H[v].get(u, {}).get(capacity, 0) -
+ G[v].get(u, {}).get(capacity, 0))
+ else:
+ flow[u][v] = G[u][v][capacity]
+
+ else: # undirected
+ for u, v in G.edges_iter():
+ if H.has_edge(u, v):
+ if capacity in G[u][v]:
+ flow[u][v] = abs(G[u][v][capacity] - H[u][v][capacity])
+ else:
+ flow[u][v] = abs(inf_capacity_flows[(u, v)] -
+ inf_capacity_flows[(v, u)])
+ else:
+ flow[u][v] = G[u][v][capacity]
+ flow[v][u] = flow[u][v]
+
+ return flow
+
+def ford_fulkerson(G, s, t, capacity='capacity'):
+ """Find a maximum single-commodity flow using the Ford-Fulkerson
+ algorithm.
+
+ This algorithm uses Edmonds-Karp-Dinitz path selection rule which
+ guarantees a running time of `O(nm^2)` for `n` nodes and `m` edges.
+
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ Edges of the graph are expected to have an attribute called
+ 'capacity'. If this attribute is not present, the edge is
+ considered to have infinite capacity.
+
+ s : node
+ Source node for the flow.
+
+ t : node
+ Sink node for the flow.
+
+ capacity: string
+ Edges of the graph G are expected to have an attribute capacity
+ that indicates how much flow the edge can support. If this
+ attribute is not present, the edge is considered to have
+ infinite capacity. Default value: 'capacity'.
+
+ Returns
+ -------
+ flow_value : integer, float
+ Value of the maximum flow, i.e., net outflow from the source.
+
+ flow_dict : dictionary
+ Dictionary of dictionaries keyed by nodes such that
+ flow_dict[u][v] is the flow edge (u, v).
+
+ Raises
+ ------
+ NetworkXError
+ The algorithm does not support MultiGraph and MultiDiGraph. If
+ the input graph is an instance of one of these two classes, a
+ NetworkXError is raised.
+
+ NetworkXUnbounded
+ If the graph has a path of infinite capacity, the value of a
+ feasible flow on the graph is unbounded above and the function
+ raises a NetworkXUnbounded.
+
+ Examples
+ --------
+ >>> import networkx as nx
+ >>> G = nx.DiGraph()
+ >>> G.add_edge('x','a', capacity=3.0)
+ >>> G.add_edge('x','b', capacity=1.0)
+ >>> G.add_edge('a','c', capacity=3.0)
+ >>> G.add_edge('b','c', capacity=5.0)
+ >>> G.add_edge('b','d', capacity=4.0)
+ >>> G.add_edge('d','e', capacity=2.0)
+ >>> G.add_edge('c','y', capacity=2.0)
+ >>> G.add_edge('e','y', capacity=3.0)
+ >>> flow, F = nx.ford_fulkerson(G, 'x', 'y')
+ >>> flow
+ 3.0
+ """
+ flow_value, auxiliary = ford_fulkerson_flow_and_auxiliary(G,
+ s, t, capacity=capacity)
+ flow_dict = _create_flow_dict(G, auxiliary, capacity=capacity)
+ return flow_value, flow_dict
+
+def ford_fulkerson_flow(G, s, t, capacity='capacity'):
+ """Return a maximum flow for a single-commodity flow problem.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ Edges of the graph are expected to have an attribute called
+ 'capacity'. If this attribute is not present, the edge is
+ considered to have infinite capacity.
+
+ s : node
+ Source node for the flow.
+
+ t : node
+ Sink node for the flow.
+
+ capacity: string
+ Edges of the graph G are expected to have an attribute capacity
+ that indicates how much flow the edge can support. If this
+ attribute is not present, the edge is considered to have
+ infinite capacity. Default value: 'capacity'.
+
+ Returns
+ -------
+ flow_dict : dictionary
+ Dictionary of dictionaries keyed by nodes such that
+ flow_dict[u][v] is the flow edge (u, v).
+
+ Raises
+ ------
+ NetworkXError
+ The algorithm does not support MultiGraph and MultiDiGraph. If
+ the input graph is an instance of one of these two classes, a
+ NetworkXError is raised.
+
+ NetworkXUnbounded
+ If the graph has a path of infinite capacity, the value of a
+ feasible flow on the graph is unbounded above and the function
+ raises a NetworkXUnbounded.
+
+ Examples
+ --------
+ >>> import networkx as nx
+ >>> G = nx.DiGraph()
+ >>> G.add_edge('x','a', capacity=3.0)
+ >>> G.add_edge('x','b', capacity=1.0)
+ >>> G.add_edge('a','c', capacity=3.0)
+ >>> G.add_edge('b','c', capacity=5.0)
+ >>> G.add_edge('b','d', capacity=4.0)
+ >>> G.add_edge('d','e', capacity=2.0)
+ >>> G.add_edge('c','y', capacity=2.0)
+ >>> G.add_edge('e','y', capacity=3.0)
+ >>> F = nx.ford_fulkerson_flow(G, 'x', 'y')
+ >>> for u, v in sorted(G.edges_iter()):
+ ... print('(%s, %s) %.2f' % (u, v, F[u][v]))
+ ...
+ (a, c) 2.00
+ (b, c) 0.00
+ (b, d) 1.00
+ (c, y) 2.00
+ (d, e) 1.00
+ (e, y) 1.00
+ (x, a) 2.00
+ (x, b) 1.00
+ """
+ flow_value, auxiliary = ford_fulkerson_flow_and_auxiliary(G,
+ s, t, capacity=capacity)
+ return _create_flow_dict(G, auxiliary, capacity=capacity)
+
+def max_flow(G, s, t, capacity='capacity'):
+ """Find the value of a maximum single-commodity flow.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ Edges of the graph are expected to have an attribute called
+ 'capacity'. If this attribute is not present, the edge is
+ considered to have infinite capacity.
+
+ s : node
+ Source node for the flow.
+
+ t : node
+ Sink node for the flow.
+
+ capacity: string
+ Edges of the graph G are expected to have an attribute capacity
+ that indicates how much flow the edge can support. If this
+ attribute is not present, the edge is considered to have
+ infinite capacity. Default value: 'capacity'.
+
+ Returns
+ -------
+ flow_value : integer, float
+ Value of the maximum flow, i.e., net outflow from the source.
+
+ Raises
+ ------
+ NetworkXError
+ The algorithm does not support MultiGraph and MultiDiGraph. If
+ the input graph is an instance of one of these two classes, a
+ NetworkXError is raised.
+
+ NetworkXUnbounded
+ If the graph has a path of infinite capacity, the value of a
+ feasible flow on the graph is unbounded above and the function
+ raises a NetworkXUnbounded.
+
+ Examples
+ --------
+ >>> import networkx as nx
+ >>> G = nx.DiGraph()
+ >>> G.add_edge('x','a', capacity=3.0)
+ >>> G.add_edge('x','b', capacity=1.0)
+ >>> G.add_edge('a','c', capacity=3.0)
+ >>> G.add_edge('b','c', capacity=5.0)
+ >>> G.add_edge('b','d', capacity=4.0)
+ >>> G.add_edge('d','e', capacity=2.0)
+ >>> G.add_edge('c','y', capacity=2.0)
+ >>> G.add_edge('e','y', capacity=3.0)
+ >>> flow = nx.max_flow(G, 'x', 'y')
+ >>> flow
+ 3.0
+ """
+ return ford_fulkerson_flow_and_auxiliary(G, s, t, capacity=capacity)[0]
+
+
+def min_cut(G, s, t, capacity='capacity'):
+ """Compute the value of a minimum (s, t)-cut.
+
+ Use the max-flow min-cut theorem, i.e., the capacity of a minimum
+ capacity cut is equal to the flow value of a maximum flow.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ Edges of the graph are expected to have an attribute called
+ 'capacity'. If this attribute is not present, the edge is
+ considered to have infinite capacity.
+
+ s : node
+ Source node for the flow.
+
+ t : node
+ Sink node for the flow.
+
+ capacity: string
+ Edges of the graph G are expected to have an attribute capacity
+ that indicates how much flow the edge can support. If this
+ attribute is not present, the edge is considered to have
+ infinite capacity. Default value: 'capacity'.
+
+ Returns
+ -------
+ cutValue : integer, float
+ Value of the minimum cut.
+
+ Raises
+ ------
+ NetworkXUnbounded
+ If the graph has a path of infinite capacity, all cuts have
+ infinite capacity and the function raises a NetworkXUnbounded.
+
+ Examples
+ --------
+ >>> import networkx as nx
+ >>> G = nx.DiGraph()
+ >>> G.add_edge('x','a', capacity = 3.0)
+ >>> G.add_edge('x','b', capacity = 1.0)
+ >>> G.add_edge('a','c', capacity = 3.0)
+ >>> G.add_edge('b','c', capacity = 5.0)
+ >>> G.add_edge('b','d', capacity = 4.0)
+ >>> G.add_edge('d','e', capacity = 2.0)
+ >>> G.add_edge('c','y', capacity = 2.0)
+ >>> G.add_edge('e','y', capacity = 3.0)
+ >>> nx.min_cut(G, 'x', 'y')
+ 3.0
+ """
+
+ try:
+ return ford_fulkerson_flow_and_auxiliary(G, s, t, capacity=capacity)[0]
+ except nx.NetworkXUnbounded:
+ raise nx.NetworkXUnbounded(
+ "Infinite capacity path, no minimum cut.")
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/flow/mincost.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/flow/mincost.py
new file mode 100644
index 0000000..9e5f574
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/flow/mincost.py
@@ -0,0 +1,802 @@
+# -*- coding: utf-8 -*-
+"""
+Minimum cost flow algorithms on directed connected graphs.
+"""
+
+__author__ = """Loïc Séguin-C. <loicseguin@gmail.com>"""
+# Copyright (C) 2010 Loïc Séguin-C. <loicseguin@gmail.com>
+# All rights reserved.
+# BSD license.
+
+
+__all__ = ['network_simplex',
+ 'min_cost_flow_cost',
+ 'min_cost_flow',
+ 'cost_of_flow',
+ 'max_flow_min_cost']
+
+import networkx as nx
+from networkx.utils import generate_unique_node
+
+def _initial_tree_solution(G, demand = 'demand', capacity = 'capacity',
+ weight = 'weight'):
+ """Find a initial tree solution rooted at r.
+
+ The initial tree solution is obtained by considering edges (r, v)
+ for all nodes v with non-negative demand and (v, r) for all nodes
+ with negative demand. If these edges do not exist, we add them to
+ the graph and call them artificial edges.
+ """
+ H = nx.DiGraph((edge for edge in G.edges(data=True) if
+ edge[2].get(capacity, 1) > 0))
+ demand_nodes = (node for node in G.nodes_iter(data=True) if
+ node[1].get(demand, 0) != 0)
+ H.add_nodes_from(demand_nodes)
+ r = H.nodes()[0]
+
+ T = nx.DiGraph()
+ y = {r: 0}
+ artificialEdges = []
+ flowCost = 0
+
+ n = H.number_of_nodes()
+ try:
+ maxWeight = max(abs(d[weight]) for u, v, d in H.edges(data = True)
+ if weight in d)
+ except ValueError:
+ maxWeight = 0
+ hugeWeight = 1 + n * maxWeight
+
+ for v, d in H.nodes(data = True)[1:]:
+ vDemand = d.get(demand, 0)
+ if vDemand >= 0:
+ if not (r, v) in H.edges():
+ H.add_edge(r, v, {weight: hugeWeight, 'flow': vDemand})
+ artificialEdges.append((r, v))
+ y[v] = H[r][v].get(weight, 0)
+ T.add_edge(r, v)
+ flowCost += vDemand * H[r][v].get(weight, 0)
+
+ else: # (r, v) in H.edges()
+ if (not capacity in H[r][v]
+ or vDemand <= H[r][v][capacity]):
+ H[r][v]['flow'] = vDemand
+ y[v] = H[r][v].get(weight, 0)
+ T.add_edge(r, v)
+ flowCost += vDemand * H[r][v].get(weight, 0)
+
+ else: # existing edge does not have enough capacity
+ newLabel = generate_unique_node()
+ H.add_edge(r, newLabel, {weight: hugeWeight, 'flow': vDemand})
+ H.add_edge(newLabel, v, {weight: hugeWeight, 'flow': vDemand})
+ artificialEdges.append((r, newLabel))
+ artificialEdges.append((newLabel, v))
+ y[v] = 2 * hugeWeight
+ y[newLabel] = hugeWeight
+ T.add_edge(r, newLabel)
+ T.add_edge(newLabel, v)
+ flowCost += 2 * vDemand * hugeWeight
+
+ else: # vDemand < 0
+ if not (v, r) in H.edges():
+ H.add_edge(v, r, {weight: hugeWeight, 'flow': -vDemand})
+ artificialEdges.append((v, r))
+ y[v] = -H[v][r].get(weight, 0)
+ T.add_edge(v, r)
+ flowCost += -vDemand * H[v][r].get(weight, 0)
+
+ else:
+ if (not capacity in H[v][r]
+ or -vDemand <= H[v][r][capacity]):
+ H[v][r]['flow'] = -vDemand
+ y[v] = -H[v][r].get(weight, 0)
+ T.add_edge(v, r)
+ flowCost += -vDemand * H[v][r].get(weight, 0)
+ else: # existing edge does not have enough capacity
+ newLabel = generate_unique_node()
+ H.add_edge(v, newLabel,
+ {weight: hugeWeight, 'flow': -vDemand})
+ H.add_edge(newLabel, r,
+ {weight: hugeWeight, 'flow': -vDemand})
+ artificialEdges.append((v, newLabel))
+ artificialEdges.append((newLabel, r))
+ y[v] = -2 * hugeWeight
+ y[newLabel] = -hugeWeight
+ T.add_edge(v, newLabel)
+ T.add_edge(newLabel, r)
+ flowCost += 2 * -vDemand * hugeWeight
+
+ return H, T, y, artificialEdges, flowCost, r
+
+
+def _find_entering_edge(H, c, capacity = 'capacity'):
+ """Find an edge which creates a negative cost cycle in the actual
+ tree solution.
+
+ The reduced cost of every edge gives the value of the cycle
+ obtained by adding that edge to the tree solution. If that value is
+ negative, we will augment the flow in the direction indicated by
+ the edge. Otherwise, we will augment the flow in the reverse
+ direction.
+
+ If no edge is found, return an empty tuple. This will cause the
+ main loop of the algorithm to terminate.
+ """
+ newEdge = ()
+ for u, v, d in H.edges_iter(data = True):
+ if d.get('flow', 0) == 0:
+ if c[(u, v)] < 0:
+ newEdge = (u, v)
+ break
+ else:
+ if capacity in d:
+ if (d.get('flow', 0) == d[capacity]
+ and c[(u, v)] > 0):
+ newEdge = (u, v)
+ break
+ return newEdge
+
+
+def _find_leaving_edge(H, T, cycle, newEdge, capacity = 'capacity',
+ reverse=False):
+ """Find an edge that will leave the basis and the value by which we
+ can increase or decrease the flow on that edge.
+
+ The leaving arc rule is used to prevent cycling.
+
+ If cycle has no reverse edge and no forward edge of finite
+ capacity, it means that cycle is a negative cost infinite capacity
+ cycle. This implies that the cost of a flow satisfying all demands
+ is unbounded below. An exception is raised in this case.
+ """
+ eps = False
+ leavingEdge = ()
+
+ # If cycle is a digon.
+ if len(cycle) == 3:
+ u, v = newEdge
+ if capacity not in H[u][v] and capacity not in H[v][u]:
+ raise nx.NetworkXUnbounded(
+ "Negative cost cycle of infinite capacity found. "
+ + "Min cost flow unbounded below.")
+
+ if reverse:
+ if H[u][v].get('flow', 0) > H[v][u].get('flow', 0):
+ return (v, u), H[v][u].get('flow', 0)
+ else:
+ return (u, v), H[u][v].get('flow', 0)
+ else:
+ uv_residual = H[u][v].get(capacity, 0) - H[u][v].get('flow', 0)
+ vu_residual = H[v][u].get(capacity, 0) - H[v][u].get('flow', 0)
+ if (uv_residual > vu_residual):
+ return (v, u), vu_residual
+ else:
+ return (u, v), uv_residual
+
+ # Find the forward edge with the minimum value for capacity - 'flow'
+ # and the reverse edge with the minimum value for 'flow'.
+ for index, u in enumerate(cycle[:-1]):
+ edgeCapacity = False
+ edge = ()
+ v = cycle[index + 1]
+ if (u, v) in T.edges() + [newEdge]: #forward edge
+ if capacity in H[u][v]: # edge (u, v) has finite capacity
+ edgeCapacity = H[u][v][capacity] - H[u][v].get('flow', 0)
+ edge = (u, v)
+ else: #reverse edge
+ edgeCapacity = H[v][u].get('flow', 0)
+ edge = (v, u)
+
+ # Determine if edge might be the leaving edge.
+ if edge:
+ if leavingEdge:
+ if edgeCapacity < eps:
+ eps = edgeCapacity
+ leavingEdge = edge
+ else:
+ eps = edgeCapacity
+ leavingEdge = edge
+
+ if not leavingEdge:
+ raise nx.NetworkXUnbounded(
+ "Negative cost cycle of infinite capacity found. "
+ + "Min cost flow unbounded below.")
+
+ return leavingEdge, eps
+
+
+def _create_flow_dict(G, H):
+ """Creates the flow dict of dicts of graph G with auxiliary graph H."""
+ flowDict = dict([(u, {}) for u in G])
+
+ for u in G.nodes_iter():
+ for v in G.neighbors(u):
+ if H.has_edge(u, v):
+ flowDict[u][v] = H[u][v].get('flow', 0)
+ else:
+ flowDict[u][v] = 0
+ return flowDict
+
+
+def network_simplex(G, demand = 'demand', capacity = 'capacity',
+ weight = 'weight'):
+ """Find a minimum cost flow satisfying all demands in digraph G.
+
+ This is a primal network simplex algorithm that uses the leaving
+ arc rule to prevent cycling.
+
+ G is a digraph with edge costs and capacities and in which nodes
+ have demand, i.e., they want to send or receive some amount of
+ flow. A negative demand means that the node wants to send flow, a
+ positive demand means that the node wants to receive flow. A flow on
+ the digraph G satisfies all demands if the net flow into each node
+ is equal to the demand of that node.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ DiGraph on which a minimum cost flow satisfying all demands is
+ to be found.
+
+ demand: string
+ Nodes of the graph G are expected to have an attribute demand
+ that indicates how much flow a node wants to send (negative
+ demand) or receive (positive demand). Note that the sum of the
+ demands should be 0, otherwise the problem is not feasible. If
+ this attribute is not present, a node is considered to have 0
+ demand. Default value: 'demand'.
+
+ capacity: string
+ Edges of the graph G are expected to have an attribute capacity
+ that indicates how much flow the edge can support. If this
+ attribute is not present, the edge is considered to have
+ infinite capacity. Default value: 'capacity'.
+
+ weight: string
+ Edges of the graph G are expected to have an attribute weight
+ that indicates the cost incurred by sending one unit of flow on
+ that edge. If not present, the weight is considered to be 0.
+ Default value: 'weight'.
+
+ Returns
+ -------
+ flowCost: integer, float
+ Cost of a minimum cost flow satisfying all demands.
+
+ flowDict: dictionary
+ Dictionary of dictionaries keyed by nodes such that
+ flowDict[u][v] is the flow edge (u, v).
+
+ Raises
+ ------
+ NetworkXError
+ This exception is raised if the input graph is not directed,
+ not connected or is a multigraph.
+
+ NetworkXUnfeasible
+ This exception is raised in the following situations:
+ * The sum of the demands is not zero. Then, there is no
+ flow satisfying all demands.
+ * There is no flow satisfying all demands.
+
+ NetworkXUnbounded
+ This exception is raised if the digraph G has a cycle of
+ negative cost and infinite capacity. Then, the cost of a flow
+ satisfying all demands is unbounded below.
+
+ Notes
+ -----
+ This algorithm is not guaranteed to work if edge weights
+ are floating point numbers (overflows and roundoff errors can
+ cause problems).
+
+ See also
+ --------
+ cost_of_flow, max_flow_min_cost, min_cost_flow, min_cost_flow_cost
+
+ Examples
+ --------
+ A simple example of a min cost flow problem.
+
+ >>> import networkx as nx
+ >>> G = nx.DiGraph()
+ >>> G.add_node('a', demand = -5)
+ >>> G.add_node('d', demand = 5)
+ >>> G.add_edge('a', 'b', weight = 3, capacity = 4)
+ >>> G.add_edge('a', 'c', weight = 6, capacity = 10)
+ >>> G.add_edge('b', 'd', weight = 1, capacity = 9)
+ >>> G.add_edge('c', 'd', weight = 2, capacity = 5)
+ >>> flowCost, flowDict = nx.network_simplex(G)
+ >>> flowCost
+ 24
+ >>> flowDict # doctest: +SKIP
+ {'a': {'c': 1, 'b': 4}, 'c': {'d': 1}, 'b': {'d': 4}, 'd': {}}
+
+ The min cost flow algorithm can also be used to solve shortest path
+ problems. To find the shortest path between two nodes u and v,
+ give all edges an infinite capacity, give node u a demand of -1 and
+ node v a demand of 1. Then run the network simplex. The value of a
+ min cost flow will be the distance between u and v and edges
+ carrying positive flow will indicate the path.
+
+ >>> G=nx.DiGraph()
+ >>> G.add_weighted_edges_from([('s','u',10), ('s','x',5),
+ ... ('u','v',1), ('u','x',2),
+ ... ('v','y',1), ('x','u',3),
+ ... ('x','v',5), ('x','y',2),
+ ... ('y','s',7), ('y','v',6)])
+ >>> G.add_node('s', demand = -1)
+ >>> G.add_node('v', demand = 1)
+ >>> flowCost, flowDict = nx.network_simplex(G)
+ >>> flowCost == nx.shortest_path_length(G, 's', 'v', weight = 'weight')
+ True
+ >>> sorted([(u, v) for u in flowDict for v in flowDict[u] if flowDict[u][v] > 0])
+ [('s', 'x'), ('u', 'v'), ('x', 'u')]
+ >>> nx.shortest_path(G, 's', 'v', weight = 'weight')
+ ['s', 'x', 'u', 'v']
+
+ It is possible to change the name of the attributes used for the
+ algorithm.
+
+ >>> G = nx.DiGraph()
+ >>> G.add_node('p', spam = -4)
+ >>> G.add_node('q', spam = 2)
+ >>> G.add_node('a', spam = -2)
+ >>> G.add_node('d', spam = -1)
+ >>> G.add_node('t', spam = 2)
+ >>> G.add_node('w', spam = 3)
+ >>> G.add_edge('p', 'q', cost = 7, vacancies = 5)
+ >>> G.add_edge('p', 'a', cost = 1, vacancies = 4)
+ >>> G.add_edge('q', 'd', cost = 2, vacancies = 3)
+ >>> G.add_edge('t', 'q', cost = 1, vacancies = 2)
+ >>> G.add_edge('a', 't', cost = 2, vacancies = 4)
+ >>> G.add_edge('d', 'w', cost = 3, vacancies = 4)
+ >>> G.add_edge('t', 'w', cost = 4, vacancies = 1)
+ >>> flowCost, flowDict = nx.network_simplex(G, demand = 'spam',
+ ... capacity = 'vacancies',
+ ... weight = 'cost')
+ >>> flowCost
+ 37
+ >>> flowDict # doctest: +SKIP
+ {'a': {'t': 4}, 'd': {'w': 2}, 'q': {'d': 1}, 'p': {'q': 2, 'a': 2}, 't': {'q': 1, 'w': 1}, 'w': {}}
+
+ References
+ ----------
+ W. J. Cook, W. H. Cunningham, W. R. Pulleyblank and A. Schrijver.
+ Combinatorial Optimization. Wiley-Interscience, 1998.
+
+ """
+
+ if not G.is_directed():
+ raise nx.NetworkXError("Undirected graph not supported.")
+ if not nx.is_connected(G.to_undirected()):
+ raise nx.NetworkXError("Not connected graph not supported.")
+ if G.is_multigraph():
+ raise nx.NetworkXError("MultiDiGraph not supported.")
+ if sum(d[demand] for v, d in G.nodes(data = True)
+ if demand in d) != 0:
+ raise nx.NetworkXUnfeasible("Sum of the demands should be 0.")
+
+ # Fix an arbitrarily chosen root node and find an initial tree solution.
+ H, T, y, artificialEdges, flowCost, r = \
+ _initial_tree_solution(G, demand = demand, capacity = capacity,
+ weight = weight)
+
+ # Initialize the reduced costs.
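+ # The reduced cost of edge (u, v) is weight(u, v) + y[u] - y[v], where
+ # y holds the node potentials of the current tree solution. A non-tree
+ # edge can improve the solution when its reduced cost is negative (if
+ # the edge is empty) or positive (if the edge is saturated).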
+ c = {}
+ for u, v, d in H.edges_iter(data = True):
+ c[(u, v)] = d.get(weight, 0) + y[u] - y[v]
+
+ # Print stuff for debugging.
+ # print('-' * 78)
+ # nbIter = 0
+ # print('Iteration %d' % nbIter)
+ # nbIter += 1
+ # print('Tree solution: %s' % T.edges())
+ # print(' Edge %11s%10s' % ('Flow', 'Red Cost'))
+ # for u, v, d in H.edges(data = True):
+ # flag = ''
+ # if (u, v) in artificialEdges:
+ # flag = '*'
+ # print('(%s, %s)%1s%10d%10d' % (u, v, flag, d.get('flow', 0),
+ # c[(u, v)]))
+ # print('Distances: %s' % y)
+
+ # Main loop.
+ while True:
+ newEdge = _find_entering_edge(H, c, capacity = capacity)
+ if not newEdge:
+ break # Optimal basis found. Main loop is over.
+ cycleCost = abs(c[newEdge])
+
+ # Find the cycle created by adding newEdge to T.
+ path1 = nx.shortest_path(T.to_undirected(), r, newEdge[0])
+ path2 = nx.shortest_path(T.to_undirected(), r, newEdge[1])
+ join = r
+ for index, node in enumerate(path1[1:]):
+ if index + 1 < len(path2) and node == path2[index + 1]:
+ join = node
+ else:
+ break
+ path1 = path1[path1.index(join):]
+ path2 = path2[path2.index(join):]
+ cycle = []
+ if H[newEdge[0]][newEdge[1]].get('flow', 0) == 0:
+ reverse = False
+ path2.reverse()
+ cycle = path1 + path2
+ else: # newEdge is at capacity
+ reverse = True
+ path1.reverse()
+ cycle = path2 + path1
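+ # Orient the cycle so that augmenting along it increases flow on an
+ # empty newEdge or drains a saturated one; either way each unit of
+ # flow pushed around the cycle lowers the total cost by cycleCost.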
+
+ # Find the leaving edge. Will stop here if cycle is an infinite
+ # capacity negative cost cycle.
+ leavingEdge, eps = _find_leaving_edge(H, T, cycle, newEdge,
+ capacity=capacity,
+ reverse=reverse)
+
+ # Actual augmentation happens here. If eps = 0, don't bother.
+ if eps:
+ flowCost -= cycleCost * eps
+ if len(cycle) == 3:
+ if reverse:
+ eps = -eps
+ u, v = newEdge
+ H[u][v]['flow'] = H[u][v].get('flow', 0) + eps
+ H[v][u]['flow'] = H[v][u].get('flow', 0) + eps
+ else:
+ for index, u in enumerate(cycle[:-1]):
+ v = cycle[index + 1]
+ if (u, v) in T.edges() + [newEdge]:
+ H[u][v]['flow'] = H[u][v].get('flow', 0) + eps
+ else: # (v, u) in T.edges():
+ H[v][u]['flow'] -= eps
+
+ # Update tree solution.
+ T.add_edge(*newEdge)
+ T.remove_edge(*leavingEdge)
+
+ # Update distances and reduced costs.
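+ # Removing newEdge splits the tree into a component R containing the
+ # root r and a component notR. Only the potentials of nodes in notR
+ # shift (by the entering edge's reduced cost), so only reduced costs
+ # of edges touching notR are recomputed below.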
+ if newEdge != leavingEdge:
+ forest = nx.DiGraph(T)
+ forest.remove_edge(*newEdge)
+ R, notR = nx.connected_component_subgraphs(forest.to_undirected())
+ if r in notR.nodes(): # make sure r is in R
+ R, notR = notR, R
+ if newEdge[0] in R.nodes():
+ for v in notR.nodes():
+ y[v] += c[newEdge]
+ else:
+ for v in notR.nodes():
+ y[v] -= c[newEdge]
+ for u, v in H.edges():
+ if u in notR.nodes() or v in notR.nodes():
+ c[(u, v)] = H[u][v].get(weight, 0) + y[u] - y[v]
+
+ # Print stuff for debugging.
+ # print('-' * 78)
+ # print('Iteration %d' % nbIter)
+ # nbIter += 1
+ # print('Tree solution: %s' % T.edges())
+ # print('New edge: (%s, %s)' % (newEdge[0], newEdge[1]))
+ # print('Leaving edge: (%s, %s)' % (leavingEdge[0], leavingEdge[1]))
+ # print('Cycle: %s' % cycle)
+ # print('eps: %d' % eps)
+ # print(' Edge %11s%10s' % ('Flow', 'Red Cost'))
+ # for u, v, d in H.edges(data = True):
+ # flag = ''
+ # if (u, v) in artificialEdges:
+ # flag = '*'
+ # print('(%s, %s)%1s%10d%10d' % (u, v, flag, d.get('flow', 0),
+ # c[(u, v)]))
+ # print('Distances: %s' % y)
+
+
+ # If an artificial edge has positive flow, the initial problem was
+ # not feasible.
+ for u, v in artificialEdges:
+ if H[u][v]['flow'] != 0:
+ raise nx.NetworkXUnfeasible("No flow satisfying all demands.")
+ H.remove_edge(u, v)
+
+ for u in H.nodes():
+ if u not in G:
+ H.remove_node(u)
+
+ flowDict = _create_flow_dict(G, H)
+
+ return flowCost, flowDict
+
+
+def min_cost_flow_cost(G, demand = 'demand', capacity = 'capacity',
+ weight = 'weight'):
+ """Find the cost of a minimum cost flow satisfying all demands in digraph G.
+
+ G is a digraph with edge costs and capacities and in which nodes
+ have demand, i.e., they want to send or receive some amount of
+ flow. A negative demand means that the node wants to send flow, a
+ positive demand means that the node wants to receive flow. A flow on
+ the digraph G satisfies all demands if the net flow into each node
+ is equal to the demand of that node.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ DiGraph on which a minimum cost flow satisfying all demands is
+ to be found.
+
+ demand: string
+ Nodes of the graph G are expected to have an attribute demand
+ that indicates how much flow a node wants to send (negative
+ demand) or receive (positive demand). Note that the sum of the
+ demands should be 0, otherwise the problem is not feasible. If
+ this attribute is not present, a node is considered to have 0
+ demand. Default value: 'demand'.
+
+ capacity: string
+ Edges of the graph G are expected to have an attribute capacity
+ that indicates how much flow the edge can support. If this
+ attribute is not present, the edge is considered to have
+ infinite capacity. Default value: 'capacity'.
+
+ weight: string
+ Edges of the graph G are expected to have an attribute weight
+ that indicates the cost incurred by sending one unit of flow on
+ that edge. If not present, the weight is considered to be 0.
+ Default value: 'weight'.
+
+ Returns
+ -------
+ flowCost: integer, float
+ Cost of a minimum cost flow satisfying all demands.
+
+ Raises
+ ------
+ NetworkXError
+ This exception is raised if the input graph is not directed or
+ not connected.
+
+ NetworkXUnfeasible
+ This exception is raised in the following situations:
+ * The sum of the demands is not zero. Then, there is no
+ flow satisfying all demands.
+ * There is no flow satisfying all demands.
+
+ NetworkXUnbounded
+ This exception is raised if the digraph G has a cycle of
+ negative cost and infinite capacity. Then, the cost of a flow
+ satisfying all demands is unbounded below.
+
+ See also
+ --------
+ cost_of_flow, max_flow_min_cost, min_cost_flow, network_simplex
+
+ Examples
+ --------
+ A simple example of a min cost flow problem.
+
+ >>> import networkx as nx
+ >>> G = nx.DiGraph()
+ >>> G.add_node('a', demand = -5)
+ >>> G.add_node('d', demand = 5)
+ >>> G.add_edge('a', 'b', weight = 3, capacity = 4)
+ >>> G.add_edge('a', 'c', weight = 6, capacity = 10)
+ >>> G.add_edge('b', 'd', weight = 1, capacity = 9)
+ >>> G.add_edge('c', 'd', weight = 2, capacity = 5)
+ >>> flowCost = nx.min_cost_flow_cost(G)
+ >>> flowCost
+ 24
+ """
+ return network_simplex(G, demand = demand, capacity = capacity,
+ weight = weight)[0]
+
+
+def min_cost_flow(G, demand = 'demand', capacity = 'capacity',
+ weight = 'weight'):
+ """Return a minimum cost flow satisfying all demands in digraph G.
+
+ G is a digraph with edge costs and capacities and in which nodes
+ have demand, i.e., they want to send or receive some amount of
+ flow. A negative demand means that the node wants to send flow, a
+ positive demand means that the node wants to receive flow. A flow on
+ the digraph G satisfies all demands if the net flow into each node
+ is equal to the demand of that node.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ DiGraph on which a minimum cost flow satisfying all demands is
+ to be found.
+
+ demand: string
+ Nodes of the graph G are expected to have an attribute demand
+ that indicates how much flow a node wants to send (negative
+ demand) or receive (positive demand). Note that the sum of the
+ demands should be 0, otherwise the problem is not feasible. If
+ this attribute is not present, a node is considered to have 0
+ demand. Default value: 'demand'.
+
+ capacity: string
+ Edges of the graph G are expected to have an attribute capacity
+ that indicates how much flow the edge can support. If this
+ attribute is not present, the edge is considered to have
+ infinite capacity. Default value: 'capacity'.
+
+ weight: string
+ Edges of the graph G are expected to have an attribute weight
+ that indicates the cost incurred by sending one unit of flow on
+ that edge. If not present, the weight is considered to be 0.
+ Default value: 'weight'.
+
+ Returns
+ -------
+ flowDict: dictionary
+ Dictionary of dictionaries keyed by nodes such that
+ flowDict[u][v] is the flow on edge (u, v).
+
+ Raises
+ ------
+ NetworkXError
+ This exception is raised if the input graph is not directed or
+ not connected.
+
+ NetworkXUnfeasible
+ This exception is raised in the following situations:
+ * The sum of the demands is not zero. Then, there is no
+ flow satisfying all demands.
+ * There is no flow satisfying all demands.
+
+ NetworkXUnbounded
+ This exception is raised if the digraph G has a cycle of
+ negative cost and infinite capacity. Then, the cost of a flow
+ satisfying all demands is unbounded below.
+
+ See also
+ --------
+ cost_of_flow, max_flow_min_cost, min_cost_flow_cost, network_simplex
+
+ Examples
+ --------
+ A simple example of a min cost flow problem.
+
+ >>> import networkx as nx
+ >>> G = nx.DiGraph()
+ >>> G.add_node('a', demand = -5)
+ >>> G.add_node('d', demand = 5)
+ >>> G.add_edge('a', 'b', weight = 3, capacity = 4)
+ >>> G.add_edge('a', 'c', weight = 6, capacity = 10)
+ >>> G.add_edge('b', 'd', weight = 1, capacity = 9)
+ >>> G.add_edge('c', 'd', weight = 2, capacity = 5)
+ >>> flowDict = nx.min_cost_flow(G)
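+
+ The returned dictionary mirrors the one shown for network_simplex:
+
+ >>> flowDict # doctest: +SKIP
+ {'a': {'c': 1, 'b': 4}, 'c': {'d': 1}, 'b': {'d': 4}, 'd': {}}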
+ """
+ return network_simplex(G, demand = demand, capacity = capacity,
+ weight = weight)[1]
+
+
+def cost_of_flow(G, flowDict, weight = 'weight'):
+ """Compute the cost of the flow given by flowDict on graph G.
+
+ Note that this function does not check for the validity of the
+ flow flowDict. This function will fail if the graph G and the
+ flow don't have the same edge set.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ DiGraph over which the flow given by flowDict is defined.
+
+ flowDict: dictionary
+ Dictionary of dictionaries keyed by nodes such that
+ flowDict[u][v] is the flow on edge (u, v).
+
+ weight: string
+ Edges of the graph G are expected to have an attribute weight
+ that indicates the cost incurred by sending one unit of flow on
+ that edge. If not present, the weight is considered to be 0.
+ Default value: 'weight'.
+
+ Returns
+ -------
+ cost: Integer, float
+ The total cost of the flow. This is given by the sum over all
+ edges of the product of the edge's flow and the edge's weight.
+
+ See also
+ --------
+ max_flow_min_cost, min_cost_flow, min_cost_flow_cost, network_simplex
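+
+ Examples
+ --------
+ Reusing the simple min cost flow example shown for network_simplex:
+
+ >>> import networkx as nx
+ >>> G = nx.DiGraph()
+ >>> G.add_node('a', demand = -5)
+ >>> G.add_node('d', demand = 5)
+ >>> G.add_edge('a', 'b', weight = 3, capacity = 4)
+ >>> G.add_edge('a', 'c', weight = 6, capacity = 10)
+ >>> G.add_edge('b', 'd', weight = 1, capacity = 9)
+ >>> G.add_edge('c', 'd', weight = 2, capacity = 5)
+ >>> flowDict = nx.min_cost_flow(G)
+ >>> nx.cost_of_flow(G, flowDict)
+ 24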
+ """
+ return sum((flowDict[u][v] * d.get(weight, 0)
+ for u, v, d in G.edges_iter(data = True)))
+
+
+def max_flow_min_cost(G, s, t, capacity = 'capacity', weight = 'weight'):
+ """Return a maximum (s, t)-flow of minimum cost.
+
+ G is a digraph with edge costs and capacities. There is a source
+ node s and a sink node t. This function finds a maximum flow from
+ s to t whose total cost is minimized.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ DiGraph on which a maximum flow of minimum cost from s to t is
+ to be found.
+
+ s: node label
+ Source of the flow.
+
+ t: node label
+ Destination of the flow.
+
+ capacity: string
+ Edges of the graph G are expected to have an attribute capacity
+ that indicates how much flow the edge can support. If this
+ attribute is not present, the edge is considered to have
+ infinite capacity. Default value: 'capacity'.
+
+ weight: string
+ Edges of the graph G are expected to have an attribute weight
+ that indicates the cost incurred by sending one unit of flow on
+ that edge. If not present, the weight is considered to be 0.
+ Default value: 'weight'.
+
+ Returns
+ -------
+ flowDict: dictionary
+ Dictionary of dictionaries keyed by nodes such that
+ flowDict[u][v] is the flow on edge (u, v).
+
+ Raises
+ ------
+ NetworkXError
+ This exception is raised if the input graph is not directed or
+ not connected.
+
+ NetworkXUnbounded
+ This exception is raised if there is an infinite capacity path
+ from s to t in G. In this case there is no maximum flow. This
+ exception is also raised if the digraph G has a cycle of
+ negative cost and infinite capacity. Then, the cost of a flow
+ is unbounded below.
+
+ See also
+ --------
+ cost_of_flow, ford_fulkerson, min_cost_flow, min_cost_flow_cost,
+ network_simplex
+
+ Examples
+ --------
+ >>> G = nx.DiGraph()
+ >>> G.add_edges_from([(1, 2, {'capacity': 12, 'weight': 4}),
+ ... (1, 3, {'capacity': 20, 'weight': 6}),
+ ... (2, 3, {'capacity': 6, 'weight': -3}),
+ ... (2, 6, {'capacity': 14, 'weight': 1}),
+ ... (3, 4, {'weight': 9}),
+ ... (3, 5, {'capacity': 10, 'weight': 5}),
+ ... (4, 2, {'capacity': 19, 'weight': 13}),
+ ... (4, 5, {'capacity': 4, 'weight': 0}),
+ ... (5, 7, {'capacity': 28, 'weight': 2}),
+ ... (6, 5, {'capacity': 11, 'weight': 1}),
+ ... (6, 7, {'weight': 8}),
+ ... (7, 4, {'capacity': 6, 'weight': 6})])
+ >>> mincostFlow = nx.max_flow_min_cost(G, 1, 7)
+ >>> nx.cost_of_flow(G, mincostFlow)
+ 373
+ >>> maxFlow = nx.ford_fulkerson_flow(G, 1, 7)
+ >>> nx.cost_of_flow(G, maxFlow)
+ 428
+ >>> mincostFlowValue = (sum((mincostFlow[u][7] for u in G.predecessors(7)))
+ ... - sum((mincostFlow[7][v] for v in G.successors(7))))
+ >>> mincostFlowValue == nx.max_flow(G, 1, 7)
+ True
+
+
+ """
+ maxFlow = nx.max_flow(G, s, t, capacity = capacity)
+ H = nx.DiGraph(G)
+ H.add_node(s, demand = -maxFlow)
+ H.add_node(t, demand = maxFlow)
+ return min_cost_flow(H, capacity = capacity, weight = weight)
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/flow/tests/test_maxflow.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/flow/tests/test_maxflow.py
new file mode 100644
index 0000000..41393c2
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/flow/tests/test_maxflow.py
@@ -0,0 +1,273 @@
+# -*- coding: utf-8 -*-
+"""Max flow algorithm test suite.
+
+Run with nose: nosetests -v test_maxflow.py
+"""
+
+__author__ = """Loïc Séguin-C. <loicseguin@gmail.com>"""
+# Copyright (C) 2010 Loïc Séguin-C. <loicseguin@gmail.com>
+# All rights reserved.
+# BSD license.
+
+
+import networkx as nx
+from nose.tools import *
+
+def compare_flows(G, s, t, solnFlows, solnValue):
+ flowValue, flowDict = nx.ford_fulkerson(G, s, t)
+ assert_equal(flowValue, solnValue)
+ assert_equal(flowDict, solnFlows)
+ assert_equal(nx.min_cut(G, s, t), solnValue)
+ assert_equal(nx.max_flow(G, s, t), solnValue)
+ assert_equal(nx.ford_fulkerson_flow(G, s, t), solnFlows)
+
+
+class TestMaxflow:
+ def test_graph1(self):
+ # Trivial undirected graph
+ G = nx.Graph()
+ G.add_edge(1,2, capacity = 1.0)
+
+ solnFlows = {1: {2: 1.0},
+ 2: {1: 1.0}}
+
+ compare_flows(G, 1, 2, solnFlows, 1.0)
+
+ def test_graph2(self):
+ # A more complex undirected graph
+ # adapted from www.topcoder.com/tc?module=Statc&d1=tutorials&d2=maxFlow
+ G = nx.Graph()
+ G.add_edge('x','a', capacity = 3.0)
+ G.add_edge('x','b', capacity = 1.0)
+ G.add_edge('a','c', capacity = 3.0)
+ G.add_edge('b','c', capacity = 5.0)
+ G.add_edge('b','d', capacity = 4.0)
+ G.add_edge('d','e', capacity = 2.0)
+ G.add_edge('c','y', capacity = 2.0)
+ G.add_edge('e','y', capacity = 3.0)
+
+ H = {'x': {'a': 3, 'b': 1},
+ 'a': {'c': 3, 'x': 3},
+ 'b': {'c': 1, 'd': 2, 'x': 1},
+ 'c': {'a': 3, 'b': 1, 'y': 2},
+ 'd': {'b': 2, 'e': 2},
+ 'e': {'d': 2, 'y': 2},
+ 'y': {'c': 2, 'e': 2}}
+
+ compare_flows(G, 'x', 'y', H, 4.0)
+
+ def test_digraph1(self):
+ # The classic directed graph example
+ G = nx.DiGraph()
+ G.add_edge('a','b', capacity = 1000.0)
+ G.add_edge('a','c', capacity = 1000.0)
+ G.add_edge('b','c', capacity = 1.0)
+ G.add_edge('b','d', capacity = 1000.0)
+ G.add_edge('c','d', capacity = 1000.0)
+
+ H = {'a': {'b': 1000.0, 'c': 1000.0},
+ 'b': {'c': 0, 'd': 1000.0},
+ 'c': {'d': 1000.0},
+ 'd': {}}
+
+ compare_flows(G, 'a', 'd', H, 2000.0)
+
+ # An example in which some edges end up with zero flow.
+ G = nx.DiGraph()
+ G.add_edge('s', 'b', capacity = 2)
+ G.add_edge('s', 'c', capacity = 1)
+ G.add_edge('c', 'd', capacity = 1)
+ G.add_edge('d', 'a', capacity = 1)
+ G.add_edge('b', 'a', capacity = 2)
+ G.add_edge('a', 't', capacity = 2)
+
+ H = {'s': {'b': 2, 'c': 0},
+ 'c': {'d': 0},
+ 'd': {'a': 0},
+ 'b': {'a': 2},
+ 'a': {'t': 2},
+ 't': {}}
+
+ compare_flows(G, 's', 't', H, 2)
+
+ def test_digraph2(self):
+ # A directed graph example from Cormen et al.
+ G = nx.DiGraph()
+ G.add_edge('s','v1', capacity = 16.0)
+ G.add_edge('s','v2', capacity = 13.0)
+ G.add_edge('v1','v2', capacity = 10.0)
+ G.add_edge('v2','v1', capacity = 4.0)
+ G.add_edge('v1','v3', capacity = 12.0)
+ G.add_edge('v3','v2', capacity = 9.0)
+ G.add_edge('v2','v4', capacity = 14.0)
+ G.add_edge('v4','v3', capacity = 7.0)
+ G.add_edge('v3','t', capacity = 20.0)
+ G.add_edge('v4','t', capacity = 4.0)
+
+ H = {'s': {'v1': 12.0, 'v2': 11.0},
+ 'v2': {'v1': 0, 'v4': 11.0},
+ 'v1': {'v2': 0, 'v3': 12.0},
+ 'v3': {'v2': 0, 't': 19.0},
+ 'v4': {'v3': 7.0, 't': 4.0},
+ 't': {}}
+
+ compare_flows(G, 's', 't', H, 23.0)
+
+ def test_digraph3(self):
+ # A more complex directed graph
+ # from www.topcoder.com/tc?module=Statc&d1=tutorials&d2=maxFlow
+ G = nx.DiGraph()
+ G.add_edge('x','a', capacity = 3.0)
+ G.add_edge('x','b', capacity = 1.0)
+ G.add_edge('a','c', capacity = 3.0)
+ G.add_edge('b','c', capacity = 5.0)
+ G.add_edge('b','d', capacity = 4.0)
+ G.add_edge('d','e', capacity = 2.0)
+ G.add_edge('c','y', capacity = 2.0)
+ G.add_edge('e','y', capacity = 3.0)
+
+ H = {'x': {'a': 2.0, 'b': 1.0},
+ 'a': {'c': 2.0},
+ 'b': {'c': 0, 'd': 1.0},
+ 'c': {'y': 2.0},
+ 'd': {'e': 1.0},
+ 'e': {'y': 1.0},
+ 'y': {}}
+
+ compare_flows(G, 'x', 'y', H, 3.0)
+
+ def test_optional_capacity(self):
+ # Test optional capacity parameter.
+ G = nx.DiGraph()
+ G.add_edge('x','a', spam = 3.0)
+ G.add_edge('x','b', spam = 1.0)
+ G.add_edge('a','c', spam = 3.0)
+ G.add_edge('b','c', spam = 5.0)
+ G.add_edge('b','d', spam = 4.0)
+ G.add_edge('d','e', spam = 2.0)
+ G.add_edge('c','y', spam = 2.0)
+ G.add_edge('e','y', spam = 3.0)
+
+ solnFlows = {'x': {'a': 2.0, 'b': 1.0},
+ 'a': {'c': 2.0},
+ 'b': {'c': 0, 'd': 1.0},
+ 'c': {'y': 2.0},
+ 'd': {'e': 1.0},
+ 'e': {'y': 1.0},
+ 'y': {}}
+ solnValue = 3.0
+ s = 'x'
+ t = 'y'
+
+ flowValue, flowDict = nx.ford_fulkerson(G, s, t, capacity = 'spam')
+ assert_equal(flowValue, solnValue)
+ assert_equal(flowDict, solnFlows)
+ assert_equal(nx.min_cut(G, s, t, capacity = 'spam'), solnValue)
+ assert_equal(nx.max_flow(G, s, t, capacity = 'spam'), solnValue)
+ assert_equal(nx.ford_fulkerson_flow(G, s, t, capacity = 'spam'),
+ solnFlows)
+
+ def test_digraph_infcap_edges(self):
+ # DiGraph with infinite capacity edges
+ G = nx.DiGraph()
+ G.add_edge('s', 'a')
+ G.add_edge('s', 'b', capacity = 30)
+ G.add_edge('a', 'c', capacity = 25)
+ G.add_edge('b', 'c', capacity = 12)
+ G.add_edge('a', 't', capacity = 60)
+ G.add_edge('c', 't')
+
+ H = {'s': {'a': 85, 'b': 12},
+ 'a': {'c': 25, 't': 60},
+ 'b': {'c': 12},
+ 'c': {'t': 37},
+ 't': {}}
+
+ compare_flows(G, 's', 't', H, 97)
+
+ # DiGraph with infinite capacity digon
+ G = nx.DiGraph()
+ G.add_edge('s', 'a', capacity = 85)
+ G.add_edge('s', 'b', capacity = 30)
+ G.add_edge('a', 'c')
+ G.add_edge('c', 'a')
+ G.add_edge('b', 'c', capacity = 12)
+ G.add_edge('a', 't', capacity = 60)
+ G.add_edge('c', 't', capacity = 37)
+
+ H = {'s': {'a': 85, 'b': 12},
+ 'a': {'c': 25, 't': 60},
+ 'c': {'a': 0, 't': 37},
+ 'b': {'c': 12},
+ 't': {}}
+
+ compare_flows(G, 's', 't', H, 97)
+
+
+ def test_digraph_infcap_path(self):
+ # Graph with infinite capacity (s, t)-path
+ G = nx.DiGraph()
+ G.add_edge('s', 'a')
+ G.add_edge('s', 'b', capacity = 30)
+ G.add_edge('a', 'c')
+ G.add_edge('b', 'c', capacity = 12)
+ G.add_edge('a', 't', capacity = 60)
+ G.add_edge('c', 't')
+
+ assert_raises(nx.NetworkXUnbounded,
+ nx.ford_fulkerson, G, 's', 't')
+ assert_raises(nx.NetworkXUnbounded,
+ nx.max_flow, G, 's', 't')
+ assert_raises(nx.NetworkXUnbounded,
+ nx.ford_fulkerson_flow, G, 's', 't')
+ assert_raises(nx.NetworkXUnbounded,
+ nx.min_cut, G, 's', 't')
+
+ def test_graph_infcap_edges(self):
+ # Undirected graph with infinite capacity edges
+ G = nx.Graph()
+ G.add_edge('s', 'a')
+ G.add_edge('s', 'b', capacity = 30)
+ G.add_edge('a', 'c', capacity = 25)
+ G.add_edge('b', 'c', capacity = 12)
+ G.add_edge('a', 't', capacity = 60)
+ G.add_edge('c', 't')
+
+ H = {'s': {'a': 85, 'b': 12},
+ 'a': {'c': 25, 's': 85, 't': 60},
+ 'b': {'c': 12, 's': 12},
+ 'c': {'a': 25, 'b': 12, 't': 37},
+ 't': {'a': 60, 'c': 37}}
+
+ compare_flows(G, 's', 't', H, 97)
+
+ def test_digraph4(self):
+ # From ticket #429 by mfrasca.
+ G = nx.DiGraph()
+ G.add_edge('s', 'a', capacity = 2)
+ G.add_edge('s', 'b', capacity = 2)
+ G.add_edge('a', 'b', capacity = 5)
+ G.add_edge('a', 't', capacity = 1)
+ G.add_edge('b', 'a', capacity = 1)
+ G.add_edge('b', 't', capacity = 3)
+ flowSoln = {'a': {'b': 1, 't': 1},
+ 'b': {'a': 0, 't': 3},
+ 's': {'a': 2, 'b': 2},
+ 't': {}}
+ compare_flows(G, 's', 't', flowSoln, 4)
+
+
+ def test_disconnected(self):
+ G = nx.Graph()
+ G.add_weighted_edges_from([(0,1,1),(1,2,1),(2,3,1)],weight='capacity')
+ G.remove_node(1)
+ assert_equal(nx.max_flow(G,0,3),0)
+
+ def test_source_target_not_in_graph(self):
+ G = nx.Graph()
+ G.add_weighted_edges_from([(0,1,1),(1,2,1),(2,3,1)],weight='capacity')
+ G.remove_node(0)
+ assert_raises(nx.NetworkXError,nx.max_flow,G,0,3)
+ G.add_weighted_edges_from([(0,1,1),(1,2,1),(2,3,1)],weight='capacity')
+ G.remove_node(3)
+ assert_raises(nx.NetworkXError,nx.max_flow,G,0,3)
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/flow/tests/test_maxflow_large_graph.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/flow/tests/test_maxflow_large_graph.py
new file mode 100644
index 0000000..578e2df
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/flow/tests/test_maxflow_large_graph.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+"""Max flow algorithm test suite on large graphs.
+
+Run with nose: nosetests -v test_maxflow_large_graph.py
+"""
+
+__author__ = """Loïc Séguin-C. <loicseguin@gmail.com>"""
+# Copyright (C) 2010 Loïc Séguin-C. <loicseguin@gmail.com>
+# All rights reserved.
+# BSD license.
+
+
+import networkx as nx
+from nose.tools import *
+
+def gen_pyramid(N):
+ # This graph admits a flow of value 1 for which every arc is at
+ # capacity (except the arcs incident to the sink which have
+ # infinite capacity).
+ G = nx.DiGraph()
+
+ for i in range(N - 1):
+ cap = 1. / (i + 2)
+ for j in range(i + 1):
+ G.add_edge((i, j), (i + 1, j),
+ capacity = cap)
+ cap = 1. / (i + 1) - cap
+ G.add_edge((i, j), (i + 1, j + 1),
+ capacity = cap)
+ cap = 1. / (i + 2) - cap
+
+ for j in range(N):
+ G.add_edge((N - 1, j), 't')
+
+ return G
+
+
+class TestMaxflowLargeGraph:
+ def test_complete_graph(self):
+ N = 50
+ G = nx.complete_graph(N)
+ for (u, v) in G.edges():
+ G[u][v]['capacity'] = 5
+ assert_equal(nx.ford_fulkerson(G, 1, 2)[0], 5 * (N - 1))
+
+ def test_pyramid(self):
+ N = 10
+# N = 100 # this gives a graph with 5051 nodes
+ G = gen_pyramid(N)
+ assert_almost_equal(nx.ford_fulkerson(G, (0, 0), 't')[0], 1.)
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/flow/tests/test_mincost.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/flow/tests/test_mincost.py
new file mode 100644
index 0000000..72df2f0
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/flow/tests/test_mincost.py
@@ -0,0 +1,284 @@
+# -*- coding: utf-8 -*-
+
+import networkx as nx
+from nose.tools import assert_equal, assert_raises
+
+class TestNetworkSimplex:
+ def test_simple_digraph(self):
+ G = nx.DiGraph()
+ G.add_node('a', demand = -5)
+ G.add_node('d', demand = 5)
+ G.add_edge('a', 'b', weight = 3, capacity = 4)
+ G.add_edge('a', 'c', weight = 6, capacity = 10)
+ G.add_edge('b', 'd', weight = 1, capacity = 9)
+ G.add_edge('c', 'd', weight = 2, capacity = 5)
+ flowCost, H = nx.network_simplex(G)
+ soln = {'a': {'b': 4, 'c': 1},
+ 'b': {'d': 4},
+ 'c': {'d': 1},
+ 'd': {}}
+ assert_equal(flowCost, 24)
+ assert_equal(nx.min_cost_flow_cost(G), 24)
+ assert_equal(H, soln)
+ assert_equal(nx.min_cost_flow(G), soln)
+ assert_equal(nx.cost_of_flow(G, H), 24)
+
+ def test_negcycle_infcap(self):
+ G = nx.DiGraph()
+ G.add_node('s', demand = -5)
+ G.add_node('t', demand = 5)
+ G.add_edge('s', 'a', weight = 1, capacity = 3)
+ G.add_edge('a', 'b', weight = 3)
+ G.add_edge('c', 'a', weight = -6)
+ G.add_edge('b', 'd', weight = 1)
+ G.add_edge('d', 'c', weight = -2)
+ G.add_edge('d', 't', weight = 1, capacity = 3)
+ assert_raises(nx.NetworkXUnbounded, nx.network_simplex, G)
+
+ def test_sum_demands_not_zero(self):
+ G = nx.DiGraph()
+ G.add_node('s', demand = -5)
+ G.add_node('t', demand = 4)
+ G.add_edge('s', 'a', weight = 1, capacity = 3)
+ G.add_edge('a', 'b', weight = 3)
+ G.add_edge('a', 'c', weight = -6)
+ G.add_edge('b', 'd', weight = 1)
+ G.add_edge('c', 'd', weight = -2)
+ G.add_edge('d', 't', weight = 1, capacity = 3)
+ assert_raises(nx.NetworkXUnfeasible, nx.network_simplex, G)
+
+ def test_no_flow_satisfying_demands(self):
+ G = nx.DiGraph()
+ G.add_node('s', demand = -5)
+ G.add_node('t', demand = 5)
+ G.add_edge('s', 'a', weight = 1, capacity = 3)
+ G.add_edge('a', 'b', weight = 3)
+ G.add_edge('a', 'c', weight = -6)
+ G.add_edge('b', 'd', weight = 1)
+ G.add_edge('c', 'd', weight = -2)
+ G.add_edge('d', 't', weight = 1, capacity = 3)
+ assert_raises(nx.NetworkXUnfeasible, nx.network_simplex, G)
+
+ def test_transshipment(self):
+ G = nx.DiGraph()
+ G.add_node('a', demand = 1)
+ G.add_node('b', demand = -2)
+ G.add_node('c', demand = -2)
+ G.add_node('d', demand = 3)
+ G.add_node('e', demand = -4)
+ G.add_node('f', demand = -4)
+ G.add_node('g', demand = 3)
+ G.add_node('h', demand = 2)
+ G.add_node('r', demand = 3)
+ G.add_edge('a', 'c', weight = 3)
+ G.add_edge('r', 'a', weight = 2)
+ G.add_edge('b', 'a', weight = 9)
+ G.add_edge('r', 'c', weight = 0)
+ G.add_edge('b', 'r', weight = -6)
+ G.add_edge('c', 'd', weight = 5)
+ G.add_edge('e', 'r', weight = 4)
+ G.add_edge('e', 'f', weight = 3)
+ G.add_edge('h', 'b', weight = 4)
+ G.add_edge('f', 'd', weight = 7)
+ G.add_edge('f', 'h', weight = 12)
+ G.add_edge('g', 'd', weight = 12)
+ G.add_edge('f', 'g', weight = -1)
+ G.add_edge('h', 'g', weight = -10)
+ flowCost, H = nx.network_simplex(G)
+ soln = {'a': {'c': 0},
+ 'b': {'a': 0, 'r': 2},
+ 'c': {'d': 3},
+ 'd': {},
+ 'e': {'r': 3, 'f': 1},
+ 'f': {'d': 0, 'g': 3, 'h': 2},
+ 'g': {'d': 0},
+ 'h': {'b': 0, 'g': 0},
+ 'r': {'a': 1, 'c': 1}}
+ assert_equal(flowCost, 41)
+ assert_equal(nx.min_cost_flow_cost(G), 41)
+ assert_equal(H, soln)
+ assert_equal(nx.min_cost_flow(G), soln)
+ assert_equal(nx.cost_of_flow(G, H), 41)
+
+ def test_max_flow_min_cost(self):
+ G = nx.DiGraph()
+ G.add_edge('s', 'a', bandwidth = 6)
+ G.add_edge('s', 'c', bandwidth = 10, cost = 10)
+ G.add_edge('a', 'b', cost = 6)
+ G.add_edge('b', 'd', bandwidth = 8, cost = 7)
+ G.add_edge('c', 'd', cost = 10)
+ G.add_edge('d', 't', bandwidth = 5, cost = 5)
+ soln = {'s': {'a': 5, 'c': 0},
+ 'a': {'b': 5},
+ 'b': {'d': 5},
+ 'c': {'d': 0},
+ 'd': {'t': 5},
+ 't': {}}
+ flow = nx.max_flow_min_cost(G, 's', 't', capacity = 'bandwidth',
+ weight = 'cost')
+ assert_equal(flow, soln)
+ assert_equal(nx.cost_of_flow(G, flow, weight = 'cost'), 90)
+
+ def test_digraph1(self):
+ # From Bradley, S. P., Hax, A. C. and Magnanti, T. L. Applied
+ # Mathematical Programming. Addison-Wesley, 1977.
+ G = nx.DiGraph()
+ G.add_node(1, demand = -20)
+ G.add_node(4, demand = 5)
+ G.add_node(5, demand = 15)
+ G.add_edges_from([(1, 2, {'capacity': 15, 'weight': 4}),
+ (1, 3, {'capacity': 8, 'weight': 4}),
+ (2, 3, {'weight': 2}),
+ (2, 4, {'capacity': 4, 'weight': 2}),
+ (2, 5, {'capacity': 10, 'weight': 6}),
+ (3, 4, {'capacity': 15, 'weight': 1}),
+ (3, 5, {'capacity': 5, 'weight': 3}),
+ (4, 5, {'weight': 2}),
+ (5, 3, {'capacity': 4, 'weight': 1})])
+ flowCost, H = nx.network_simplex(G)
+ soln = {1: {2: 12, 3: 8},
+ 2: {3: 8, 4: 4, 5: 0},
+ 3: {4: 11, 5: 5},
+ 4: {5: 10},
+ 5: {3: 0}}
+ assert_equal(flowCost, 150)
+ assert_equal(nx.min_cost_flow_cost(G), 150)
+ assert_equal(H, soln)
+ assert_equal(nx.min_cost_flow(G), soln)
+ assert_equal(nx.cost_of_flow(G, H), 150)
+
+ def test_digraph2(self):
+ # Example from ticket #430 from mfrasca. Original source:
+ # http://www.cs.princeton.edu/courses/archive/spr03/cs226/lectures/mincost.4up.pdf, slide 11.
+ G = nx.DiGraph()
+ G.add_edge('s', 1, capacity=12)
+ G.add_edge('s', 2, capacity=6)
+ G.add_edge('s', 3, capacity=14)
+ G.add_edge(1, 2, capacity=11, weight=4)
+ G.add_edge(2, 3, capacity=9, weight=6)
+ G.add_edge(1, 4, capacity=5, weight=5)
+ G.add_edge(1, 5, capacity=2, weight=12)
+ G.add_edge(2, 5, capacity=4, weight=4)
+ G.add_edge(2, 6, capacity=2, weight=6)
+ G.add_edge(3, 6, capacity=31, weight=3)
+ G.add_edge(4, 5, capacity=18, weight=4)
+ G.add_edge(5, 6, capacity=9, weight=5)
+ G.add_edge(4, 't', capacity=3)
+ G.add_edge(5, 't', capacity=7)
+ G.add_edge(6, 't', capacity=22)
+ flow = nx.max_flow_min_cost(G, 's', 't')
+ soln = {1: {2: 6, 4: 5, 5: 1},
+ 2: {3: 6, 5: 4, 6: 2},
+ 3: {6: 20},
+ 4: {5: 2, 't': 3},
+ 5: {6: 0, 't': 7},
+ 6: {'t': 22},
+ 's': {1: 12, 2: 6, 3: 14},
+ 't': {}}
+ assert_equal(flow, soln)
+
+ def test_digraph3(self):
+ """Combinatorial Optimization: Algorithms and Complexity,
+ Papadimitriou Steiglitz at page 140 has an example, 7.1, but that
+ admits multiple solutions, so I alter it a bit. From ticket #430
+ by mfrasca."""
+
+ G = nx.DiGraph()
+ G.add_edge('s', 'a', {0: 2, 1: 4})
+ G.add_edge('s', 'b', {0: 2, 1: 1})
+ G.add_edge('a', 'b', {0: 5, 1: 2})
+ G.add_edge('a', 't', {0: 1, 1: 5})
+ G.add_edge('b', 'a', {0: 1, 1: 3})
+ G.add_edge('b', 't', {0: 3, 1: 2})
+
+ "PS.ex.7.1: testing main function"
+ sol = nx.max_flow_min_cost(G, 's', 't', capacity=0, weight=1)
+ flow = sum(v for v in sol['s'].values())
+ assert_equal(4, flow)
+ assert_equal(23, nx.cost_of_flow(G, sol, weight=1))
+ assert_equal(sol['s'], {'a': 2, 'b': 2})
+ assert_equal(sol['a'], {'b': 1, 't': 1})
+ assert_equal(sol['b'], {'a': 0, 't': 3})
+ assert_equal(sol['t'], {})
+
+ def test_zero_capacity_edges(self):
+ """Address issue raised in ticket #617 by arv."""
+ G = nx.DiGraph()
+ G.add_edges_from([(1, 2, {'capacity': 1, 'weight': 1}),
+ (1, 5, {'capacity': 1, 'weight': 1}),
+ (2, 3, {'capacity': 0, 'weight': 1}),
+ (2, 5, {'capacity': 1, 'weight': 1}),
+ (5, 3, {'capacity': 2, 'weight': 1}),
+ (5, 4, {'capacity': 0, 'weight': 1}),
+ (3, 4, {'capacity': 2, 'weight': 1})])
+ G.node[1]['demand'] = -1
+ G.node[2]['demand'] = -1
+ G.node[4]['demand'] = 2
+
+ flowCost, H = nx.network_simplex(G)
+ soln = {1: {2: 0, 5: 1},
+ 2: {3: 0, 5: 1},
+ 3: {4: 2},
+ 4: {},
+ 5: {3: 2, 4: 0}}
+ assert_equal(flowCost, 6)
+ assert_equal(nx.min_cost_flow_cost(G), 6)
+ assert_equal(H, soln)
+ assert_equal(nx.min_cost_flow(G), soln)
+ assert_equal(nx.cost_of_flow(G, H), 6)
+
+ def test_digon(self):
+ """Check if digons are handled properly. Taken from ticket
+ #618 by arv."""
+ nodes = [(1, {}),
+ (2, {'demand': -4}),
+ (3, {'demand': 4}),
+ ]
+ edges = [(1, 2, {'capacity': 3, 'weight': 600000}),
+ (2, 1, {'capacity': 2, 'weight': 0}),
+ (2, 3, {'capacity': 5, 'weight': 714285}),
+ (3, 2, {'capacity': 2, 'weight': 0}),
+ ]
+ G = nx.DiGraph(edges)
+ G.add_nodes_from(nodes)
+ flowCost, H = nx.network_simplex(G)
+ soln = {1: {2: 0},
+ 2: {1: 0, 3: 4},
+ 3: {2: 0}}
+ assert_equal(flowCost, 2857140)
+ assert_equal(nx.min_cost_flow_cost(G), 2857140)
+ assert_equal(H, soln)
+ assert_equal(nx.min_cost_flow(G), soln)
+ assert_equal(nx.cost_of_flow(G, H), 2857140)
+
+ def test_infinite_capacity_neg_digon(self):
+ """An infinite capacity negative cost digon results in an unbounded
+ instance."""
+ nodes = [(1, {}),
+ (2, {'demand': -4}),
+ (3, {'demand': 4}),
+ ]
+ edges = [(1, 2, {'weight': -600}),
+ (2, 1, {'weight': 0}),
+ (2, 3, {'capacity': 5, 'weight': 714285}),
+ (3, 2, {'capacity': 2, 'weight': 0}),
+ ]
+ G = nx.DiGraph(edges)
+ G.add_nodes_from(nodes)
+ assert_raises(nx.NetworkXUnbounded, nx.network_simplex, G)
+
+ def test_finite_capacity_neg_digon(self):
+ """The digon should receive the maximum amount of flow it can handle.
+ Taken from ticket #749 by @chuongdo."""
+ G = nx.DiGraph()
+ G.add_edge('a', 'b', capacity=1, weight=-1)
+ G.add_edge('b', 'a', capacity=1, weight=-1)
+ min_cost = -2
+ assert_equal(nx.min_cost_flow_cost(G), min_cost)
+
+ def test_multidigraph(self):
+ """Raise an exception for multidigraph."""
+ G = nx.MultiDiGraph()
+ G.add_weighted_edges_from([(1, 2, 1), (2, 3, 2)], weight='capacity')
+ assert_raises(nx.NetworkXError, nx.network_simplex, G)
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/graphical.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/graphical.py
new file mode 100644
index 0000000..5c82761
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/graphical.py
@@ -0,0 +1,405 @@
+# -*- coding: utf-8 -*-
+"""Test sequences for graphiness.
+"""
+# Copyright (C) 2004-2013 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+from collections import defaultdict
+import heapq
+import networkx as nx
+__author__ = "\n".join(['Aric Hagberg (hagberg@lanl.gov)',
+ 'Pieter Swart (swart@lanl.gov)',
+ 'Dan Schult (dschult@colgate.edu)'
+ 'Joel Miller (joel.c.miller.research@gmail.com)'
+ 'Ben Edwards'
+ 'Brian Cloteaux <brian.cloteaux@nist.gov>'])
+
+__all__ = ['is_graphical',
+ 'is_multigraphical',
+ 'is_pseudographical',
+ 'is_digraphical',
+ 'is_valid_degree_sequence_erdos_gallai',
+ 'is_valid_degree_sequence_havel_hakimi',
+ 'is_valid_degree_sequence', # deprecated
+ ]
+
+def is_graphical(sequence, method='eg'):
+ """Returns True if sequence is a valid degree sequence.
+
+ A degree sequence is valid if some graph can realize it.
+
+ Parameters
+ ----------
+ sequence : list or iterable container
+ A sequence of integer node degrees
+
+ method : "eg" | "hh"
+ The method used to validate the degree sequence.
+ "eg" corresponds to the Erdős-Gallai algorithm, and
+ "hh" to the Havel-Hakimi algorithm.
+
+ Returns
+ -------
+ valid : bool
+ True if the sequence is a valid degree sequence and False if not.
+
+ Examples
+ --------
+ >>> G = nx.path_graph(4)
+ >>> sequence = G.degree().values()
+ >>> nx.is_graphical(sequence)
+ True
+
+ References
+ ----------
+ Erdős-Gallai
+ [EG1960]_, [choudum1986]_
+
+ Havel-Hakimi
+ [havel1955]_, [hakimi1962]_, [CL1996]_
+ """
+ if method == 'eg':
+ valid = is_valid_degree_sequence_erdos_gallai(list(sequence))
+ elif method == 'hh':
+ valid = is_valid_degree_sequence_havel_hakimi(list(sequence))
+ else:
+ msg = "`method` must be 'eg' or 'hh'"
+ raise nx.NetworkXException(msg)
+ return valid
+
+is_valid_degree_sequence = is_graphical
+
+def _basic_graphical_tests(deg_sequence):
+ # Sort and perform some simple tests on the sequence
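+ # Returns (dmax, dmin, dsum, n, num_degs) where n counts the non-zero
+ # degrees, num_degs[d] counts occurrences of degree d, and
+ # NetworkXUnfeasible is raised as soon as the sequence cannot be
+ # graphical.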
+ if not nx.utils.is_list_of_ints(deg_sequence):
+ raise nx.NetworkXUnfeasible
+ p = len(deg_sequence)
+ num_degs = [0]*p
+ dmax, dmin, dsum, n = 0, p, 0, 0
+ for d in deg_sequence:
+ # Reject if degree is negative or larger than the sequence length
+ if d<0 or d>=p:
+ raise nx.NetworkXUnfeasible
+ # Process only the non-zero integers
+ elif d>0:
+ dmax, dmin, dsum, n = max(dmax,d), min(dmin,d), dsum+d, n+1
+ num_degs[d] += 1
+ # Reject sequence if it has odd sum or is oversaturated
+ if dsum%2 or dsum>n*(n-1):
+ raise nx.NetworkXUnfeasible
+ return dmax,dmin,dsum,n,num_degs
+
+def is_valid_degree_sequence_havel_hakimi(deg_sequence):
+ r"""Returns True if deg_sequence can be realized by a simple graph.
+
+ The validation proceeds using the Havel-Hakimi theorem.
+ Worst-case run time is O(s), where s is the sum of the sequence.
+
+ Parameters
+ ----------
+ deg_sequence : list
+ A list of integers where each element specifies the degree of a node
+ in a graph.
+
+ Returns
+ -------
+ valid : bool
+ True if deg_sequence is graphical and False if not.
+
+ Notes
+ -----
+ The ZZ condition says that for the sequence d if
+
+ .. math::
+ |d| \geq \frac{(\max(d) + \min(d) + 1)^2}{4\min(d)}
+
+ then d is graphical. This was shown in Theorem 6 in [1]_.
+
+ References
+ ----------
+ .. [1] I.E. Zverovich and V.E. Zverovich. "Contributions to the theory
+ of graphic sequences", Discrete Mathematics, 105, pp. 292-303 (1992).
+
+ [havel1955]_, [hakimi1962]_, [CL1996]_
+
+ """
+ try:
+ dmax,dmin,dsum,n,num_degs = _basic_graphical_tests(deg_sequence)
+ except nx.NetworkXUnfeasible:
+ return False
+ # Accept if sequence has no non-zero degrees or passes the ZZ condition
+ if n==0 or 4*dmin*n >= (dmax+dmin+1) * (dmax+dmin+1):
+ return True
+
+ modstubs = [0]*(dmax+1)
+ # Successively reduce degree sequence by removing the maximum degree
+ while n > 0:
+ # Retrieve the maximum degree in the sequence
+ while num_degs[dmax] == 0:
+ dmax -= 1
+ # If there are not enough stubs to connect to, then the sequence is
+ # not graphical
+ if dmax > n-1:
+ return False
+
+ # Remove largest stub in list
+ num_degs[dmax], n = num_degs[dmax]-1, n-1
+ # Reduce the next dmax largest stubs
+ mslen = 0
+ k = dmax
+ for i in range(dmax):
+ while num_degs[k] == 0:
+ k -= 1
+ num_degs[k], n = num_degs[k]-1, n-1
+ if k > 1:
+ modstubs[mslen] = k-1
+ mslen += 1
+ # Add back to the list any non-zero stubs that were removed
+ for i in range(mslen):
+ stub = modstubs[i]
+ num_degs[stub], n = num_degs[stub]+1, n+1
+ return True
+
+
+def is_valid_degree_sequence_erdos_gallai(deg_sequence):
+ r"""Returns True if deg_sequence can be realized by a simple graph.
+
+ The validation is done using the Erdős-Gallai theorem [EG1960]_.
+
+ Parameters
+ ----------
+ deg_sequence : list
+ A list of integers
+
+ Returns
+ -------
+ valid : bool
+ True if deg_sequence is graphical and False if not.
+
+ Notes
+ -----
+
+ This implementation uses an equivalent form of the Erdős-Gallai criterion.
+ Worst-case run time is O(n), where n is the length of the sequence.
+
+ Specifically, a sequence d is graphical if and only if the
+ sum of the sequence is even and for all strong indices k in the sequence,
+
+ .. math::
+
+ \sum_{i=1}^{k} d_i \leq k(k-1) + \sum_{j=k+1}^{n} \min(d_j,k)
+ = k(n-1) - ( k \sum_{j=0}^{k-1} n_j - \sum_{j=0}^{k-1} j n_j )
+
+ A strong index k is any index where `d_k \geq k` and the value `n_j` is the
+ number of occurrences of j in d. The maximal strong index is called the
+ Durfee index.
+
+ This particular rearrangement comes from the proof of Theorem 3 in [2]_.
+
+ The ZZ condition says that for the sequence d if
+
+ .. math::
+ |d| \geq \frac{(\max(d) + \min(d) + 1)^2}{4\min(d)}
+
+ then d is graphical. This was shown in Theorem 6 in [2]_.
+
+ References
+ ----------
+ .. [1] A. Tripathi and S. Vijay. "A note on a theorem of Erdős & Gallai",
+ Discrete Mathematics, 265, pp. 417-420 (2003).
+ .. [2] I.E. Zverovich and V.E. Zverovich. "Contributions to the theory
+ of graphic sequences", Discrete Mathematics, 105, pp. 292-303 (1992).
+
+ [EG1960]_, [choudum1986]_
+ """
+ try:
+ dmax,dmin,dsum,n,num_degs = _basic_graphical_tests(deg_sequence)
+ except nx.NetworkXUnfeasible:
+ return False
+ # Accept if sequence has no non-zero degrees or passes the ZZ condition
+ if n==0 or 4*dmin*n >= (dmax+dmin+1) * (dmax+dmin+1):
+ return True
+
+ # Perform the EG checks using the reformulation of Zverovich and Zverovich
+ k, sum_deg, sum_nj, sum_jnj = 0, 0, 0, 0
+ for dk in range(dmax, dmin-1, -1):
+ if dk < k+1: # Check if already past Durfee index
+ return True
+ if num_degs[dk] > 0:
+ run_size = num_degs[dk] # Process a run of identical-valued degrees
+ if dk < k+run_size: # Check if end of run is past Durfee index
+ run_size = dk-k # Adjust back to Durfee index
+ sum_deg += run_size * dk
+ for v in range(run_size):
+ sum_nj += num_degs[k+v]
+ sum_jnj += (k+v) * num_degs[k+v]
+ k += run_size
+ if sum_deg > k*(n-1) - k*sum_nj + sum_jnj:
+ return False
+ return True
+
+def is_multigraphical(sequence):
+ """Returns True if some multigraph can realize the sequence.
+
+ Parameters
+ ----------
+ sequence : list or iterable container
+ A sequence of integer node degrees
+
+ Returns
+ -------
+ valid : bool
+ True if the sequence is a multigraphic degree sequence and False if not.
+
+ Notes
+ -----
+ The worst-case run time is O(n) where n is the length of the sequence.
+
+ References
+ ----------
+ .. [1] S. L. Hakimi. "On the realizability of a set of integers as
+ degrees of the vertices of a linear graph", J. SIAM, 10, pp. 496-506
+ (1962).
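+
+ Examples
+ --------
+ The sequence [3, 3] is realizable by a triple edge between two nodes,
+ though not by any simple graph:
+
+ >>> nx.is_multigraphical([3, 3])
+ True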
+ """
+ deg_sequence = list(sequence)
+ if not nx.utils.is_list_of_ints(deg_sequence):
+ return False
+ dsum, dmax = 0, 0
+ for d in deg_sequence:
+ if d<0:
+ return False
+ dsum, dmax = dsum+d, max(dmax,d)
+ if dsum%2 or dsum<2*dmax:
+ return False
+ return True
+
+def is_pseudographical(sequence):
+ """Returns True if some pseudograph can realize the sequence.
+
+ Every nonnegative integer sequence with an even sum is pseudographical
+ (see [1]_).
+
+ Parameters
+ ----------
+ sequence : list or iterable container
+ A sequence of integer node degrees
+
+ Returns
+ -------
+ valid : bool
+ True if the sequence is a pseudographic degree sequence and False if not.
+
+ Notes
+ -----
+ The worst-case run time is O(n) where n is the length of the sequence.
+
+ References
+ ----------
+ .. [1] F. Boesch and F. Harary. "Line removal algorithms for graphs
+ and their degree lists", IEEE Trans. Circuits and Systems, CAS-23(12),
+ pp. 778-782 (1976).
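+
+ Examples
+ --------
+ >>> s = [1, 2, 3, 4]
+ >>> nx.is_pseudographical(s)
+ True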
+ """
+ s = list(sequence)
+ if not nx.utils.is_list_of_ints(s):
+ return False
+ return sum(s)%2 == 0 and min(s) >= 0
+
+def is_digraphical(in_sequence, out_sequence):
+ r"""Returns True if some directed graph can realize the in- and out-degree
+ sequences.
+
+ Parameters
+ ----------
+ in_sequence : list or iterable container
+ A sequence of integer node in-degrees
+
+ out_sequence : list or iterable container
+ A sequence of integer node out-degrees
+
+ Returns
+ -------
+ valid : bool
+ True if the in- and out-degree sequences are digraphic, False if not.
+
+ Notes
+ -----
+ This algorithm is from Kleitman and Wang [1]_.
+ The worst case runtime is O(s * log n) where s and n are the sum and length
+ of the sequences respectively.
+
+ References
+ ----------
+ .. [1] D.J. Kleitman and D.L. Wang
+ Algorithms for Constructing Graphs and Digraphs with Given Valences
+ and Factors, Discrete Mathematics, 6(1), pp. 79-88 (1973)
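+
+ Examples
+ --------
+ A directed 2-cycle gives each node one in-stub and one out-stub:
+
+ >>> nx.is_digraphical([1, 1], [1, 1])
+ True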
+ """
+ in_deg_sequence = list(in_sequence)
+ out_deg_sequence = list(out_sequence)
+ if not nx.utils.is_list_of_ints(in_deg_sequence):
+ return False
+ if not nx.utils.is_list_of_ints(out_deg_sequence):
+ return False
+ # Process the sequences and form two heaps to store degree pairs with
+ # either zero or non-zero out degrees
+ sumin, sumout, nin, nout = 0, 0, len(in_deg_sequence), len(out_deg_sequence)
+ maxn = max(nin, nout)
+ maxin = 0
+ if maxn==0:
+ return True
+ stubheap, zeroheap = [ ], [ ]
+ for n in range(maxn):
+ in_deg, out_deg = 0, 0
+ if n<nout:
+ out_deg = out_deg_sequence[n]
+ if n<nin:
+ in_deg = in_deg_sequence[n]
+ if in_deg<0 or out_deg<0:
+ return False
+ sumin, sumout, maxin = sumin+in_deg, sumout+out_deg, max(maxin, in_deg)
+ if in_deg > 0:
+ stubheap.append((-1*out_deg, -1*in_deg))
+ elif out_deg > 0:
+ zeroheap.append(-1*out_deg)
+ if sumin != sumout:
+ return False
+ heapq.heapify(stubheap)
+ heapq.heapify(zeroheap)
+
+ modstubs = [(0,0)]*(maxin+1)
+ # Successively reduce degree sequence by removing the maximum out degree
+ while stubheap:
+ # Take the first value in the sequence with non-zero in degree
+ (freeout, freein) = heapq.heappop( stubheap )
+ freein *= -1
+ if freein > len(stubheap)+len(zeroheap):
+ return False
+
+ # Attach out stubs to the nodes with the most in stubs
+ mslen = 0
+ for i in range(freein):
+ if zeroheap and (not stubheap or stubheap[0][0] > zeroheap[0]):
+ stubout = heapq.heappop(zeroheap)
+ stubin = 0
+ else:
+ (stubout, stubin) = heapq.heappop(stubheap)
+ if stubout == 0:
+ return False
+ # Keep the pair only if it still has unattached in or out stubs
+ if stubout+1<0 or stubin<0:
+ modstubs[mslen] = (stubout+1, stubin)
+ mslen += 1
+
+ # Add back the nodes to the heap that still have available stubs
+ for i in range(mslen):
+ stub = modstubs[i]
+ if stub[1] < 0:
+ heapq.heappush(stubheap, stub)
+ else:
+ heapq.heappush(zeroheap, stub[0])
+ if freeout<0:
+ heapq.heappush(zeroheap, freeout)
+ return True
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/hierarchy.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/hierarchy.py
new file mode 100644
index 0000000..c38337b
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/hierarchy.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+"""
+Flow Hierarchy.
+"""
+# Copyright (C) 2004-2011 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+import networkx as nx
+__authors__ = "\n".join(['Ben Edwards (bedwards@cs.unm.edu)'])
+__all__ = ['flow_hierarchy']
+
+def flow_hierarchy(G, weight=None):
+ """Returns the flow hierarchy of a directed network.
+
+ Flow hierarchy is defined as the fraction of edges not participating
+ in cycles in a directed graph [1]_.
+
+ Parameters
+ ----------
+ G : DiGraph or MultiDiGraph
+ A directed graph
+
+ weight : key, optional (default=None)
+ Attribute to use for edge weights. If None the weight defaults to 1.
+
+ Returns
+ -------
+ h : float
+ Flow hierarchy value
+
+ Notes
+ -----
+ The algorithm described in [1]_ computes the flow hierarchy through
+ exponentiation of the adjacency matrix. This function implements an
+ alternative approach that finds strongly connected components.
+ An edge is in a cycle if and only if it is in a strongly connected
+ component, which can be found in `O(m)` time using Tarjan's algorithm.
+
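+ Examples
+ --------
+ Three of the four edges below lie on the cycle 0 -> 1 -> 2 -> 0, so
+ the flow hierarchy is 1 - 3/4:
+
+ >>> G = nx.DiGraph([(0, 1), (1, 2), (2, 0), (2, 3)])
+ >>> nx.flow_hierarchy(G)
+ 0.25
+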
+ References
+ ----------
+ .. [1] Luo, J.; Magee, C.L. (2011),
+ Detecting evolving patterns of self-organizing networks by flow
+ hierarchy measurement, Complexity, Volume 16 Issue 6 53-61.
+ DOI: 10.1002/cplx.20368
+ http://web.mit.edu/~cmagee/www/documents/28-DetectingEvolvingPatterns_FlowHierarchy.pdf
+ """
+ if not G.is_directed():
+ raise nx.NetworkXError("G must be a digraph in flow_heirarchy")
+ scc = nx.strongly_connected_components(G)
+ return 1.-sum(G.subgraph(c).size(weight) for c in scc)/float(G.size(weight))
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isolate.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isolate.py
new file mode 100644
index 0000000..a14178b
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isolate.py
@@ -0,0 +1,77 @@
+# encoding: utf-8
+"""
+Functions for identifying isolate (degree zero) nodes.
+"""
+# Copyright (C) 2004-2011 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+import networkx as nx
+__author__ = """\n""".join(['Drew Conway <drew.conway@nyu.edu>',
+ 'Aric Hagberg <hagberg@lanl.gov>'])
+__all__=['is_isolate','isolates']
+
+def is_isolate(G,n):
+ """Determine of node n is an isolate (degree zero).
+
+ Parameters
+ ----------
+ G : graph
+ A networkx graph
+ n : node
+ A node in G
+
+ Returns
+ -------
+ isolate : bool
+ True if n has no neighbors, False otherwise.
+
+ Examples
+ --------
+ >>> G=nx.Graph()
+ >>> G.add_edge(1,2)
+ >>> G.add_node(3)
+ >>> nx.is_isolate(G,2)
+ False
+ >>> nx.is_isolate(G,3)
+ True
+ """
+ return G.degree(n)==0
+
+def isolates(G):
+ """Return list of isolates in the graph.
+
+ Isolates are nodes with no neighbors (degree zero).
+
+ Parameters
+ ----------
+ G : graph
+ A networkx graph
+
+ Returns
+ -------
+ isolates : list
+ List of isolate nodes.
+
+ Examples
+ --------
+ >>> G = nx.Graph()
+ >>> G.add_edge(1,2)
+ >>> G.add_node(3)
+ >>> nx.isolates(G)
+ [3]
+
+ To remove all isolates in the graph use
+
+ >>> G.remove_nodes_from(nx.isolates(G))
+ >>> G.nodes()
+ [1, 2]
+
+ For digraphs, isolates have zero in-degree and zero out-degree.
+
+ >>> G = nx.DiGraph([(0,1),(1,2)])
+ >>> G.add_node(3)
+ >>> nx.isolates(G)
+ [3]
+ """
+ return [n for (n,d) in G.degree_iter() if d==0]
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/__init__.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/__init__.py
new file mode 100644
index 0000000..7821bc2
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/__init__.py
@@ -0,0 +1,4 @@
+from networkx.algorithms.isomorphism.isomorph import *
+from networkx.algorithms.isomorphism.vf2userfunc import *
+from networkx.algorithms.isomorphism.matchhelpers import *
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/isomorph.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/isomorph.py
new file mode 100644
index 0000000..7de3308
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/isomorph.py
@@ -0,0 +1,227 @@
+"""
+Graph isomorphism functions.
+"""
+import networkx as nx
+from networkx.exception import NetworkXError
+__author__ = """\n""".join(['Aric Hagberg (hagberg@lanl.gov)',
+ 'Pieter Swart (swart@lanl.gov)',
+ 'Christopher Ellison <cellison@cse.ucdavis.edu>'])
+# Copyright (C) 2004-2011 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+__all__ = ['could_be_isomorphic',
+ 'fast_could_be_isomorphic',
+ 'faster_could_be_isomorphic',
+ 'is_isomorphic']
+
+def could_be_isomorphic(G1,G2):
+ """Returns False if graphs are definitely not isomorphic.
+ True does NOT guarantee isomorphism.
+
+ Parameters
+ ----------
+ G1, G2 : graphs
+ The two graphs G1 and G2 must be the same type.
+
+ Notes
+ -----
+ Checks for matching degree, triangle, and number of cliques sequences.
+ """
+
+ # Check global properties
+ if G1.order() != G2.order(): return False
+
+ # Check local properties
+ d1=G1.degree()
+ t1=nx.triangles(G1)
+ c1=nx.number_of_cliques(G1)
+ props1=[ [d1[v], t1[v], c1[v]] for v in d1 ]
+ props1.sort()
+
+ d2=G2.degree()
+ t2=nx.triangles(G2)
+ c2=nx.number_of_cliques(G2)
+ props2=[ [d2[v], t2[v], c2[v]] for v in d2 ]
+ props2.sort()
+
+ if props1 != props2:
+ return False
+
+ # OK...
+ return True
+
+graph_could_be_isomorphic=could_be_isomorphic
+
+def fast_could_be_isomorphic(G1,G2):
+ """Returns False if graphs are definitely not isomorphic.
+
+ True does NOT guarantee isomorphism.
+
+ Parameters
+ ----------
+ G1, G2 : graphs
+ The two graphs G1 and G2 must be the same type.
+
+ Notes
+ -----
+ Checks for matching degree and triangle sequences.
+ """
+ # Check global properties
+ if G1.order() != G2.order(): return False
+
+ # Check local properties
+ d1=G1.degree()
+ t1=nx.triangles(G1)
+ props1=[ [d1[v], t1[v]] for v in d1 ]
+ props1.sort()
+
+ d2=G2.degree()
+ t2=nx.triangles(G2)
+ props2=[ [d2[v], t2[v]] for v in d2 ]
+ props2.sort()
+
+ if props1 != props2: return False
+
+ # OK...
+ return True
+
+fast_graph_could_be_isomorphic=fast_could_be_isomorphic
+
+def faster_could_be_isomorphic(G1,G2):
+ """Returns False if graphs are definitely not isomorphic.
+
+ True does NOT guarantee isomorphism.
+
+ Parameters
+ ----------
+ G1, G2 : graphs
+ The two graphs G1 and G2 must be the same type.
+
+ Notes
+ -----
+ Checks for matching degree sequences.
+ """
+ # Check global properties
+ if G1.order() != G2.order(): return False
+
+ # Check local properties
+ d1=list(G1.degree().values())
+ d1.sort()
+ d2=list(G2.degree().values())
+ d2.sort()
+
+ if d1 != d2: return False
+
+ # OK...
+ return True
+
+faster_graph_could_be_isomorphic=faster_could_be_isomorphic
+
+def is_isomorphic(G1, G2, node_match=None, edge_match=None):
+ """Returns True if the graphs G1 and G2 are isomorphic and False otherwise.
+
+ Parameters
+ ----------
+ G1, G2: graphs
+ The two graphs G1 and G2 must be the same type.
+
+ node_match : callable
+ A function that returns True if node n1 in G1 and n2 in G2 should
+ be considered equal during the isomorphism test.
+ If node_match is not specified then node attributes are not considered.
+
+ The function will be called like
+
+ node_match(G1.node[n1], G2.node[n2]).
+
+ That is, the function will receive the node attribute dictionaries
+ for n1 and n2 as inputs.
+
+ edge_match : callable
+ A function that returns True if the edge attribute dictionary
+ for the pair of nodes (u1, v1) in G1 and (u2, v2) in G2 should
+ be considered equal during the isomorphism test. If edge_match is
+ not specified then edge attributes are not considered.
+
+ The function will be called like
+
+ edge_match(G1[u1][v1], G2[u2][v2]).
+
+ That is, the function will receive the edge attribute dictionaries
+ of the edges under consideration.
+
+ Notes
+ -----
+ Uses the vf2 algorithm [1]_.
+
+ Examples
+ --------
+ >>> import networkx.algorithms.isomorphism as iso
+
+ For digraphs G1 and G2, using 'weight' edge attribute (default: 1)
+
+ >>> G1 = nx.DiGraph()
+ >>> G2 = nx.DiGraph()
+ >>> G1.add_path([1,2,3,4],weight=1)
+ >>> G2.add_path([10,20,30,40],weight=2)
+ >>> em = iso.numerical_edge_match('weight', 1)
+ >>> nx.is_isomorphic(G1, G2) # no weights considered
+ True
+ >>> nx.is_isomorphic(G1, G2, edge_match=em) # match weights
+ False
+
+ For multidigraphs G1 and G2, using 'fill' node attribute (default: '')
+
+ >>> G1 = nx.MultiDiGraph()
+ >>> G2 = nx.MultiDiGraph()
+ >>> G1.add_nodes_from([1,2,3],fill='red')
+ >>> G2.add_nodes_from([10,20,30,40],fill='red')
+ >>> G1.add_path([1,2,3,4],weight=3, linewidth=2.5)
+ >>> G2.add_path([10,20,30,40],weight=3)
+ >>> nm = iso.categorical_node_match('fill', 'red')
+ >>> nx.is_isomorphic(G1, G2, node_match=nm)
+ True
+
+ For multidigraphs G1 and G2, using 'weight' edge attribute (default: 7)
+
+ >>> G1.add_edge(1,2, weight=7)
+ >>> G2.add_edge(10,20)
+ >>> em = iso.numerical_multiedge_match('weight', 7, rtol=1e-6)
+ >>> nx.is_isomorphic(G1, G2, edge_match=em)
+ True
+
+ For multigraphs G1 and G2, using 'weight' and 'linewidth' edge attributes
+ with default values 7 and 2.5. Also using 'fill' node attribute with
+ default value 'red'.
+
+ >>> em = iso.numerical_multiedge_match(['weight', 'linewidth'], [7, 2.5])
+ >>> nm = iso.categorical_node_match('fill', 'red')
+ >>> nx.is_isomorphic(G1, G2, edge_match=em, node_match=nm)
+ True
+
+ See Also
+ --------
+ numerical_node_match, numerical_edge_match, numerical_multiedge_match
+ categorical_node_match, categorical_edge_match, categorical_multiedge_match
+
+ References
+ ----------
+ .. [1] L. P. Cordella, P. Foggia, C. Sansone, M. Vento,
+ "An Improved Algorithm for Matching Large Graphs",
+ 3rd IAPR-TC15 Workshop on Graph-based Representations in
+ Pattern Recognition, Cuen, pp. 149-159, 2001.
+ http://amalfi.dis.unina.it/graph/db/papers/vf-algorithm.pdf
+ """
+ if G1.is_directed() and G2.is_directed():
+ GM = nx.algorithms.isomorphism.DiGraphMatcher
+ elif (not G1.is_directed()) and (not G2.is_directed()):
+ GM = nx.algorithms.isomorphism.GraphMatcher
+ else:
+ raise NetworkXError("Graphs G1 and G2 are not of the same type.")
+
+ gm = GM(G1, G2, node_match=node_match, edge_match=edge_match)
+
+ return gm.is_isomorphic()
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/isomorphvf2.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/isomorphvf2.py
new file mode 100644
index 0000000..1efe74d
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/isomorphvf2.py
@@ -0,0 +1,965 @@
+# -*- coding: utf-8 -*-
+"""
+*************
+VF2 Algorithm
+*************
+
+An implementation of the VF2 algorithm for graph isomorphism testing.
+
+The simplest interface to use this module is to call networkx.is_isomorphic().
+
+Introduction
+------------
+
+The GraphMatcher and DiGraphMatcher are responsible for matching
+graphs or directed graphs in a predetermined manner. This
+usually means a check for an isomorphism, though other checks
+are also possible. For example, a subgraph of one graph
+can be checked for isomorphism to a second graph.
+
+Matching is done via syntactic feasibility. It is also possible
+to check for semantic feasibility. Feasibility, then, is defined
+as the logical AND of the two functions.
+
+To include a semantic check, the (Di)GraphMatcher class should be
+subclassed, and the semantic_feasibility() function should be
+redefined. By default, the semantic feasibility function always
+returns True. The effect of this is that semantics are not
+considered in the matching of G1 and G2.
+
+Examples
+--------
+
+Suppose G1 and G2 are isomorphic graphs. Verification is as follows:
+
+>>> from networkx.algorithms import isomorphism
+>>> G1 = nx.path_graph(4)
+>>> G2 = nx.path_graph(4)
+>>> GM = isomorphism.GraphMatcher(G1,G2)
+>>> GM.is_isomorphic()
+True
+
+GM.mapping stores the isomorphism mapping from G1 to G2.
+
+>>> GM.mapping
+{0: 0, 1: 1, 2: 2, 3: 3}
+
+
+Suppose G1 and G2 are isomorphic directed graphs.
+Verification is as follows:
+
+>>> G1 = nx.path_graph(4, create_using=nx.DiGraph())
+>>> G2 = nx.path_graph(4, create_using=nx.DiGraph())
+>>> DiGM = isomorphism.DiGraphMatcher(G1,G2)
+>>> DiGM.is_isomorphic()
+True
+
+DiGM.mapping stores the isomorphism mapping from G1 to G2.
+
+>>> DiGM.mapping
+{0: 0, 1: 1, 2: 2, 3: 3}
+
+
+
+Subgraph Isomorphism
+--------------------
+Graph theory literature can be ambiguous about the meaning of the
+above statement, and we seek to clarify it now.
+
+In the VF2 literature, a mapping M is said to be a graph-subgraph
+isomorphism iff M is an isomorphism between G2 and a subgraph of G1.
+Thus, to say that G1 and G2 are graph-subgraph isomorphic is to say
+that a subgraph of G1 is isomorphic to G2.
+
+Other literature uses the phrase 'subgraph isomorphic' as in 'G1 does
+not have a subgraph isomorphic to G2'. Another use is as an adverb
+for 'isomorphic'. Thus, to say that G1 and G2 are subgraph isomorphic
+is to say that a subgraph of G1 is isomorphic to G2.
+
+Finally, the term 'subgraph' can have multiple meanings. In this
+context, 'subgraph' always means a 'node-induced subgraph'. Edge-induced
+subgraph isomorphisms are not directly supported, but one should be
+able to perform the check by making use of nx.line_graph(). For
+subgraphs which are not induced, the term 'monomorphism' is preferred
+over 'isomorphism'. Currently, it is not possible to check for
+monomorphisms.
+
+Let G=(N,E) be a graph with a set of nodes N and set of edges E.
+
+If G'=(N',E') is a subgraph, then:
+ N' is a subset of N
+ E' is a subset of E
+
+If G'=(N',E') is a node-induced subgraph, then:
+ N' is a subset of N
+ E' is the subset of edges in E relating nodes in N'
+
+If G'=(N',E') is an edge-induced subgraph, then:
+ N' is the subset of nodes in N related by edges in E'
+ E' is a subset of E
+
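+For example, in the path graph 0-1-2, the node set {0, 2} induces a
+subgraph with no edges, whereas the single edge (0, 1) induces the
+edge-induced subgraph on nodes {0, 1} with that one edge.
+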
+References
+----------
+[1] Luigi P. Cordella, Pasquale Foggia, Carlo Sansone, Mario Vento,
+ "A (Sub)Graph Isomorphism Algorithm for Matching Large Graphs",
+ IEEE Transactions on Pattern Analysis and Machine Intelligence,
+ vol. 26, no. 10, pp. 1367-1372, Oct., 2004.
+ http://ieeexplore.ieee.org/iel5/34/29305/01323804.pdf
+
+[2] L. P. Cordella, P. Foggia, C. Sansone, M. Vento, "An Improved
+ Algorithm for Matching Large Graphs", 3rd IAPR-TC15 Workshop
+ on Graph-based Representations in Pattern Recognition, Cuen,
+ pp. 149-159, 2001.
+ http://amalfi.dis.unina.it/graph/db/papers/vf-algorithm.pdf
+
+See Also
+--------
+syntactic_feasibility(), semantic_feasibility()
+
+Notes
+-----
+Modified to handle undirected graphs.
+Modified to handle multiple edges.
+
+
+In general, the subgraph isomorphism problem is NP-complete.
+
+
+
+"""
+
+# Copyright (C) 2007-2009 by the NetworkX maintainers
+# All rights reserved.
+# BSD license.
+
+# This work was originally coded by Christopher Ellison
+# as part of the Computational Mechanics Python (CMPy) project.
+# James P. Crutchfield, principal investigator.
+# Complexity Sciences Center and Physics Department, UC Davis.
+
+import sys
+import networkx as nx
+
+__all__ = ['GraphMatcher',
+ 'DiGraphMatcher']
+
+class GraphMatcher(object):
+ """Implementation of VF2 algorithm for matching undirected graphs.
+
+ Suitable for Graph and MultiGraph instances.
+ """
+ def __init__(self, G1, G2):
+ """Initialize GraphMatcher.
+
+ Parameters
+ ----------
+ G1,G2: NetworkX Graph or MultiGraph instances.
+ The two graphs to check for isomorphism.
+
+ Examples
+ --------
+ To create a GraphMatcher which checks for syntactic feasibility:
+
+ >>> from networkx.algorithms import isomorphism
+ >>> G1 = nx.path_graph(4)
+ >>> G2 = nx.path_graph(4)
+ >>> GM = isomorphism.GraphMatcher(G1,G2)
+ """
+ self.G1 = G1
+ self.G2 = G2
+ self.G1_nodes = set(G1.nodes())
+ self.G2_nodes = set(G2.nodes())
+
+ # Set recursion limit.
+ self.old_recursion_limit = sys.getrecursionlimit()
+ expected_max_recursion_level = len(self.G2)
+ if self.old_recursion_limit < 1.5 * expected_max_recursion_level:
+ # Give some breathing room.
+ sys.setrecursionlimit(int(1.5 * expected_max_recursion_level))
+
+ # Declare that we will be searching for a graph-graph isomorphism.
+ self.test = 'graph'
+
+ # Initialize state
+ self.initialize()
+
+ def reset_recursion_limit(self):
+ """Restores the recursion limit."""
+ ### TODO:
+ ### Currently, we use recursion and set the recursion level higher.
+ ### It would be nice to restore the level, but because the
+ ### (Di)GraphMatcher classes make use of cyclic references, garbage
+ ### collection will never happen when we define __del__() to
+ ### restore the recursion level. The result is a memory leak.
+ ### So for now, we do not automatically restore the recursion level,
+ ### and instead provide a method to do this manually. Eventually,
+ ### we should turn this into a non-recursive implementation.
+ sys.setrecursionlimit(self.old_recursion_limit)
+
+ def candidate_pairs_iter(self):
+ """Iterator over candidate pairs of nodes in G1 and G2."""
+
+ # All computations are done using the current state!
+
+ G1_nodes = self.G1_nodes
+ G2_nodes = self.G2_nodes
+
+ # First we compute the inout-terminal sets.
+ T1_inout = [node for node in G1_nodes if (node in self.inout_1) and (node not in self.core_1)]
+ T2_inout = [node for node in G2_nodes if (node in self.inout_2) and (node not in self.core_2)]
+
+ # If T1_inout and T2_inout are both nonempty.
+ # P(s) = T1_inout x {min T2_inout}
+ if T1_inout and T2_inout:
+ for node in T1_inout:
+ yield node, min(T2_inout)
+
+ else:
+ # If T1_inout and T2_inout were both empty....
+ # P(s) = (N_1 - M_1) x {min (N_2 - M_2)}
+ ##if not (T1_inout or T2_inout): # as suggested by [2], incorrect
+ if 1: # as inferred from [1], correct
+ # First we determine the candidate node for G2
+ other_node = min(G2_nodes - set(self.core_2))
+ for node in self.G1:
+ if node not in self.core_1:
+ yield node, other_node
+
+ # For all other cases, we don't have any candidate pairs.
+
+ def initialize(self):
+ """Reinitializes the state of the algorithm.
+
+ This method should be redefined if using something other than GMState.
+ If only subclassing GraphMatcher, a redefinition is not necessary.
+
+ """
+
+ # core_1[n] contains the index of the node paired with n, which is m,
+ # provided n is in the mapping.
+ # core_2[m] contains the index of the node paired with m, which is n,
+ # provided m is in the mapping.
+ self.core_1 = {}
+ self.core_2 = {}
+
+ # See the paper for definitions of M_x and T_x^{y}
+
+ # inout_1[n] is non-zero if n is in M_1 or in T_1^{inout}
+ # inout_2[m] is non-zero if m is in M_2 or in T_2^{inout}
+ #
+ # The value stored is the depth of the SSR tree when the node became
+ # part of the corresponding set.
+ self.inout_1 = {}
+ self.inout_2 = {}
+ # Practically, these sets simply store the nodes in the subgraph.
+
+ self.state = GMState(self)
+
+        # Provide a convenient way to access the isomorphism mapping.
+ self.mapping = self.core_1.copy()
+
+ def is_isomorphic(self):
+ """Returns True if G1 and G2 are isomorphic graphs."""
+
+ # Let's do two very quick checks!
+ # QUESTION: Should we call faster_graph_could_be_isomorphic(G1,G2)?
+ # For now, I just copy the code.
+
+ # Check global properties
+ if self.G1.order() != self.G2.order(): return False
+
+ # Check local properties
+ d1=sorted(self.G1.degree().values())
+ d2=sorted(self.G2.degree().values())
+ if d1 != d2: return False
+
+ try:
+ x = next(self.isomorphisms_iter())
+ return True
+ except StopIteration:
+ return False
+
+ def isomorphisms_iter(self):
+ """Generator over isomorphisms between G1 and G2."""
+ # Declare that we are looking for a graph-graph isomorphism.
+ self.test = 'graph'
+ self.initialize()
+ for mapping in self.match():
+ yield mapping
+
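+    # Editor's sketch: isomorphisms_iter() enumerates *every* mapping, not
+    # just one. Hypothetical session, assuming this class:
+    #
+    #   >>> GM = GraphMatcher(nx.path_graph(3), nx.path_graph(3))
+    #   >>> sorted(sorted(m.items()) for m in GM.isomorphisms_iter())
+    #   [[(0, 0), (1, 1), (2, 2)], [(0, 2), (1, 1), (2, 0)]]
+    #
+    # A path on three nodes has exactly two automorphisms: the identity
+    # and the end-for-end reversal.
+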
+ def match(self):
+ """Extends the isomorphism mapping.
+
+ This function is called recursively to determine if a complete
+ isomorphism can be found between G1 and G2. It cleans up the class
+ variables after each recursive call. If an isomorphism is found,
+ we yield the mapping.
+
+ """
+ if len(self.core_1) == len(self.G2):
+ # Save the final mapping, otherwise garbage collection deletes it.
+ self.mapping = self.core_1.copy()
+ # The mapping is complete.
+ yield self.mapping
+ else:
+ for G1_node, G2_node in self.candidate_pairs_iter():
+ if self.syntactic_feasibility(G1_node, G2_node):
+ if self.semantic_feasibility(G1_node, G2_node):
+ # Recursive call, adding the feasible state.
+ newstate = self.state.__class__(self, G1_node, G2_node)
+ for mapping in self.match():
+ yield mapping
+
+ # restore data structures
+ newstate.restore()
+
+ def semantic_feasibility(self, G1_node, G2_node):
+ """Returns True if adding (G1_node, G2_node) is symantically feasible.
+
+ The semantic feasibility function should return True if it is
+ acceptable to add the candidate pair (G1_node, G2_node) to the current
+ partial isomorphism mapping. The logic should focus on semantic
+ information contained in the edge data or a formalized node class.
+
+ By acceptable, we mean that the subsequent mapping can still become a
+ complete isomorphism mapping. Thus, if adding the candidate pair
+ definitely makes it so that the subsequent mapping cannot become a
+ complete isomorphism mapping, then this function must return False.
+
+ The default semantic feasibility function always returns True. The
+ effect is that semantics are not considered in the matching of G1
+ and G2.
+
+        The semantic checks might differ based on what type of test is
+ being performed. A keyword description of the test is stored in
+ self.test. Here is a quick description of the currently implemented
+ tests::
+
+ test='graph'
+ Indicates that the graph matcher is looking for a graph-graph
+ isomorphism.
+
+ test='subgraph'
+ Indicates that the graph matcher is looking for a subgraph-graph
+ isomorphism such that a subgraph of G1 is isomorphic to G2.
+
+ Any subclass which redefines semantic_feasibility() must maintain
+ the above form to keep the match() method functional. Implementations
+ should consider multigraphs.
+ """
+ return True
+
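+    # Editor's sketch: a minimal subclass adding a semantic check. The
+    # 'color' attribute is illustrative, not assumed by the algorithm:
+    #
+    #   class ColorMatcher(GraphMatcher):
+    #       def semantic_feasibility(self, G1_node, G2_node):
+    #           # Nodes may only be paired if their colors agree.
+    #           return (self.G1.node[G1_node].get('color') ==
+    #                   self.G2.node[G2_node].get('color'))
+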
+ def subgraph_is_isomorphic(self):
+ """Returns True if a subgraph of G1 is isomorphic to G2."""
+ try:
+ x = next(self.subgraph_isomorphisms_iter())
+ return True
+ except StopIteration:
+ return False
+
+# subgraph_is_isomorphic.__doc__ += "\n" + subgraph.replace('\n','\n'+indent)
+
+ def subgraph_isomorphisms_iter(self):
+ """Generator over isomorphisms between a subgraph of G1 and G2."""
+ # Declare that we are looking for graph-subgraph isomorphism.
+ self.test = 'subgraph'
+ self.initialize()
+ for mapping in self.match():
+ yield mapping
+
+# subgraph_isomorphisms_iter.__doc__ += "\n" + subgraph.replace('\n','\n'+indent)
+
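+    # Editor's sketch of the subgraph test (node-induced, per the module
+    # docstring): is some subgraph of G1 isomorphic to G2? Hypothetical
+    # session:
+    #
+    #   >>> GM = GraphMatcher(nx.path_graph(4), nx.path_graph(2))
+    #   >>> GM.subgraph_is_isomorphic()
+    #   True
+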
+ def syntactic_feasibility(self, G1_node, G2_node):
+ """Returns True if adding (G1_node, G2_node) is syntactically feasible.
+
+        This function returns True if adding the candidate pair to the
+        current partial isomorphism mapping is allowable. The addition
+ is allowable if the inclusion of the candidate pair does not make it
+ impossible for an isomorphism to be found.
+ """
+
+        # The VF2 algorithm was designed to work with graphs having, at most,
+        # one edge connecting any two nodes. This is not the case when
+        # dealing with MultiGraphs.
+        #
+        # Basically, when we test the look-ahead rules R_neighbor, we will
+        # make sure that the number of edges is checked. We also add
+        # an R_self check to verify that the number of selfloops is acceptable.
+ #
+ # Users might be comparing Graph instances with MultiGraph instances.
+ # So the generic GraphMatcher class must work with MultiGraphs.
+ # Care must be taken since the value in the innermost dictionary is a
+ # singlet for Graph instances. For MultiGraphs, the value in the
+ # innermost dictionary is a list.
+
+
+ ###
+ ### Test at each step to get a return value as soon as possible.
+ ###
+
+ ### Look ahead 0
+
+ # R_self
+
+ # The number of selfloops for G1_node must equal the number of
+ # self-loops for G2_node. Without this check, we would fail on
+ # R_neighbor at the next recursion level. But it is good to prune the
+ # search tree now.
+ if self.G1.number_of_edges(G1_node,G1_node) != self.G2.number_of_edges(G2_node,G2_node):
+ return False
+
+
+ # R_neighbor
+
+ # For each neighbor n' of n in the partial mapping, the corresponding
+ # node m' is a neighbor of m, and vice versa. Also, the number of
+ # edges must be equal.
+ for neighbor in self.G1[G1_node]:
+ if neighbor in self.core_1:
+ if not (self.core_1[neighbor] in self.G2[G2_node]):
+ return False
+ elif self.G1.number_of_edges(neighbor, G1_node) != self.G2.number_of_edges(self.core_1[neighbor], G2_node):
+ return False
+ for neighbor in self.G2[G2_node]:
+ if neighbor in self.core_2:
+ if not (self.core_2[neighbor] in self.G1[G1_node]):
+ return False
+ elif self.G1.number_of_edges(self.core_2[neighbor], G1_node) != self.G2.number_of_edges(neighbor, G2_node):
+ return False
+
+ ### Look ahead 1
+
+ # R_terminout
+ # The number of neighbors of n that are in T_1^{inout} is equal to the
+ # number of neighbors of m that are in T_2^{inout}, and vice versa.
+ num1 = 0
+ for neighbor in self.G1[G1_node]:
+ if (neighbor in self.inout_1) and (neighbor not in self.core_1):
+ num1 += 1
+ num2 = 0
+ for neighbor in self.G2[G2_node]:
+ if (neighbor in self.inout_2) and (neighbor not in self.core_2):
+ num2 += 1
+ if self.test == 'graph':
+ if not (num1 == num2):
+ return False
+ else: # self.test == 'subgraph'
+ if not (num1 >= num2):
+ return False
+
+
+ ### Look ahead 2
+
+ # R_new
+
+ # The number of neighbors of n that are neither in the core_1 nor
+ # T_1^{inout} is equal to the number of neighbors of m
+ # that are neither in core_2 nor T_2^{inout}.
+ num1 = 0
+ for neighbor in self.G1[G1_node]:
+ if neighbor not in self.inout_1:
+ num1 += 1
+ num2 = 0
+ for neighbor in self.G2[G2_node]:
+ if neighbor not in self.inout_2:
+ num2 += 1
+ if self.test == 'graph':
+ if not (num1 == num2):
+ return False
+ else: # self.test == 'subgraph'
+ if not (num1 >= num2):
+ return False
+
+ # Otherwise, this node pair is syntactically feasible!
+ return True
+
+
+class DiGraphMatcher(GraphMatcher):
+ """Implementation of VF2 algorithm for matching directed graphs.
+
+ Suitable for DiGraph and MultiDiGraph instances.
+ """
+# __doc__ += "Notes\n%s-----" % (indent,) + sources.replace('\n','\n'+indent)
+
+ def __init__(self, G1, G2):
+ """Initialize DiGraphMatcher.
+
+        G1 and G2 should be nx.DiGraph or nx.MultiDiGraph instances.
+
+ Examples
+ --------
+        To create a DiGraphMatcher which checks for syntactic feasibility:
+
+ >>> from networkx.algorithms import isomorphism
+ >>> G1 = nx.DiGraph(nx.path_graph(4, create_using=nx.DiGraph()))
+ >>> G2 = nx.DiGraph(nx.path_graph(4, create_using=nx.DiGraph()))
+ >>> DiGM = isomorphism.DiGraphMatcher(G1,G2)
+ """
+ super(DiGraphMatcher, self).__init__(G1, G2)
+
+ def candidate_pairs_iter(self):
+ """Iterator over candidate pairs of nodes in G1 and G2."""
+
+ # All computations are done using the current state!
+
+ G1_nodes = self.G1_nodes
+ G2_nodes = self.G2_nodes
+
+ # First we compute the out-terminal sets.
+ T1_out = [node for node in G1_nodes if (node in self.out_1) and (node not in self.core_1)]
+ T2_out = [node for node in G2_nodes if (node in self.out_2) and (node not in self.core_2)]
+
+ # If T1_out and T2_out are both nonempty.
+ # P(s) = T1_out x {min T2_out}
+ if T1_out and T2_out:
+ node_2 = min(T2_out)
+ for node_1 in T1_out:
+ yield node_1, node_2
+
+ # If T1_out and T2_out were both empty....
+ # We compute the in-terminal sets.
+
+ ##elif not (T1_out or T2_out): # as suggested by [2], incorrect
+ else: # as suggested by [1], correct
+ T1_in = [node for node in G1_nodes if (node in self.in_1) and (node not in self.core_1)]
+ T2_in = [node for node in G2_nodes if (node in self.in_2) and (node not in self.core_2)]
+
+ # If T1_in and T2_in are both nonempty.
+            # P(s) = T1_in x {min T2_in}
+ if T1_in and T2_in:
+ node_2 = min(T2_in)
+ for node_1 in T1_in:
+ yield node_1, node_2
+
+ # If all terminal sets are empty...
+ # P(s) = (N_1 - M_1) x {min (N_2 - M_2)}
+
+ ##elif not (T1_in or T2_in): # as suggested by [2], incorrect
+ else: # as inferred from [1], correct
+ node_2 = min(G2_nodes - set(self.core_2))
+ for node_1 in G1_nodes:
+ if node_1 not in self.core_1:
+ yield node_1, node_2
+
+ # For all other cases, we don't have any candidate pairs.
+
+ def initialize(self):
+ """Reinitializes the state of the algorithm.
+
+ This method should be redefined if using something other than DiGMState.
+        If only subclassing DiGraphMatcher, a redefinition is not necessary.
+ """
+
+ # core_1[n] contains the index of the node paired with n, which is m,
+ # provided n is in the mapping.
+ # core_2[m] contains the index of the node paired with m, which is n,
+ # provided m is in the mapping.
+ self.core_1 = {}
+ self.core_2 = {}
+
+ # See the paper for definitions of M_x and T_x^{y}
+
+ # in_1[n] is non-zero if n is in M_1 or in T_1^{in}
+ # out_1[n] is non-zero if n is in M_1 or in T_1^{out}
+ #
+ # in_2[m] is non-zero if m is in M_2 or in T_2^{in}
+ # out_2[m] is non-zero if m is in M_2 or in T_2^{out}
+ #
+ # The value stored is the depth of the search tree when the node became
+ # part of the corresponding set.
+ self.in_1 = {}
+ self.in_2 = {}
+ self.out_1 = {}
+ self.out_2 = {}
+
+ self.state = DiGMState(self)
+
+        # Provide a convenient way to access the isomorphism mapping.
+ self.mapping = self.core_1.copy()
+
+ def syntactic_feasibility(self, G1_node, G2_node):
+ """Returns True if adding (G1_node, G2_node) is syntactically feasible.
+
+        This function returns True if adding the candidate pair to the
+        current partial isomorphism mapping is allowable. The addition
+ is allowable if the inclusion of the candidate pair does not make it
+ impossible for an isomorphism to be found.
+ """
+
+        # The VF2 algorithm was designed to work with graphs having, at most,
+        # one edge connecting any two nodes. This is not the case when
+        # dealing with MultiDiGraphs.
+        #
+        # Basically, when we test the look-ahead rules R_pred and R_succ, we
+        # will make sure that the number of edges is checked. We also add
+        # an R_self check to verify that the number of selfloops is acceptable.
+
+ # Users might be comparing DiGraph instances with MultiDiGraph
+ # instances. So the generic DiGraphMatcher class must work with
+ # MultiDiGraphs. Care must be taken since the value in the innermost
+ # dictionary is a singlet for DiGraph instances. For MultiDiGraphs,
+ # the value in the innermost dictionary is a list.
+
+
+ ###
+ ### Test at each step to get a return value as soon as possible.
+ ###
+
+
+
+ ### Look ahead 0
+
+ # R_self
+
+ # The number of selfloops for G1_node must equal the number of
+ # self-loops for G2_node. Without this check, we would fail on R_pred
+ # at the next recursion level. This should prune the tree even further.
+
+ if self.G1.number_of_edges(G1_node,G1_node) != self.G2.number_of_edges(G2_node,G2_node):
+ return False
+
+
+ # R_pred
+
+ # For each predecessor n' of n in the partial mapping, the
+ # corresponding node m' is a predecessor of m, and vice versa. Also,
+ # the number of edges must be equal
+ for predecessor in self.G1.pred[G1_node]:
+ if predecessor in self.core_1:
+ if not (self.core_1[predecessor] in self.G2.pred[G2_node]):
+ return False
+ elif self.G1.number_of_edges(predecessor, G1_node) != self.G2.number_of_edges(self.core_1[predecessor], G2_node):
+ return False
+
+ for predecessor in self.G2.pred[G2_node]:
+ if predecessor in self.core_2:
+ if not (self.core_2[predecessor] in self.G1.pred[G1_node]):
+ return False
+ elif self.G1.number_of_edges(self.core_2[predecessor], G1_node) != self.G2.number_of_edges(predecessor, G2_node):
+ return False
+
+
+ # R_succ
+
+ # For each successor n' of n in the partial mapping, the corresponding
+ # node m' is a successor of m, and vice versa. Also, the number of
+ # edges must be equal.
+ for successor in self.G1[G1_node]:
+ if successor in self.core_1:
+ if not (self.core_1[successor] in self.G2[G2_node]):
+ return False
+ elif self.G1.number_of_edges(G1_node, successor) != self.G2.number_of_edges(G2_node, self.core_1[successor]):
+ return False
+
+ for successor in self.G2[G2_node]:
+ if successor in self.core_2:
+ if not (self.core_2[successor] in self.G1[G1_node]):
+ return False
+ elif self.G1.number_of_edges(G1_node, self.core_2[successor]) != self.G2.number_of_edges(G2_node, successor):
+ return False
+
+
+ ### Look ahead 1
+
+ # R_termin
+ # The number of predecessors of n that are in T_1^{in} is equal to the
+ # number of predecessors of m that are in T_2^{in}.
+ num1 = 0
+ for predecessor in self.G1.pred[G1_node]:
+ if (predecessor in self.in_1) and (predecessor not in self.core_1):
+ num1 += 1
+ num2 = 0
+ for predecessor in self.G2.pred[G2_node]:
+ if (predecessor in self.in_2) and (predecessor not in self.core_2):
+ num2 += 1
+ if self.test == 'graph':
+ if not (num1 == num2):
+ return False
+ else: # self.test == 'subgraph'
+ if not (num1 >= num2):
+ return False
+
+ # The number of successors of n that are in T_1^{in} is equal to the
+ # number of successors of m that are in T_2^{in}.
+ num1 = 0
+ for successor in self.G1[G1_node]:
+ if (successor in self.in_1) and (successor not in self.core_1):
+ num1 += 1
+ num2 = 0
+ for successor in self.G2[G2_node]:
+ if (successor in self.in_2) and (successor not in self.core_2):
+ num2 += 1
+ if self.test == 'graph':
+ if not (num1 == num2):
+ return False
+ else: # self.test == 'subgraph'
+ if not (num1 >= num2):
+ return False
+
+ # R_termout
+
+ # The number of predecessors of n that are in T_1^{out} is equal to the
+ # number of predecessors of m that are in T_2^{out}.
+ num1 = 0
+ for predecessor in self.G1.pred[G1_node]:
+ if (predecessor in self.out_1) and (predecessor not in self.core_1):
+ num1 += 1
+ num2 = 0
+ for predecessor in self.G2.pred[G2_node]:
+ if (predecessor in self.out_2) and (predecessor not in self.core_2):
+ num2 += 1
+ if self.test == 'graph':
+ if not (num1 == num2):
+ return False
+ else: # self.test == 'subgraph'
+ if not (num1 >= num2):
+ return False
+
+ # The number of successors of n that are in T_1^{out} is equal to the
+ # number of successors of m that are in T_2^{out}.
+ num1 = 0
+ for successor in self.G1[G1_node]:
+ if (successor in self.out_1) and (successor not in self.core_1):
+ num1 += 1
+ num2 = 0
+ for successor in self.G2[G2_node]:
+ if (successor in self.out_2) and (successor not in self.core_2):
+ num2 += 1
+ if self.test == 'graph':
+ if not (num1 == num2):
+ return False
+ else: # self.test == 'subgraph'
+ if not (num1 >= num2):
+ return False
+
+ ### Look ahead 2
+
+ # R_new
+
+ # The number of predecessors of n that are neither in the core_1 nor
+ # T_1^{in} nor T_1^{out} is equal to the number of predecessors of m
+ # that are neither in core_2 nor T_2^{in} nor T_2^{out}.
+ num1 = 0
+ for predecessor in self.G1.pred[G1_node]:
+ if (predecessor not in self.in_1) and (predecessor not in self.out_1):
+ num1 += 1
+ num2 = 0
+ for predecessor in self.G2.pred[G2_node]:
+ if (predecessor not in self.in_2) and (predecessor not in self.out_2):
+ num2 += 1
+ if self.test == 'graph':
+ if not (num1 == num2):
+ return False
+ else: # self.test == 'subgraph'
+ if not (num1 >= num2):
+ return False
+
+ # The number of successors of n that are neither in the core_1 nor
+ # T_1^{in} nor T_1^{out} is equal to the number of successors of m
+ # that are neither in core_2 nor T_2^{in} nor T_2^{out}.
+ num1 = 0
+ for successor in self.G1[G1_node]:
+ if (successor not in self.in_1) and (successor not in self.out_1):
+ num1 += 1
+ num2 = 0
+ for successor in self.G2[G2_node]:
+ if (successor not in self.in_2) and (successor not in self.out_2):
+ num2 += 1
+ if self.test == 'graph':
+ if not (num1 == num2):
+ return False
+ else: # self.test == 'subgraph'
+ if not (num1 >= num2):
+ return False
+
+ # Otherwise, this node pair is syntactically feasible!
+ return True
+
+
+class GMState(object):
+ """Internal representation of state for the GraphMatcher class.
+
+ This class is used internally by the GraphMatcher class. It is used
+ only to store state specific data. There will be at most G2.order() of
+ these objects in memory at a time, due to the depth-first search
+ strategy employed by the VF2 algorithm.
+ """
+ def __init__(self, GM, G1_node=None, G2_node=None):
+ """Initializes GMState object.
+
+ Pass in the GraphMatcher to which this GMState belongs and the
+ new node pair that will be added to the GraphMatcher's current
+ isomorphism mapping.
+ """
+ self.GM = GM
+
+ # Initialize the last stored node pair.
+ self.G1_node = None
+ self.G2_node = None
+ self.depth = len(GM.core_1)
+
+ if G1_node is None or G2_node is None:
+ # Then we reset the class variables
+ GM.core_1 = {}
+ GM.core_2 = {}
+ GM.inout_1 = {}
+ GM.inout_2 = {}
+
+ # Watch out! G1_node == 0 should evaluate to True.
+ if G1_node is not None and G2_node is not None:
+ # Add the node pair to the isomorphism mapping.
+ GM.core_1[G1_node] = G2_node
+ GM.core_2[G2_node] = G1_node
+
+ # Store the node that was added last.
+ self.G1_node = G1_node
+ self.G2_node = G2_node
+
+ # Now we must update the other two vectors.
+ # We will add only if it is not in there already!
+ self.depth = len(GM.core_1)
+
+ # First we add the new nodes...
+ if G1_node not in GM.inout_1:
+ GM.inout_1[G1_node] = self.depth
+ if G2_node not in GM.inout_2:
+ GM.inout_2[G2_node] = self.depth
+
+ # Now we add every other node...
+
+ # Updates for T_1^{inout}
+ new_nodes = set([])
+ for node in GM.core_1:
+ new_nodes.update([neighbor for neighbor in GM.G1[node] if neighbor not in GM.core_1])
+ for node in new_nodes:
+ if node not in GM.inout_1:
+ GM.inout_1[node] = self.depth
+
+ # Updates for T_2^{inout}
+ new_nodes = set([])
+ for node in GM.core_2:
+ new_nodes.update([neighbor for neighbor in GM.G2[node] if neighbor not in GM.core_2])
+ for node in new_nodes:
+ if node not in GM.inout_2:
+ GM.inout_2[node] = self.depth
+
+ def restore(self):
+ """Deletes the GMState object and restores the class variables."""
+ # First we remove the node that was added from the core vectors.
+ # Watch out! G1_node == 0 should evaluate to True.
+ if self.G1_node is not None and self.G2_node is not None:
+ del self.GM.core_1[self.G1_node]
+ del self.GM.core_2[self.G2_node]
+
+ # Now we revert the other two vectors.
+ # Thus, we delete all entries which have this depth level.
+ for vector in (self.GM.inout_1, self.GM.inout_2):
+ for node in list(vector.keys()):
+ if vector[node] == self.depth:
+ del vector[node]
+
+
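+# Editor's note (sketch): a GMState works like a stack frame for the
+# depth-first search -- constructing one pushes a candidate pair into
+# core_1/core_2 and depth-stamps any new terminal nodes, and restore()
+# pops exactly that work. The match() method above uses it as:
+#
+#   newstate = self.state.__class__(self, G1_node, G2_node)  # push (n, m)
+#   ...                                                      # recurse
+#   newstate.restore()                                       # pop (n, m)
+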
+class DiGMState(object):
+ """Internal representation of state for the DiGraphMatcher class.
+
+ This class is used internally by the DiGraphMatcher class. It is used
+ only to store state specific data. There will be at most G2.order() of
+ these objects in memory at a time, due to the depth-first search
+ strategy employed by the VF2 algorithm.
+
+ """
+ def __init__(self, GM, G1_node=None, G2_node=None):
+ """Initializes DiGMState object.
+
+ Pass in the DiGraphMatcher to which this DiGMState belongs and the
+        new node pair that will be added to the DiGraphMatcher's current
+ isomorphism mapping.
+ """
+ self.GM = GM
+
+ # Initialize the last stored node pair.
+ self.G1_node = None
+ self.G2_node = None
+ self.depth = len(GM.core_1)
+
+ if G1_node is None or G2_node is None:
+ # Then we reset the class variables
+ GM.core_1 = {}
+ GM.core_2 = {}
+ GM.in_1 = {}
+ GM.in_2 = {}
+ GM.out_1 = {}
+ GM.out_2 = {}
+
+ # Watch out! G1_node == 0 should evaluate to True.
+ if G1_node is not None and G2_node is not None:
+ # Add the node pair to the isomorphism mapping.
+ GM.core_1[G1_node] = G2_node
+ GM.core_2[G2_node] = G1_node
+
+ # Store the node that was added last.
+ self.G1_node = G1_node
+ self.G2_node = G2_node
+
+ # Now we must update the other four vectors.
+ # We will add only if it is not in there already!
+ self.depth = len(GM.core_1)
+
+ # First we add the new nodes...
+ for vector in (GM.in_1, GM.out_1):
+ if G1_node not in vector:
+ vector[G1_node] = self.depth
+ for vector in (GM.in_2, GM.out_2):
+ if G2_node not in vector:
+ vector[G2_node] = self.depth
+
+ # Now we add every other node...
+
+ # Updates for T_1^{in}
+ new_nodes = set([])
+ for node in GM.core_1:
+ new_nodes.update([predecessor for predecessor in GM.G1.predecessors(node) if predecessor not in GM.core_1])
+ for node in new_nodes:
+ if node not in GM.in_1:
+ GM.in_1[node] = self.depth
+
+ # Updates for T_2^{in}
+ new_nodes = set([])
+ for node in GM.core_2:
+ new_nodes.update([predecessor for predecessor in GM.G2.predecessors(node) if predecessor not in GM.core_2])
+ for node in new_nodes:
+ if node not in GM.in_2:
+ GM.in_2[node] = self.depth
+
+ # Updates for T_1^{out}
+ new_nodes = set([])
+ for node in GM.core_1:
+ new_nodes.update([successor for successor in GM.G1.successors(node) if successor not in GM.core_1])
+ for node in new_nodes:
+ if node not in GM.out_1:
+ GM.out_1[node] = self.depth
+
+ # Updates for T_2^{out}
+ new_nodes = set([])
+ for node in GM.core_2:
+ new_nodes.update([successor for successor in GM.G2.successors(node) if successor not in GM.core_2])
+ for node in new_nodes:
+ if node not in GM.out_2:
+ GM.out_2[node] = self.depth
+
+ def restore(self):
+ """Deletes the DiGMState object and restores the class variables."""
+
+ # First we remove the node that was added from the core vectors.
+ # Watch out! G1_node == 0 should evaluate to True.
+ if self.G1_node is not None and self.G2_node is not None:
+ del self.GM.core_1[self.G1_node]
+ del self.GM.core_2[self.G2_node]
+
+ # Now we revert the other four vectors.
+ # Thus, we delete all entries which have this depth level.
+ for vector in (self.GM.in_1, self.GM.in_2, self.GM.out_1, self.GM.out_2):
+ for node in list(vector.keys()):
+ if vector[node] == self.depth:
+ del vector[node]
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/matchhelpers.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/matchhelpers.py
new file mode 100644
index 0000000..f9af38b
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/matchhelpers.py
@@ -0,0 +1,346 @@
+"""Functions which help end users define customize node_match and
+edge_match functions to use during isomorphism checks.
+"""
+from itertools import permutations
+import types
+import networkx as nx
+
+__all__ = ['categorical_node_match',
+ 'categorical_edge_match',
+ 'categorical_multiedge_match',
+ 'numerical_node_match',
+ 'numerical_edge_match',
+ 'numerical_multiedge_match',
+ 'generic_node_match',
+ 'generic_edge_match',
+ 'generic_multiedge_match',
+ ]
+
+
+def copyfunc(f, name=None):
+    """Returns a deepcopy of a function."""
+    try:
+        # Python 2 function attributes.
+        return types.FunctionType(f.func_code, f.func_globals, name or f.__name__,
+                                  f.func_defaults, f.func_closure)
+    except AttributeError:
+        # Python 3 function attributes.
+        return types.FunctionType(f.__code__, f.__globals__, name or f.__name__,
+                                  f.__defaults__, f.__closure__)
+
+def allclose(x, y, rtol=1e-05, atol=1e-08):
+ """Returns True if x and y are sufficiently close, elementwise.
+
+ Parameters
+ ----------
+ rtol : float
+ The relative error tolerance.
+ atol : float
+ The absolute error tolerance.
+
+ """
+ # assume finite weights, see numpy.allclose() for reference
+ for xi, yi in zip(x,y):
+ if not ( abs(xi-yi) <= atol + rtol * abs(yi) ):
+ return False
+ return True
+
+
+def close(x, y, rtol=1e-05, atol=1e-08):
+ """Returns True if x and y are sufficiently close.
+
+ Parameters
+ ----------
+ rtol : float
+ The relative error tolerance.
+ atol : float
+ The absolute error tolerance.
+
+ """
+ # assume finite weights, see numpy.allclose() for reference
+ return abs(x-y) <= atol + rtol * abs(y)
+
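+# Editor's sketch: close() mirrors the numpy.allclose() criterion
+# |x - y| <= atol + rtol * |y|. With the defaults above:
+#
+#   >>> close(1.0, 1.0 + 5e-06)   # error 5e-06 is within ~1e-05 tolerance
+#   True
+#   >>> close(1.0, 1.001)         # error 1e-03 exceeds the tolerance
+#   False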
+
+categorical_doc = """
+Returns a comparison function for a categorical node attribute.
+
+The value(s) of the attr(s) must be hashable and comparable via the ==
+operator since they are placed into a set([]) object. If the sets from
+G1 and G2 are the same, then the constructed function returns True.
+
+Parameters
+----------
+attr : string | list
+ The categorical node attribute to compare, or a list of categorical
+ node attributes to compare.
+default : value | list
+ The default value for the categorical node attribute, or a list of
+ default values for the categorical node attributes.
+
+Returns
+-------
+match : function
+ The customized, categorical `node_match` function.
+
+Examples
+--------
+>>> import networkx.algorithms.isomorphism as iso
+>>> nm = iso.categorical_node_match('size', 1)
+>>> nm = iso.categorical_node_match(['color', 'size'], ['red', 2])
+
+"""
+
+def categorical_node_match(attr, default):
+ if nx.utils.is_string_like(attr):
+ def match(data1, data2):
+ return data1.get(attr, default) == data2.get(attr, default)
+ else:
+ attrs = list(zip(attr, default)) # Python 3
+ def match(data1, data2):
+ values1 = set([data1.get(attr, d) for attr, d in attrs])
+ values2 = set([data2.get(attr, d) for attr, d in attrs])
+ return values1 == values2
+ return match
+
+categorical_edge_match = copyfunc(categorical_node_match, 'categorical_edge_match')
+
+def categorical_multiedge_match(attr, default):
+ if nx.utils.is_string_like(attr):
+ def match(datasets1, datasets2):
+ values1 = set([data.get(attr, default) for data in datasets1.values()])
+ values2 = set([data.get(attr, default) for data in datasets2.values()])
+ return values1 == values2
+ else:
+ attrs = list(zip(attr, default)) # Python 3
+ def match(datasets1, datasets2):
+ values1 = set([])
+ for data1 in datasets1.values():
+ x = tuple( data1.get(attr, d) for attr, d in attrs )
+ values1.add(x)
+ values2 = set([])
+ for data2 in datasets2.values():
+ x = tuple( data2.get(attr, d) for attr, d in attrs )
+ values2.add(x)
+ return values1 == values2
+ return match
+
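+# Editor's sketch: for multiedges the categorical test compares the *set*
+# of attribute values across all parallel edges of a node pair, so edge
+# order and repetition of identical values do not matter. Hypothetical
+# session:
+#
+#   >>> G = nx.MultiGraph()
+#   >>> G.add_edge(0, 1, color='red')
+#   >>> G.add_edge(0, 1, color='blue')
+#   >>> em = categorical_multiedge_match('color', '')
+#   >>> em(G[0][1], G[0][1])      # sets {'red', 'blue'} are equal
+#   True
+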
+# Docstrings for categorical functions.
+categorical_node_match.__doc__ = categorical_doc
+categorical_edge_match.__doc__ = categorical_doc.replace('node', 'edge')
+tmpdoc = categorical_doc.replace('node', 'edge')
+tmpdoc = tmpdoc.replace('categorical_edge_match', 'categorical_multiedge_match')
+categorical_multiedge_match.__doc__ = tmpdoc
+
+
+numerical_doc = """
+Returns a comparison function for a numerical node attribute.
+
+The value(s) of the attr(s) must be numerical and sortable. If the
+sorted list of values from G1 and G2 are the same within some
+tolerance, then the constructed function returns True.
+
+Parameters
+----------
+attr : string | list
+ The numerical node attribute to compare, or a list of numerical
+ node attributes to compare.
+default : value | list
+ The default value for the numerical node attribute, or a list of
+ default values for the numerical node attributes.
+rtol : float
+ The relative error tolerance.
+atol : float
+ The absolute error tolerance.
+
+Returns
+-------
+match : function
+ The customized, numerical `node_match` function.
+
+Examples
+--------
+>>> import networkx.algorithms.isomorphism as iso
+>>> nm = iso.numerical_node_match('weight', 1.0)
+>>> nm = iso.numerical_node_match(['weight', 'linewidth'], [.25, .5])
+
+"""
+
+def numerical_node_match(attr, default, rtol=1e-05, atol=1e-08):
+ if nx.utils.is_string_like(attr):
+ def match(data1, data2):
+ return close(data1.get(attr, default),
+ data2.get(attr, default),
+ rtol=rtol, atol=atol)
+ else:
+ attrs = list(zip(attr, default)) # Python 3
+ def match(data1, data2):
+ values1 = [data1.get(attr, d) for attr, d in attrs]
+ values2 = [data2.get(attr, d) for attr, d in attrs]
+ return allclose(values1, values2, rtol=rtol, atol=atol)
+ return match
+
+numerical_edge_match = copyfunc(numerical_node_match, 'numerical_edge_match')
+
+def numerical_multiedge_match(attr, default, rtol=1e-05, atol=1e-08):
+ if nx.utils.is_string_like(attr):
+ def match(datasets1, datasets2):
+ values1 = sorted([data.get(attr, default) for data in datasets1.values()])
+ values2 = sorted([data.get(attr, default) for data in datasets2.values()])
+ return allclose(values1, values2, rtol=rtol, atol=atol)
+ else:
+ attrs = list(zip(attr, default)) # Python 3
+ def match(datasets1, datasets2):
+ values1 = []
+ for data1 in datasets1.values():
+ x = tuple( data1.get(attr, d) for attr, d in attrs )
+ values1.append(x)
+ values2 = []
+ for data2 in datasets2.values():
+ x = tuple( data2.get(attr, d) for attr, d in attrs )
+ values2.append(x)
+ values1.sort()
+ values2.sort()
+ for xi, yi in zip(values1, values2):
+ if not allclose(xi, yi, rtol=rtol, atol=atol):
+ return False
+ else:
+ return True
+ return match
+
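+# Editor's sketch: numerical multiedge matching sorts the per-edge values
+# first, so parallel edges match regardless of storage order. Hypothetical
+# session:
+#
+#   >>> em = numerical_multiedge_match('weight', 1)
+#   >>> em({0: {'weight': 1.0}, 1: {'weight': 2.0}},
+#   ...    {0: {'weight': 2.0}, 1: {'weight': 1.0}})
+#   True
+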
+# Docstrings for numerical functions.
+numerical_node_match.__doc__ = numerical_doc
+numerical_edge_match.__doc__ = numerical_doc.replace('node', 'edge')
+tmpdoc = numerical_doc.replace('node', 'edge')
+tmpdoc = tmpdoc.replace('numerical_edge_match', 'numerical_multiedge_match')
+numerical_multiedge_match.__doc__ = tmpdoc
+
+
+generic_doc = """
+Returns a comparison function for a generic attribute.
+
+The value(s) of the attr(s) are compared using the specified
+operators. If all the attributes are equal, then the constructed
+function returns True.
+
+Parameters
+----------
+attr : string | list
+ The node attribute to compare, or a list of node attributes
+ to compare.
+default : value | list
+ The default value for the node attribute, or a list of
+ default values for the node attributes.
+op : callable | list
+ The operator to use when comparing attribute values, or a list
+ of operators to use when comparing values for each attribute.
+
+Returns
+-------
+match : function
+ The customized, generic `node_match` function.
+
+Examples
+--------
+>>> from operator import eq
+>>> from networkx.algorithms.isomorphism.matchhelpers import close
+>>> from networkx.algorithms.isomorphism import generic_node_match
+>>> nm = generic_node_match('weight', 1.0, close)
+>>> nm = generic_node_match('color', 'red', eq)
+>>> nm = generic_node_match(['weight', 'color'], [1.0, 'red'], [close, eq])
+
+"""
+
+def generic_node_match(attr, default, op):
+ if nx.utils.is_string_like(attr):
+ def match(data1, data2):
+ return op(data1.get(attr, default), data2.get(attr, default))
+ else:
+ attrs = list(zip(attr, default, op)) # Python 3
+ def match(data1, data2):
+ for attr, d, operator in attrs:
+ if not operator(data1.get(attr, d), data2.get(attr, d)):
+ return False
+ else:
+ return True
+ return match
+
+generic_edge_match = copyfunc(generic_node_match, 'generic_edge_match')
+
+def generic_multiedge_match(attr, default, op):
+ """Returns a comparison function for a generic attribute.
+
+ The value(s) of the attr(s) are compared using the specified
+ operators. If all the attributes are equal, then the constructed
+ function returns True. Potentially, the constructed edge_match
+ function can be slow since it must verify that no isomorphism
+ exists between the multiedges before it returns False.
+
+ Parameters
+ ----------
+ attr : string | list
+        The edge attribute to compare, or a list of edge attributes
+        to compare.
+ default : value | list
+ The default value for the edge attribute, or a list of
+        default values for the edge attributes.
+ op : callable | list
+ The operator to use when comparing attribute values, or a list
+ of operators to use when comparing values for each attribute.
+
+ Returns
+ -------
+ match : function
+ The customized, generic `edge_match` function.
+
+ Examples
+ --------
+ >>> from operator import eq
+ >>> from networkx.algorithms.isomorphism.matchhelpers import close
+    >>> from networkx.algorithms.isomorphism import generic_multiedge_match
+    >>> em = generic_multiedge_match('weight', 1.0, close)
+    >>> em = generic_multiedge_match('color', 'red', eq)
+    >>> em = generic_multiedge_match(['weight', 'color'],
+    ...                              [1.0, 'red'],
+    ...                              [close, eq])
+ ...
+
+ """
+
+ # This is slow, but generic.
+ # We must test every possible isomorphism between the edges.
+ if nx.utils.is_string_like(attr):
+ def match(datasets1, datasets2):
+ values1 = [data.get(attr, default) for data in datasets1.values()]
+ values2 = [data.get(attr, default) for data in datasets2.values()]
+ for vals2 in permutations(values2):
+ for xi, yi in zip(values1, vals2):
+ if not op(xi, yi):
+ # This is not an isomorphism, go to next permutation.
+ break
+ else:
+ # Then we found an isomorphism.
+ return True
+ else:
+ # Then there are no isomorphisms between the multiedges.
+ return False
+ else:
+ attrs = list(zip(attr, default)) # Python 3
+ def match(datasets1, datasets2):
+ values1 = []
+ for data1 in datasets1.values():
+ x = tuple( data1.get(attr, d) for attr, d in attrs )
+ values1.append(x)
+ values2 = []
+ for data2 in datasets2.values():
+ x = tuple( data2.get(attr, d) for attr, d in attrs )
+ values2.append(x)
+            for vals2 in permutations(values2):
+                for xs, ys in zip(values1, vals2):
+                    # Each attribute is compared with its own operator.
+                    if not all(o(x, y) for o, x, y in zip(op, xs, ys)):
+                        break   # this pairing fails; try next permutation
+                else:
+                    return True  # every paired edge matched on every attribute
+            return False         # no permutation of the multiedges matches
+ return match
+
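+# Editor's sketch: the permutation search treats parallel edges as an
+# unordered multiset, so matching succeeds whenever *some* pairing of the
+# two edge sets agrees attribute-by-attribute. Hypothetical session:
+#
+#   >>> from operator import eq
+#   >>> em = generic_multiedge_match('weight', 1, eq)
+#   >>> em({0: {'weight': 1}, 1: {'weight': 2}},
+#   ...    {0: {'weight': 2}, 1: {'weight': 1}})   # same multiset, reordered
+#   True
+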
+# Docstrings for generic functions.
+generic_node_match.__doc__ = generic_doc
+generic_edge_match.__doc__ = generic_doc.replace('node', 'edge')
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/tests/iso_r01_s80.A99 b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/tests/iso_r01_s80.A99
new file mode 100644
index 0000000..dac54f0
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/tests/iso_r01_s80.A99
Binary files differ
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/tests/iso_r01_s80.B99 b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/tests/iso_r01_s80.B99
new file mode 100644
index 0000000..6c6af68
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/tests/iso_r01_s80.B99
Binary files differ
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/tests/si2_b06_m200.A99 b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/tests/si2_b06_m200.A99
new file mode 100644
index 0000000..60c3a3c
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/tests/si2_b06_m200.A99
Binary files differ
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/tests/si2_b06_m200.B99 b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/tests/si2_b06_m200.B99
new file mode 100644
index 0000000..0236872
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/tests/si2_b06_m200.B99
Binary files differ
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/tests/test_isomorphism.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/tests/test_isomorphism.py
new file mode 100644
index 0000000..b9c7e63
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/tests/test_isomorphism.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx as nx
+from networkx.algorithms import isomorphism as iso
+
+class TestIsomorph:
+
+ def setUp(self):
+ self.G1=nx.Graph()
+ self.G2=nx.Graph()
+ self.G3=nx.Graph()
+ self.G4=nx.Graph()
+ self.G1.add_edges_from([ [1,2],[1,3],[1,5],[2,3] ])
+ self.G2.add_edges_from([ [10,20],[20,30],[10,30],[10,50] ])
+ self.G3.add_edges_from([ [1,2],[1,3],[1,5],[2,5] ])
+ self.G4.add_edges_from([ [1,2],[1,3],[1,5],[2,4] ])
+
+ def test_could_be_isomorphic(self):
+ assert_true(iso.could_be_isomorphic(self.G1,self.G2))
+ assert_true(iso.could_be_isomorphic(self.G1,self.G3))
+ assert_false(iso.could_be_isomorphic(self.G1,self.G4))
+ assert_true(iso.could_be_isomorphic(self.G3,self.G2))
+
+ def test_fast_could_be_isomorphic(self):
+ assert_true(iso.fast_could_be_isomorphic(self.G3,self.G2))
+
+ def test_faster_could_be_isomorphic(self):
+ assert_true(iso.faster_could_be_isomorphic(self.G3,self.G2))
+
+ def test_is_isomorphic(self):
+ assert_true(iso.is_isomorphic(self.G1,self.G2))
+ assert_false(iso.is_isomorphic(self.G1,self.G4))
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/tests/test_isomorphvf2.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/tests/test_isomorphvf2.py
new file mode 100644
index 0000000..562d396
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/tests/test_isomorphvf2.py
@@ -0,0 +1,217 @@
+"""
+ Tests for VF2 isomorphism algorithm.
+"""
+
+import os
+import struct
+import random
+
+from nose.tools import assert_true, assert_equal
+import networkx as nx
+from networkx.algorithms import isomorphism as iso
+
+class TestWikipediaExample(object):
+ # Source: http://en.wikipedia.org/wiki/Graph_isomorphism
+
+ # Nodes 'a', 'b', 'c' and 'd' form a column.
+ # Nodes 'g', 'h', 'i' and 'j' form a column.
+ g1edges = [['a','g'], ['a','h'], ['a','i'],
+ ['b','g'], ['b','h'], ['b','j'],
+ ['c','g'], ['c','i'], ['c','j'],
+ ['d','h'], ['d','i'], ['d','j']]
+
+ # Nodes 1,2,3,4 form the clockwise corners of a large square.
+ # Nodes 5,6,7,8 form the clockwise corners of a small square
+ g2edges = [[1,2], [2,3], [3,4], [4,1],
+ [5,6], [6,7], [7,8], [8,5],
+ [1,5], [2,6], [3,7], [4,8]]
+
+ def test_graph(self):
+ g1 = nx.Graph()
+ g2 = nx.Graph()
+ g1.add_edges_from(self.g1edges)
+ g2.add_edges_from(self.g2edges)
+ gm = iso.GraphMatcher(g1,g2)
+ assert_true(gm.is_isomorphic())
+
+ mapping = sorted(gm.mapping.items())
+# this mapping is only one of the possibilities
+# so this test needs to be reconsidered
+# isomap = [('a', 1), ('b', 6), ('c', 3), ('d', 8),
+# ('g', 2), ('h', 5), ('i', 4), ('j', 7)]
+# assert_equal(mapping, isomap)
+
+ def test_subgraph(self):
+ g1 = nx.Graph()
+ g2 = nx.Graph()
+ g1.add_edges_from(self.g1edges)
+ g2.add_edges_from(self.g2edges)
+ g3 = g2.subgraph([1,2,3,4])
+ gm = iso.GraphMatcher(g1,g3)
+ assert_true(gm.subgraph_is_isomorphic())
+
+class TestVF2GraphDB(object):
+ # http://amalfi.dis.unina.it/graph/db/
+
+ @staticmethod
+ def create_graph(filename):
+ """Creates a Graph instance from the filename."""
+
+ # The file is assumed to be in the format from the VF2 graph database.
+ # Each file is composed of 16-bit numbers (unsigned short int).
+ # So we will want to read 2 bytes at a time.
+
+ # We can read the number as follows:
+ # number = struct.unpack('<H', file.read(2))
+ # This says, expect the data in little-endian encoding
+ # as an unsigned short int and unpack 2 bytes from the file.
+
+ fh = open(filename, mode='rb')
+
+ # Grab the number of nodes.
+ # Node numeration is 0-based, so the first node has index 0.
+ nodes = struct.unpack('<H', fh.read(2))[0]
+
+ graph = nx.Graph()
+ for from_node in range(nodes):
+ # Get the number of edges.
+ edges = struct.unpack('<H', fh.read(2))[0]
+ for edge in range(edges):
+ # Get the terminal node.
+ to_node = struct.unpack('<H', fh.read(2))[0]
+ graph.add_edge(from_node, to_node)
+
+ fh.close()
+ return graph
+
+ def test_graph(self):
+ head,tail = os.path.split(__file__)
+ g1 = self.create_graph(os.path.join(head,'iso_r01_s80.A99'))
+ g2 = self.create_graph(os.path.join(head,'iso_r01_s80.B99'))
+ gm = iso.GraphMatcher(g1,g2)
+ assert_true(gm.is_isomorphic())
+
+ def test_subgraph(self):
+ # A is the subgraph
+ # B is the full graph
+ head,tail = os.path.split(__file__)
+ subgraph = self.create_graph(os.path.join(head,'si2_b06_m200.A99'))
+ graph = self.create_graph(os.path.join(head,'si2_b06_m200.B99'))
+ gm = iso.GraphMatcher(graph, subgraph)
+ assert_true(gm.subgraph_is_isomorphic())
+
+def test_graph_atlas():
+ #Atlas = nx.graph_atlas_g()[0:208] # 208, 6 nodes or less
+ Atlas = nx.graph_atlas_g()[0:100]
+ alphabet = list(range(26))
+ for graph in Atlas:
+ nlist = graph.nodes()
+ labels = alphabet[:len(nlist)]
+ for s in range(10):
+ random.shuffle(labels)
+ d = dict(zip(nlist,labels))
+ relabel = nx.relabel_nodes(graph, d)
+ gm = iso.GraphMatcher(graph, relabel)
+ assert_true(gm.is_isomorphic())
+
+def test_multiedge():
+ # Simple test for multigraphs
+ # Need something much more rigorous
+ edges = [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5),
+ (5, 6), (6, 7), (7, 8), (8, 9), (9, 10),
+ (10, 11), (10, 11), (11, 12), (11, 12),
+ (12, 13), (12, 13), (13, 14), (13, 14),
+ (14, 15), (14, 15), (15, 16), (15, 16),
+ (16, 17), (16, 17), (17, 18), (17, 18),
+ (18, 19), (18, 19), (19, 0), (19, 0)]
+ nodes = list(range(20))
+
+ for g1 in [nx.MultiGraph(), nx.MultiDiGraph()]:
+ g1.add_edges_from(edges)
+ for _ in range(10):
+ new_nodes = list(nodes)
+ random.shuffle(new_nodes)
+ d = dict(zip(nodes, new_nodes))
+ g2 = nx.relabel_nodes(g1, d)
+ if not g1.is_directed():
+ gm = iso.GraphMatcher(g1,g2)
+ else:
+ gm = iso.DiGraphMatcher(g1,g2)
+ assert_true(gm.is_isomorphic())
+
+def test_selfloop():
+ # Simple test for graphs with selfloops
+ edges = [(0, 1), (0, 2), (1, 2), (1, 3), (2, 2),
+ (2, 4), (3, 1), (3, 2), (4, 2), (4, 5), (5, 4)]
+ nodes = list(range(6))
+
+ for g1 in [nx.Graph(), nx.DiGraph()]:
+ g1.add_edges_from(edges)
+ for _ in range(100):
+ new_nodes = list(nodes)
+ random.shuffle(new_nodes)
+ d = dict(zip(nodes, new_nodes))
+ g2 = nx.relabel_nodes(g1, d)
+ if not g1.is_directed():
+ gm = iso.GraphMatcher(g1,g2)
+ else:
+ gm = iso.DiGraphMatcher(g1,g2)
+ assert_true(gm.is_isomorphic())
+
+def test_isomorphism_iter1():
+ # As described in:
+ # http://groups.google.com/group/networkx-discuss/browse_thread/thread/2ff65c67f5e3b99f/d674544ebea359bb?fwc=1
+ g1 = nx.DiGraph()
+ g2 = nx.DiGraph()
+ g3 = nx.DiGraph()
+ g1.add_edge('A','B')
+ g1.add_edge('B','C')
+ g2.add_edge('Y','Z')
+ g3.add_edge('Z','Y')
+ gm12 = iso.DiGraphMatcher(g1,g2)
+ gm13 = iso.DiGraphMatcher(g1,g3)
+ x = list(gm12.subgraph_isomorphisms_iter())
+ y = list(gm13.subgraph_isomorphisms_iter())
+ assert_true({'A':'Y','B':'Z'} in x)
+ assert_true({'B':'Y','C':'Z'} in x)
+ assert_true({'A':'Z','B':'Y'} in y)
+ assert_true({'B':'Z','C':'Y'} in y)
+ assert_equal(len(x),len(y))
+ assert_equal(len(x),2)
+
+def test_isomorphism_iter2():
+ # Path
+ for L in range(2,10):
+ g1 = nx.path_graph(L)
+ gm = iso.GraphMatcher(g1,g1)
+ s = len(list(gm.isomorphisms_iter()))
+ assert_equal(s,2)
+ # Cycle
+ for L in range(3,10):
+ g1 = nx.cycle_graph(L)
+ gm = iso.GraphMatcher(g1,g1)
+ s = len(list(gm.isomorphisms_iter()))
+ assert_equal(s, 2*L)
+
+def test_multiple():
+ # Verify that we can use the graph matcher multiple times
+ edges = [('A','B'),('B','A'),('B','C')]
+ for g1,g2 in [(nx.Graph(),nx.Graph()), (nx.DiGraph(),nx.DiGraph())]:
+ g1.add_edges_from(edges)
+ g2.add_edges_from(edges)
+ g3 = nx.subgraph(g2, ['A','B'])
+ if not g1.is_directed():
+ gmA = iso.GraphMatcher(g1,g2)
+ gmB = iso.GraphMatcher(g1,g3)
+ else:
+ gmA = iso.DiGraphMatcher(g1,g2)
+ gmB = iso.DiGraphMatcher(g1,g3)
+ assert_true(gmA.is_isomorphic())
+ g2.remove_node('C')
+ assert_true(gmA.subgraph_is_isomorphic())
+ assert_true(gmB.subgraph_is_isomorphic())
+# for m in [gmB.mapping, gmB.mapping]:
+# assert_true(m['A'] == 'A')
+# assert_true(m['B'] == 'B')
+# assert_true('C' not in m)
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/tests/test_vf2userfunc.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/tests/test_vf2userfunc.py
new file mode 100644
index 0000000..ca4f6bd
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/tests/test_vf2userfunc.py
@@ -0,0 +1,192 @@
+"""
+ Tests for VF2 isomorphism algorithm for weighted graphs.
+"""
+
+from nose.tools import assert_true, assert_false
+from operator import eq
+
+import networkx as nx
+import networkx.algorithms.isomorphism as iso
+
+def test_simple():
+ # 16 simple tests
+ w = 'weight'
+ edges = [(0,0,1),(0,0,1.5),(0,1,2),(1,0,3)]
+ for g1 in [nx.Graph(),
+ nx.DiGraph(),
+ nx.MultiGraph(),
+ nx.MultiDiGraph(),
+ ]:
+
+ g1.add_weighted_edges_from(edges)
+ g2 = g1.subgraph(g1.nodes())
+ if g1.is_multigraph():
+ em = iso.numerical_multiedge_match('weight', 1)
+ else:
+ em = iso.numerical_edge_match('weight', 1)
+ assert_true( nx.is_isomorphic(g1,g2,edge_match=em) )
+
+ for mod1, mod2 in [(False, True), (True, False), (True, True)]:
+ # mod1 tests a regular edge
+ # mod2 tests a selfloop
+ if g2.is_multigraph():
+ if mod1:
+ data1 = {0:{'weight':10}}
+ if mod2:
+ data2 = {0:{'weight':1},1:{'weight':2.5}}
+ else:
+ if mod1:
+ data1 = {'weight':10}
+ if mod2:
+ data2 = {'weight':2.5}
+
+ g2 = g1.subgraph(g1.nodes())
+ if mod1:
+ if not g1.is_directed():
+ g2.adj[1][0] = data1
+ g2.adj[0][1] = data1
+ else:
+ g2.succ[1][0] = data1
+ g2.pred[0][1] = data1
+ if mod2:
+ if not g1.is_directed():
+ g2.adj[0][0] = data2
+ else:
+ g2.succ[0][0] = data2
+ g2.pred[0][0] = data2
+
+ assert_false(nx.is_isomorphic(g1,g2,edge_match=em))
+
+def test_weightkey():
+ g1 = nx.DiGraph()
+ g2 = nx.DiGraph()
+
+ g1.add_edge('A','B', weight=1)
+ g2.add_edge('C','D', weight=0)
+
+ assert_true( nx.is_isomorphic(g1, g2) )
+ em = iso.numerical_edge_match('nonexistent attribute', 1)
+ assert_true( nx.is_isomorphic(g1, g2, edge_match=em) )
+ em = iso.numerical_edge_match('weight', 1)
+ assert_false( nx.is_isomorphic(g1, g2, edge_match=em) )
+
+ g2 = nx.DiGraph()
+ g2.add_edge('C','D')
+ assert_true( nx.is_isomorphic(g1, g2, edge_match=em) )
+
+class TestNodeMatch_Graph(object):
+ def setUp(self):
+ self.g1 = nx.Graph()
+ self.g2 = nx.Graph()
+ self.build()
+
+ def build(self):
+
+ self.nm = iso.categorical_node_match('color', '')
+ self.em = iso.numerical_edge_match('weight', 1)
+
+ self.g1.add_node('A', color='red')
+ self.g2.add_node('C', color='blue')
+
+ self.g1.add_edge('A','B', weight=1)
+ self.g2.add_edge('C','D', weight=1)
+
+ def test_noweight_nocolor(self):
+ assert_true( nx.is_isomorphic(self.g1, self.g2) )
+
+ def test_color1(self):
+ assert_false( nx.is_isomorphic(self.g1, self.g2, node_match=self.nm) )
+
+ def test_color2(self):
+ self.g1.node['A']['color'] = 'blue'
+ assert_true( nx.is_isomorphic(self.g1, self.g2, node_match=self.nm) )
+
+ def test_weight1(self):
+ assert_true( nx.is_isomorphic(self.g1, self.g2, edge_match=self.em) )
+
+ def test_weight2(self):
+ self.g1.add_edge('A', 'B', weight=2)
+ assert_false( nx.is_isomorphic(self.g1, self.g2, edge_match=self.em) )
+
+ def test_colorsandweights1(self):
+ iso = nx.is_isomorphic(self.g1, self.g2,
+ node_match=self.nm, edge_match=self.em)
+ assert_false(iso)
+
+ def test_colorsandweights2(self):
+ self.g1.node['A']['color'] = 'blue'
+ iso = nx.is_isomorphic(self.g1, self.g2,
+ node_match=self.nm, edge_match=self.em)
+ assert_true(iso)
+
+ def test_colorsandweights3(self):
+ # make the weights disagree
+ self.g1.add_edge('A', 'B', weight=2)
+ assert_false( nx.is_isomorphic(self.g1, self.g2,
+ node_match=self.nm, edge_match=self.em) )
+
+class TestEdgeMatch_MultiGraph(object):
+ def setUp(self):
+ self.g1 = nx.MultiGraph()
+ self.g2 = nx.MultiGraph()
+ self.GM = iso.MultiGraphMatcher
+ self.build()
+
+ def build(self):
+ g1 = self.g1
+ g2 = self.g2
+
+ # We will assume integer weights only.
+ g1.add_edge('A', 'B', color='green', weight=0, size=.5)
+ g1.add_edge('A', 'B', color='red', weight=1, size=.35)
+ g1.add_edge('A', 'B', color='red', weight=2, size=.65)
+
+ g2.add_edge('C', 'D', color='green', weight=1, size=.5)
+ g2.add_edge('C', 'D', color='red', weight=0, size=.45)
+ g2.add_edge('C', 'D', color='red', weight=2, size=.65)
+
+ if g1.is_multigraph():
+ self.em = iso.numerical_multiedge_match('weight', 1)
+ self.emc = iso.categorical_multiedge_match('color', '')
+ self.emcm = iso.categorical_multiedge_match(['color', 'weight'], ['', 1])
+ self.emg1 = iso.generic_multiedge_match('color', 'red', eq)
+ self.emg2 = iso.generic_multiedge_match(['color', 'weight', 'size'], ['red', 1, .5], [eq, eq, iso.matchhelpers.close])
+ else:
+ self.em = iso.numerical_edge_match('weight', 1)
+ self.emc = iso.categorical_edge_match('color', '')
+ self.emcm = iso.categorical_edge_match(['color', 'weight'], ['', 1])
+            self.emg1 = iso.generic_edge_match('color', 'red', eq)
+ self.emg2 = iso.generic_edge_match(['color', 'weight', 'size'], ['red', 1, .5], [eq, eq, iso.matchhelpers.close])
+
+ def test_weights_only(self):
+ assert_true( nx.is_isomorphic(self.g1, self.g2, edge_match=self.em) )
+
+ def test_colors_only(self):
+ gm = self.GM(self.g1, self.g2, edge_match=self.emc)
+ assert_true( gm.is_isomorphic() )
+
+ def test_colorsandweights(self):
+ gm = self.GM(self.g1, self.g2, edge_match=self.emcm)
+ assert_false( gm.is_isomorphic() )
+
+ def test_generic1(self):
+ gm = self.GM(self.g1, self.g2, edge_match=self.emg1)
+ assert_true( gm.is_isomorphic() )
+
+ def test_generic2(self):
+ gm = self.GM(self.g1, self.g2, edge_match=self.emg2)
+ assert_false( gm.is_isomorphic() )
+
+
+class TestEdgeMatch_DiGraph(TestNodeMatch_Graph):
+ def setUp(self):
+ self.g1 = nx.DiGraph()
+ self.g2 = nx.DiGraph()
+ self.build()
+
+class TestEdgeMatch_MultiDiGraph(TestEdgeMatch_MultiGraph):
+ def setUp(self):
+ self.g1 = nx.MultiDiGraph()
+ self.g2 = nx.MultiDiGraph()
+ self.GM = iso.MultiDiGraphMatcher
+ self.build()
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/vf2userfunc.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/vf2userfunc.py
new file mode 100644
index 0000000..64685c7
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/isomorphism/vf2userfunc.py
@@ -0,0 +1,198 @@
+"""
+ Module to simplify the specification of user-defined equality functions for
+ node and edge attributes during isomorphism checks.
+
+ During the construction of an isomorphism, the algorithm considers two
+ candidate nodes n1 in G1 and n2 in G2. The graphs G1 and G2 are then
+ compared with respect to properties involving n1 and n2, and if the outcome
+ is good, then the candidate nodes are considered isomorphic. NetworkX
+ provides a simple mechanism for users to extend the comparisons to include
+ node and edge attributes.
+
+ Node attributes are handled by the node_match keyword. When considering
+ n1 and n2, the algorithm passes their node attribute dictionaries to
+ node_match, and if it returns False, then n1 and n2 cannot be
+ considered to be isomorphic.
+
+ Edge attributes are handled by the edge_match keyword. When considering
+ n1 and n2, the algorithm must verify that outgoing edges from n1 are
+ commensurate with the outgoing edges for n2. If the graph is directed,
+ then a similar check is also performed for incoming edges.
+
+ Focusing only on outgoing edges, we consider pairs of nodes (n1, v1) from
+ G1 and (n2, v2) from G2. For graphs and digraphs, there is only one edge
+ between (n1, v1) and only one edge between (n2, v2). Those edge attribute
+ dictionaries are passed to edge_match, and if it returns False, then
+ n1 and n2 cannot be considered isomorphic. For multigraphs and
+ multidigraphs, there can be multiple edges between (n1, v1) and also
+ multiple edges between (n2, v2). Now, there must exist an isomorphism
+ from "all the edges between (n1, v1)" to "all the edges between (n2, v2)".
+ So, all of the edge attribute dictionaries are passed to edge_match, and
+ it must determine if there is an isomorphism between the two sets of edges.
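+
+  A minimal usage sketch (the graphs and the attribute name below are
+  illustrative only, not part of the API):
+
+  >>> import networkx as nx
+  >>> import networkx.algorithms.isomorphism as iso
+  >>> G1 = nx.Graph([('a', 'b')])
+  >>> G2 = nx.Graph([('x', 'y')])
+  >>> nm = iso.categorical_node_match('color', None)
+  >>> iso.GraphMatcher(G1, G2, node_match=nm).is_isomorphic()
+  True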
+"""
+
+import networkx as nx
+
+from . import isomorphvf2 as vf2
+
+__all__ = ['GraphMatcher',
+ 'DiGraphMatcher',
+ 'MultiGraphMatcher',
+ 'MultiDiGraphMatcher',
+ ]
+
+
+def _semantic_feasibility(self, G1_node, G2_node):
+ """Returns True if mapping G1_node to G2_node is semantically feasible.
+ """
+ # Make sure the nodes match
+ if self.node_match is not None:
+ nm = self.node_match(self.G1.node[G1_node], self.G2.node[G2_node])
+ if not nm:
+ return False
+
+ # Make sure the edges match
+ if self.edge_match is not None:
+
+ # Cached lookups
+ G1_adj = self.G1_adj
+ G2_adj = self.G2_adj
+ core_1 = self.core_1
+ edge_match = self.edge_match
+
+ for neighbor in G1_adj[G1_node]:
+ # G1_node is not in core_1, so we must handle R_self separately
+ if neighbor == G1_node:
+ if not edge_match(G1_adj[G1_node][G1_node],
+ G2_adj[G2_node][G2_node]):
+ return False
+ elif neighbor in core_1:
+ if not edge_match(G1_adj[G1_node][neighbor],
+ G2_adj[G2_node][core_1[neighbor]]):
+ return False
+ # syntactic check has already verified that neighbors are symmetric
+
+ return True
+
+
+class GraphMatcher(vf2.GraphMatcher):
+ """VF2 isomorphism checker for undirected graphs.
+ """
+ def __init__(self, G1, G2, node_match=None, edge_match=None):
+ """Initialize graph matcher.
+
+ Parameters
+ ----------
+ G1, G2: graph
+ The graphs to be tested.
+
+ node_match: callable
+ A function that returns True iff node n1 in G1 and n2 in G2
+ should be considered equal during the isomorphism test. The
+ function will be called like::
+
+ node_match(G1.node[n1], G2.node[n2])
+
+ That is, the function will receive the node attribute dictionaries
+ of the nodes under consideration. If None, then no attributes are
+ considered when testing for an isomorphism.
+
+ edge_match: callable
+ A function that returns True iff the edge attribute dictionary for
+ the pair of nodes (u1, v1) in G1 and (u2, v2) in G2 should be
+ considered equal during the isomorphism test. The function will be
+ called like::
+
+ edge_match(G1[u1][v1], G2[u2][v2])
+
+ That is, the function will receive the edge attribute dictionaries
+ of the edges under consideration. If None, then no attributes are
+ considered when testing for an isomorphism.
+
+ """
+ vf2.GraphMatcher.__init__(self, G1, G2)
+
+ self.node_match = node_match
+ self.edge_match = edge_match
+
+ # These will be modified during checks to minimize code repeat.
+ self.G1_adj = self.G1.adj
+ self.G2_adj = self.G2.adj
+
+ semantic_feasibility = _semantic_feasibility
+
+
+class DiGraphMatcher(vf2.DiGraphMatcher):
+ """VF2 isomorphism checker for directed graphs.
+ """
+ def __init__(self, G1, G2, node_match=None, edge_match=None):
+ """Initialize graph matcher.
+
+ Parameters
+ ----------
+ G1, G2 : graph
+ The graphs to be tested.
+
+ node_match : callable
+ A function that returns True iff node n1 in G1 and n2 in G2
+ should be considered equal during the isomorphism test. The
+ function will be called like::
+
+ node_match(G1.node[n1], G2.node[n2])
+
+ That is, the function will receive the node attribute dictionaries
+ of the nodes under consideration. If None, then no attributes are
+ considered when testing for an isomorphism.
+
+ edge_match : callable
+ A function that returns True iff the edge attribute dictionary for
+ the pair of nodes (u1, v1) in G1 and (u2, v2) in G2 should be
+ considered equal during the isomorphism test. The function will be
+ called like::
+
+ edge_match(G1[u1][v1], G2[u2][v2])
+
+ That is, the function will receive the edge attribute dictionaries
+ of the edges under consideration. If None, then no attributes are
+ considered when testing for an isomorphism.
+
+ """
+ vf2.DiGraphMatcher.__init__(self, G1, G2)
+
+ self.node_match = node_match
+ self.edge_match = edge_match
+
+ # These will be modified during checks to minimize code repeat.
+ self.G1_adj = self.G1.adj
+ self.G2_adj = self.G2.adj
+
+
+ def semantic_feasibility(self, G1_node, G2_node):
+ """Returns True if mapping G1_node to G2_node is semantically feasible."""
+
+ # Test node_match and also test edge_match on successors
+ feasible = _semantic_feasibility(self, G1_node, G2_node)
+ if not feasible:
+ return False
+
+ # Test edge_match on predecessors
+ self.G1_adj = self.G1.pred
+ self.G2_adj = self.G2.pred
+ feasible = _semantic_feasibility(self, G1_node, G2_node)
+ self.G1_adj = self.G1.adj
+ self.G2_adj = self.G2.adj
+
+ return feasible
+
+## The "semantics" of edge_match are different for multi(di)graphs, but
+## the implementation is the same. So, technically we do not need to
+## provide "multi" versions, but we do so to match NetworkX's base classes.
+
+class MultiGraphMatcher(GraphMatcher):
+ """VF2 isomorphism checker for undirected multigraphs. """
+ pass
+
+class MultiDiGraphMatcher(DiGraphMatcher):
+ """VF2 isomorphism checker for directed multigraphs. """
+ pass
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/link_analysis/__init__.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/link_analysis/__init__.py
new file mode 100644
index 0000000..d7e67c3
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/link_analysis/__init__.py
@@ -0,0 +1,2 @@
+from networkx.algorithms.link_analysis.pagerank_alg import *
+from networkx.algorithms.link_analysis.hits_alg import *
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/link_analysis/hits_alg.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/link_analysis/hits_alg.py
new file mode 100644
index 0000000..5b707d7
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/link_analysis/hits_alg.py
@@ -0,0 +1,308 @@
+"""Hubs and authorities analysis of graph structure.
+"""
+# Copyright (C) 2008-2012 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+# NetworkX:http://networkx.lanl.gov/
+import networkx as nx
+from networkx.exception import NetworkXError
+__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
+__all__ = ['hits','hits_numpy','hits_scipy','authority_matrix','hub_matrix']
+
+def hits(G,max_iter=100,tol=1.0e-8,nstart=None,normalized=True):
+ """Return HITS hubs and authorities values for nodes.
+
+ The HITS algorithm computes two numbers for a node.
+ Authorities estimates the node value based on the incoming links.
+ Hubs estimates the node value based on outgoing links.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph
+
+    max_iter : integer, optional
+ Maximum number of iterations in power method.
+
+ tol : float, optional
+ Error tolerance used to check convergence in power method iteration.
+
+ nstart : dictionary, optional
+ Starting value of each node for power method iteration.
+
+ normalized : bool (default=True)
+ Normalize results by the sum of all of the values.
+
+ Returns
+ -------
+ (hubs,authorities) : two-tuple of dictionaries
+ Two dictionaries keyed by node containing the hub and authority
+ values.
+
+ Examples
+ --------
+ >>> G=nx.path_graph(4)
+ >>> h,a=nx.hits(G)
+
+ Notes
+ -----
+ The eigenvector calculation is done by the power iteration method
+ and has no guarantee of convergence. The iteration will stop
+ after max_iter iterations or an error tolerance of
+ number_of_nodes(G)*tol has been reached.
+
+ The HITS algorithm was designed for directed graphs but this
+ algorithm does not check if the input graph is directed and will
+ execute on undirected graphs.
+
+ References
+ ----------
+ .. [1] A. Langville and C. Meyer,
+ "A survey of eigenvector methods of web information retrieval."
+ http://citeseer.ist.psu.edu/713792.html
+ .. [2] Jon Kleinberg,
+ Authoritative sources in a hyperlinked environment
+       Journal of the ACM 46 (5): 604-632, 1999.
+ doi:10.1145/324133.324140.
+ http://www.cs.cornell.edu/home/kleinber/auth.pdf.
+ """
+ if type(G) == nx.MultiGraph or type(G) == nx.MultiDiGraph:
+ raise Exception("hits() not defined for graphs with multiedges.")
+ if len(G) == 0:
+ return {},{}
+ # choose fixed starting vector if not given
+ if nstart is None:
+ h=dict.fromkeys(G,1.0/G.number_of_nodes())
+ else:
+ h=nstart
+ # normalize starting vector
+ s=1.0/sum(h.values())
+ for k in h:
+ h[k]*=s
+ i=0
+ while True: # power iteration: make up to max_iter iterations
+ hlast=h
+ h=dict.fromkeys(hlast.keys(),0)
+ a=dict.fromkeys(hlast.keys(),0)
+ # this "matrix multiply" looks odd because it is
+ # doing a left multiply a^T=hlast^T*G
+ for n in h:
+ for nbr in G[n]:
+ a[nbr]+=hlast[n]*G[n][nbr].get('weight',1)
+ # now multiply h=Ga
+ for n in h:
+ for nbr in G[n]:
+ h[n]+=a[nbr]*G[n][nbr].get('weight',1)
+ # normalize vector
+ s=1.0/max(h.values())
+ for n in h: h[n]*=s
+ # normalize vector
+ s=1.0/max(a.values())
+ for n in a: a[n]*=s
+ # check convergence, l1 norm
+ err=sum([abs(h[n]-hlast[n]) for n in h])
+ if err < tol:
+ break
+ if i>max_iter:
+ raise NetworkXError(\
+ "HITS: power iteration failed to converge in %d iterations."%(i+1))
+ i+=1
+ if normalized:
+ s = 1.0/sum(a.values())
+ for n in a:
+ a[n] *= s
+ s = 1.0/sum(h.values())
+ for n in h:
+ h[n] *= s
+ return h,a
+
+def authority_matrix(G,nodelist=None):
+ """Return the HITS authority matrix."""
+ M=nx.to_numpy_matrix(G,nodelist=nodelist)
+ return M.T*M
+
+def hub_matrix(G,nodelist=None):
+ """Return the HITS hub matrix."""
+ M=nx.to_numpy_matrix(G,nodelist=nodelist)
+ return M*M.T
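+
+# Note: for the same adjacency matrix M, hub_matrix(G) == M * M.T and
+# authority_matrix(G) == M.T * M share the same nonzero eigenvalues; the
+# principal eigenvector of the former gives hub scores and that of the
+# latter gives authority scores, with h proportional to M * a.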
+
+def hits_numpy(G,normalized=True):
+ """Return HITS hubs and authorities values for nodes.
+
+ The HITS algorithm computes two numbers for a node.
+ Authorities estimates the node value based on the incoming links.
+ Hubs estimates the node value based on outgoing links.
+
+ Parameters
+ -----------
+ G : graph
+ A NetworkX graph
+
+ normalized : bool (default=True)
+ Normalize results by the sum of all of the values.
+
+ Returns
+ -------
+ (hubs,authorities) : two-tuple of dictionaries
+ Two dictionaries keyed by node containing the hub and authority
+ values.
+
+ Examples
+ --------
+ >>> G=nx.path_graph(4)
+    >>> h,a=nx.hits_numpy(G)
+
+ Notes
+ -----
+ The eigenvector calculation uses NumPy's interface to LAPACK.
+
+ The HITS algorithm was designed for directed graphs but this
+ algorithm does not check if the input graph is directed and will
+ execute on undirected graphs.
+
+ References
+ ----------
+ .. [1] A. Langville and C. Meyer,
+ "A survey of eigenvector methods of web information retrieval."
+ http://citeseer.ist.psu.edu/713792.html
+ .. [2] Jon Kleinberg,
+ Authoritative sources in a hyperlinked environment
+ Journal of the ACM 46 (5): 604-32, 1999.
+ doi:10.1145/324133.324140.
+ http://www.cs.cornell.edu/home/kleinber/auth.pdf.
+ """
+ try:
+ import numpy as np
+ except ImportError:
+ raise ImportError(\
+ "hits_numpy() requires NumPy: http://scipy.org/")
+ if len(G) == 0:
+ return {},{}
+ H=nx.hub_matrix(G,G.nodes())
+ e,ev=np.linalg.eig(H)
+ m=e.argsort()[-1] # index of maximum eigenvalue
+ h=np.array(ev[:,m]).flatten()
+ A=nx.authority_matrix(G,G.nodes())
+ e,ev=np.linalg.eig(A)
+ m=e.argsort()[-1] # index of maximum eigenvalue
+ a=np.array(ev[:,m]).flatten()
+ if normalized:
+ h = h/h.sum()
+ a = a/a.sum()
+ else:
+ h = h/h.max()
+ a = a/a.max()
+ hubs=dict(zip(G.nodes(),map(float,h)))
+ authorities=dict(zip(G.nodes(),map(float,a)))
+ return hubs,authorities
+
+def hits_scipy(G,max_iter=100,tol=1.0e-6,normalized=True):
+ """Return HITS hubs and authorities values for nodes.
+
+ The HITS algorithm computes two numbers for a node.
+ Authorities estimates the node value based on the incoming links.
+ Hubs estimates the node value based on outgoing links.
+
+ Parameters
+ -----------
+ G : graph
+ A NetworkX graph
+
+    max_iter : integer, optional
+ Maximum number of iterations in power method.
+
+ tol : float, optional
+ Error tolerance used to check convergence in power method iteration.
+
+ normalized : bool (default=True)
+ Normalize results by the sum of all of the values.
+
+ Returns
+ -------
+ (hubs,authorities) : two-tuple of dictionaries
+ Two dictionaries keyed by node containing the hub and authority
+ values.
+
+ Examples
+ --------
+ >>> G=nx.path_graph(4)
+    >>> h,a=nx.hits_scipy(G)
+
+ Notes
+ -----
+ This implementation uses SciPy sparse matrices.
+
+ The eigenvector calculation is done by the power iteration method
+ and has no guarantee of convergence. The iteration will stop
+ after max_iter iterations or an error tolerance of
+ number_of_nodes(G)*tol has been reached.
+
+ The HITS algorithm was designed for directed graphs but this
+ algorithm does not check if the input graph is directed and will
+ execute on undirected graphs.
+
+ References
+ ----------
+ .. [1] A. Langville and C. Meyer,
+ "A survey of eigenvector methods of web information retrieval."
+ http://citeseer.ist.psu.edu/713792.html
+ .. [2] Jon Kleinberg,
+ Authoritative sources in a hyperlinked environment
+ Journal of the ACM 46 (5): 604-632, 1999.
+ doi:10.1145/324133.324140.
+ http://www.cs.cornell.edu/home/kleinber/auth.pdf.
+ """
+ try:
+ import scipy.sparse
+ import numpy as np
+ except ImportError:
+ raise ImportError(\
+ "hits_scipy() requires SciPy: http://scipy.org/")
+ if len(G) == 0:
+ return {},{}
+ M=nx.to_scipy_sparse_matrix(G,nodelist=G.nodes())
+ (n,m)=M.shape # should be square
+ A=M.T*M # authority matrix
+ x=scipy.ones((n,1))/n # initial guess
+ # power iteration on authority matrix
+ i=0
+ while True:
+ xlast=x
+ x=A*x
+ x=x/x.max()
+ # check convergence, l1 norm
+ err=scipy.absolute(x-xlast).sum()
+ if err < tol:
+ break
+ if i>max_iter:
+ raise NetworkXError(\
+ "HITS: power iteration failed to converge in %d iterations."%(i+1))
+ i+=1
+
+ a=np.asarray(x).flatten()
+    # hubs follow from authorities: h = M * a
+ h=np.asarray(M*a).flatten()
+ if normalized:
+ h = h/h.sum()
+ a = a/a.sum()
+ hubs=dict(zip(G.nodes(),map(float,h)))
+ authorities=dict(zip(G.nodes(),map(float,a)))
+ return hubs,authorities
+
+# fixture for nose tests
+def setup_module(module):
+ from nose import SkipTest
+ try:
+ import numpy
+    except ImportError:
+ raise SkipTest("NumPy not available")
+ try:
+ import scipy
+    except ImportError:
+ raise SkipTest("SciPy not available")
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/link_analysis/pagerank_alg.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/link_analysis/pagerank_alg.py
new file mode 100644
index 0000000..244350a
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/link_analysis/pagerank_alg.py
@@ -0,0 +1,399 @@
+"""PageRank analysis of graph structure. """
+# Copyright (C) 2004-2011 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+# NetworkX:http://networkx.lanl.gov/
+import networkx as nx
+from networkx.exception import NetworkXError
+__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
+__all__ = ['pagerank','pagerank_numpy','pagerank_scipy','google_matrix']
+
+def pagerank(G,alpha=0.85,personalization=None,
+ max_iter=100,tol=1.0e-8,nstart=None,weight='weight'):
+ """Return the PageRank of the nodes in the graph.
+
+ PageRank computes a ranking of the nodes in the graph G based on
+ the structure of the incoming links. It was originally designed as
+ an algorithm to rank web pages.
+
+ Parameters
+ -----------
+ G : graph
+ A NetworkX graph
+
+ alpha : float, optional
+ Damping parameter for PageRank, default=0.85
+
+    personalization: dict, optional
+        The "personalization vector", given as a dictionary keyed by every
+        graph node, with a nonzero personalization value for each node.
+
+ max_iter : integer, optional
+ Maximum number of iterations in power method eigenvalue solver.
+
+ tol : float, optional
+ Error tolerance used to check convergence in power method solver.
+
+ nstart : dictionary, optional
+ Starting value of PageRank iteration for each node.
+
+ weight : key, optional
+ Edge data key to use as weight. If None weights are set to 1.
+
+ Returns
+ -------
+ pagerank : dictionary
+ Dictionary of nodes with PageRank as value
+
+ Examples
+ --------
+ >>> G=nx.DiGraph(nx.path_graph(4))
+ >>> pr=nx.pagerank(G,alpha=0.9)
+
+ Notes
+ -----
+ The eigenvector calculation is done by the power iteration method
+ and has no guarantee of convergence. The iteration will stop
+ after max_iter iterations or an error tolerance of
+ number_of_nodes(G)*tol has been reached.
+
+ The PageRank algorithm was designed for directed graphs but this
+ algorithm does not check if the input graph is directed and will
+ execute on undirected graphs by converting each oriented edge in the
+ directed graph to two edges.
+
+ See Also
+ --------
+ pagerank_numpy, pagerank_scipy, google_matrix
+
+ References
+ ----------
+ .. [1] A. Langville and C. Meyer,
+ "A survey of eigenvector methods of web information retrieval."
+ http://citeseer.ist.psu.edu/713792.html
+ .. [2] Page, Lawrence; Brin, Sergey; Motwani, Rajeev and Winograd, Terry,
+ The PageRank citation ranking: Bringing order to the Web. 1999
+ http://dbpubs.stanford.edu:8090/pub/showDoc.Fulltext?lang=en&doc=1999-66&format=pdf
+ """
+ if type(G) == nx.MultiGraph or type(G) == nx.MultiDiGraph:
+ raise Exception("pagerank() not defined for graphs with multiedges.")
+
+ if len(G) == 0:
+ return {}
+
+ if not G.is_directed():
+ D=G.to_directed()
+ else:
+ D=G
+
+ # create a copy in (right) stochastic form
+ W=nx.stochastic_graph(D, weight=weight)
+ scale=1.0/W.number_of_nodes()
+
+ # choose fixed starting vector if not given
+ if nstart is None:
+ x=dict.fromkeys(W,scale)
+ else:
+ x=nstart
+ # normalize starting vector to 1
+ s=1.0/sum(x.values())
+ for k in x: x[k]*=s
+
+ # assign uniform personalization/teleportation vector if not given
+ if personalization is None:
+ p=dict.fromkeys(W,scale)
+ else:
+ p=personalization
+ # normalize starting vector to 1
+ s=1.0/sum(p.values())
+ for k in p:
+ p[k]*=s
+ if set(p)!=set(G):
+ raise NetworkXError('Personalization vector '
+ 'must have a value for every node')
+
+
+ # "dangling" nodes, no links out from them
+ out_degree=W.out_degree()
+ dangle=[n for n in W if out_degree[n]==0.0]
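+    # At each iteration the rank mass on dangling nodes is redistributed
+    # uniformly (the danglesum term below), mimicking a transition matrix
+    # whose dangling rows are uniform.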
+ i=0
+ while True: # power iteration: make up to max_iter iterations
+ xlast=x
+ x=dict.fromkeys(xlast.keys(),0)
+ danglesum=alpha*scale*sum(xlast[n] for n in dangle)
+ for n in x:
+ # this matrix multiply looks odd because it is
+ # doing a left multiply x^T=xlast^T*W
+ for nbr in W[n]:
+ x[nbr]+=alpha*xlast[n]*W[n][nbr][weight]
+ x[n]+=danglesum+(1.0-alpha)*p[n]
+ # normalize vector
+ s=1.0/sum(x.values())
+ for n in x:
+ x[n]*=s
+ # check convergence, l1 norm
+ err=sum([abs(x[n]-xlast[n]) for n in x])
+ if err < tol:
+ break
+ if i>max_iter:
+ raise NetworkXError('pagerank: power iteration failed to converge '
+ 'in %d iterations.'%(i-1))
+ i+=1
+ return x
+
+
+def google_matrix(G, alpha=0.85, personalization=None,
+ nodelist=None, weight='weight'):
+ """Return the Google matrix of the graph.
+
+ Parameters
+ -----------
+ G : graph
+ A NetworkX graph
+
+ alpha : float
+ The damping factor
+
+    personalization: dict, optional
+        The "personalization vector", given as a dictionary keyed by every
+        graph node, with a nonzero personalization value for each node.
+
+ nodelist : list, optional
+ The rows and columns are ordered according to the nodes in nodelist.
+ If nodelist is None, then the ordering is produced by G.nodes().
+
+ weight : key, optional
+ Edge data key to use as weight. If None weights are set to 1.
+
+ Returns
+ -------
+ A : NumPy matrix
+ Google matrix of the graph
+
+ See Also
+ --------
+ pagerank, pagerank_numpy, pagerank_scipy
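+
+    Examples
+    --------
+    A minimal sketch that checks only the shape (entries depend on alpha):
+
+    >>> G = nx.DiGraph(nx.path_graph(2))
+    >>> google_matrix(G, alpha=0.85).shape
+    (2, 2)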
+ """
+ try:
+ import numpy as np
+ except ImportError:
+ raise ImportError(\
+ "google_matrix() requires NumPy: http://scipy.org/")
+ # choose ordering in matrix
+ if personalization is None: # use G.nodes() ordering
+ nodelist=G.nodes()
+ else: # use personalization "vector" ordering
+ nodelist=personalization.keys()
+ if set(nodelist)!=set(G):
+        raise NetworkXError('Personalization vector dictionary '
+                            'must have a value for every node')
+ M=nx.to_numpy_matrix(G,nodelist=nodelist,weight=weight)
+ (n,m)=M.shape # should be square
+ if n == 0:
+ return M
+ # add constant to dangling nodes' row
+ dangling=np.where(M.sum(axis=1)==0)
+ for d in dangling[0]:
+ M[d]=1.0/n
+ # normalize
+ M=M/M.sum(axis=1)
+ # add "teleportation"/personalization
+ e=np.ones((n))
+ if personalization is not None:
+ v=np.array(list(personalization.values()),dtype=float)
+ else:
+ v=e
+ v=v/v.sum()
+ P=alpha*M+(1-alpha)*np.outer(e,v)
+ return P
+
+
+def pagerank_numpy(G, alpha=0.85, personalization=None, weight='weight'):
+ """Return the PageRank of the nodes in the graph.
+
+ PageRank computes a ranking of the nodes in the graph G based on
+ the structure of the incoming links. It was originally designed as
+ an algorithm to rank web pages.
+
+ Parameters
+ -----------
+ G : graph
+ A NetworkX graph
+
+ alpha : float, optional
+ Damping parameter for PageRank, default=0.85
+
+    personalization: dict, optional
+        The "personalization vector", given as a dictionary keyed by every
+        graph node, with a nonzero personalization value for each node.
+
+ weight : key, optional
+ Edge data key to use as weight. If None weights are set to 1.
+
+ Returns
+ -------
+ pagerank : dictionary
+ Dictionary of nodes with PageRank as value
+
+ Examples
+ --------
+ >>> G=nx.DiGraph(nx.path_graph(4))
+ >>> pr=nx.pagerank_numpy(G,alpha=0.9)
+
+ Notes
+ -----
+ The eigenvector calculation uses NumPy's interface to the LAPACK
+ eigenvalue solvers. This will be the fastest and most accurate
+ for small graphs.
+
+ This implementation works with Multi(Di)Graphs.
+
+ See Also
+ --------
+ pagerank, pagerank_scipy, google_matrix
+
+ References
+ ----------
+ .. [1] A. Langville and C. Meyer,
+ "A survey of eigenvector methods of web information retrieval."
+ http://citeseer.ist.psu.edu/713792.html
+ .. [2] Page, Lawrence; Brin, Sergey; Motwani, Rajeev and Winograd, Terry,
+ The PageRank citation ranking: Bringing order to the Web. 1999
+ http://dbpubs.stanford.edu:8090/pub/showDoc.Fulltext?lang=en&doc=1999-66&format=pdf
+ """
+ try:
+ import numpy as np
+ except ImportError:
+ raise ImportError("pagerank_numpy() requires NumPy: http://scipy.org/")
+ if len(G) == 0:
+ return {}
+ # choose ordering in matrix
+ if personalization is None: # use G.nodes() ordering
+ nodelist=G.nodes()
+ else: # use personalization "vector" ordering
+ nodelist=personalization.keys()
+ M=google_matrix(G, alpha, personalization=personalization,
+ nodelist=nodelist, weight=weight)
+ # use numpy LAPACK solver
+ eigenvalues,eigenvectors=np.linalg.eig(M.T)
+ ind=eigenvalues.argsort()
+ # eigenvector of largest eigenvalue at ind[-1], normalized
+ largest=np.array(eigenvectors[:,ind[-1]]).flatten().real
+ norm=float(largest.sum())
+ centrality=dict(zip(nodelist,map(float,largest/norm)))
+ return centrality
+
+
+def pagerank_scipy(G, alpha=0.85, personalization=None,
+ max_iter=100, tol=1.0e-6, weight='weight'):
+ """Return the PageRank of the nodes in the graph.
+
+ PageRank computes a ranking of the nodes in the graph G based on
+ the structure of the incoming links. It was originally designed as
+ an algorithm to rank web pages.
+
+ Parameters
+ -----------
+ G : graph
+ A NetworkX graph
+
+ alpha : float, optional
+ Damping parameter for PageRank, default=0.85
+
+    personalization: dict, optional
+        The "personalization vector", given as a dictionary keyed by every
+        graph node, with a nonzero personalization value for each node.
+
+ max_iter : integer, optional
+ Maximum number of iterations in power method eigenvalue solver.
+
+ tol : float, optional
+ Error tolerance used to check convergence in power method solver.
+
+ weight : key, optional
+ Edge data key to use as weight. If None weights are set to 1.
+
+ Returns
+ -------
+ pagerank : dictionary
+ Dictionary of nodes with PageRank as value
+
+ Examples
+ --------
+ >>> G=nx.DiGraph(nx.path_graph(4))
+ >>> pr=nx.pagerank_scipy(G,alpha=0.9)
+
+ Notes
+ -----
+ The eigenvector calculation uses power iteration with a SciPy
+ sparse matrix representation.
+
+ See Also
+ --------
+ pagerank, pagerank_numpy, google_matrix
+
+ References
+ ----------
+ .. [1] A. Langville and C. Meyer,
+ "A survey of eigenvector methods of web information retrieval."
+ http://citeseer.ist.psu.edu/713792.html
+ .. [2] Page, Lawrence; Brin, Sergey; Motwani, Rajeev and Winograd, Terry,
+ The PageRank citation ranking: Bringing order to the Web. 1999
+ http://dbpubs.stanford.edu:8090/pub/showDoc.Fulltext?lang=en&doc=1999-66&format=pdf
+ """
+ try:
+ import scipy.sparse
+ except ImportError:
+ raise ImportError("pagerank_scipy() requires SciPy: http://scipy.org/")
+ if len(G) == 0:
+ return {}
+ # choose ordering in matrix
+ if personalization is None: # use G.nodes() ordering
+ nodelist=G.nodes()
+ else: # use personalization "vector" ordering
+ nodelist=personalization.keys()
+ M=nx.to_scipy_sparse_matrix(G,nodelist=nodelist,weight=weight,dtype='f')
+ (n,m)=M.shape # should be square
+ S=scipy.array(M.sum(axis=1)).flatten()
+# for i, j, v in zip( *scipy.sparse.find(M) ):
+# M[i,j] = v / S[i]
+ S[S>0] = 1.0 / S[S>0]
+ Q = scipy.sparse.spdiags(S.T, 0, *M.shape, format='csr')
+ M = Q * M
+ x=scipy.ones((n))/n # initial guess
+ dangle=scipy.array(scipy.where(M.sum(axis=1)==0,1.0/n,0)).flatten()
+ # add "teleportation"/personalization
+ if personalization is not None:
+ v=scipy.array(list(personalization.values()),dtype=float)
+ v=v/v.sum()
+ else:
+ v=x
+ i=0
+ while i <= max_iter:
+ # power iteration: make up to max_iter iterations
+ xlast=x
+ x=alpha*(x*M+scipy.dot(dangle,xlast))+(1-alpha)*v
+ x=x/x.sum()
+ # check convergence, l1 norm
+ err=scipy.absolute(x-xlast).sum()
+ if err < n*tol:
+ return dict(zip(nodelist,map(float,x)))
+ i+=1
+    raise NetworkXError('pagerank_scipy: power iteration failed to converge '
+                        'in %d iterations.'%(i+1))
+
+
+# fixture for nose tests
+def setup_module(module):
+ from nose import SkipTest
+ try:
+ import numpy
+    except ImportError:
+ raise SkipTest("NumPy not available")
+ try:
+ import scipy
+    except ImportError:
+ raise SkipTest("SciPy not available")
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/link_analysis/tests/test_hits.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/link_analysis/tests/test_hits.py
new file mode 100644
index 0000000..7092d1d
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/link_analysis/tests/test_hits.py
@@ -0,0 +1,93 @@
+#!/usr/bin/env python
+from nose.tools import *
+from nose import SkipTest
+from nose.plugins.attrib import attr
+import networkx
+
+# Example from
+# A. Langville and C. Meyer, "A survey of eigenvector methods of web
+# information retrieval." http://citeseer.ist.psu.edu/713792.html
+
+
+class TestHITS:
+
+ def setUp(self):
+
+ G=networkx.DiGraph()
+
+ edges=[(1,3),(1,5),\
+ (2,1),\
+ (3,5),\
+ (5,4),(5,3),\
+ (6,5)]
+
+ G.add_edges_from(edges,weight=1)
+ self.G=G
+ self.G.a=dict(zip(G,[0.000000, 0.000000, 0.366025,
+ 0.133975, 0.500000, 0.000000]))
+ self.G.h=dict(zip(G,[ 0.366025, 0.000000, 0.211325,
+ 0.000000, 0.211325, 0.211325]))
+
+
+ def test_hits(self):
+ G=self.G
+ h,a=networkx.hits(G,tol=1.e-08)
+ for n in G:
+ assert_almost_equal(h[n],G.h[n],places=4)
+ for n in G:
+ assert_almost_equal(a[n],G.a[n],places=4)
+
+ def test_hits_nstart(self):
+ G = self.G
+ nstart = dict([(i, 1./2) for i in G])
+ h, a = networkx.hits(G, nstart = nstart)
+
+ @attr('numpy')
+ def test_hits_numpy(self):
+ try:
+ import numpy as np
+ except ImportError:
+ raise SkipTest('NumPy not available.')
+
+
+ G=self.G
+ h,a=networkx.hits_numpy(G)
+ for n in G:
+ assert_almost_equal(h[n],G.h[n],places=4)
+ for n in G:
+ assert_almost_equal(a[n],G.a[n],places=4)
+
+
+ def test_hits_scipy(self):
+ try:
+ import scipy as sp
+ except ImportError:
+ raise SkipTest('SciPy not available.')
+
+ G=self.G
+ h,a=networkx.hits_scipy(G,tol=1.e-08)
+ for n in G:
+ assert_almost_equal(h[n],G.h[n],places=4)
+ for n in G:
+ assert_almost_equal(a[n],G.a[n],places=4)
+
+
+ @attr('numpy')
+ def test_empty(self):
+ try:
+ import numpy
+ except ImportError:
+ raise SkipTest('numpy not available.')
+ G=networkx.Graph()
+ assert_equal(networkx.hits(G),({},{}))
+ assert_equal(networkx.hits_numpy(G),({},{}))
+ assert_equal(networkx.authority_matrix(G).shape,(0,0))
+ assert_equal(networkx.hub_matrix(G).shape,(0,0))
+
+ def test_empty_scipy(self):
+ try:
+ import scipy
+ except ImportError:
+ raise SkipTest('scipy not available.')
+ G=networkx.Graph()
+ assert_equal(networkx.hits_scipy(G),({},{}))
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/link_analysis/tests/test_pagerank.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/link_analysis/tests/test_pagerank.py
new file mode 100644
index 0000000..7b409ff
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/link_analysis/tests/test_pagerank.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python
+from nose.tools import *
+from nose import SkipTest
+from nose.plugins.attrib import attr
+import random
+import networkx
+
+# Example from
+# A. Langville and C. Meyer, "A survey of eigenvector methods of web
+# information retrieval." http://citeseer.ist.psu.edu/713792.html
+
+
+class TestPageRank:
+
+ def setUp(self):
+ G=networkx.DiGraph()
+ edges=[(1,2),(1,3),\
+ (3,1),(3,2),(3,5),\
+ (4,5),(4,6),\
+ (5,4),(5,6),\
+ (6,4)]
+ G.add_edges_from(edges)
+ self.G=G
+ self.G.pagerank=dict(zip(G,
+ [0.03721197,0.05395735,0.04150565,
+ 0.37508082,0.20599833, 0.28624589]))
+
+ def test_pagerank(self):
+ G=self.G
+ p=networkx.pagerank(G,alpha=0.9,tol=1.e-08)
+ for n in G:
+ assert_almost_equal(p[n],G.pagerank[n],places=4)
+
+ nstart = dict((n,random.random()) for n in G)
+ p=networkx.pagerank(G,alpha=0.9,tol=1.e-08, nstart=nstart)
+ for n in G:
+ assert_almost_equal(p[n],G.pagerank[n],places=4)
+
+ assert_raises(networkx.NetworkXError,networkx.pagerank,G,
+ max_iter=0)
+
+
+ @attr('numpy')
+ def test_numpy_pagerank(self):
+ try:
+ import numpy
+ except ImportError:
+ raise SkipTest('numpy not available.')
+ G=self.G
+ p=networkx.pagerank_numpy(G,alpha=0.9)
+ for n in G:
+ assert_almost_equal(p[n],G.pagerank[n],places=4)
+ personalize = dict((n,random.random()) for n in G)
+ p=networkx.pagerank_numpy(G,alpha=0.9, personalization=personalize)
+
+
+
+ @attr('numpy')
+ def test_google_matrix(self):
+ try:
+ import numpy.linalg
+ except ImportError:
+ raise SkipTest('numpy not available.')
+ G=self.G
+ M=networkx.google_matrix(G,alpha=0.9)
+ e,ev=numpy.linalg.eig(M.T)
+ p=numpy.array(ev[:,0]/ev[:,0].sum())[:,0]
+ for (a,b) in zip(p,self.G.pagerank.values()):
+ assert_almost_equal(a,b)
+
+ personalize = dict((n,random.random()) for n in G)
+ M=networkx.google_matrix(G,alpha=0.9, personalization=personalize)
+ _ = personalize.pop(1)
+ assert_raises(networkx.NetworkXError,networkx.google_matrix,G,
+ personalization=personalize)
+
+ def test_scipy_pagerank(self):
+ G=self.G
+ try:
+ import scipy
+ except ImportError:
+ raise SkipTest('scipy not available.')
+ p=networkx.pagerank_scipy(G,alpha=0.9,tol=1.e-08)
+ for n in G:
+ assert_almost_equal(p[n],G.pagerank[n],places=4)
+ personalize = dict((n,random.random()) for n in G)
+ p=networkx.pagerank_scipy(G,alpha=0.9,tol=1.e-08,
+ personalization=personalize)
+
+ assert_raises(networkx.NetworkXError,networkx.pagerank_scipy,G,
+ max_iter=0)
+
+ def test_personalization(self):
+ G=networkx.complete_graph(4)
+ personalize={0:1,1:1,2:4,3:4}
+ answer={0:0.1,1:0.1,2:0.4,3:0.4}
+ p=networkx.pagerank(G,alpha=0.0,personalization=personalize)
+ for n in G:
+ assert_almost_equal(p[n],answer[n],places=4)
+ _ = personalize.pop(0)
+ assert_raises(networkx.NetworkXError,networkx.pagerank,G,
+ personalization=personalize)
+
+
+ @attr('numpy')
+ def test_empty(self):
+ try:
+ import numpy
+ except ImportError:
+ raise SkipTest('numpy not available.')
+ G=networkx.Graph()
+ assert_equal(networkx.pagerank(G),{})
+ assert_equal(networkx.pagerank_numpy(G),{})
+ assert_equal(networkx.google_matrix(G).shape,(0,0))
+
+ def test_empty_scipy(self):
+ try:
+ import scipy
+ except ImportError:
+ raise SkipTest('scipy not available.')
+ G=networkx.Graph()
+ assert_equal(networkx.pagerank_scipy(G),{})
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/matching.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/matching.py
new file mode 100644
index 0000000..70c424f
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/matching.py
@@ -0,0 +1,825 @@
+"""
+********
+Matching
+********
+"""
+# Copyright (C) 2004-2008 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+# Copyright (C) 2011 by
+# Nicholas Mancuso <nick.mancuso@gmail.com>
+# All rights reserved.
+# BSD license.
+from itertools import repeat
+__author__ = """\n""".join(['Joris van Rantwijk',
+ 'Nicholas Mancuso (nick.mancuso@gmail.com)'])
+
+__all__ = ['max_weight_matching', 'maximal_matching']
+
+
+def maximal_matching(G):
+ r"""Find a maximal cardinality matching in the graph.
+
+ A matching is a subset of edges in which no node occurs more than once.
+ The cardinality of a matching is the number of matched edges.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ Undirected graph
+
+ Returns
+ -------
+ matching : set
+ A maximal matching of the graph.
+
+ Notes
+ -----
+    The algorithm greedily selects a maximal matching M of the graph G
+    (i.e. no matching strictly containing M exists). It runs in `O(|E|)`
+    time.
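+
+    Examples
+    --------
+    A minimal sketch; on this path graph the greedy edge scan happens to
+    keep the two outer edges:
+
+    >>> import networkx as nx
+    >>> G = nx.path_graph(4)
+    >>> sorted(nx.maximal_matching(G))
+    [(0, 1), (2, 3)]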
+ """
+ matching = set([])
+ edges = set([])
+ for edge in G.edges_iter():
+ # If the edge isn't covered, add it to the matching
+ # then remove neighborhood of u and v from consideration.
+ if edge not in edges:
+ u, v = edge
+ matching.add(edge)
+ edges |= set(G.edges(u))
+ edges |= set(G.edges(v))
+
+ return matching
+
+
+def max_weight_matching(G, maxcardinality=False):
+ """Compute a maximum-weighted matching of G.
+
+ A matching is a subset of edges in which no node occurs more than once.
+ The cardinality of a matching is the number of matched edges.
+ The weight of a matching is the sum of the weights of its edges.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ Undirected graph
+
+ maxcardinality: bool, optional
+ If maxcardinality is True, compute the maximum-cardinality matching
+ with maximum weight among all maximum-cardinality matchings.
+
+ Returns
+ -------
+ mate : dictionary
+ The matching is returned as a dictionary, mate, such that
+ mate[v] == w if node v is matched to node w. Unmatched nodes do not
+ occur as a key in mate.
+
+
+ Notes
+ ------
+    If G has edges with a 'weight' attribute, that value is used as the
+    edge weight; otherwise the weight of every edge is assumed to be 1.
+
+ This function takes time O(number_of_nodes ** 3).
+
+ If all edge weights are integers, the algorithm uses only integer
+ computations. If floating point weights are used, the algorithm
+ could return a slightly suboptimal matching due to numeric
+ precision errors.
+
+ This method is based on the "blossom" method for finding augmenting
+ paths and the "primal-dual" method for finding a matching of maximum
+ weight, both methods invented by Jack Edmonds [1]_.
+
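+    Examples
+    --------
+    A minimal sketch with hypothetical weights; the single heavy edge
+    beats any pairing of the lighter ones:
+
+    >>> import networkx as nx
+    >>> G = nx.Graph()
+    >>> G.add_edge(1, 2, weight=6)
+    >>> G.add_edge(1, 3, weight=2)
+    >>> G.add_edge(2, 3, weight=1)
+    >>> sorted(nx.max_weight_matching(G).items())
+    [(1, 2), (2, 1)]
+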
+ References
+ ----------
+ .. [1] "Efficient Algorithms for Finding Maximum Matching in Graphs",
+ Zvi Galil, ACM Computing Surveys, 1986.
+ """
+ #
+ # The algorithm is taken from "Efficient Algorithms for Finding Maximum
+ # Matching in Graphs" by Zvi Galil, ACM Computing Surveys, 1986.
+ # It is based on the "blossom" method for finding augmenting paths and
+ # the "primal-dual" method for finding a matching of maximum weight, both
+ # methods invented by Jack Edmonds.
+ #
+ # A C program for maximum weight matching by Ed Rothberg was used
+ # extensively to validate this new code.
+ #
+ # Many terms used in the code comments are explained in the paper
+ # by Galil. You will probably need the paper to make sense of this code.
+ #
+
+ class NoNode:
+ """Dummy value which is different from any node."""
+ pass
+
+ class Blossom:
+ """Representation of a non-trivial blossom or sub-blossom."""
+
+ __slots__ = [ 'childs', 'edges', 'mybestedges' ]
+
+ # b.childs is an ordered list of b's sub-blossoms, starting with
+ # the base and going round the blossom.
+
+ # b.edges is the list of b's connecting edges, such that
+ # b.edges[i] = (v, w) where v is a vertex in b.childs[i]
+ # and w is a vertex in b.childs[wrap(i+1)].
+
+ # If b is a top-level S-blossom,
+ # b.mybestedges is a list of least-slack edges to neighbouring
+ # S-blossoms, or None if no such list has been computed yet.
+ # This is used for efficient computation of delta3.
+
+ # Generate the blossom's leaf vertices.
+ def leaves(self):
+ for t in self.childs:
+ if isinstance(t, Blossom):
+ for v in t.leaves():
+ yield v
+ else:
+ yield t
+
+ # Get a list of vertices.
+ gnodes = G.nodes()
+ if not gnodes:
+ return { } # don't bother with empty graphs
+
+ # Find the maximum edge weight.
+ maxweight = 0
+ allinteger = True
+ for i,j,d in G.edges_iter(data=True):
+ wt=d.get('weight',1)
+ if i != j and wt > maxweight:
+ maxweight = wt
+ allinteger = allinteger and (str(type(wt)).split("'")[1]
+ in ('int', 'long'))
+
+ # If v is a matched vertex, mate[v] is its partner vertex.
+ # If v is a single vertex, v does not occur as a key in mate.
+ # Initially all vertices are single; updated during augmentation.
+ mate = { }
+
+ # If b is a top-level blossom,
+ # label.get(b) is None if b is unlabeled (free),
+ # 1 if b is an S-blossom,
+ # 2 if b is a T-blossom.
+ # The label of a vertex is found by looking at the label of its top-level
+ # containing blossom.
+ # If v is a vertex inside a T-blossom, label[v] is 2 iff v is reachable
+ # from an S-vertex outside the blossom.
+ # Labels are assigned during a stage and reset after each augmentation.
+ label = { }
+
+ # If b is a labeled top-level blossom,
+ # labeledge[b] = (v, w) is the edge through which b obtained its label
+ # such that w is a vertex in b, or None if b's base vertex is single.
+ # If w is a vertex inside a T-blossom and label[w] == 2,
+ # labeledge[w] = (v, w) is an edge through which w is reachable from
+ # outside the blossom.
+ labeledge = { }
+
+ # If v is a vertex, inblossom[v] is the top-level blossom to which v
+ # belongs.
+ # If v is a top-level vertex, inblossom[v] == v since v is itself
+ # a (trivial) top-level blossom.
+ # Initially all vertices are top-level trivial blossoms.
+ inblossom = dict(zip(gnodes, gnodes))
+
+ # If b is a sub-blossom,
+ # blossomparent[b] is its immediate parent (sub-)blossom.
+ # If b is a top-level blossom, blossomparent[b] is None.
+ blossomparent = dict(zip(gnodes, repeat(None)))
+
+ # If b is a (sub-)blossom,
+ # blossombase[b] is its base VERTEX (i.e. recursive sub-blossom).
+ blossombase = dict(zip(gnodes, gnodes))
+
+ # If w is a free vertex (or an unreached vertex inside a T-blossom),
+ # bestedge[w] = (v, w) is the least-slack edge from an S-vertex,
+ # or None if there is no such edge.
+ # If b is a (possibly trivial) top-level S-blossom,
+ # bestedge[b] = (v, w) is the least-slack edge to a different S-blossom
+ # (v inside b), or None if there is no such edge.
+ # This is used for efficient computation of delta2 and delta3.
+ bestedge = { }
+
+ # If v is a vertex,
+ # dualvar[v] = 2 * u(v) where u(v) is the v's variable in the dual
+ # optimization problem (if all edge weights are integers, multiplication
+ # by two ensures that all values remain integers throughout the algorithm).
+ # Initially, u(v) = maxweight / 2.
+ dualvar = dict(zip(gnodes, repeat(maxweight)))
+
+ # If b is a non-trivial blossom,
+ # blossomdual[b] = z(b) where z(b) is b's variable in the dual
+ # optimization problem.
+ blossomdual = { }
+
+    # If (v, w) in allowedge or (w, v) in allowedge, then the edge
+ # (v, w) is known to have zero slack in the optimization problem;
+ # otherwise the edge may or may not have zero slack.
+ allowedge = { }
+
+ # Queue of newly discovered S-vertices.
+ queue = [ ]
+
+ # Return 2 * slack of edge (v, w) (does not work inside blossoms).
+ def slack(v, w):
+ return dualvar[v] + dualvar[w] - 2 * G[v][w].get('weight',1)
+
+ # Assign label t to the top-level blossom containing vertex w,
+ # coming through an edge from vertex v.
+ def assignLabel(w, t, v):
+ b = inblossom[w]
+ assert label.get(w) is None and label.get(b) is None
+ label[w] = label[b] = t
+ if v is not None:
+ labeledge[w] = labeledge[b] = (v, w)
+ else:
+ labeledge[w] = labeledge[b] = None
+ bestedge[w] = bestedge[b] = None
+ if t == 1:
+            # b became an S-vertex/blossom; add it (or its leaf vertices) to the queue.
+ if isinstance(b, Blossom):
+ queue.extend(b.leaves())
+ else:
+ queue.append(b)
+ elif t == 2:
+ # b became a T-vertex/blossom; assign label S to its mate.
+ # (If b is a non-trivial blossom, its base is the only vertex
+ # with an external mate.)
+ base = blossombase[b]
+ assignLabel(mate[base], 1, base)
+
+ # Trace back from vertices v and w to discover either a new blossom
+ # or an augmenting path. Return the base vertex of the new blossom,
+ # or NoNode if an augmenting path was found.
+ def scanBlossom(v, w):
+ # Trace back from v and w, placing breadcrumbs as we go.
+ path = [ ]
+ base = NoNode
+ while v is not NoNode:
+ # Look for a breadcrumb in v's blossom or put a new breadcrumb.
+ b = inblossom[v]
+ if label[b] & 4:
+ base = blossombase[b]
+ break
+ assert label[b] == 1
+ path.append(b)
+ label[b] = 5
+ # Trace one step back.
+ if labeledge[b] is None:
+ # The base of blossom b is single; stop tracing this path.
+ assert blossombase[b] not in mate
+ v = NoNode
+ else:
+ assert labeledge[b][0] == mate[blossombase[b]]
+ v = labeledge[b][0]
+ b = inblossom[v]
+ assert label[b] == 2
+ # b is a T-blossom; trace one more step back.
+ v = labeledge[b][0]
+ # Swap v and w so that we alternate between both paths.
+ if w is not NoNode:
+ v, w = w, v
+ # Remove breadcrumbs.
+ for b in path:
+ label[b] = 1
+ # Return base vertex, if we found one.
+ return base
+
+ # Construct a new blossom with given base, through S-vertices v and w.
+ # Label the new blossom as S; set its dual variable to zero;
+ # relabel its T-vertices to S and add them to the queue.
+ def addBlossom(base, v, w):
+ bb = inblossom[base]
+ bv = inblossom[v]
+ bw = inblossom[w]
+ # Create blossom.
+ b = Blossom()
+ blossombase[b] = base
+ blossomparent[b] = None
+ blossomparent[bb] = b
+ # Make list of sub-blossoms and their interconnecting edge endpoints.
+ b.childs = path = [ ]
+ b.edges = edgs = [ (v, w) ]
+ # Trace back from v to base.
+ while bv != bb:
+ # Add bv to the new blossom.
+ blossomparent[bv] = b
+ path.append(bv)
+ edgs.append(labeledge[bv])
+ assert label[bv] == 2 or (label[bv] == 1 and labeledge[bv][0] == mate[blossombase[bv]])
+ # Trace one step back.
+ v = labeledge[bv][0]
+ bv = inblossom[v]
+ # Add base sub-blossom; reverse lists.
+ path.append(bb)
+ path.reverse()
+ edgs.reverse()
+ # Trace back from w to base.
+ while bw != bb:
+ # Add bw to the new blossom.
+ blossomparent[bw] = b
+ path.append(bw)
+ edgs.append((labeledge[bw][1], labeledge[bw][0]))
+ assert label[bw] == 2 or (label[bw] == 1 and labeledge[bw][0] == mate[blossombase[bw]])
+ # Trace one step back.
+ w = labeledge[bw][0]
+ bw = inblossom[w]
+ # Set label to S.
+ assert label[bb] == 1
+ label[b] = 1
+ labeledge[b] = labeledge[bb]
+ # Set dual variable to zero.
+ blossomdual[b] = 0
+ # Relabel vertices.
+ for v in b.leaves():
+ if label[inblossom[v]] == 2:
+ # This T-vertex now turns into an S-vertex because it becomes
+ # part of an S-blossom; add it to the queue.
+ queue.append(v)
+ inblossom[v] = b
+ # Compute b.mybestedges.
+ bestedgeto = { }
+ for bv in path:
+ if isinstance(bv, Blossom):
+ if bv.mybestedges is not None:
+ # Walk this subblossom's least-slack edges.
+ nblist = bv.mybestedges
+ # The sub-blossom won't need this data again.
+ bv.mybestedges = None
+ else:
+ # This subblossom does not have a list of least-slack
+ # edges; get the information from the vertices.
+ nblist = [ (v, w)
+ for v in bv.leaves()
+ for w in G.neighbors_iter(v)
+ if v != w ]
+ else:
+ nblist = [ (bv, w)
+ for w in G.neighbors_iter(bv)
+ if bv != w ]
+ for k in nblist:
+ (i, j) = k
+ if inblossom[j] == b:
+ i, j = j, i
+ bj = inblossom[j]
+ if (bj != b and label.get(bj) == 1 and
+ ((bj not in bestedgeto) or
+ slack(i, j) < slack(*bestedgeto[bj]))):
+ bestedgeto[bj] = k
+ # Forget about least-slack edge of the subblossom.
+ bestedge[bv] = None
+ b.mybestedges = list(bestedgeto.values())
+ # Select bestedge[b].
+ mybestedge = None
+ bestedge[b] = None
+ for k in b.mybestedges:
+ kslack = slack(*k)
+ if mybestedge is None or kslack < mybestslack:
+ mybestedge = k
+ mybestslack = kslack
+ bestedge[b] = mybestedge
+
+ # Expand the given top-level blossom.
+ def expandBlossom(b, endstage):
+ # Convert sub-blossoms into top-level blossoms.
+ for s in b.childs:
+ blossomparent[s] = None
+ if isinstance(s, Blossom):
+ if endstage and blossomdual[s] == 0:
+ # Recursively expand this sub-blossom.
+ expandBlossom(s, endstage)
+ else:
+ for v in s.leaves():
+ inblossom[v] = s
+ else:
+ inblossom[s] = s
+ # If we expand a T-blossom during a stage, its sub-blossoms must be
+ # relabeled.
+ if (not endstage) and label.get(b) == 2:
+ # Start at the sub-blossom through which the expanding
+            # blossom obtained its label, and relabel sub-blossoms until
+ # we reach the base.
+ # Figure out through which sub-blossom the expanding blossom
+ # obtained its label initially.
+ entrychild = inblossom[labeledge[b][1]]
+ # Decide in which direction we will go round the blossom.
+ j = b.childs.index(entrychild)
+ if j & 1:
+ # Start index is odd; go forward and wrap.
+ j -= len(b.childs)
+ jstep = 1
+ else:
+ # Start index is even; go backward.
+ jstep = -1
+ # Move along the blossom until we get to the base.
+ v, w = labeledge[b]
+ while j != 0:
+ # Relabel the T-sub-blossom.
+ if jstep == 1:
+ p, q = b.edges[j]
+ else:
+ q, p = b.edges[j-1]
+ label[w] = None
+ label[q] = None
+ assignLabel(w, 2, v)
+ # Step to the next S-sub-blossom and note its forward edge.
+ allowedge[(p, q)] = allowedge[(q, p)] = True
+ j += jstep
+ if jstep == 1:
+ v, w = b.edges[j]
+ else:
+ w, v = b.edges[j-1]
+ # Step to the next T-sub-blossom.
+ allowedge[(v, w)] = allowedge[(w, v)] = True
+ j += jstep
+ # Relabel the base T-sub-blossom WITHOUT stepping through to
+ # its mate (so don't call assignLabel).
+ bw = b.childs[j]
+ label[w] = label[bw] = 2
+ labeledge[w] = labeledge[bw] = (v, w)
+ bestedge[bw] = None
+ # Continue along the blossom until we get back to entrychild.
+ j += jstep
+ while b.childs[j] != entrychild:
+ # Examine the vertices of the sub-blossom to see whether
+ # it is reachable from a neighbouring S-vertex outside the
+ # expanding blossom.
+ bv = b.childs[j]
+ if label.get(bv) == 1:
+ # This sub-blossom just got label S through one of its
+ # neighbours; leave it be.
+ j += jstep
+ continue
+ if isinstance(bv, Blossom):
+ for v in bv.leaves():
+ if label.get(v):
+ break
+ else:
+ v = bv
+ # If the sub-blossom contains a reachable vertex, assign
+ # label T to the sub-blossom.
+ if label.get(v):
+ assert label[v] == 2
+ assert inblossom[v] == bv
+ label[v] = None
+ label[mate[blossombase[bv]]] = None
+ assignLabel(v, 2, labeledge[v][0])
+ j += jstep
+ # Remove the expanded blossom entirely.
+ label.pop(b, None)
+ labeledge.pop(b, None)
+ bestedge.pop(b, None)
+ del blossomparent[b]
+ del blossombase[b]
+ del blossomdual[b]
+
+ # Swap matched/unmatched edges over an alternating path through blossom b
+ # between vertex v and the base vertex. Keep blossom bookkeeping consistent.
+ def augmentBlossom(b, v):
+ # Bubble up through the blossom tree from vertex v to an immediate
+ # sub-blossom of b.
+ t = v
+ while blossomparent[t] != b:
+ t = blossomparent[t]
+ # Recursively deal with the first sub-blossom.
+ if isinstance(t, Blossom):
+ augmentBlossom(t, v)
+ # Decide in which direction we will go round the blossom.
+ i = j = b.childs.index(t)
+ if i & 1:
+ # Start index is odd; go forward and wrap.
+ j -= len(b.childs)
+ jstep = 1
+ else:
+ # Start index is even; go backward.
+ jstep = -1
+ # Move along the blossom until we get to the base.
+ while j != 0:
+ # Step to the next sub-blossom and augment it recursively.
+ j += jstep
+ t = b.childs[j]
+ if jstep == 1:
+ w, x = b.edges[j]
+ else:
+ x, w = b.edges[j-1]
+ if isinstance(t, Blossom):
+ augmentBlossom(t, w)
+ # Step to the next sub-blossom and augment it recursively.
+ j += jstep
+ t = b.childs[j]
+ if isinstance(t, Blossom):
+ augmentBlossom(t, x)
+ # Match the edge connecting those sub-blossoms.
+ mate[w] = x
+ mate[x] = w
+ # Rotate the list of sub-blossoms to put the new base at the front.
+ b.childs = b.childs[i:] + b.childs[:i]
+ b.edges = b.edges[i:] + b.edges[:i]
+ blossombase[b] = blossombase[b.childs[0]]
+ assert blossombase[b] == v
+
+ # Swap matched/unmatched edges over an alternating path between two
+ # single vertices. The augmenting path runs through S-vertices v and w.
+ def augmentMatching(v, w):
+ for (s, j) in ((v, w), (w, v)):
+ # Match vertex s to vertex j. Then trace back from s
+ # until we find a single vertex, swapping matched and unmatched
+ # edges as we go.
+ while 1:
+ bs = inblossom[s]
+ assert label[bs] == 1
+ assert (labeledge[bs] is None and blossombase[bs] not in mate) or (labeledge[bs][0] == mate[blossombase[bs]])
+ # Augment through the S-blossom from s to base.
+ if isinstance(bs, Blossom):
+ augmentBlossom(bs, s)
+ # Update mate[s]
+ mate[s] = j
+ # Trace one step back.
+ if labeledge[bs] is None:
+ # Reached single vertex; stop.
+ break
+ t = labeledge[bs][0]
+ bt = inblossom[t]
+ assert label[bt] == 2
+ # Trace one more step back.
+ s, j = labeledge[bt]
+ # Augment through the T-blossom from j to base.
+ assert blossombase[bt] == t
+ if isinstance(bt, Blossom):
+ augmentBlossom(bt, j)
+ # Update mate[j]
+ mate[j] = s
+
+ # Verify that the optimum solution has been reached.
+ def verifyOptimum():
+ if maxcardinality:
+ # Vertices may have negative dual;
+ # find a constant non-negative number to add to all vertex duals.
+ vdualoffset = max(0, -min(dualvar.values()))
+ else:
+ vdualoffset = 0
+ # 0. all dual variables are non-negative
+ assert min(dualvar.values()) + vdualoffset >= 0
+ assert len(blossomdual) == 0 or min(blossomdual.values()) >= 0
+ # 0. all edges have non-negative slack and
+ # 1. all matched edges have zero slack;
+ for i,j,d in G.edges_iter(data=True):
+ wt=d.get('weight',1)
+ if i == j:
+ continue # ignore self-loops
+ s = dualvar[i] + dualvar[j] - 2 * wt
+ iblossoms = [ i ]
+ jblossoms = [ j ]
+ while blossomparent[iblossoms[-1]] is not None:
+ iblossoms.append(blossomparent[iblossoms[-1]])
+ while blossomparent[jblossoms[-1]] is not None:
+ jblossoms.append(blossomparent[jblossoms[-1]])
+ iblossoms.reverse()
+ jblossoms.reverse()
+ for (bi, bj) in zip(iblossoms, jblossoms):
+ if bi != bj:
+ break
+ s += 2 * blossomdual[bi]
+ assert s >= 0
+ if mate.get(i) == j or mate.get(j) == i:
+ assert mate[i] == j and mate[j] == i
+ assert s == 0
+ # 2. all single vertices have zero dual value;
+ for v in gnodes:
+ assert (v in mate) or dualvar[v] + vdualoffset == 0
+ # 3. all blossoms with positive dual value are full.
+ for b in blossomdual:
+ if blossomdual[b] > 0:
+ assert len(b.edges) % 2 == 1
+ for (i, j) in b.edges[1::2]:
+ assert mate[i] == j and mate[j] == i
+ # Ok.
+
+ # Main loop: continue until no further improvement is possible.
+ while 1:
+
+ # Each iteration of this loop is a "stage".
+ # A stage finds an augmenting path and uses that to improve
+ # the matching.
+
+ # Remove labels from top-level blossoms/vertices.
+ label.clear()
+ labeledge.clear()
+
+ # Forget all about least-slack edges.
+ bestedge.clear()
+ for b in blossomdual:
+ b.mybestedges = None
+
+        # Loss of labeling means that we cannot be sure that currently
+        # allowable edges remain allowable throughout this stage.
+ allowedge.clear()
+
+ # Make queue empty.
+ queue[:] = [ ]
+
+ # Label single blossoms/vertices with S and put them in the queue.
+ for v in gnodes:
+ if (v not in mate) and label.get(inblossom[v]) is None:
+ assignLabel(v, 1, None)
+
+
+ # Loop until we succeed in augmenting the matching.
+ augmented = 0
+ while 1:
+
+ # Each iteration of this loop is a "substage".
+ # A substage tries to find an augmenting path;
+ # if found, the path is used to improve the matching and
+ # the stage ends. If there is no augmenting path, the
+ # primal-dual method is used to pump some slack out of
+ # the dual variables.
+
+ # Continue labeling until all vertices which are reachable
+ # through an alternating path have got a label.
+ while queue and not augmented:
+
+ # Take an S vertex from the queue.
+ v = queue.pop()
+ assert label[inblossom[v]] == 1
+
+ # Scan its neighbours:
+ for w in G.neighbors_iter(v):
+ if w == v:
+ continue # ignore self-loops
+ # w is a neighbour to v
+ bv = inblossom[v]
+ bw = inblossom[w]
+ if bv == bw:
+ # this edge is internal to a blossom; ignore it
+ continue
+ if (v, w) not in allowedge:
+ kslack = slack(v, w)
+ if kslack <= 0:
+                            # this edge has zero slack => it is allowable
+ allowedge[(v, w)] = allowedge[(w, v)] = True
+ if (v, w) in allowedge:
+ if label.get(bw) is None:
+ # (C1) w is a free vertex;
+ # label w with T and label its mate with S (R12).
+ assignLabel(w, 2, v)
+ elif label.get(bw) == 1:
+ # (C2) w is an S-vertex (not in the same blossom);
+ # follow back-links to discover either an
+ # augmenting path or a new blossom.
+ base = scanBlossom(v, w)
+ if base is not NoNode:
+ # Found a new blossom; add it to the blossom
+ # bookkeeping and turn it into an S-blossom.
+ addBlossom(base, v, w)
+ else:
+ # Found an augmenting path; augment the
+ # matching and end this stage.
+ augmentMatching(v, w)
+ augmented = 1
+ break
+ elif label.get(w) is None:
+ # w is inside a T-blossom, but w itself has not
+ # yet been reached from outside the blossom;
+ # mark it as reached (we need this to relabel
+ # during T-blossom expansion).
+ assert label[bw] == 2
+ label[w] = 2
+ labeledge[w] = (v, w)
+ elif label.get(bw) == 1:
+ # keep track of the least-slack non-allowable edge to
+ # a different S-blossom.
+ if bestedge.get(bv) is None or kslack < slack(*bestedge[bv]):
+ bestedge[bv] = (v, w)
+ elif label.get(w) is None:
+ # w is a free vertex (or an unreached vertex inside
+ # a T-blossom) but we can not reach it yet;
+ # keep track of the least-slack edge that reaches w.
+ if bestedge.get(w) is None or kslack < slack(*bestedge[w]):
+ bestedge[w] = (v, w)
+
+ if augmented:
+ break
+
+ # There is no augmenting path under these constraints;
+ # compute delta and reduce slack in the optimization problem.
+            # (Note that our vertex dual variables, edge slacks and deltas
+ # are pre-multiplied by two.)
+ deltatype = -1
+ delta = deltaedge = deltablossom = None
+
+            # Compute delta1: the minimum value of any vertex dual.
+ if not maxcardinality:
+ deltatype = 1
+ delta = min(dualvar.values())
+
+ # Compute delta2: the minimum slack on any edge between
+ # an S-vertex and a free vertex.
+ for v in G.nodes_iter():
+ if label.get(inblossom[v]) is None and bestedge.get(v) is not None:
+ d = slack(*bestedge[v])
+ if deltatype == -1 or d < delta:
+ delta = d
+ deltatype = 2
+ deltaedge = bestedge[v]
+
+ # Compute delta3: half the minimum slack on any edge between
+ # a pair of S-blossoms.
+ for b in blossomparent:
+ if ( blossomparent[b] is None and label.get(b) == 1 and
+ bestedge.get(b) is not None ):
+ kslack = slack(*bestedge[b])
+ if allinteger:
+ assert (kslack % 2) == 0
+ d = kslack // 2
+ else:
+ d = kslack / 2.0
+ if deltatype == -1 or d < delta:
+ delta = d
+ deltatype = 3
+ deltaedge = bestedge[b]
+
+ # Compute delta4: minimum z variable of any T-blossom.
+ for b in blossomdual:
+ if ( blossomparent[b] is None and label.get(b) == 2 and
+ (deltatype == -1 or blossomdual[b] < delta) ):
+ delta = blossomdual[b]
+ deltatype = 4
+ deltablossom = b
+
+ if deltatype == -1:
+ # No further improvement possible; max-cardinality optimum
+ # reached. Do a final delta update to make the optimum
+                # verifiable.
+ assert maxcardinality
+ deltatype = 1
+ delta = max(0, min(dualvar.values()))
+
+ # Update dual variables according to delta.
+ for v in gnodes:
+ if label.get(inblossom[v]) == 1:
+ # S-vertex: 2*u = 2*u - 2*delta
+ dualvar[v] -= delta
+ elif label.get(inblossom[v]) == 2:
+ # T-vertex: 2*u = 2*u + 2*delta
+ dualvar[v] += delta
+ for b in blossomdual:
+ if blossomparent[b] is None:
+ if label.get(b) == 1:
+ # top-level S-blossom: z = z + 2*delta
+ blossomdual[b] += delta
+ elif label.get(b) == 2:
+ # top-level T-blossom: z = z - 2*delta
+ blossomdual[b] -= delta
+
+ # Take action at the point where minimum delta occurred.
+ if deltatype == 1:
+ # No further improvement possible; optimum reached.
+ break
+ elif deltatype == 2:
+ # Use the least-slack edge to continue the search.
+ (v, w) = deltaedge
+ assert label[inblossom[v]] == 1
+ allowedge[(v, w)] = allowedge[(w, v)] = True
+ queue.append(v)
+ elif deltatype == 3:
+ # Use the least-slack edge to continue the search.
+ (v, w) = deltaedge
+ allowedge[(v, w)] = allowedge[(w, v)] = True
+ assert label[inblossom[v]] == 1
+ queue.append(v)
+ elif deltatype == 4:
+ # Expand the least-z blossom.
+ expandBlossom(deltablossom, False)
+
+            # End of this substage.
+
+ # Paranoia check that the matching is symmetric.
+ for v in mate:
+ assert mate[mate[v]] == v
+
+ # Stop when no more augmenting path can be found.
+ if not augmented:
+ break
+
+ # End of a stage; expand all S-blossoms which have zero dual.
+ for b in list(blossomdual.keys()):
+ if b not in blossomdual:
+ continue # already expanded
+ if ( blossomparent[b] is None and label.get(b) == 1 and
+ blossomdual[b] == 0 ):
+ expandBlossom(b, True)
+
+ # Verify that we reached the optimum solution (only for integer weights).
+ if allinteger:
+ verifyOptimum()
+
+ return mate
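+
+# A minimal usage sketch (assuming this file provides networkx's standard
+# ``max_weight_matching(G, maxcardinality=False)`` entry point, which
+# returns the ``mate`` dictionary built above): of the two edges sharing
+# node 'b', only the heavier one can be matched.
+#
+#     >>> G = nx.Graph()
+#     >>> G.add_edge('a', 'b', weight=2)
+#     >>> G.add_edge('b', 'c', weight=5)
+#     >>> nx.max_weight_matching(G) == {'b': 'c', 'c': 'b'}
+#     True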
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/mis.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/mis.py
new file mode 100644
index 0000000..d02c2d5
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/mis.py
@@ -0,0 +1,81 @@
+# -*- coding: utf-8 -*-
+# $Id: maximalIndependentSet.py 576 2011-03-01 05:50:34Z lleeoo $
+"""
+Algorithm to find a maximal (not maximum) independent set.
+
+"""
+# Leo Lopes <leo.lopes@monash.edu>
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+
+__author__ = "\n".join(["Leo Lopes <leo.lopes@monash.edu>",
+ "Loïc Séguin-C. <loicseguin@gmail.com>"])
+
+__all__ = ['maximal_independent_set']
+
+import random
+import networkx as nx
+
+def maximal_independent_set(G, nodes=None):
+ """Return a random maximal independent set guaranteed to contain
+ a given set of nodes.
+
+ An independent set is a set of nodes such that the subgraph
+ of G induced by these nodes contains no edges. A maximal
+ independent set is an independent set such that it is not possible
+ to add a new node and still get an independent set.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ nodes : list or iterable
+ Nodes that must be part of the independent set. This set of nodes
+ must be independent.
+
+ Returns
+ -------
+ indep_nodes : list
+ List of nodes that are part of a maximal independent set.
+
+ Raises
+ ------
+ NetworkXUnfeasible
+ If the nodes in the provided list are not part of the graph or
+ do not form an independent set, an exception is raised.
+
+ Examples
+ --------
+ >>> G = nx.path_graph(5)
+ >>> nx.maximal_independent_set(G) # doctest: +SKIP
+ [4, 0, 2]
+ >>> nx.maximal_independent_set(G, [1]) # doctest: +SKIP
+ [1, 3]
+
+ Notes
+    -----
+ This algorithm does not solve the maximum independent set problem.
+
+ """
+ if not nodes:
+ nodes = set([random.choice(G.nodes())])
+ else:
+ nodes = set(nodes)
+ if not nodes.issubset(G):
+ raise nx.NetworkXUnfeasible(
+ "%s is not a subset of the nodes of G" % nodes)
+ neighbors = set.union(*[set(G.neighbors(v)) for v in nodes])
+ if set.intersection(neighbors, nodes):
+ raise nx.NetworkXUnfeasible(
+ "%s is not an independent set of G" % nodes)
+ indep_nodes = list(nodes)
+ available_nodes = set(G.nodes()).difference(neighbors.union(nodes))
+ while available_nodes:
+ node = random.choice(list(available_nodes))
+ indep_nodes.append(node)
+ available_nodes.difference_update(G.neighbors(node) + [node])
+ return indep_nodes
+
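+# A minimal usage sketch: the result is randomized, but seeding the shared
+# standard-library ``random`` module makes it repeatable; on a 6-cycle any
+# maximal independent set has size 2 or 3.
+#
+#     >>> import random
+#     >>> random.seed(42)
+#     >>> len(maximal_independent_set(nx.cycle_graph(6))) in (2, 3)
+#     True
+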
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/mst.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/mst.py
new file mode 100644
index 0000000..03ca5e8
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/mst.py
@@ -0,0 +1,254 @@
+# -*- coding: utf-8 -*-
+"""
+Computes minimum spanning tree of a weighted graph.
+
+"""
+# Copyright (C) 2009-2010 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# Loïc Séguin-C. <loicseguin@gmail.com>
+# All rights reserved.
+# BSD license.
+
+__all__ = ['kruskal_mst',
+ 'minimum_spanning_edges',
+ 'minimum_spanning_tree',
+ 'prim_mst_edges', 'prim_mst']
+
+import networkx as nx
+from heapq import heappop, heappush
+
+def minimum_spanning_edges(G,weight='weight',data=True):
+ """Generate edges in a minimum spanning forest of an undirected
+ weighted graph.
+
+ A minimum spanning tree is a subgraph of the graph (a tree)
+ with the minimum sum of edge weights. A spanning forest is a
+ union of the spanning trees for each connected component of the graph.
+
+ Parameters
+ ----------
+ G : NetworkX Graph
+
+ weight : string
+ Edge data key to use for weight (default 'weight').
+
+ data : bool, optional
+ If True yield the edge data along with the edge.
+
+ Returns
+ -------
+ edges : iterator
+ A generator that produces edges in the minimum spanning tree.
+ The edges are three-tuples (u,v,w) where w is the weight.
+
+ Examples
+ --------
+ >>> G=nx.cycle_graph(4)
+ >>> G.add_edge(0,3,weight=2) # assign weight 2 to edge 0-3
+ >>> mst=nx.minimum_spanning_edges(G,data=False) # a generator of MST edges
+ >>> edgelist=list(mst) # make a list of the edges
+ >>> print(sorted(edgelist))
+ [(0, 1), (1, 2), (2, 3)]
+
+ Notes
+ -----
+ Uses Kruskal's algorithm.
+
+ If the graph edges do not have a weight attribute a default weight of 1
+ will be used.
+
+ Modified code from David Eppstein, April 2006
+ http://www.ics.uci.edu/~eppstein/PADS/
+ """
+ # Modified code from David Eppstein, April 2006
+ # http://www.ics.uci.edu/~eppstein/PADS/
+ # Kruskal's algorithm: sort edges by weight, and add them one at a time.
+ # We use Kruskal's algorithm, first because it is very simple to
+ # implement once UnionFind exists, and second, because the only slow
+ # part (the sort) is sped up by being built in to Python.
+ from networkx.utils import UnionFind
+ if G.is_directed():
+ raise nx.NetworkXError(
+            "Minimum spanning tree not defined for directed graphs.")
+
+ subtrees = UnionFind()
+ edges = sorted(G.edges(data=True),key=lambda t: t[2].get(weight,1))
+ for u,v,d in edges:
+ if subtrees[u] != subtrees[v]:
+ if data:
+ yield (u,v,d)
+ else:
+ yield (u,v)
+ subtrees.union(u,v)
+
+
+def minimum_spanning_tree(G,weight='weight'):
+ """Return a minimum spanning tree or forest of an undirected
+ weighted graph.
+
+ A minimum spanning tree is a subgraph of the graph (a tree) with
+ the minimum sum of edge weights.
+
+ If the graph is not connected a spanning forest is constructed. A
+ spanning forest is a union of the spanning trees for each
+ connected component of the graph.
+
+ Parameters
+ ----------
+ G : NetworkX Graph
+
+ weight : string
+ Edge data key to use for weight (default 'weight').
+
+ Returns
+ -------
+ G : NetworkX Graph
+ A minimum spanning tree or forest.
+
+ Examples
+ --------
+ >>> G=nx.cycle_graph(4)
+ >>> G.add_edge(0,3,weight=2) # assign weight 2 to edge 0-3
+ >>> T=nx.minimum_spanning_tree(G)
+ >>> print(sorted(T.edges(data=True)))
+ [(0, 1, {}), (1, 2, {}), (2, 3, {})]
+
+ Notes
+ -----
+ Uses Kruskal's algorithm.
+
+ If the graph edges do not have a weight attribute a default weight of 1
+ will be used.
+ """
+ T=nx.Graph(nx.minimum_spanning_edges(G,weight=weight,data=True))
+ # Add isolated nodes
+ if len(T)!=len(G):
+ T.add_nodes_from([n for n,d in G.degree().items() if d==0])
+ # Add node and graph attributes as shallow copy
+ for n in T:
+ T.node[n]=G.node[n].copy()
+ T.graph=G.graph.copy()
+ return T
+
+kruskal_mst=minimum_spanning_tree
+
+def prim_mst_edges(G, weight = 'weight', data = True):
+ """Generate edges in a minimum spanning forest of an undirected
+ weighted graph.
+
+ A minimum spanning tree is a subgraph of the graph (a tree)
+ with the minimum sum of edge weights. A spanning forest is a
+ union of the spanning trees for each connected component of the graph.
+
+ Parameters
+ ----------
+ G : NetworkX Graph
+
+ weight : string
+ Edge data key to use for weight (default 'weight').
+
+ data : bool, optional
+ If True yield the edge data along with the edge.
+
+ Returns
+ -------
+ edges : iterator
+ A generator that produces edges in the minimum spanning tree.
+ The edges are three-tuples (u,v,w) where w is the weight.
+
+ Examples
+ --------
+ >>> G=nx.cycle_graph(4)
+ >>> G.add_edge(0,3,weight=2) # assign weight 2 to edge 0-3
+ >>> mst=nx.prim_mst_edges(G,data=False) # a generator of MST edges
+ >>> edgelist=list(mst) # make a list of the edges
+ >>> print(sorted(edgelist))
+ [(0, 1), (1, 2), (2, 3)]
+
+ Notes
+ -----
+ Uses Prim's algorithm.
+
+ If the graph edges do not have a weight attribute a default weight of 1
+ will be used.
+ """
+
+ if G.is_directed():
+ raise nx.NetworkXError(
+            "Minimum spanning tree not defined for directed graphs.")
+
+ nodes = G.nodes()
+
+ while nodes:
+ u = nodes.pop(0)
+ frontier = []
+ visited = [u]
+ for u, v in G.edges(u):
+ heappush(frontier, (G[u][v].get(weight, 1), u, v))
+
+ while frontier:
+ W, u, v = heappop(frontier)
+ if v in visited:
+ continue
+ visited.append(v)
+ nodes.remove(v)
+ for v, w in G.edges(v):
+ if not w in visited:
+ heappush(frontier, (G[v][w].get(weight, 1), v, w))
+ if data:
+ yield u, v, G[u][v]
+ else:
+ yield u, v
+
+
+def prim_mst(G, weight = 'weight'):
+ """Return a minimum spanning tree or forest of an undirected
+ weighted graph.
+
+ A minimum spanning tree is a subgraph of the graph (a tree) with
+ the minimum sum of edge weights.
+
+ If the graph is not connected a spanning forest is constructed. A
+ spanning forest is a union of the spanning trees for each
+ connected component of the graph.
+
+ Parameters
+ ----------
+ G : NetworkX Graph
+
+ weight : string
+ Edge data key to use for weight (default 'weight').
+
+ Returns
+ -------
+ G : NetworkX Graph
+ A minimum spanning tree or forest.
+
+ Examples
+ --------
+ >>> G=nx.cycle_graph(4)
+ >>> G.add_edge(0,3,weight=2) # assign weight 2 to edge 0-3
+ >>> T=nx.prim_mst(G)
+ >>> print(sorted(T.edges(data=True)))
+ [(0, 1, {}), (1, 2, {}), (2, 3, {})]
+
+ Notes
+ -----
+ Uses Prim's algorithm.
+
+ If the graph edges do not have a weight attribute a default weight of 1
+ will be used.
+ """
+
+ T=nx.Graph(nx.prim_mst_edges(G,weight=weight,data=True))
+ # Add isolated nodes
+ if len(T)!=len(G):
+ T.add_nodes_from([n for n,d in G.degree().items() if d==0])
+ # Add node and graph attributes as shallow copy
+ for n in T:
+ T.node[n]=G.node[n].copy()
+ T.graph=G.graph.copy()
+ return T
+
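+# A quick cross-check (a sketch, assuming both functions are re-exported at
+# the networkx top level as the __all__ list above suggests): on the same
+# connected graph, Kruskal and Prim produce spanning trees with the same
+# number of edges and, with unit weights, the same total weight.
+#
+#     >>> G = nx.cycle_graph(4)
+#     >>> len(nx.minimum_spanning_tree(G).edges()) == len(nx.prim_mst(G).edges()) == 3
+#     True
+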
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/operators/__init__.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/operators/__init__.py
new file mode 100644
index 0000000..0ebc6ab
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/operators/__init__.py
@@ -0,0 +1,4 @@
+from networkx.algorithms.operators.all import *
+from networkx.algorithms.operators.binary import *
+from networkx.algorithms.operators.product import *
+from networkx.algorithms.operators.unary import *
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/operators/all.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/operators/all.py
new file mode 100644
index 0000000..755256b
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/operators/all.py
@@ -0,0 +1,151 @@
+"""Operations on many graphs.
+"""
+# Copyright (C) 2012 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+try:
+ from itertools import izip_longest as zip_longest
+except ImportError: # Python3 has zip_longest
+ from itertools import zip_longest
+import networkx as nx
+from networkx.utils import is_string_like
+
+__author__ = """\n""".join([ 'Robert King <kingrobertking@gmail.com>',
+ 'Aric Hagberg <aric.hagberg@gmail.com>'])
+
+__all__ = ['union_all', 'compose_all', 'disjoint_union_all',
+ 'intersection_all']
+
+def union_all(graphs, rename=(None,) , name=None):
+ """Return the union of all graphs.
+
+ The graphs must be disjoint, otherwise an exception is raised.
+
+ Parameters
+ ----------
+ graphs : list of graphs
+ List of NetworkX graphs
+
+    rename : iterable , default=(None,)
+        Node names can be changed by specifying a tuple of prefixes,
+        for example rename=('G-','H-'). Node "u" in the first graph is
+        then renamed "G-u" and "v" in the second graph is renamed "H-v".
+
+ name : string
+        Specify the name for the union graph
+
+ Returns
+ -------
+ U : a graph with the same type as the first graph in list
+
+ Notes
+ -----
+    To force a disjoint union with node relabeling, use
+    disjoint_union_all(graphs) or convert_node_labels_to_integers().
+
+ Graph, edge, and node attributes are propagated to the union graph.
+ If a graph attribute is present in multiple graphs, then the value
+ from the last graph in the list with that attribute is used.
+
+ See Also
+ --------
+ union
+ disjoint_union_all
+ """
+ graphs_names = zip_longest(graphs,rename)
+ U, gname = next(graphs_names)
+ for H,hname in graphs_names:
+ U = nx.union(U, H, (gname,hname),name=name)
+ gname = None
+ return U
+
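+# A minimal usage sketch of union_all (hypothetical graphs, for illustration
+# only): the node sets must be disjoint unless rename prefixes are given.
+#
+#     >>> g1 = nx.path_graph(2)
+#     >>> g2 = nx.path_graph(2)
+#     >>> sorted(union_all([g1, g2], rename=('a-', 'b-')).nodes())
+#     ['a-0', 'a-1', 'b-0', 'b-1']
+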
+def disjoint_union_all(graphs):
+ """Return the disjoint union of all graphs.
+
+ This operation forces distinct integer node labels starting with 0
+ for the first graph in the list and numbering consecutively.
+
+ Parameters
+ ----------
+ graphs : list
+ List of NetworkX graphs
+
+ Returns
+ -------
+ U : A graph with the same type as the first graph in list
+
+ Notes
+ -----
+ It is recommended that the graphs be either all directed or all undirected.
+
+ Graph, edge, and node attributes are propagated to the union graph.
+ If a graph attribute is present in multiple graphs, then the value
+ from the last graph in the list with that attribute is used.
+ """
+ graphs = iter(graphs)
+ U = next(graphs)
+ for H in graphs:
+ U = nx.disjoint_union(U, H)
+ return U
+
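+# Sketch: every input graph is relabeled to consecutive integers, so node
+# collisions between the inputs are impossible.
+#
+#     >>> g = nx.path_graph(2)
+#     >>> sorted(disjoint_union_all([g, g, g]).nodes())
+#     [0, 1, 2, 3, 4, 5]
+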
+def compose_all(graphs, name=None):
+ """Return the composition of all graphs.
+
+ Composition is the simple union of the node sets and edge sets.
+ The node sets of the supplied graphs need not be disjoint.
+
+ Parameters
+ ----------
+ graphs : list
+ List of NetworkX graphs
+
+ name : string
+ Specify name for new graph
+
+ Returns
+ -------
+ C : A graph with the same type as the first graph in list
+
+ Notes
+ -----
+ It is recommended that the supplied graphs be either all directed or all
+ undirected.
+
+ Graph, edge, and node attributes are propagated to the union graph.
+ If a graph attribute is present in multiple graphs, then the value
+ from the last graph in the list with that attribute is used.
+ """
+ graphs = iter(graphs)
+ C = next(graphs)
+ for H in graphs:
+ C = nx.compose(C, H, name=name)
+ return C
+
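+# Sketch: compose_all overlays node and edge sets; the inputs need not be
+# disjoint, and later graphs win on conflicting attributes.
+#
+#     >>> g1 = nx.Graph([(1, 2)])
+#     >>> g2 = nx.Graph([(2, 3)])
+#     >>> sorted(compose_all([g1, g2]).edges())
+#     [(1, 2), (2, 3)]
+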
+def intersection_all(graphs):
+ """Return a new graph that contains only the edges that exist in
+ all graphs.
+
+ All supplied graphs must have the same node set.
+
+ Parameters
+ ----------
+    graphs : list
+ List of NetworkX graphs
+
+ Returns
+ -------
+ R : A new graph with the same type as the first graph in list
+
+ Notes
+ -----
+ Attributes from the graph, nodes, and edges are not copied to the new
+ graph.
+ """
+ graphs = iter(graphs)
+ R = next(graphs)
+ for H in graphs:
+ R = nx.intersection(R, H)
+ return R
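+
+# Sketch: with identical node sets, only edges present in every input graph
+# survive.
+#
+#     >>> g1 = nx.Graph([(1, 2), (2, 3)])
+#     >>> g2 = nx.Graph([(2, 3), (3, 1)])
+#     >>> sorted(intersection_all([g1, g2]).edges())
+#     [(2, 3)]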
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/operators/binary.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/operators/binary.py
new file mode 100644
index 0000000..a710008
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/operators/binary.py
@@ -0,0 +1,329 @@
+"""
+Operations on graphs including union, intersection, difference.
+"""
+# Copyright (C) 2004-2012 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+import networkx as nx
+from networkx.utils import is_string_like
+__author__ = """\n""".join(['Aric Hagberg (hagberg@lanl.gov)',
+ 'Pieter Swart (swart@lanl.gov)',
+ 'Dan Schult(dschult@colgate.edu)'])
+__all__ = ['union', 'compose', 'disjoint_union', 'intersection',
+ 'difference', 'symmetric_difference']
+
+def union(G, H, rename=(None, None), name=None):
+ """ Return the union of graphs G and H.
+
+ Graphs G and H must be disjoint, otherwise an exception is raised.
+
+ Parameters
+ ----------
+ G,H : graph
+ A NetworkX graph
+
+    rename : tuple , default=(None, None)
+ Node names of G and H can be changed by specifying the tuple
+ rename=('G-','H-') (for example). Node "u" in G is then renamed
+ "G-u" and "v" in H is renamed "H-v".
+
+ name : string
+ Specify the name for the union graph
+
+ Returns
+ -------
+ U : A union graph with the same type as G.
+
+ Notes
+ -----
+ To force a disjoint union with node relabeling, use
+    disjoint_union(G,H) or convert_node_labels_to_integers().
+
+ Graph, edge, and node attributes are propagated from G and H
+ to the union graph. If a graph attribute is present in both
+ G and H the value from H is used.
+
+ See Also
+ --------
+ disjoint_union
+ """
+ # Union is the same type as G
+ R = G.__class__()
+ if name is None:
+ name = "union( %s, %s )"%(G.name,H.name)
+ R.name = name
+
+ # rename graph to obtain disjoint node labels
+ def add_prefix(graph, prefix):
+ if prefix is None:
+ return graph
+ def label(x):
+ if is_string_like(x):
+ name=prefix+x
+ else:
+ name=prefix+repr(x)
+ return name
+ return nx.relabel_nodes(graph, label)
+ G = add_prefix(G,rename[0])
+ H = add_prefix(H,rename[1])
+ if set(G) & set(H):
+        raise nx.NetworkXError('The node sets of G and H are not disjoint. '
+                               'Use appropriate rename=(Gprefix,Hprefix) '
+                               'or use disjoint_union(G,H).')
+ if G.is_multigraph():
+ G_edges = G.edges_iter(keys=True, data=True)
+ else:
+ G_edges = G.edges_iter(data=True)
+ if H.is_multigraph():
+ H_edges = H.edges_iter(keys=True, data=True)
+ else:
+ H_edges = H.edges_iter(data=True)
+
+    # add nodes and edges of G
+ R.add_nodes_from(G)
+ R.add_edges_from(G_edges)
+    # add nodes and edges of H
+ R.add_nodes_from(H)
+ R.add_edges_from(H_edges)
+ # add node attributes
+ R.node.update(G.node)
+ R.node.update(H.node)
+    # add graph attributes, H attributes take precedence over G attributes
+ R.graph.update(G.graph)
+ R.graph.update(H.graph)
+
+ return R
+
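+# A minimal sketch of the rename behaviour (hypothetical graphs, for
+# illustration only): prefixes keep otherwise identical node sets disjoint.
+#
+#     >>> g = nx.path_graph(2)
+#     >>> sorted(union(g, g, rename=('a-', 'b-')).nodes())
+#     ['a-0', 'a-1', 'b-0', 'b-1']
+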
+def disjoint_union(G,H):
+ """ Return the disjoint union of graphs G and H.
+
+ This algorithm forces distinct integer node labels.
+
+ Parameters
+ ----------
+ G,H : graph
+ A NetworkX graph
+
+ Returns
+ -------
+ U : A union graph with the same type as G.
+
+ Notes
+ -----
+ A new graph is created, of the same class as G. It is recommended
+ that G and H be either both directed or both undirected.
+
+ The nodes of G are relabeled 0 to len(G)-1, and the nodes of H are
+ relabeled len(G) to len(G)+len(H)-1.
+
+ Graph, edge, and node attributes are propagated from G and H
+ to the union graph. If a graph attribute is present in both
+ G and H the value from H is used.
+ """
+ R1=nx.convert_node_labels_to_integers(G)
+ R2=nx.convert_node_labels_to_integers(H,first_label=len(R1))
+ R=union(R1,R2)
+ R.name="disjoint_union( %s, %s )"%(G.name,H.name)
+ R.graph.update(G.graph)
+ R.graph.update(H.graph)
+ return R
+
+
+def intersection(G, H):
+ """Return a new graph that contains only the edges that exist in
+ both G and H.
+
+ The node sets of H and G must be the same.
+
+ Parameters
+ ----------
+ G,H : graph
+ A NetworkX graph. G and H must have the same node sets.
+
+ Returns
+ -------
+ GH : A new graph with the same type as G.
+
+ Notes
+ -----
+ Attributes from the graph, nodes, and edges are not copied to the new
+ graph. If you want a new graph of the intersection of G and H
+ with the attributes (including edge data) from G use remove_nodes_from()
+ as follows
+
+ >>> G=nx.path_graph(3)
+ >>> H=nx.path_graph(5)
+ >>> R=G.copy()
+ >>> R.remove_nodes_from(n for n in G if n not in H)
+ """
+ # create new graph
+ R=nx.create_empty_copy(G)
+
+ R.name="Intersection of (%s and %s)"%(G.name, H.name)
+
+ if set(G)!=set(H):
+ raise nx.NetworkXError("Node sets of graphs are not equal")
+
+ if G.number_of_edges()<=H.number_of_edges():
+ if G.is_multigraph():
+ edges=G.edges_iter(keys=True)
+ else:
+ edges=G.edges_iter()
+ for e in edges:
+ if H.has_edge(*e):
+ R.add_edge(*e)
+ else:
+ if H.is_multigraph():
+ edges=H.edges_iter(keys=True)
+ else:
+ edges=H.edges_iter()
+ for e in edges:
+ if G.has_edge(*e):
+ R.add_edge(*e)
+
+ return R
+
+def difference(G, H):
+ """Return a new graph that contains the edges that exist in G but not in H.
+
+ The node sets of H and G must be the same.
+
+ Parameters
+ ----------
+ G,H : graph
+ A NetworkX graph. G and H must have the same node sets.
+
+ Returns
+ -------
+ D : A new graph with the same type as G.
+
+ Notes
+ -----
+ Attributes from the graph, nodes, and edges are not copied to the new
+    graph. If you want a new graph of the difference of G and H with
+    the attributes (including edge data) from G use remove_nodes_from()
+ as follows:
+
+ >>> G=nx.path_graph(3)
+ >>> H=nx.path_graph(5)
+ >>> R=G.copy()
+ >>> R.remove_nodes_from(n for n in G if n in H)
+ """
+ # create new graph
+ R=nx.create_empty_copy(G)
+ R.name="Difference of (%s and %s)"%(G.name, H.name)
+
+ if set(G)!=set(H):
+ raise nx.NetworkXError("Node sets of graphs not equal")
+
+ if G.is_multigraph():
+ edges=G.edges_iter(keys=True)
+ else:
+ edges=G.edges_iter()
+ for e in edges:
+ if not H.has_edge(*e):
+ R.add_edge(*e)
+ return R
+
+def symmetric_difference(G, H):
+ """Return new graph with edges that exist in either G or H but not both.
+
+ The node sets of H and G must be the same.
+
+ Parameters
+ ----------
+ G,H : graph
+ A NetworkX graph. G and H must have the same node sets.
+
+ Returns
+ -------
+ D : A new graph with the same type as G.
+
+ Notes
+ -----
+ Attributes from the graph, nodes, and edges are not copied to the new
+ graph.
+ """
+ # create new graph
+ R=nx.create_empty_copy(G)
+ R.name="Symmetric difference of (%s and %s)"%(G.name, H.name)
+
+ if set(G)!=set(H):
+ raise nx.NetworkXError("Node sets of graphs not equal")
+
+ gnodes=set(G) # set of nodes in G
+ hnodes=set(H) # set of nodes in H
+ nodes=gnodes.symmetric_difference(hnodes)
+ R.add_nodes_from(nodes)
+
+ if G.is_multigraph():
+ edges=G.edges_iter(keys=True)
+ else:
+ edges=G.edges_iter()
+ # we could copy the data here but then this function doesn't
+ # match intersection and difference
+ for e in edges:
+ if not H.has_edge(*e):
+ R.add_edge(*e)
+
+ if H.is_multigraph():
+ edges=H.edges_iter(keys=True)
+ else:
+ edges=H.edges_iter()
+ for e in edges:
+ if not G.has_edge(*e):
+ R.add_edge(*e)
+ return R
+
+def compose(G, H, name=None):
+ """Return a new graph of G composed with H.
+
+ Composition is the simple union of the node sets and edge sets.
+ The node sets of G and H need not be disjoint.
+
+ Parameters
+ ----------
+ G,H : graph
+ A NetworkX graph
+
+ name : string
+ Specify name for new graph
+
+ Returns
+ -------
+ C: A new graph with the same type as G
+
+ Notes
+ -----
+ It is recommended that G and H be either both directed or both undirected.
+    Attributes from H take precedence over attributes from G.
+ """
+ if name is None:
+ name="compose( %s, %s )"%(G.name,H.name)
+ R=G.__class__()
+ R.name=name
+ R.add_nodes_from(H.nodes())
+ R.add_nodes_from(G.nodes())
+ if H.is_multigraph():
+ R.add_edges_from(H.edges_iter(keys=True,data=True))
+ else:
+ R.add_edges_from(H.edges_iter(data=True))
+ if G.is_multigraph():
+ R.add_edges_from(G.edges_iter(keys=True,data=True))
+ else:
+ R.add_edges_from(G.edges_iter(data=True))
+
+    # add node attributes, H attributes take precedence over G attributes
+ R.node.update(G.node)
+ R.node.update(H.node)
+    # add graph attributes, H attributes take precedence over G attributes
+ R.graph.update(G.graph)
+ R.graph.update(H.graph)
+ return R
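+
+# Sketch: compose keeps the union of the node and edge sets; where both
+# graphs carry the same attribute, the value from H wins.
+#
+#     >>> g = nx.Graph([(1, 2)])
+#     >>> h = nx.Graph([(2, 3)])
+#     >>> sorted(compose(g, h).edges())
+#     [(1, 2), (2, 3)]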
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/operators/product.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/operators/product.py
new file mode 100644
index 0000000..0fa8a17
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/operators/product.py
@@ -0,0 +1,330 @@
+"""
+Graph products.
+"""
+# Copyright (C) 2011 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+import networkx as nx
+from itertools import product
+
+__author__ = """\n""".join(['Aric Hagberg (hagberg@lanl.gov)',
+ 'Pieter Swart (swart@lanl.gov)',
+ 'Dan Schult(dschult@colgate.edu)'
+ 'Ben Edwards(bedwards@cs.unm.edu)'])
+
+__all__ = ['tensor_product','cartesian_product',
+ 'lexicographic_product', 'strong_product']
+
+def _dict_product(d1,d2):
+ return dict((k,(d1.get(k),d2.get(k))) for k in set(d1)|set(d2))
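+# For example (hypothetical inputs, for illustration only):
+# _dict_product({'a': 1}, {'a': 2, 'b': 3}) == {'a': (1, 2), 'b': (None, 3)}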
+
+
+# Generators for producing graph products
+def _node_product(G,H):
+ for u,v in product(G, H):
+ yield ((u,v), _dict_product(G.node[u], H.node[v]))
+
+def _directed_edges_cross_edges(G,H):
+ if not G.is_multigraph() and not H.is_multigraph():
+ for u,v,c in G.edges_iter(data=True):
+ for x,y,d in H.edges_iter(data=True):
+ yield (u,x),(v,y),_dict_product(c,d)
+ if not G.is_multigraph() and H.is_multigraph():
+ for u,v,c in G.edges_iter(data=True):
+ for x,y,k,d in H.edges_iter(data=True,keys=True):
+ yield (u,x),(v,y),k,_dict_product(c,d)
+ if G.is_multigraph() and not H.is_multigraph():
+ for u,v,k,c in G.edges_iter(data=True,keys=True):
+ for x,y,d in H.edges_iter(data=True):
+ yield (u,x),(v,y),k,_dict_product(c,d)
+ if G.is_multigraph() and H.is_multigraph():
+ for u,v,j,c in G.edges_iter(data=True,keys=True):
+ for x,y,k,d in H.edges_iter(data=True,keys=True):
+ yield (u,x),(v,y),(j,k),_dict_product(c,d)
+
+def _undirected_edges_cross_edges(G,H):
+ if not G.is_multigraph() and not H.is_multigraph():
+ for u,v,c in G.edges_iter(data=True):
+ for x,y,d in H.edges_iter(data=True):
+ yield (v,x),(u,y),_dict_product(c,d)
+ if not G.is_multigraph() and H.is_multigraph():
+ for u,v,c in G.edges_iter(data=True):
+ for x,y,k,d in H.edges_iter(data=True,keys=True):
+ yield (v,x),(u,y),k,_dict_product(c,d)
+ if G.is_multigraph() and not H.is_multigraph():
+ for u,v,k,c in G.edges_iter(data=True,keys=True):
+ for x,y,d in H.edges_iter(data=True):
+ yield (v,x),(u,y),k,_dict_product(c,d)
+ if G.is_multigraph() and H.is_multigraph():
+ for u,v,j,c in G.edges_iter(data=True,keys=True):
+ for x,y,k,d in H.edges_iter(data=True,keys=True):
+ yield (v,x),(u,y),(j,k),_dict_product(c,d)
+
+def _edges_cross_nodes(G,H):
+ if G.is_multigraph():
+ for u,v,k,d in G.edges_iter(data=True,keys=True):
+ for x in H:
+ yield (u,x),(v,x),k,d
+ else:
+ for u,v,d in G.edges_iter(data=True):
+ for x in H:
+ if H.is_multigraph():
+ yield (u,x),(v,x),None,d
+ else:
+ yield (u,x),(v,x),d
+
+
+def _nodes_cross_edges(G,H):
+ if H.is_multigraph():
+ for x in G:
+ for u,v,k,d in H.edges_iter(data=True,keys=True):
+ yield (x,u),(x,v),k,d
+ else:
+ for x in G:
+ for u,v,d in H.edges_iter(data=True):
+ if G.is_multigraph():
+ yield (x,u),(x,v),None,d
+ else:
+ yield (x,u),(x,v),d
+
+def _edges_cross_nodes_and_nodes(G,H):
+ if G.is_multigraph():
+ for u,v,k,d in G.edges_iter(data=True,keys=True):
+ for x in H:
+ for y in H:
+ yield (u,x),(v,y),k,d
+ else:
+ for u,v,d in G.edges_iter(data=True):
+ for x in H:
+ for y in H:
+ if H.is_multigraph():
+ yield (u,x),(v,y),None,d
+ else:
+ yield (u,x),(v,y),d
+
+def _init_product_graph(G,H):
+ if not G.is_directed() == H.is_directed():
+        raise nx.NetworkXError("G and H must be both directed or "
+                               "both undirected")
+ if G.is_multigraph() or H.is_multigraph():
+ GH = nx.MultiGraph()
+ else:
+ GH = nx.Graph()
+ if G.is_directed():
+ GH = GH.to_directed()
+ return GH
+
+
+def tensor_product(G,H):
+ r"""Return the tensor product of G and H.
+
+ The tensor product P of the graphs G and H has a node set that
+ is the Cartesian product of the node sets, $V(P)=V(G) \times V(H)$.
+    P has an edge ((u,v),(x,y)) if and only if (u,x) is an edge in G
+    and (v,y) is an edge in H.
+
+ Sometimes referred to as the categorical product.
+
+
+ Parameters
+ ----------
+ G, H: graphs
+ Networkx graphs.
+
+ Returns
+ -------
+ P: NetworkX graph
+     The tensor product of G and H. P will be a multi-graph if either G
+     or H is a multi-graph. Will be directed if G and H are directed,
+     and undirected if G and H are undirected.
+
+ Raises
+ ------
+ NetworkXError
+ If G and H are not both directed or both undirected.
+
+ Notes
+ -----
+    Node attributes in P are two-tuples of the G and H node attributes.
+ Missing attributes are assigned None.
+
+ For example
+ >>> G = nx.Graph()
+ >>> H = nx.Graph()
+ >>> G.add_node(0,a1=True)
+ >>> H.add_node('a',a2='Spam')
+ >>> P = nx.tensor_product(G,H)
+ >>> P.nodes()
+ [(0, 'a')]
+
+ Edge attributes and edge keys (for multigraphs) are also copied to the
+ new product graph
+ """
+ GH = _init_product_graph(G,H)
+ GH.add_nodes_from(_node_product(G,H))
+ GH.add_edges_from(_directed_edges_cross_edges(G,H))
+ if not GH.is_directed():
+ GH.add_edges_from(_undirected_edges_cross_edges(G,H))
+ GH.name = "Tensor product("+G.name+","+H.name+")"
+ return GH
+
+def cartesian_product(G,H):
+    r"""Return the Cartesian product of G and H.
+
+    The Cartesian product P of the graphs G and H has a node set that
+    is the Cartesian product of the node sets, $V(P)=V(G) \times V(H)$.
+    P has an edge ((u,v),(x,y)) if and only if either u==x and (v,y)
+    is an edge in H, or v==y and (u,x) is an edge in G.
+
+ Parameters
+ ----------
+ G, H: graphs
+ Networkx graphs.
+
+ Returns
+ -------
+ P: NetworkX graph
+     The Cartesian product of G and H. P will be a multi-graph if either G
+     or H is a multi-graph. Will be directed if G and H are directed,
+     and undirected if G and H are undirected.
+
+ Raises
+ ------
+ NetworkXError
+ If G and H are not both directed or both undirected.
+
+ Notes
+ -----
+    Node attributes in P are two-tuples of the G and H node attributes.
+ Missing attributes are assigned None.
+
+ For example
+ >>> G = nx.Graph()
+ >>> H = nx.Graph()
+ >>> G.add_node(0,a1=True)
+ >>> H.add_node('a',a2='Spam')
+ >>> P = nx.cartesian_product(G,H)
+ >>> P.nodes()
+ [(0, 'a')]
+
+ Edge attributes and edge keys (for multigraphs) are also copied to the
+ new product graph
+ """
+ GH = _init_product_graph(G,H)
+ GH.add_nodes_from(_node_product(G,H))
+ GH.add_edges_from(_edges_cross_nodes(G,H))
+ GH.add_edges_from(_nodes_cross_edges(G,H))
+ GH.name = "Cartesian product("+G.name+","+H.name+")"
+ return GH
+
+def lexicographic_product(G,H):
+    r"""Return the lexicographic product of G and H.
+
+ The lexicographical product P of the graphs G and H has a node set that
+ is the Cartesian product of the node sets, $V(P)=V(G) \times V(H)$.
+    P has an edge ((u,v),(x,y)) if and only if (u,x) is an edge in G
+    or u==x and (v,y) is an edge in H.
+
+ Parameters
+ ----------
+ G, H: graphs
+ Networkx graphs.
+
+ Returns
+ -------
+ P: NetworkX graph
+     The lexicographic product of G and H. P will be a multi-graph if either
+     G or H is a multi-graph. Will be directed if G and H are directed,
+     and undirected if G and H are undirected.
+
+ Raises
+ ------
+ NetworkXError
+ If G and H are not both directed or both undirected.
+
+ Notes
+ -----
+    Node attributes in P are two-tuples of the G and H node attributes.
+ Missing attributes are assigned None.
+
+ For example
+ >>> G = nx.Graph()
+ >>> H = nx.Graph()
+ >>> G.add_node(0,a1=True)
+ >>> H.add_node('a',a2='Spam')
+ >>> P = nx.lexicographic_product(G,H)
+ >>> P.nodes()
+ [(0, 'a')]
+
+ Edge attributes and edge keys (for multigraphs) are also copied to the
+ new product graph
+ """
+ GH = _init_product_graph(G,H)
+ GH.add_nodes_from(_node_product(G,H))
+ # Edges in G regardless of H designation
+ GH.add_edges_from(_edges_cross_nodes_and_nodes(G,H))
+ # For each x in G, only if there is an edge in H
+ GH.add_edges_from(_nodes_cross_edges(G,H))
+ GH.name = "Lexicographic product("+G.name+","+H.name+")"
+ return GH
+
+def strong_product(G,H):
+    r"""Return the strong product of G and H.
+
+ The strong product P of the graphs G and H has a node set that
+ is the Cartesian product of the node sets, $V(P)=V(G) \times V(H)$.
+    P has an edge ((u,v),(x,y)) if and only if
+    u==x and (v,y) is an edge in H, or
+    v==y and (u,x) is an edge in G, or
+    (u,x) is an edge in G and (v,y) is an edge in H.
+
+ Parameters
+ ----------
+ G, H: graphs
+ Networkx graphs.
+
+ Returns
+ -------
+ P: NetworkX graph
+     The strong product of G and H. P will be a multi-graph if either G
+     or H is a multi-graph. Will be directed if G and H are directed,
+     and undirected if G and H are undirected.
+
+ Raises
+ ------
+ NetworkXError
+ If G and H are not both directed or both undirected.
+
+ Notes
+ -----
+    Node attributes in P are two-tuples of the G and H node attributes.
+ Missing attributes are assigned None.
+
+ For example
+ >>> G = nx.Graph()
+ >>> H = nx.Graph()
+ >>> G.add_node(0,a1=True)
+ >>> H.add_node('a',a2='Spam')
+ >>> P = nx.strong_product(G,H)
+ >>> P.nodes()
+ [(0, 'a')]
+
+ Edge attributes and edge keys (for multigraphs) are also copied to the
+ new product graph
+ """
+ GH = _init_product_graph(G,H)
+ GH.add_nodes_from(_node_product(G,H))
+ GH.add_edges_from(_nodes_cross_edges(G,H))
+ GH.add_edges_from(_edges_cross_nodes(G,H))
+ GH.add_edges_from(_directed_edges_cross_edges(G,H))
+ if not GH.is_directed():
+ GH.add_edges_from(_undirected_edges_cross_edges(G,H))
+ GH.name = "Strong product("+G.name+","+H.name+")"
+ return GH
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/operators/tests/test_all.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/operators/tests/test_all.py
new file mode 100644
index 0000000..fc4fa4a
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/operators/tests/test_all.py
@@ -0,0 +1,167 @@
+from nose.tools import *
+import networkx as nx
+from networkx.testing import *
+
+def test_union_all_attributes():
+ g = nx.Graph()
+ g.add_node(0, x=4)
+ g.add_node(1, x=5)
+ g.add_edge(0, 1, size=5)
+ g.graph['name'] = 'g'
+
+ h = g.copy()
+ h.graph['name'] = 'h'
+ h.graph['attr'] = 'attr'
+ h.node[0]['x'] = 7
+
+ j = g.copy()
+ j.graph['name'] = 'j'
+ j.graph['attr'] = 'attr'
+ j.node[0]['x'] = 7
+
+ ghj = nx.union_all([g, h, j], rename=('g', 'h', 'j'))
+ assert_equal( set(ghj.nodes()) , set(['h0', 'h1', 'g0', 'g1', 'j0', 'j1']) )
+ for n in ghj:
+ graph, node = n
+ assert_equal( ghj.node[n], eval(graph).node[int(node)] )
+
+ assert_equal(ghj.graph['attr'],'attr')
+    assert_equal(ghj.graph['name'],'j') # j graph attributes take precedence
+
+
+
+def test_intersection_all():
+ G=nx.Graph()
+ H=nx.Graph()
+ R=nx.Graph()
+ G.add_nodes_from([1,2,3,4])
+ G.add_edge(1,2)
+ G.add_edge(2,3)
+ H.add_nodes_from([1,2,3,4])
+ H.add_edge(2,3)
+ H.add_edge(3,4)
+ R.add_nodes_from([1,2,3,4])
+ R.add_edge(2,3)
+ R.add_edge(4,1)
+ I=nx.intersection_all([G,H,R])
+ assert_equal( set(I.nodes()) , set([1,2,3,4]) )
+ assert_equal( sorted(I.edges()) , [(2,3)] )
+
+
+def test_intersection_all_attributes():
+ g = nx.Graph()
+ g.add_node(0, x=4)
+ g.add_node(1, x=5)
+ g.add_edge(0, 1, size=5)
+ g.graph['name'] = 'g'
+
+ h = g.copy()
+ h.graph['name'] = 'h'
+ h.graph['attr'] = 'attr'
+ h.node[0]['x'] = 7
+
+ gh = nx.intersection_all([g, h])
+ assert_equal( set(gh.nodes()) , set(g.nodes()) )
+ assert_equal( set(gh.nodes()) , set(h.nodes()) )
+ assert_equal( sorted(gh.edges()) , sorted(g.edges()) )
+
+ h.remove_node(0)
+ assert_raises(nx.NetworkXError, nx.intersection, g, h)
+
+def test_intersection_all_multigraph_attributes():
+ g = nx.MultiGraph()
+ g.add_edge(0, 1, key=0)
+ g.add_edge(0, 1, key=1)
+ g.add_edge(0, 1, key=2)
+ h = nx.MultiGraph()
+ h.add_edge(0, 1, key=0)
+ h.add_edge(0, 1, key=3)
+ gh = nx.intersection_all([g, h])
+ assert_equal( set(gh.nodes()) , set(g.nodes()) )
+ assert_equal( set(gh.nodes()) , set(h.nodes()) )
+ assert_equal( sorted(gh.edges()) , [(0,1)] )
+ assert_equal( sorted(gh.edges(keys=True)) , [(0,1,0)] )
+
+def test_union_all_and_compose_all():
+ K3=nx.complete_graph(3)
+ P3=nx.path_graph(3)
+
+ G1=nx.DiGraph()
+ G1.add_edge('A','B')
+ G1.add_edge('A','C')
+ G1.add_edge('A','D')
+ G2=nx.DiGraph()
+ G2.add_edge('1','2')
+ G2.add_edge('1','3')
+ G2.add_edge('1','4')
+
+ G=nx.union_all([G1,G2])
+ H=nx.compose_all([G1,G2])
+ assert_edges_equal(G.edges(),H.edges())
+ assert_false(G.has_edge('A','1'))
+ assert_raises(nx.NetworkXError, nx.union, K3, P3)
+ H1=nx.union_all([H,G1],rename=('H','G1'))
+ assert_equal(sorted(H1.nodes()),
+ ['G1A', 'G1B', 'G1C', 'G1D',
+ 'H1', 'H2', 'H3', 'H4', 'HA', 'HB', 'HC', 'HD'])
+
+ H2=nx.union_all([H,G2],rename=("H",""))
+ assert_equal(sorted(H2.nodes()),
+ ['1', '2', '3', '4',
+ 'H1', 'H2', 'H3', 'H4', 'HA', 'HB', 'HC', 'HD'])
+
+ assert_false(H1.has_edge('NB','NA'))
+
+ G=nx.compose_all([G,G])
+ assert_edges_equal(G.edges(),H.edges())
+
+ G2=nx.union_all([G2,G2],rename=('','copy'))
+ assert_equal(sorted(G2.nodes()),
+ ['1', '2', '3', '4', 'copy1', 'copy2', 'copy3', 'copy4'])
+
+ assert_equal(G2.neighbors('copy4'),[])
+ assert_equal(sorted(G2.neighbors('copy1')),['copy2', 'copy3', 'copy4'])
+ assert_equal(len(G),8)
+ assert_equal(nx.number_of_edges(G),6)
+
+ E=nx.disjoint_union_all([G,G])
+ assert_equal(len(E),16)
+ assert_equal(nx.number_of_edges(E),12)
+
+ E=nx.disjoint_union_all([G1,G2])
+ assert_equal(sorted(E.nodes()),[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
+
+ G1=nx.DiGraph()
+ G1.add_edge('A','B')
+ G2=nx.DiGraph()
+ G2.add_edge(1,2)
+ G3=nx.DiGraph()
+ G3.add_edge(11,22)
+ G4=nx.union_all([G1,G2,G3],rename=("G1","G2","G3"))
+ assert_equal(sorted(G4.nodes()),
+ ['G1A', 'G1B', 'G21', 'G22',
+ 'G311', 'G322'])
+
+
+def test_union_all_multigraph():
+ G=nx.MultiGraph()
+ G.add_edge(1,2,key=0)
+ G.add_edge(1,2,key=1)
+ H=nx.MultiGraph()
+ H.add_edge(3,4,key=0)
+ H.add_edge(3,4,key=1)
+ GH=nx.union_all([G,H])
+ assert_equal( set(GH) , set(G)|set(H))
+ assert_equal( set(GH.edges(keys=True)) ,
+ set(G.edges(keys=True))|set(H.edges(keys=True)))
+
+
+def test_input_output():
+ l = [nx.Graph([(1,2)]),nx.Graph([(3,4)])]
+ U = nx.disjoint_union_all(l)
+ assert_equal(len(l),2)
+ C = nx.compose_all(l)
+ assert_equal(len(l),2)
+ l = [nx.Graph([(1,2)]),nx.Graph([(1,2)])]
+ R = nx.intersection_all(l)
+ assert_equal(len(l),2)
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/operators/tests/test_binary.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/operators/tests/test_binary.py
new file mode 100644
index 0000000..34b7e6e
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/operators/tests/test_binary.py
@@ -0,0 +1,270 @@
+from nose.tools import *
+import networkx as nx
+from networkx import *
+from networkx.testing import *
+
+def test_union_attributes():
+ g = nx.Graph()
+ g.add_node(0, x=4)
+ g.add_node(1, x=5)
+ g.add_edge(0, 1, size=5)
+ g.graph['name'] = 'g'
+
+ h = g.copy()
+ h.graph['name'] = 'h'
+ h.graph['attr'] = 'attr'
+ h.node[0]['x'] = 7
+
+ gh = nx.union(g, h, rename=('g', 'h'))
+ assert_equal( set(gh.nodes()) , set(['h0', 'h1', 'g0', 'g1']) )
+ for n in gh:
+ graph, node = n
+ assert_equal( gh.node[n], eval(graph).node[int(node)] )
+
+ assert_equal(gh.graph['attr'],'attr')
+    assert_equal(gh.graph['name'],'h') # h graph attributes take precedence
+
+def test_intersection():
+ G=nx.Graph()
+ H=nx.Graph()
+ G.add_nodes_from([1,2,3,4])
+ G.add_edge(1,2)
+ G.add_edge(2,3)
+ H.add_nodes_from([1,2,3,4])
+ H.add_edge(2,3)
+ H.add_edge(3,4)
+ I=nx.intersection(G,H)
+ assert_equal( set(I.nodes()) , set([1,2,3,4]) )
+ assert_equal( sorted(I.edges()) , [(2,3)] )
+
+
+def test_intersection_attributes():
+ g = nx.Graph()
+ g.add_node(0, x=4)
+ g.add_node(1, x=5)
+ g.add_edge(0, 1, size=5)
+ g.graph['name'] = 'g'
+
+ h = g.copy()
+ h.graph['name'] = 'h'
+ h.graph['attr'] = 'attr'
+ h.node[0]['x'] = 7
+
+ gh = nx.intersection(g, h)
+ assert_equal( set(gh.nodes()) , set(g.nodes()) )
+ assert_equal( set(gh.nodes()) , set(h.nodes()) )
+ assert_equal( sorted(gh.edges()) , sorted(g.edges()) )
+
+ h.remove_node(0)
+ assert_raises(nx.NetworkXError, nx.intersection, g, h)
+
+
+
+def test_intersection_multigraph_attributes():
+ g = nx.MultiGraph()
+ g.add_edge(0, 1, key=0)
+ g.add_edge(0, 1, key=1)
+ g.add_edge(0, 1, key=2)
+ h = nx.MultiGraph()
+ h.add_edge(0, 1, key=0)
+ h.add_edge(0, 1, key=3)
+ gh = nx.intersection(g, h)
+ assert_equal( set(gh.nodes()) , set(g.nodes()) )
+ assert_equal( set(gh.nodes()) , set(h.nodes()) )
+ assert_equal( sorted(gh.edges()) , [(0,1)] )
+ assert_equal( sorted(gh.edges(keys=True)) , [(0,1,0)] )
+
+
+def test_difference():
+ G=nx.Graph()
+ H=nx.Graph()
+ G.add_nodes_from([1,2,3,4])
+ G.add_edge(1,2)
+ G.add_edge(2,3)
+ H.add_nodes_from([1,2,3,4])
+ H.add_edge(2,3)
+ H.add_edge(3,4)
+ D=nx.difference(G,H)
+ assert_equal( set(D.nodes()) , set([1,2,3,4]) )
+ assert_equal( sorted(D.edges()) , [(1,2)] )
+ D=nx.difference(H,G)
+ assert_equal( set(D.nodes()) , set([1,2,3,4]) )
+ assert_equal( sorted(D.edges()) , [(3,4)] )
+ D=nx.symmetric_difference(G,H)
+ assert_equal( set(D.nodes()) , set([1,2,3,4]) )
+ assert_equal( sorted(D.edges()) , [(1,2),(3,4)] )
+
+
+def test_difference2():
+ G=nx.Graph()
+ H=nx.Graph()
+ G.add_nodes_from([1,2,3,4])
+ H.add_nodes_from([1,2,3,4])
+ G.add_edge(1,2)
+ H.add_edge(1,2)
+ G.add_edge(2,3)
+ D=nx.difference(G,H)
+ assert_equal( set(D.nodes()) , set([1,2,3,4]) )
+ assert_equal( sorted(D.edges()) , [(2,3)] )
+ D=nx.difference(H,G)
+ assert_equal( set(D.nodes()) , set([1,2,3,4]) )
+ assert_equal( sorted(D.edges()) , [] )
+ H.add_edge(3,4)
+ D=nx.difference(H,G)
+ assert_equal( set(D.nodes()) , set([1,2,3,4]) )
+ assert_equal( sorted(D.edges()) , [(3,4)] )
+
+
+def test_difference_attributes():
+ g = nx.Graph()
+ g.add_node(0, x=4)
+ g.add_node(1, x=5)
+ g.add_edge(0, 1, size=5)
+ g.graph['name'] = 'g'
+
+ h = g.copy()
+ h.graph['name'] = 'h'
+ h.graph['attr'] = 'attr'
+ h.node[0]['x'] = 7
+
+ gh = nx.difference(g, h)
+ assert_equal( set(gh.nodes()) , set(g.nodes()) )
+ assert_equal( set(gh.nodes()) , set(h.nodes()) )
+ assert_equal( sorted(gh.edges()) , [])
+
+ h.remove_node(0)
+ assert_raises(nx.NetworkXError, nx.intersection, g, h)
+
+def test_difference_multigraph_attributes():
+ g = nx.MultiGraph()
+ g.add_edge(0, 1, key=0)
+ g.add_edge(0, 1, key=1)
+ g.add_edge(0, 1, key=2)
+ h = nx.MultiGraph()
+ h.add_edge(0, 1, key=0)
+ h.add_edge(0, 1, key=3)
+ gh = nx.difference(g, h)
+ assert_equal( set(gh.nodes()) , set(g.nodes()) )
+ assert_equal( set(gh.nodes()) , set(h.nodes()) )
+ assert_equal( sorted(gh.edges()) , [(0,1),(0,1)] )
+ assert_equal( sorted(gh.edges(keys=True)) , [(0,1,1),(0,1,2)] )
+
+
+@raises(nx.NetworkXError)
+def test_difference_raise():
+ G = nx.path_graph(4)
+ H = nx.path_graph(3)
+ GH = nx.difference(G, H)
+
+def test_symmetric_difference_multigraph():
+ g = nx.MultiGraph()
+ g.add_edge(0, 1, key=0)
+ g.add_edge(0, 1, key=1)
+ g.add_edge(0, 1, key=2)
+ h = nx.MultiGraph()
+ h.add_edge(0, 1, key=0)
+ h.add_edge(0, 1, key=3)
+ gh = nx.symmetric_difference(g, h)
+ assert_equal( set(gh.nodes()) , set(g.nodes()) )
+ assert_equal( set(gh.nodes()) , set(h.nodes()) )
+ assert_equal( sorted(gh.edges()) , 3*[(0,1)] )
+ assert_equal( sorted(sorted(e) for e in gh.edges(keys=True)),
+ [[0,1,1],[0,1,2],[0,1,3]] )
+
+@raises(nx.NetworkXError)
+def test_symmetric_difference_raise():
+ G = nx.path_graph(4)
+ H = nx.path_graph(3)
+ GH = nx.symmetric_difference(G, H)
+
+def test_union_and_compose():
+ K3=complete_graph(3)
+ P3=path_graph(3)
+
+ G1=nx.DiGraph()
+ G1.add_edge('A','B')
+ G1.add_edge('A','C')
+ G1.add_edge('A','D')
+ G2=nx.DiGraph()
+ G2.add_edge('1','2')
+ G2.add_edge('1','3')
+ G2.add_edge('1','4')
+
+ G=union(G1,G2)
+ H=compose(G1,G2)
+ assert_edges_equal(G.edges(),H.edges())
+ assert_false(G.has_edge('A',1))
+ assert_raises(nx.NetworkXError, nx.union, K3, P3)
+ H1=union(H,G1,rename=('H','G1'))
+ assert_equal(sorted(H1.nodes()),
+ ['G1A', 'G1B', 'G1C', 'G1D',
+ 'H1', 'H2', 'H3', 'H4', 'HA', 'HB', 'HC', 'HD'])
+
+ H2=union(H,G2,rename=("H",""))
+ assert_equal(sorted(H2.nodes()),
+ ['1', '2', '3', '4',
+ 'H1', 'H2', 'H3', 'H4', 'HA', 'HB', 'HC', 'HD'])
+
+ assert_false(H1.has_edge('NB','NA'))
+
+ G=compose(G,G)
+ assert_edges_equal(G.edges(),H.edges())
+
+ G2=union(G2,G2,rename=('','copy'))
+ assert_equal(sorted(G2.nodes()),
+ ['1', '2', '3', '4', 'copy1', 'copy2', 'copy3', 'copy4'])
+
+ assert_equal(G2.neighbors('copy4'),[])
+ assert_equal(sorted(G2.neighbors('copy1')),['copy2', 'copy3', 'copy4'])
+ assert_equal(len(G),8)
+ assert_equal(number_of_edges(G),6)
+
+ E=disjoint_union(G,G)
+ assert_equal(len(E),16)
+ assert_equal(number_of_edges(E),12)
+
+ E=disjoint_union(G1,G2)
+ assert_equal(sorted(E.nodes()),[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
+
+
+def test_union_multigraph():
+ G=nx.MultiGraph()
+ G.add_edge(1,2,key=0)
+ G.add_edge(1,2,key=1)
+ H=nx.MultiGraph()
+ H.add_edge(3,4,key=0)
+ H.add_edge(3,4,key=1)
+ GH=nx.union(G,H)
+ assert_equal( set(GH) , set(G)|set(H))
+ assert_equal( set(GH.edges(keys=True)) ,
+ set(G.edges(keys=True))|set(H.edges(keys=True)))
+
+def test_disjoint_union_multigraph():
+ G=nx.MultiGraph()
+ G.add_edge(0,1,key=0)
+ G.add_edge(0,1,key=1)
+ H=nx.MultiGraph()
+ H.add_edge(2,3,key=0)
+ H.add_edge(2,3,key=1)
+ GH=nx.disjoint_union(G,H)
+ assert_equal( set(GH) , set(G)|set(H))
+ assert_equal( set(GH.edges(keys=True)) ,
+ set(G.edges(keys=True))|set(H.edges(keys=True)))
+
+
+def test_compose_multigraph():
+ G=nx.MultiGraph()
+ G.add_edge(1,2,key=0)
+ G.add_edge(1,2,key=1)
+ H=nx.MultiGraph()
+ H.add_edge(3,4,key=0)
+ H.add_edge(3,4,key=1)
+ GH=nx.compose(G,H)
+ assert_equal( set(GH) , set(G)|set(H))
+ assert_equal( set(GH.edges(keys=True)) ,
+ set(G.edges(keys=True))|set(H.edges(keys=True)))
+ H.add_edge(1,2,key=2)
+ GH=nx.compose(G,H)
+ assert_equal( set(GH) , set(G)|set(H))
+ assert_equal( set(GH.edges(keys=True)) ,
+ set(G.edges(keys=True))|set(H.edges(keys=True)))
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/operators/tests/test_product.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/operators/tests/test_product.py
new file mode 100644
index 0000000..b157aac
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/operators/tests/test_product.py
@@ -0,0 +1,334 @@
+import networkx as nx
+from networkx import tensor_product,cartesian_product,lexicographic_product,strong_product
+from nose.tools import assert_raises, assert_true, assert_equal, raises
+
+@raises(nx.NetworkXError)
+def test_tensor_product_raises():
+ P = tensor_product(nx.DiGraph(),nx.Graph())
+
+def test_tensor_product_null():
+ null=nx.null_graph()
+ empty10=nx.empty_graph(10)
+ K3=nx.complete_graph(3)
+ K10=nx.complete_graph(10)
+ P3=nx.path_graph(3)
+ P10=nx.path_graph(10)
+ # null graph
+ G=tensor_product(null,null)
+ assert_true(nx.is_isomorphic(G,null))
+ # null_graph X anything = null_graph and v.v.
+ G=tensor_product(null,empty10)
+ assert_true(nx.is_isomorphic(G,null))
+ G=tensor_product(null,K3)
+ assert_true(nx.is_isomorphic(G,null))
+ G=tensor_product(null,K10)
+ assert_true(nx.is_isomorphic(G,null))
+ G=tensor_product(null,P3)
+ assert_true(nx.is_isomorphic(G,null))
+ G=tensor_product(null,P10)
+ assert_true(nx.is_isomorphic(G,null))
+ G=tensor_product(empty10,null)
+ assert_true(nx.is_isomorphic(G,null))
+ G=tensor_product(K3,null)
+ assert_true(nx.is_isomorphic(G,null))
+ G=tensor_product(K10,null)
+ assert_true(nx.is_isomorphic(G,null))
+ G=tensor_product(P3,null)
+ assert_true(nx.is_isomorphic(G,null))
+ G=tensor_product(P10,null)
+ assert_true(nx.is_isomorphic(G,null))
+
+def test_tensor_product_size():
+ P5 = nx.path_graph(5)
+ K3 = nx.complete_graph(3)
+ K5 = nx.complete_graph(5)
+
+ G=tensor_product(P5,K3)
+ assert_equal(nx.number_of_nodes(G),5*3)
+ G=tensor_product(K3,K5)
+ assert_equal(nx.number_of_nodes(G),3*5)
+
+
+def test_tensor_product_combinations():
+    # basic smoke test, more realistic tests would be useful
+ P5 = nx.path_graph(5)
+ K3 = nx.complete_graph(3)
+ G=tensor_product(P5,K3)
+ assert_equal(nx.number_of_nodes(G),5*3)
+ G=tensor_product(P5,nx.MultiGraph(K3))
+ assert_equal(nx.number_of_nodes(G),5*3)
+ G=tensor_product(nx.MultiGraph(P5),K3)
+ assert_equal(nx.number_of_nodes(G),5*3)
+ G=tensor_product(nx.MultiGraph(P5),nx.MultiGraph(K3))
+ assert_equal(nx.number_of_nodes(G),5*3)
+
+ G=tensor_product(nx.DiGraph(P5),nx.DiGraph(K3))
+ assert_equal(nx.number_of_nodes(G),5*3)
+
+
+def test_tensor_product_classic_result():
+ K2 = nx.complete_graph(2)
+ G = nx.petersen_graph()
+ G = tensor_product(G,K2)
+ assert_true(nx.is_isomorphic(G,nx.desargues_graph()))
+
+ G = nx.cycle_graph(5)
+ G = tensor_product(G,K2)
+ assert_true(nx.is_isomorphic(G,nx.cycle_graph(10)))
+
+ G = nx.tetrahedral_graph()
+ G = tensor_product(G,K2)
+ assert_true(nx.is_isomorphic(G,nx.cubical_graph()))
+
+def test_tensor_product_random():
+ G = nx.erdos_renyi_graph(10,2/10.)
+ H = nx.erdos_renyi_graph(10,2/10.)
+ GH = tensor_product(G,H)
+
+ for (u_G,u_H) in GH.nodes_iter():
+ for (v_G,v_H) in GH.nodes_iter():
+ if H.has_edge(u_H,v_H) and G.has_edge(u_G,v_G):
+ assert_true(GH.has_edge((u_G,u_H),(v_G,v_H)))
+ else:
+ assert_true(not GH.has_edge((u_G,u_H),(v_G,v_H)))
+
+
+def test_cartesian_product_multigraph():
+ G=nx.MultiGraph()
+ G.add_edge(1,2,key=0)
+ G.add_edge(1,2,key=1)
+ H=nx.MultiGraph()
+ H.add_edge(3,4,key=0)
+ H.add_edge(3,4,key=1)
+ GH=cartesian_product(G,H)
+ assert_equal( set(GH) , set([(1, 3), (2, 3), (2, 4), (1, 4)]))
+ assert_equal( set(GH.edges(keys=True)) ,
+ set([((1, 3), (2, 3), 0), ((1, 3), (2, 3), 1),
+ ((1, 3), (1, 4), 0), ((1, 3), (1, 4), 1),
+ ((2, 3), (2, 4), 0), ((2, 3), (2, 4), 1),
+ ((2, 4), (1, 4), 0), ((2, 4), (1, 4), 1)]))
+
+@raises(nx.NetworkXError)
+def test_cartesian_product_raises():
+ P = cartesian_product(nx.DiGraph(),nx.Graph())
+
+def test_cartesian_product_null():
+ null=nx.null_graph()
+ empty10=nx.empty_graph(10)
+ K3=nx.complete_graph(3)
+ K10=nx.complete_graph(10)
+ P3=nx.path_graph(3)
+ P10=nx.path_graph(10)
+ # null graph
+ G=cartesian_product(null,null)
+ assert_true(nx.is_isomorphic(G,null))
+ # null_graph X anything = null_graph and v.v.
+ G=cartesian_product(null,empty10)
+ assert_true(nx.is_isomorphic(G,null))
+ G=cartesian_product(null,K3)
+ assert_true(nx.is_isomorphic(G,null))
+ G=cartesian_product(null,K10)
+ assert_true(nx.is_isomorphic(G,null))
+ G=cartesian_product(null,P3)
+ assert_true(nx.is_isomorphic(G,null))
+ G=cartesian_product(null,P10)
+ assert_true(nx.is_isomorphic(G,null))
+ G=cartesian_product(empty10,null)
+ assert_true(nx.is_isomorphic(G,null))
+ G=cartesian_product(K3,null)
+ assert_true(nx.is_isomorphic(G,null))
+ G=cartesian_product(K10,null)
+ assert_true(nx.is_isomorphic(G,null))
+ G=cartesian_product(P3,null)
+ assert_true(nx.is_isomorphic(G,null))
+ G=cartesian_product(P10,null)
+ assert_true(nx.is_isomorphic(G,null))
+
+def test_cartesian_product_size():
+ # order(GXH)=order(G)*order(H)
+ K5=nx.complete_graph(5)
+ P5=nx.path_graph(5)
+ K3=nx.complete_graph(3)
+ G=cartesian_product(P5,K3)
+ assert_equal(nx.number_of_nodes(G),5*3)
+ assert_equal(nx.number_of_edges(G),
+ nx.number_of_edges(P5)*nx.number_of_nodes(K3)+
+ nx.number_of_edges(K3)*nx.number_of_nodes(P5))
+ G=cartesian_product(K3,K5)
+ assert_equal(nx.number_of_nodes(G),3*5)
+ assert_equal(nx.number_of_edges(G),
+ nx.number_of_edges(K5)*nx.number_of_nodes(K3)+
+ nx.number_of_edges(K3)*nx.number_of_nodes(K5))
+
+def test_cartesian_product_classic():
+ # test some classic product graphs
+ P2 = nx.path_graph(2)
+ P3 = nx.path_graph(3)
+ # cube = 2-path X 2-path
+ G=cartesian_product(P2,P2)
+ G=cartesian_product(P2,G)
+ assert_true(nx.is_isomorphic(G,nx.cubical_graph()))
+
+ # 3x3 grid
+ G=cartesian_product(P3,P3)
+ assert_true(nx.is_isomorphic(G,nx.grid_2d_graph(3,3)))
+
+def test_cartesian_product_random():
+ G = nx.erdos_renyi_graph(10,2/10.)
+ H = nx.erdos_renyi_graph(10,2/10.)
+ GH = cartesian_product(G,H)
+
+ for (u_G,u_H) in GH.nodes_iter():
+ for (v_G,v_H) in GH.nodes_iter():
+ if (u_G==v_G and H.has_edge(u_H,v_H)) or \
+ (u_H==v_H and G.has_edge(u_G,v_G)):
+ assert_true(GH.has_edge((u_G,u_H),(v_G,v_H)))
+ else:
+ assert_true(not GH.has_edge((u_G,u_H),(v_G,v_H)))
+
+@raises(nx.NetworkXError)
+def test_lexicographic_product_raises():
+ P=lexicographic_product(nx.DiGraph(),nx.Graph())
+
+def test_lexicographic_product_null():
+ null=nx.null_graph()
+ empty10=nx.empty_graph(10)
+ K3=nx.complete_graph(3)
+ K10=nx.complete_graph(10)
+ P3=nx.path_graph(3)
+ P10=nx.path_graph(10)
+ # null graph
+ G=lexicographic_product(null,null)
+ assert_true(nx.is_isomorphic(G,null))
+ # null_graph X anything = null_graph and v.v.
+ G=lexicographic_product(null,empty10)
+ assert_true(nx.is_isomorphic(G,null))
+ G=lexicographic_product(null,K3)
+ assert_true(nx.is_isomorphic(G,null))
+ G=lexicographic_product(null,K10)
+ assert_true(nx.is_isomorphic(G,null))
+ G=lexicographic_product(null,P3)
+ assert_true(nx.is_isomorphic(G,null))
+ G=lexicographic_product(null,P10)
+ assert_true(nx.is_isomorphic(G,null))
+ G=lexicographic_product(empty10,null)
+ assert_true(nx.is_isomorphic(G,null))
+ G=lexicographic_product(K3,null)
+ assert_true(nx.is_isomorphic(G,null))
+ G=lexicographic_product(K10,null)
+ assert_true(nx.is_isomorphic(G,null))
+ G=lexicographic_product(P3,null)
+ assert_true(nx.is_isomorphic(G,null))
+ G=lexicographic_product(P10,null)
+ assert_true(nx.is_isomorphic(G,null))
+
+def test_lexicographic_product_size():
+ K5=nx.complete_graph(5)
+ P5=nx.path_graph(5)
+ K3=nx.complete_graph(3)
+ G=lexicographic_product(P5,K3)
+ assert_equal(nx.number_of_nodes(G),5*3)
+ G=lexicographic_product(K3,K5)
+ assert_equal(nx.number_of_nodes(G),3*5)
+
+def test_lexicographic_product_combinations():
+ P5=nx.path_graph(5)
+ K3=nx.complete_graph(3)
+ G=lexicographic_product(P5,K3)
+ assert_equal(nx.number_of_nodes(G),5*3)
+ G=lexicographic_product(nx.MultiGraph(P5),K3)
+ assert_equal(nx.number_of_nodes(G),5*3)
+ G=lexicographic_product(P5,nx.MultiGraph(K3))
+ assert_equal(nx.number_of_nodes(G),5*3)
+ G=lexicographic_product(nx.MultiGraph(P5),nx.MultiGraph(K3))
+ assert_equal(nx.number_of_nodes(G),5*3)
+
+
+# No easily found classic results for the lexicographic product
+def test_lexicographic_product_random():
+ G = nx.erdos_renyi_graph(10,2/10.)
+ H = nx.erdos_renyi_graph(10,2/10.)
+ GH = lexicographic_product(G,H)
+
+ for (u_G,u_H) in GH.nodes_iter():
+ for (v_G,v_H) in GH.nodes_iter():
+ if G.has_edge(u_G,v_G) or (u_G==v_G and H.has_edge(u_H,v_H)):
+ assert_true(GH.has_edge((u_G,u_H),(v_G,v_H)))
+ else:
+ assert_true(not GH.has_edge((u_G,u_H),(v_G,v_H)))
+
+@raises(nx.NetworkXError)
+def test_strong_product_raises():
+ P = strong_product(nx.DiGraph(),nx.Graph())
+
+def test_strong_product_null():
+ null=nx.null_graph()
+ empty10=nx.empty_graph(10)
+ K3=nx.complete_graph(3)
+ K10=nx.complete_graph(10)
+ P3=nx.path_graph(3)
+ P10=nx.path_graph(10)
+ # null graph
+ G=strong_product(null,null)
+ assert_true(nx.is_isomorphic(G,null))
+ # null_graph X anything = null_graph and v.v.
+ G=strong_product(null,empty10)
+ assert_true(nx.is_isomorphic(G,null))
+ G=strong_product(null,K3)
+ assert_true(nx.is_isomorphic(G,null))
+ G=strong_product(null,K10)
+ assert_true(nx.is_isomorphic(G,null))
+ G=strong_product(null,P3)
+ assert_true(nx.is_isomorphic(G,null))
+ G=strong_product(null,P10)
+ assert_true(nx.is_isomorphic(G,null))
+ G=strong_product(empty10,null)
+ assert_true(nx.is_isomorphic(G,null))
+ G=strong_product(K3,null)
+ assert_true(nx.is_isomorphic(G,null))
+ G=strong_product(K10,null)
+ assert_true(nx.is_isomorphic(G,null))
+ G=strong_product(P3,null)
+ assert_true(nx.is_isomorphic(G,null))
+ G=strong_product(P10,null)
+ assert_true(nx.is_isomorphic(G,null))
+
+def test_strong_product_size():
+ K5=nx.complete_graph(5)
+ P5=nx.path_graph(5)
+ K3 = nx.complete_graph(3)
+ G=strong_product(P5,K3)
+ assert_equal(nx.number_of_nodes(G),5*3)
+ G=strong_product(K3,K5)
+ assert_equal(nx.number_of_nodes(G),3*5)
+
+def test_strong_product_combinations():
+ P5=nx.path_graph(5)
+ K3 = nx.complete_graph(3)
+ G=strong_product(P5,K3)
+ assert_equal(nx.number_of_nodes(G),5*3)
+ G=strong_product(nx.MultiGraph(P5),K3)
+ assert_equal(nx.number_of_nodes(G),5*3)
+ G=strong_product(P5,nx.MultiGraph(K3))
+ assert_equal(nx.number_of_nodes(G),5*3)
+ G=strong_product(nx.MultiGraph(P5),nx.MultiGraph(K3))
+ assert_equal(nx.number_of_nodes(G),5*3)
+
+
+# No easily found classic results for the strong product
+def test_strong_product_random():
+ G = nx.erdos_renyi_graph(10,2/10.)
+ H = nx.erdos_renyi_graph(10,2/10.)
+ GH = strong_product(G,H)
+
+ for (u_G,u_H) in GH.nodes_iter():
+ for (v_G,v_H) in GH.nodes_iter():
+ if (u_G==v_G and H.has_edge(u_H,v_H)) or \
+ (u_H==v_H and G.has_edge(u_G,v_G)) or \
+ (G.has_edge(u_G,v_G) and H.has_edge(u_H,v_H)):
+ assert_true(GH.has_edge((u_G,u_H),(v_G,v_H)))
+ else:
+ assert_true(not GH.has_edge((u_G,u_H),(v_G,v_H)))
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/operators/tests/test_unary.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/operators/tests/test_unary.py
new file mode 100644
index 0000000..ea10d75
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/operators/tests/test_unary.py
@@ -0,0 +1,47 @@
+from nose.tools import *
+import networkx as nx
+from networkx import *
+
+
+def test_complement():
+ null=null_graph()
+ empty1=empty_graph(1)
+ empty10=empty_graph(10)
+ K3=complete_graph(3)
+ K5=complete_graph(5)
+ K10=complete_graph(10)
+ P2=path_graph(2)
+ P3=path_graph(3)
+ P5=path_graph(5)
+ P10=path_graph(10)
+ #complement of the complete graph is empty
+
+ G=complement(K3)
+ assert_true(is_isomorphic(G,empty_graph(3)))
+ G=complement(K5)
+ assert_true(is_isomorphic(G,empty_graph(5)))
+ # for any G, G=complement(complement(G))
+ P3cc=complement(complement(P3))
+ assert_true(is_isomorphic(P3,P3cc))
+ nullcc=complement(complement(null))
+ assert_true(is_isomorphic(null,nullcc))
+ b=bull_graph()
+ bcc=complement(complement(b))
+ assert_true(is_isomorphic(b,bcc))
+
+def test_complement_2():
+ G1=nx.DiGraph()
+ G1.add_edge('A','B')
+ G1.add_edge('A','C')
+ G1.add_edge('A','D')
+ G1C=complement(G1)
+ assert_equal(sorted(G1C.edges()),
+ [('B', 'A'), ('B', 'C'),
+ ('B', 'D'), ('C', 'A'), ('C', 'B'),
+ ('C', 'D'), ('D', 'A'), ('D', 'B'), ('D', 'C')])
+
+def test_reverse1():
+    # Other tests for reverse are done by the DiGraph and MultiDiGraph.
+ G1=nx.Graph()
+ assert_raises(nx.NetworkXError, nx.reverse, G1)
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/operators/unary.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/operators/unary.py
new file mode 100644
index 0000000..fbbb31e
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/operators/unary.py
@@ -0,0 +1,69 @@
+"""Unary operations on graphs"""
+# Copyright (C) 2004-2012 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+import networkx as nx
+from networkx.utils import is_string_like
+__author__ = """\n""".join(['Aric Hagberg (hagberg@lanl.gov)',
+ 'Pieter Swart (swart@lanl.gov)',
+ 'Dan Schult(dschult@colgate.edu)'])
+__all__ = ['complement', 'reverse']
+
+def complement(G, name=None):
+ """Return the graph complement of G.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph
+
+ name : string
+ Specify name for new graph
+
+ Returns
+ -------
+ GC : A new graph.
+
+ Notes
+    -----
+ Note that complement() does not create self-loops and also
+ does not produce parallel edges for MultiGraphs.
+
+ Graph, node, and edge data are not propagated to the new graph.
+ """
+ if name is None:
+ name="complement(%s)"%(G.name)
+ R=G.__class__()
+ R.name=name
+ R.add_nodes_from(G)
+ R.add_edges_from( ((n,n2)
+ for n,nbrs in G.adjacency_iter()
+ for n2 in G if n2 not in nbrs
+ if n != n2) )
+ return R
+
+def reverse(G, copy=True):
+ """Return the reverse directed graph of G.
+
+ Parameters
+ ----------
+ G : directed graph
+ A NetworkX directed graph
+ copy : bool
+ If True, then a new graph is returned. If False, then the graph is
+ reversed in place.
+
+ Returns
+ -------
+ H : directed graph
+ The reversed G.
+
+ """
+ if not G.is_directed():
+ raise nx.NetworkXError("Cannot reverse an undirected graph.")
+ else:
+ return G.reverse(copy=copy)
+
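+
+# A minimal usage sketch (editor's illustration, not part of the upstream
+# module): complement() empties a complete graph, and reverse() flips
+# every edge of a DiGraph. Runs only when the module is executed directly.
+if __name__ == '__main__':
+    K3 = nx.complete_graph(3)
+    print(sorted(complement(K3).edges()))  # []
+    D = nx.DiGraph([(1, 2), (2, 3)])
+    print(sorted(reverse(D).edges()))      # [(2, 1), (3, 2)]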
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/richclub.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/richclub.py
new file mode 100644
index 0000000..5701806
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/richclub.py
@@ -0,0 +1,101 @@
+# -*- coding: utf-8 -*-
+import networkx as nx
+__author__ = """\n""".join(['Ben Edwards',
+ 'Aric Hagberg <hagberg@lanl.gov>'])
+
+__all__ = ['rich_club_coefficient']
+
+def rich_club_coefficient(G, normalized=True, Q=100):
+ """Return the rich-club coefficient of the graph G.
+
+ The rich-club coefficient is the ratio, for every degree k, of the
+ number of actual to the number of potential edges for nodes
+ with degree greater than k:
+
+ .. math::
+
+        \\phi(k) = \\frac{2 E_k}{N_k (N_k - 1)}
+
+    where N_k is the number of nodes with degree larger than k, and
+    E_k is the number of edges among those nodes.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ normalized : bool (optional)
+ Normalize using randomized network (see [1]_)
+ Q : float (optional, default=100)
+ If normalized=True build a random network by performing
+ Q*M double-edge swaps, where M is the number of edges in G,
+ to use as a null-model for normalization.
+
+ Returns
+ -------
+ rc : dictionary
+ A dictionary, keyed by degree, with rich club coefficient values.
+
+ Examples
+ --------
+ >>> G = nx.Graph([(0,1),(0,2),(1,2),(1,3),(1,4),(4,5)])
+ >>> rc = nx.rich_club_coefficient(G,normalized=False)
+ >>> rc[0] # doctest: +SKIP
+ 0.4
+
+ Notes
+    -----
+ The rich club definition and algorithm are found in [1]_. This
+ algorithm ignores any edge weights and is not defined for directed
+ graphs or graphs with parallel edges or self loops.
+
+ Estimates for appropriate values of Q are found in [2]_.
+
+ References
+ ----------
+ .. [1] Julian J. McAuley, Luciano da Fontoura Costa, and Tibério S. Caetano,
+ "The rich-club phenomenon across complex network hierarchies",
+ Applied Physics Letters Vol 91 Issue 8, August 2007.
+ http://arxiv.org/abs/physics/0701290
+ .. [2] R. Milo, N. Kashtan, S. Itzkovitz, M. E. J. Newman, U. Alon,
+ "Uniform generation of random graphs with arbitrary degree
+ sequences", 2006. http://arxiv.org/abs/cond-mat/0312028
+ """
+    if G.is_multigraph() or G.is_directed():
+        raise Exception('rich_club_coefficient is not implemented for '
+                        'directed or multiedge graphs.')
+    if len(G.selfloop_edges()) > 0:
+        raise Exception('rich_club_coefficient is not implemented for '
+                        'graphs with self loops.')
+ rc=_compute_rc(G)
+ if normalized:
+ # make R a copy of G, randomize with Q*|E| double edge swaps
+ # and use rich_club coefficient of R to normalize
+ R = G.copy()
+ E = R.number_of_edges()
+ nx.double_edge_swap(R,Q*E,max_tries=Q*E*10)
+ rcran=_compute_rc(R)
+        for d in rc:
+            # assumes rcran[d] > 0; a zero value would mean the randomized
+            # network has no edges among nodes of degree > d
+            rc[d] /= rcran[d]
+ return rc
+
+
+def _compute_rc(G):
+ # compute rich club coefficient for all k degrees in G
+ deghist = nx.degree_histogram(G)
+ total = sum(deghist)
+    # number of nodes with degree > k; entries with nk <= 1 are dropped
+    # since phi(k) divides by nk*(nk-1)
+ nks = [total-cs for cs in nx.utils.cumulative_sum(deghist) if total-cs > 1]
+ deg=G.degree()
+ edge_degrees=sorted(sorted((deg[u],deg[v])) for u,v in G.edges_iter())
+ ek=G.number_of_edges()
+ k1,k2=edge_degrees.pop(0)
+ rc={}
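+    # sweep degrees in increasing order; an edge whose smaller endpoint
+    # degree k1 is <= d no longer joins two nodes of degree > d, so it is
+    # dropped from the running edge count ek before computing phi(d)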
+ for d,nk in zip(range(len(nks)),nks):
+ while k1 <= d:
+ if len(edge_degrees)==0:
+ break
+ k1,k2=edge_degrees.pop(0)
+ ek-=1
+ rc[d] = 2.0*ek/(nk*(nk-1))
+ return rc
+
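+
+# A minimal usage sketch (editor's illustration, not part of the upstream
+# module): the unnormalized coefficient on the docstring's example graph.
+if __name__ == '__main__':
+    G = nx.Graph([(0, 1), (0, 2), (1, 2), (1, 3), (1, 4), (4, 5)])
+    rc = rich_club_coefficient(G, normalized=False)
+    # all 6 nodes have degree > 0 and the graph has 6 edges, so
+    # phi(0) = 2*6/(6*5) = 0.4
+    print(rc[0])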
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/__init__.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/__init__.py
new file mode 100644
index 0000000..64846eb
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/__init__.py
@@ -0,0 +1,6 @@
+from networkx.algorithms.shortest_paths.generic import *
+from networkx.algorithms.shortest_paths.unweighted import *
+from networkx.algorithms.shortest_paths.weighted import *
+from networkx.algorithms.shortest_paths.astar import *
+from networkx.algorithms.shortest_paths.dense import *
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/astar.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/astar.py
new file mode 100644
index 0000000..7b25d64
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/astar.py
@@ -0,0 +1,159 @@
+# -*- coding: utf-8 -*-
+"""Shortest paths and path lengths using A* ("A star") algorithm.
+"""
+
+# Copyright (C) 2004-2011 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+
+from heapq import heappush, heappop
+from networkx import NetworkXError
+import networkx as nx
+
+__author__ = "\n".join(["Salim Fadhley <salimfadhley@gmail.com>",
+ "Matteo Dell'Amico <matteodellamico@gmail.com>"])
+__all__ = ['astar_path', 'astar_path_length']
+
+
+def astar_path(G, source, target, heuristic=None, weight='weight'):
+ """Return a list of nodes in a shortest path between source and target
+ using the A* ("A-star") algorithm.
+
+ There may be more than one shortest path. This returns only one.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ source : node
+ Starting node for path
+
+ target : node
+ Ending node for path
+
+ heuristic : function
+ A function to evaluate the estimate of the distance
+       from a node to the target. The function takes
+       two node arguments and must return a number.
+
+ weight: string, optional (default='weight')
+ Edge data key corresponding to the edge weight.
+
+ Raises
+ ------
+ NetworkXNoPath
+ If no path exists between source and target.
+
+ Examples
+ --------
+ >>> G=nx.path_graph(5)
+ >>> print(nx.astar_path(G,0,4))
+ [0, 1, 2, 3, 4]
+ >>> G=nx.grid_graph(dim=[3,3]) # nodes are two-tuples (x,y)
+ >>> def dist(a, b):
+ ... (x1, y1) = a
+ ... (x2, y2) = b
+ ... return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
+ >>> print(nx.astar_path(G,(0,0),(2,2),dist))
+ [(0, 0), (0, 1), (1, 1), (1, 2), (2, 2)]
+
+
+ See Also
+ --------
+ shortest_path, dijkstra_path
+
+ """
+ if G.is_multigraph():
+ raise NetworkXError("astar_path() not implemented for Multi(Di)Graphs")
+
+ if heuristic is None:
+ # The default heuristic is h=0 - same as Dijkstra's algorithm
+ def heuristic(u, v):
+ return 0
+ # The queue stores priority, node, cost to reach, and parent.
+ # Uses Python heapq to keep in priority order.
+ # Add each node's hash to the queue to prevent the underlying heap from
+ # attempting to compare the nodes themselves. The hash breaks ties in the
+    # priority and is guaranteed unique for all nodes in the graph.
+ queue = [(0, hash(source), source, 0, None)]
+
+ # Maps enqueued nodes to distance of discovered paths and the
+ # computed heuristics to target. We avoid computing the heuristics
+ # more than once and inserting the node into the queue too many times.
+ enqueued = {}
+ # Maps explored nodes to parent closest to the source.
+ explored = {}
+
+ while queue:
+ # Pop the smallest item from queue.
+ _, __, curnode, dist, parent = heappop(queue)
+
+ if curnode == target:
+ path = [curnode]
+ node = parent
+ while node is not None:
+ path.append(node)
+ node = explored[node]
+ path.reverse()
+ return path
+
+ if curnode in explored:
+ continue
+
+ explored[curnode] = parent
+
+ for neighbor, w in G[curnode].items():
+ if neighbor in explored:
+ continue
+ ncost = dist + w.get(weight, 1)
+ if neighbor in enqueued:
+ qcost, h = enqueued[neighbor]
+ # if qcost < ncost, a longer path to neighbor remains
+ # enqueued. Removing it would need to filter the whole
+                # queue; it's better just to leave it there and ignore
+ # it when we visit the node a second time.
+ if qcost <= ncost:
+ continue
+ else:
+ h = heuristic(neighbor, target)
+ enqueued[neighbor] = ncost, h
+ heappush(queue, (ncost + h, hash(neighbor), neighbor,
+ ncost, curnode))
+
+ raise nx.NetworkXNoPath("Node %s not reachable from %s" % (source, target))
+
+
+def astar_path_length(G, source, target, heuristic=None, weight='weight'):
+ """Return the length of the shortest path between source and target using
+ the A* ("A-star") algorithm.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ source : node
+ Starting node for path
+
+ target : node
+ Ending node for path
+
+ heuristic : function
+ A function to evaluate the estimate of the distance
+       from a node to the target. The function takes
+       two node arguments and must return a number.
+
+ Raises
+ ------
+ NetworkXNoPath
+ If no path exists between source and target.
+
+ See Also
+ --------
+ astar_path
+
+ """
+ path = astar_path(G, source, target, heuristic, weight)
+ return sum(G[u][v].get(weight, 1) for u, v in zip(path[:-1], path[1:]))
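+
+# A minimal usage sketch (editor's illustration, not part of the upstream
+# module): astar_path_length() is the weighted length of the path that
+# astar_path() returns, shown here with an admissible Manhattan heuristic.
+if __name__ == '__main__':
+    G = nx.grid_graph(dim=[3, 3])  # nodes are (x, y) two-tuples
+
+    def manhattan(a, b):
+        # never overestimates on an unweighted grid, so A* stays optimal
+        return abs(a[0] - b[0]) + abs(a[1] - b[1])
+
+    p = astar_path(G, (0, 0), (2, 2), heuristic=manhattan)
+    # edges are unweighted, so the length is just the edge count
+    assert astar_path_length(G, (0, 0), (2, 2), heuristic=manhattan) == len(p) - 1
+    print(p)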
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/dense.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/dense.py
new file mode 100644
index 0000000..4dccae6
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/dense.py
@@ -0,0 +1,156 @@
+# -*- coding: utf-8 -*-
+"""Floyd-Warshall algorithm for shortest paths.
+"""
+# Copyright (C) 2004-2012 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+import networkx as nx
+__author__ = """Aric Hagberg <aric.hagberg@gmail.com>"""
+__all__ = ['floyd_warshall',
+ 'floyd_warshall_predecessor_and_distance',
+ 'floyd_warshall_numpy']
+
+def floyd_warshall_numpy(G, nodelist=None, weight='weight'):
+ """Find all-pairs shortest path lengths using Floyd's algorithm.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ nodelist : list, optional
+ The rows and columns are ordered by the nodes in nodelist.
+ If nodelist is None then the ordering is produced by G.nodes().
+
+ weight: string, optional (default= 'weight')
+ Edge data key corresponding to the edge weight.
+
+ Returns
+ -------
+ distance : NumPy matrix
+ A matrix of shortest path distances between nodes.
+        If there is no path between two nodes, the corresponding matrix entry
+ will be Inf.
+
+ Notes
+    -----
+ Floyd's algorithm is appropriate for finding shortest paths in
+ dense graphs or graphs with negative weights when Dijkstra's
+ algorithm fails. This algorithm can still fail if there are
+ negative cycles. It has running time O(n^3) with running space of O(n^2).
+ """
+ try:
+ import numpy as np
+ except ImportError:
+        raise ImportError(
+            "floyd_warshall_numpy() requires numpy: http://scipy.org/")
+ A = nx.to_numpy_matrix(G, nodelist=nodelist, multigraph_weight=min,
+ weight=weight)
+ n,m = A.shape
+ I = np.identity(n)
+ A[A==0] = np.inf # set zero entries to inf
+ A[I==1] = 0 # except diagonal which should be zero
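+    # vectorized min-plus relaxation: A[i,:] + A[:,i] broadcasts to an
+    # n-by-n matrix of candidate distances routed through node i, and the
+    # elementwise minimum relaxes every (u, v) pair at once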
+ for i in range(n):
+ A = np.minimum(A, A[i,:] + A[:,i])
+ return A
+
+def floyd_warshall_predecessor_and_distance(G, weight='weight'):
+ """Find all-pairs shortest path lengths using Floyd's algorithm.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ weight: string, optional (default= 'weight')
+ Edge data key corresponding to the edge weight.
+
+ Returns
+ -------
+ predecessor,distance : dictionaries
+ Dictionaries, keyed by source and target, of predecessors and distances
+ in the shortest path.
+
+ Notes
+    -----
+ Floyd's algorithm is appropriate for finding shortest paths
+ in dense graphs or graphs with negative weights when Dijkstra's algorithm
+ fails. This algorithm can still fail if there are negative cycles.
+ It has running time O(n^3) with running space of O(n^2).
+
+ See Also
+ --------
+ floyd_warshall
+ floyd_warshall_numpy
+ all_pairs_shortest_path
+ all_pairs_shortest_path_length
+ """
+ from collections import defaultdict
+ # dictionary-of-dictionaries representation for dist and pred
+    # use some defaultdict magic here
+ # for dist the default is the floating point inf value
+ dist = defaultdict(lambda : defaultdict(lambda: float('inf')))
+ for u in G:
+ dist[u][u] = 0
+ pred = defaultdict(dict)
+ # initialize path distance dictionary to be the adjacency matrix
+ # also set the distance to self to 0 (zero diagonal)
+ undirected = not G.is_directed()
+ for u,v,d in G.edges(data=True):
+ e_weight = d.get(weight, 1.0)
+ dist[u][v] = min(e_weight, dist[u][v])
+ pred[u][v] = u
+ if undirected:
+ dist[v][u] = min(e_weight, dist[v][u])
+ pred[v][u] = v
+ for w in G:
+ for u in G:
+ for v in G:
+ if dist[u][v] > dist[u][w] + dist[w][v]:
+ dist[u][v] = dist[u][w] + dist[w][v]
+ pred[u][v] = pred[w][v]
+ return dict(pred),dict(dist)
+
+
+def floyd_warshall(G, weight='weight'):
+ """Find all-pairs shortest path lengths using Floyd's algorithm.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ weight: string, optional (default= 'weight')
+ Edge data key corresponding to the edge weight.
+
+
+ Returns
+ -------
+ distance : dict
+ A dictionary, keyed by source and target, of shortest paths distances
+ between nodes.
+
+ Notes
+    -----
+ Floyd's algorithm is appropriate for finding shortest paths
+ in dense graphs or graphs with negative weights when Dijkstra's algorithm
+ fails. This algorithm can still fail if there are negative cycles.
+ It has running time O(n^3) with running space of O(n^2).
+
+ See Also
+ --------
+ floyd_warshall_predecessor_and_distance
+ floyd_warshall_numpy
+ all_pairs_shortest_path
+ all_pairs_shortest_path_length
+ """
+ # could make this its own function to reduce memory costs
+ return floyd_warshall_predecessor_and_distance(G, weight=weight)[1]
+
+# fixture for nose tests
+def setup_module(module):
+ from nose import SkipTest
+ try:
+ import numpy
+    except ImportError:
+ raise SkipTest("NumPy not available")
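+
+# A minimal usage sketch (editor's illustration, not part of the upstream
+# module): walking the predecessor dictionary backwards from the target
+# recovers an actual shortest path, not just its length.
+if __name__ == '__main__':
+    G = nx.Graph()
+    G.add_weighted_edges_from([(0, 1, 2), (1, 2, 2), (0, 2, 10)])
+    pred, dist = floyd_warshall_predecessor_and_distance(G)
+    assert dist[0][2] == 4  # the two-hop route beats the weight-10 edge
+    path, node = [2], 2
+    while node != 0:
+        node = pred[0][node]
+        path.append(node)
+    path.reverse()
+    print(path)  # [0, 1, 2]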
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/generic.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/generic.py
new file mode 100644
index 0000000..a337cbb
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/generic.py
@@ -0,0 +1,392 @@
+# -*- coding: utf-8 -*-
+"""
+Compute the shortest paths and path lengths between nodes in the graph.
+
+These algorithms work with undirected and directed graphs.
+
+For directed graphs the paths can be computed in the reverse
+order by first flipping the edge orientation using R=G.reverse(copy=False).
+
+"""
+# Copyright (C) 2004-2012 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+import networkx as nx
+__author__ = """\n""".join(['Aric Hagberg <aric.hagberg@gmail.com>',
+ 'Sérgio Nery Simões <sergionery@gmail.com>'])
+__all__ = ['shortest_path', 'all_shortest_paths',
+ 'shortest_path_length', 'average_shortest_path_length',
+ 'has_path']
+
+def has_path(G, source, target):
+ """Return True if G has a path from source to target, False otherwise.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ source : node
+ Starting node for path
+
+ target : node
+ Ending node for path
+ """
+ try:
+        nx.shortest_path(G, source, target)
+ except nx.NetworkXNoPath:
+ return False
+ return True
+
+
+def shortest_path(G, source=None, target=None, weight=None):
+ """Compute shortest paths in the graph.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ source : node, optional
+ Starting node for path.
+ If not specified, compute shortest paths using all nodes as source nodes.
+
+ target : node, optional
+ Ending node for path.
+ If not specified, compute shortest paths using all nodes as target nodes.
+
+ weight : None or string, optional (default = None)
+ If None, every edge has weight/distance/cost 1.
+ If a string, use this edge attribute as the edge weight.
+ Any edge attribute not present defaults to 1.
+
+ Returns
+ -------
+ path: list or dictionary
+ All returned paths include both the source and target in the path.
+
+ If the source and target are both specified, return a single list
+ of nodes in a shortest path from the source to the target.
+
+ If only the source is specified, return a dictionary keyed by
+ targets with a list of nodes in a shortest path from the source
+ to one of the targets.
+
+ If only the target is specified, return a dictionary keyed by
+ sources with a list of nodes in a shortest path from one of the
+ sources to the target.
+
+    If neither the source nor target is specified, return a dictionary
+ of dictionaries with path[source][target]=[list of nodes in path].
+
+ Examples
+ --------
+ >>> G=nx.path_graph(5)
+ >>> print(nx.shortest_path(G,source=0,target=4))
+ [0, 1, 2, 3, 4]
+ >>> p=nx.shortest_path(G,source=0) # target not specified
+ >>> p[4]
+ [0, 1, 2, 3, 4]
+ >>> p=nx.shortest_path(G,target=4) # source not specified
+ >>> p[0]
+ [0, 1, 2, 3, 4]
+ >>> p=nx.shortest_path(G) # source,target not specified
+ >>> p[0][4]
+ [0, 1, 2, 3, 4]
+
+ Notes
+ -----
+ There may be more than one shortest path between a source and target.
+ This returns only one of them.
+
+ For digraphs this returns a shortest directed path. To find paths in the
+ reverse direction first use G.reverse(copy=False) to flip the edge
+ orientation.
+
+ See Also
+ --------
+ all_pairs_shortest_path()
+ all_pairs_dijkstra_path()
+ single_source_shortest_path()
+ single_source_dijkstra_path()
+ """
+ if source is None:
+ if target is None:
+ ## Find paths between all pairs.
+ if weight is None:
+ paths=nx.all_pairs_shortest_path(G)
+ else:
+ paths=nx.all_pairs_dijkstra_path(G,weight=weight)
+ else:
+ ## Find paths from all nodes co-accessible to the target.
+ directed = G.is_directed()
+ if directed:
+ G.reverse(copy=False)
+
+ if weight is None:
+ paths=nx.single_source_shortest_path(G,target)
+ else:
+ paths=nx.single_source_dijkstra_path(G,target,weight=weight)
+
+ # Now flip the paths so they go from a source to the target.
+ for target in paths:
+ paths[target] = list(reversed(paths[target]))
+
+ if directed:
+ G.reverse(copy=False)
+ else:
+ if target is None:
+ ## Find paths to all nodes accessible from the source.
+ if weight is None:
+ paths=nx.single_source_shortest_path(G,source)
+ else:
+ paths=nx.single_source_dijkstra_path(G,source,weight=weight)
+ else:
+ ## Find shortest source-target path.
+ if weight is None:
+ paths=nx.bidirectional_shortest_path(G,source,target)
+ else:
+ paths=nx.dijkstra_path(G,source,target,weight)
+
+ return paths
+
+
+def shortest_path_length(G, source=None, target=None, weight=None):
+ """Compute shortest path lengths in the graph.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ source : node, optional
+ Starting node for path.
+ If not specified, compute shortest path lengths using all nodes as
+ source nodes.
+
+ target : node, optional
+ Ending node for path.
+ If not specified, compute shortest path lengths using all nodes as
+ target nodes.
+
+ weight : None or string, optional (default = None)
+ If None, every edge has weight/distance/cost 1.
+ If a string, use this edge attribute as the edge weight.
+ Any edge attribute not present defaults to 1.
+
+ Returns
+ -------
+ length: int or dictionary
+ If the source and target are both specified, return the length of
+ the shortest path from the source to the target.
+
+ If only the source is specified, return a dictionary keyed by
+ targets whose values are the lengths of the shortest path from the
+ source to one of the targets.
+
+ If only the target is specified, return a dictionary keyed by
+ sources whose values are the lengths of the shortest path from one
+ of the sources to the target.
+
+    If neither the source nor target is specified, return a dictionary
+ of dictionaries with path[source][target]=L, where L is the length
+ of the shortest path from source to target.
+
+ Raises
+ ------
+ NetworkXNoPath
+ If no path exists between source and target.
+
+ Examples
+ --------
+ >>> G=nx.path_graph(5)
+ >>> print(nx.shortest_path_length(G,source=0,target=4))
+ 4
+ >>> p=nx.shortest_path_length(G,source=0) # target not specified
+ >>> p[4]
+ 4
+ >>> p=nx.shortest_path_length(G,target=4) # source not specified
+ >>> p[0]
+ 4
+ >>> p=nx.shortest_path_length(G) # source,target not specified
+ >>> p[0][4]
+ 4
+
+ Notes
+ -----
+ The length of the path is always 1 less than the number of nodes involved
+ in the path since the length measures the number of edges followed.
+
+ For digraphs this returns the shortest directed path length. To find path
+ lengths in the reverse direction use G.reverse(copy=False) first to flip
+ the edge orientation.
+
+ See Also
+ --------
+ all_pairs_shortest_path_length()
+ all_pairs_dijkstra_path_length()
+ single_source_shortest_path_length()
+ single_source_dijkstra_path_length()
+
+ """
+ if source is None:
+ if target is None:
+ ## Find paths between all pairs.
+ if weight is None:
+ paths=nx.all_pairs_shortest_path_length(G)
+ else:
+ paths=nx.all_pairs_dijkstra_path_length(G, weight=weight)
+ else:
+ ## Find paths from all nodes co-accessible to the target.
+ directed = G.is_directed()
+ if directed:
+ G.reverse(copy=False)
+
+ if weight is None:
+ paths=nx.single_source_shortest_path_length(G,target)
+ else:
+ paths=nx.single_source_dijkstra_path_length(G,target,
+ weight=weight)
+
+ if directed:
+ G.reverse(copy=False)
+ else:
+ if target is None:
+ ## Find paths to all nodes accessible from the source.
+ if weight is None:
+ paths=nx.single_source_shortest_path_length(G,source)
+ else:
+ paths=nx.single_source_dijkstra_path_length(G,source,weight=weight)
+ else:
+ ## Find shortest source-target path.
+ if weight is None:
+ p=nx.bidirectional_shortest_path(G,source,target)
+ paths=len(p)-1
+ else:
+ paths=nx.dijkstra_path_length(G,source,target,weight)
+ return paths
+
+
+def average_shortest_path_length(G, weight=None):
+ r"""Return the average shortest path length.
+
+ The average shortest path length is
+
+ .. math::
+
+        a = \sum_{s,t \in V} \frac{d(s, t)}{n(n-1)}
+
+ where `V` is the set of nodes in `G`,
+ `d(s, t)` is the shortest path from `s` to `t`,
+ and `n` is the number of nodes in `G`.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ weight : None or string, optional (default = None)
+ If None, every edge has weight/distance/cost 1.
+ If a string, use this edge attribute as the edge weight.
+ Any edge attribute not present defaults to 1.
+
+ Raises
+ ------
+ NetworkXError:
+ if the graph is not connected.
+
+ Examples
+ --------
+ >>> G=nx.path_graph(5)
+ >>> print(nx.average_shortest_path_length(G))
+ 2.0
+
+ For disconnected graphs you can compute the average shortest path
+    length for each component:
+
+    >>> G=nx.Graph([(1,2),(3,4)])
+ >>> for g in nx.connected_component_subgraphs(G):
+ ... print(nx.average_shortest_path_length(g))
+ 1.0
+ 1.0
+
+ """
+ if G.is_directed():
+ if not nx.is_weakly_connected(G):
+ raise nx.NetworkXError("Graph is not connected.")
+ else:
+ if not nx.is_connected(G):
+ raise nx.NetworkXError("Graph is not connected.")
+ avg=0.0
+ if weight is None:
+ for node in G:
+ path_length=nx.single_source_shortest_path_length(G, node)
+ avg += sum(path_length.values())
+ else:
+ for node in G:
+ path_length=nx.single_source_dijkstra_path_length(G, node, weight=weight)
+ avg += sum(path_length.values())
+ n=len(G)
+ return avg/(n*(n-1))
+
+
+def all_shortest_paths(G, source, target, weight=None):
+ """Compute all shortest paths in the graph.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ source : node
+ Starting node for path.
+
+ target : node
+ Ending node for path.
+
+ weight : None or string, optional (default = None)
+ If None, every edge has weight/distance/cost 1.
+ If a string, use this edge attribute as the edge weight.
+ Any edge attribute not present defaults to 1.
+
+ Returns
+ -------
+ paths: generator of lists
+ A generator of all paths between source and target.
+
+ Examples
+ --------
+ >>> G=nx.Graph()
+ >>> G.add_path([0,1,2])
+ >>> G.add_path([0,10,2])
+ >>> print([p for p in nx.all_shortest_paths(G,source=0,target=2)])
+ [[0, 1, 2], [0, 10, 2]]
+
+ Notes
+ -----
+ There may be many shortest paths between the source and target.
+
+ See Also
+ --------
+ shortest_path()
+ single_source_shortest_path()
+ all_pairs_shortest_path()
+ """
+ if weight is not None:
+ pred,dist = nx.dijkstra_predecessor_and_distance(G,source,weight=weight)
+ else:
+ pred = nx.predecessor(G,source)
+ if target not in pred:
+        raise nx.NetworkXNoPath("No path from %s to %s." % (source, target))
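+    # what follows is an iterative depth-first traversal of the
+    # predecessor DAG from target back to source: stack holds pairs of
+    # [node, index of the next predecessor to explore], and every branch
+    # that reaches the source yields one shortest path (reversed)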
+ stack = [[target,0]]
+ top = 0
+ while top >= 0:
+ node,i = stack[top]
+ if node == source:
+ yield [p for p,n in reversed(stack[:top+1])]
+ if len(pred[node]) > i:
+ top += 1
+ if top == len(stack):
+ stack.append([pred[node][i],0])
+ else:
+ stack[top] = [pred[node][i],0]
+ else:
+ stack[top-1][1] += 1
+ top -= 1
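+
+# A minimal usage sketch (editor's illustration, not part of the upstream
+# module) of the reverse-orientation idiom from the module docstring:
+# flipping a DiGraph turns "paths to t" into "paths from t".
+if __name__ == '__main__':
+    G = nx.DiGraph([(1, 2), (2, 3)])
+    print(shortest_path(G, source=1, target=3))  # [1, 2, 3]
+    R = G.reverse(copy=True)
+    # a path from 3 in R, read backwards, is a path to 3 in G
+    print(list(reversed(shortest_path(R, source=3, target=1))))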
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/tests/test_astar.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/tests/test_astar.py
new file mode 100644
index 0000000..81ba6ab
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/tests/test_astar.py
@@ -0,0 +1,137 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx as nx
+from random import random, choice
+
+class TestAStar:
+
+ def setUp(self):
+ self.XG=nx.DiGraph()
+ self.XG.add_edges_from([('s','u',{'weight':10}),
+ ('s','x',{'weight':5}),
+ ('u','v',{'weight':1}),
+ ('u','x',{'weight':2}),
+ ('v','y',{'weight':1}),
+ ('x','u',{'weight':3}),
+ ('x','v',{'weight':5}),
+ ('x','y',{'weight':2}),
+ ('y','s',{'weight':7}),
+ ('y','v',{'weight':6})])
+
+ def test_random_graph(self):
+
+ def dist(a, b):
+ (x1, y1) = a
+ (x2, y2) = b
+ return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
+
+ G = nx.Graph()
+
+ points = [(random(), random()) for _ in range(100)]
+
+ # Build a path from points[0] to points[-1] to be sure it exists
+ for p1, p2 in zip(points[:-1], points[1:]):
+ G.add_edge(p1, p2, weight=dist(p1, p2))
+
+ # Add other random edges
+ for _ in range(100):
+ p1, p2 = choice(points), choice(points)
+ G.add_edge(p1, p2, weight=dist(p1, p2))
+
+ path = nx.astar_path(G, points[0], points[-1], dist)
+ assert path == nx.dijkstra_path(G, points[0], points[-1])
+
+ def test_astar_directed(self):
+ assert nx.astar_path(self.XG,'s','v')==['s', 'x', 'u', 'v']
+ assert nx.astar_path_length(self.XG,'s','v')==9
+
+ def test_astar_multigraph(self):
+ G=nx.MultiDiGraph(self.XG)
+        assert_raises((TypeError,nx.NetworkXError),
+                      nx.astar_path, G, 's', 'v')
+        assert_raises((TypeError,nx.NetworkXError),
+                      nx.astar_path_length, G, 's', 'v')
+
+ def test_astar_undirected(self):
+ GG=self.XG.to_undirected()
+ # make sure we get lower weight
+ # to_undirected might choose either edge with weight 2 or weight 3
+ GG['u']['x']['weight']=2
+ GG['y']['v']['weight'] = 2
+ assert_equal(nx.astar_path(GG,'s','v'),['s', 'x', 'u', 'v'])
+ assert_equal(nx.astar_path_length(GG,'s','v'),8)
+
+ def test_astar_directed2(self):
+ XG2=nx.DiGraph()
+ XG2.add_edges_from([[1,4,{'weight':1}],
+ [4,5,{'weight':1}],
+ [5,6,{'weight':1}],
+ [6,3,{'weight':1}],
+ [1,3,{'weight':50}],
+ [1,2,{'weight':100}],
+ [2,3,{'weight':100}]])
+ assert nx.astar_path(XG2,1,3)==[1, 4, 5, 6, 3]
+
+ def test_astar_undirected2(self):
+ XG3=nx.Graph()
+ XG3.add_edges_from([ [0,1,{'weight':2}],
+ [1,2,{'weight':12}],
+ [2,3,{'weight':1}],
+ [3,4,{'weight':5}],
+ [4,5,{'weight':1}],
+ [5,0,{'weight':10}] ])
+ assert nx.astar_path(XG3,0,3)==[0, 1, 2, 3]
+ assert nx.astar_path_length(XG3,0,3)==15
+
+
+ def test_astar_undirected3(self):
+ XG4=nx.Graph()
+ XG4.add_edges_from([ [0,1,{'weight':2}],
+ [1,2,{'weight':2}],
+ [2,3,{'weight':1}],
+ [3,4,{'weight':1}],
+ [4,5,{'weight':1}],
+ [5,6,{'weight':1}],
+ [6,7,{'weight':1}],
+ [7,0,{'weight':1}] ])
+ assert nx.astar_path(XG4,0,2)==[0, 1, 2]
+ assert nx.astar_path_length(XG4,0,2)==4
+
+ def test_astar_w1(self):
+ G=nx.DiGraph()
+ G.add_edges_from([('s','u'), ('s','x'), ('u','v'), ('u','x'),
+ ('v','y'), ('x','u'), ('x','w'), ('w', 'v'), ('x','y'),
+ ('y','s'), ('y','v')])
+ assert nx.astar_path(G,'s','v')==['s', 'u', 'v']
+ assert nx.astar_path_length(G,'s','v')== 2
+
+ @raises(nx.NetworkXNoPath)
+ def test_astar_nopath(self):
+ p = nx.astar_path(self.XG,'s','moon')
+
+ def test_cycle(self):
+ C=nx.cycle_graph(7)
+ assert nx.astar_path(C,0,3)==[0, 1, 2, 3]
+ assert nx.dijkstra_path(C,0,4)==[0, 6, 5, 4]
+
+
+ def test_orderable(self):
+ class UnorderableClass: pass
+ node_1 = UnorderableClass()
+ node_2 = UnorderableClass()
+ node_3 = UnorderableClass()
+ node_4 = UnorderableClass()
+ G = nx.Graph()
+ G.add_edge(node_1, node_2)
+ G.add_edge(node_1, node_3)
+ G.add_edge(node_2, node_4)
+ G.add_edge(node_3, node_4)
+ path=nx.algorithms.shortest_paths.astar.astar_path(G, node_1, node_4)
+
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/tests/test_dense.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/tests/test_dense.py
new file mode 100644
index 0000000..6c170da
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/tests/test_dense.py
@@ -0,0 +1,106 @@
+#!/usr/bin/env python
+from nose.tools import *
+from nose import SkipTest
+import networkx as nx
+
+class TestFloyd:
+ def setUp(self):
+ pass
+
+ def test_floyd_warshall_predecessor_and_distance(self):
+ XG=nx.DiGraph()
+ XG.add_weighted_edges_from([('s','u',10) ,('s','x',5) ,
+ ('u','v',1) ,('u','x',2) ,
+ ('v','y',1) ,('x','u',3) ,
+ ('x','v',5) ,('x','y',2) ,
+ ('y','s',7) ,('y','v',6)])
+ path, dist =nx.floyd_warshall_predecessor_and_distance(XG)
+ assert_equal(dist['s']['v'],9)
+ assert_equal(path['s']['v'],'u')
+ assert_equal(dist,
+ {'y': {'y': 0, 'x': 12, 's': 7, 'u': 15, 'v': 6},
+ 'x': {'y': 2, 'x': 0, 's': 9, 'u': 3, 'v': 4},
+ 's': {'y': 7, 'x': 5, 's': 0, 'u': 8, 'v': 9},
+ 'u': {'y': 2, 'x': 2, 's': 9, 'u': 0, 'v': 1},
+ 'v': {'y': 1, 'x': 13, 's': 8, 'u': 16, 'v': 0}})
+
+
+ GG=XG.to_undirected()
+ # make sure we get lower weight
+ # to_undirected might choose either edge with weight 2 or weight 3
+ GG['u']['x']['weight']=2
+ path, dist = nx.floyd_warshall_predecessor_and_distance(GG)
+ assert_equal(dist['s']['v'],8)
+ # skip this test, could be alternate path s-u-v
+# assert_equal(path['s']['v'],'y')
+
+ G=nx.DiGraph() # no weights
+ G.add_edges_from([('s','u'), ('s','x'),
+ ('u','v'), ('u','x'),
+ ('v','y'), ('x','u'),
+ ('x','v'), ('x','y'),
+ ('y','s'), ('y','v')])
+ path, dist = nx.floyd_warshall_predecessor_and_distance(G)
+ assert_equal(dist['s']['v'],2)
+ # skip this test, could be alternate path s-u-v
+ # assert_equal(path['s']['v'],'x')
+
+ # alternate interface
+ dist = nx.floyd_warshall(G)
+ assert_equal(dist['s']['v'],2)
+
+ def test_cycle(self):
+ path, dist = nx.floyd_warshall_predecessor_and_distance(nx.cycle_graph(7))
+ assert_equal(dist[0][3],3)
+ assert_equal(path[0][3],2)
+ assert_equal(dist[0][4],3)
+
+ def test_weighted(self):
+ XG3=nx.Graph()
+ XG3.add_weighted_edges_from([ [0,1,2],[1,2,12],[2,3,1],
+ [3,4,5],[4,5,1],[5,0,10] ])
+ path, dist = nx.floyd_warshall_predecessor_and_distance(XG3)
+ assert_equal(dist[0][3],15)
+ assert_equal(path[0][3],2)
+
+ def test_weighted2(self):
+ XG4=nx.Graph()
+ XG4.add_weighted_edges_from([ [0,1,2],[1,2,2],[2,3,1],
+ [3,4,1],[4,5,1],[5,6,1],
+ [6,7,1],[7,0,1] ])
+ path, dist = nx.floyd_warshall_predecessor_and_distance(XG4)
+ assert_equal(dist[0][2],4)
+ assert_equal(path[0][2],1)
+
+ def test_weight_parameter(self):
+ XG4 = nx.Graph()
+ XG4.add_edges_from([ (0, 1, {'heavy': 2}), (1, 2, {'heavy': 2}),
+ (2, 3, {'heavy': 1}), (3, 4, {'heavy': 1}),
+ (4, 5, {'heavy': 1}), (5, 6, {'heavy': 1}),
+ (6, 7, {'heavy': 1}), (7, 0, {'heavy': 1}) ])
+ path, dist = nx.floyd_warshall_predecessor_and_distance(XG4,
+ weight='heavy')
+ assert_equal(dist[0][2], 4)
+ assert_equal(path[0][2], 1)
+
+ def test_zero_distance(self):
+ XG=nx.DiGraph()
+ XG.add_weighted_edges_from([('s','u',10) ,('s','x',5) ,
+ ('u','v',1) ,('u','x',2) ,
+ ('v','y',1) ,('x','u',3) ,
+ ('x','v',5) ,('x','y',2) ,
+ ('y','s',7) ,('y','v',6)])
+ path, dist =nx.floyd_warshall_predecessor_and_distance(XG)
+
+ for u in XG:
+ assert_equal(dist[u][u], 0)
+
+ GG=XG.to_undirected()
+ # make sure we get lower weight
+ # to_undirected might choose either edge with weight 2 or weight 3
+ GG['u']['x']['weight']=2
+ path, dist = nx.floyd_warshall_predecessor_and_distance(GG)
+
+ for u in GG:
+            assert_equal(dist[u][u], 0)
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/tests/test_dense_numpy.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/tests/test_dense_numpy.py
new file mode 100644
index 0000000..2fa0b67
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/tests/test_dense_numpy.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+from nose.tools import *
+from nose import SkipTest
+import networkx as nx
+
+class TestFloydNumpy(object):
+ numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
+ @classmethod
+ def setupClass(cls):
+ global numpy
+ global assert_equal
+ global assert_almost_equal
+ try:
+ import numpy
+ from numpy.testing import assert_equal,assert_almost_equal
+ except ImportError:
+ raise SkipTest('NumPy not available.')
+
+ def test_cycle_numpy(self):
+ dist = nx.floyd_warshall_numpy(nx.cycle_graph(7))
+ assert_equal(dist[0,3],3)
+ assert_equal(dist[0,4],3)
+
+ def test_weighted_numpy(self):
+ XG3=nx.Graph()
+ XG3.add_weighted_edges_from([ [0,1,2],[1,2,12],[2,3,1],
+ [3,4,5],[4,5,1],[5,0,10] ])
+ dist = nx.floyd_warshall_numpy(XG3)
+ assert_equal(dist[0,3],15)
+
+    def test_weighted2_numpy(self):
+ XG4=nx.Graph()
+ XG4.add_weighted_edges_from([ [0,1,2],[1,2,2],[2,3,1],
+ [3,4,1],[4,5,1],[5,6,1],
+ [6,7,1],[7,0,1] ])
+ dist = nx.floyd_warshall_numpy(XG4)
+ assert_equal(dist[0,2],4)
+
+ def test_weight_parameter_numpy(self):
+ XG4 = nx.Graph()
+ XG4.add_edges_from([ (0, 1, {'heavy': 2}), (1, 2, {'heavy': 2}),
+ (2, 3, {'heavy': 1}), (3, 4, {'heavy': 1}),
+ (4, 5, {'heavy': 1}), (5, 6, {'heavy': 1}),
+ (6, 7, {'heavy': 1}), (7, 0, {'heavy': 1}) ])
+ dist = nx.floyd_warshall_numpy(XG4, weight='heavy')
+ assert_equal(dist[0, 2], 4)
+
+ def test_directed_cycle_numpy(self):
+ G = nx.DiGraph()
+ G.add_cycle([0,1,2,3])
+ pred,dist = nx.floyd_warshall_predecessor_and_distance(G)
+ D = nx.utils.dict_to_numpy_array(dist)
+ assert_equal(nx.floyd_warshall_numpy(G),D)
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/tests/test_generic.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/tests/test_generic.py
new file mode 100644
index 0000000..edaa9f9
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/tests/test_generic.py
@@ -0,0 +1,145 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx as nx
+
+class TestGenericPath:
+
+ def setUp(self):
+ from networkx import convert_node_labels_to_integers as cnlti
+ self.grid=cnlti(nx.grid_2d_graph(4,4),first_label=1,ordering="sorted")
+ self.cycle=nx.cycle_graph(7)
+ self.directed_cycle=nx.cycle_graph(7,create_using=nx.DiGraph())
+
+
+ def test_shortest_path(self):
+ assert_equal(nx.shortest_path(self.cycle,0,3),[0, 1, 2, 3])
+ assert_equal(nx.shortest_path(self.cycle,0,4),[0, 6, 5, 4])
+ assert_equal(nx.shortest_path(self.grid,1,12),[1, 2, 3, 4, 8, 12])
+ assert_equal(nx.shortest_path(self.directed_cycle,0,3),[0, 1, 2, 3])
+ # now with weights
+ assert_equal(nx.shortest_path(self.cycle,0,3,weight='weight'),[0, 1, 2, 3])
+ assert_equal(nx.shortest_path(self.cycle,0,4,weight='weight'),[0, 6, 5, 4])
+ assert_equal(nx.shortest_path(self.grid,1,12,weight='weight'),[1, 2, 3, 4, 8, 12])
+ assert_equal(nx.shortest_path(self.directed_cycle,0,3,weight='weight'),
+ [0, 1, 2, 3])
+
+ def test_shortest_path_target(self):
+ sp = nx.shortest_path(nx.path_graph(3), target=1)
+ assert_equal(sp, {0: [0, 1], 1: [1], 2: [2, 1]})
+
+ def test_shortest_path_length(self):
+ assert_equal(nx.shortest_path_length(self.cycle,0,3),3)
+ assert_equal(nx.shortest_path_length(self.grid,1,12),5)
+ assert_equal(nx.shortest_path_length(self.directed_cycle,0,4),4)
+ # now with weights
+ assert_equal(nx.shortest_path_length(self.cycle,0,3,weight='weight'),3)
+ assert_equal(nx.shortest_path_length(self.grid,1,12,weight='weight'),5)
+ assert_equal(nx.shortest_path_length(self.directed_cycle,0,4,weight='weight'),4)
+
+ def test_shortest_path_length_target(self):
+ sp = nx.shortest_path_length(nx.path_graph(3), target=1)
+ assert_equal(sp[0], 1)
+ assert_equal(sp[1], 0)
+ assert_equal(sp[2], 1)
+
+ def test_single_source_shortest_path(self):
+ p=nx.shortest_path(self.cycle,0)
+ assert_equal(p[3],[0,1,2,3])
+ assert_equal(p,nx.single_source_shortest_path(self.cycle,0))
+ p=nx.shortest_path(self.grid,1)
+ assert_equal(p[12],[1, 2, 3, 4, 8, 12])
+ # now with weights
+ p=nx.shortest_path(self.cycle,0,weight='weight')
+ assert_equal(p[3],[0,1,2,3])
+ assert_equal(p,nx.single_source_dijkstra_path(self.cycle,0))
+ p=nx.shortest_path(self.grid,1,weight='weight')
+ assert_equal(p[12],[1, 2, 3, 4, 8, 12])
+
+
+ def test_single_source_shortest_path_length(self):
+ l=nx.shortest_path_length(self.cycle,0)
+ assert_equal(l,{0:0,1:1,2:2,3:3,4:3,5:2,6:1})
+ assert_equal(l,nx.single_source_shortest_path_length(self.cycle,0))
+ l=nx.shortest_path_length(self.grid,1)
+ assert_equal(l[16],6)
+ # now with weights
+ l=nx.shortest_path_length(self.cycle,0,weight='weight')
+ assert_equal(l,{0:0,1:1,2:2,3:3,4:3,5:2,6:1})
+ assert_equal(l,nx.single_source_dijkstra_path_length(self.cycle,0))
+ l=nx.shortest_path_length(self.grid,1,weight='weight')
+ assert_equal(l[16],6)
+
+
+ def test_all_pairs_shortest_path(self):
+ p=nx.shortest_path(self.cycle)
+ assert_equal(p[0][3],[0,1,2,3])
+ assert_equal(p,nx.all_pairs_shortest_path(self.cycle))
+ p=nx.shortest_path(self.grid)
+ assert_equal(p[1][12],[1, 2, 3, 4, 8, 12])
+ # now with weights
+ p=nx.shortest_path(self.cycle,weight='weight')
+ assert_equal(p[0][3],[0,1,2,3])
+ assert_equal(p,nx.all_pairs_dijkstra_path(self.cycle))
+ p=nx.shortest_path(self.grid,weight='weight')
+ assert_equal(p[1][12],[1, 2, 3, 4, 8, 12])
+
+
+ def test_all_pairs_shortest_path_length(self):
+ l=nx.shortest_path_length(self.cycle)
+ assert_equal(l[0],{0:0,1:1,2:2,3:3,4:3,5:2,6:1})
+ assert_equal(l,nx.all_pairs_shortest_path_length(self.cycle))
+ l=nx.shortest_path_length(self.grid)
+ assert_equal(l[1][16],6)
+ # now with weights
+ l=nx.shortest_path_length(self.cycle,weight='weight')
+ assert_equal(l[0],{0:0,1:1,2:2,3:3,4:3,5:2,6:1})
+ assert_equal(l,nx.all_pairs_dijkstra_path_length(self.cycle))
+ l=nx.shortest_path_length(self.grid,weight='weight')
+ assert_equal(l[1][16],6)
+
+ def test_average_shortest_path(self):
+ l=nx.average_shortest_path_length(self.cycle)
+ assert_almost_equal(l,2)
+ l=nx.average_shortest_path_length(nx.path_graph(5))
+ assert_almost_equal(l,2)
+
+
+ def test_weighted_average_shortest_path(self):
+ G=nx.Graph()
+ G.add_cycle(range(7),weight=2)
+ l=nx.average_shortest_path_length(G,weight='weight')
+ assert_almost_equal(l,4)
+ G=nx.Graph()
+ G.add_path(range(5),weight=2)
+ l=nx.average_shortest_path_length(G,weight='weight')
+ assert_almost_equal(l,4)
+
+
+ def test_average_shortest_disconnected(self):
+ g = nx.Graph()
+ g.add_nodes_from(range(3))
+ g.add_edge(0, 1)
+ assert_raises(nx.NetworkXError,nx.average_shortest_path_length,g)
+ g = g.to_directed()
+ assert_raises(nx.NetworkXError,nx.average_shortest_path_length,g)
+
+ def test_has_path(self):
+ G = nx.Graph()
+ G.add_path(range(3))
+ G.add_path(range(3,5))
+ assert_true(nx.has_path(G,0,2))
+ assert_false(nx.has_path(G,0,4))
+
+ def test_all_shortest_paths(self):
+ G = nx.Graph()
+ G.add_path([0,1,2,3])
+ G.add_path([0,10,20,3])
+ assert_equal([[0,1,2,3],[0,10,20,3]],
+ sorted(nx.all_shortest_paths(G,0,3)))
+
+ @raises(nx.NetworkXNoPath)
+ def test_all_shortest_paths_raise(self):
+ G = nx.Graph()
+ G.add_path([0,1,2,3])
+ G.add_node(4)
+ paths = list(nx.all_shortest_paths(G,0,4))
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/tests/test_unweighted.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/tests/test_unweighted.py
new file mode 100644
index 0000000..fc2abfb
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/tests/test_unweighted.py
@@ -0,0 +1,81 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx as nx
+
+class TestUnweightedPath:
+
+ def setUp(self):
+ from networkx import convert_node_labels_to_integers as cnlti
+ self.grid=cnlti(nx.grid_2d_graph(4,4),first_label=1,ordering="sorted")
+ self.cycle=nx.cycle_graph(7)
+ self.directed_cycle=nx.cycle_graph(7,create_using=nx.DiGraph())
+
+
+ def test_bidirectional_shortest_path(self):
+ assert_equal(nx.bidirectional_shortest_path(self.cycle,0,3),
+ [0, 1, 2, 3])
+ assert_equal(nx.bidirectional_shortest_path(self.cycle,0,4),
+ [0, 6, 5, 4])
+ assert_equal(nx.bidirectional_shortest_path(self.grid,1,12),
+ [1, 2, 3, 4, 8, 12])
+ assert_equal(nx.bidirectional_shortest_path(self.directed_cycle,0,3),
+ [0, 1, 2, 3])
+
+ def test_shortest_path_length(self):
+ assert_equal(nx.shortest_path_length(self.cycle,0,3),3)
+ assert_equal(nx.shortest_path_length(self.grid,1,12),5)
+ assert_equal(nx.shortest_path_length(self.directed_cycle,0,4),4)
+ # now with weights
+        assert_equal(nx.shortest_path_length(self.cycle,0,3,weight='weight'),3)
+        assert_equal(nx.shortest_path_length(self.grid,1,12,weight='weight'),5)
+        assert_equal(nx.shortest_path_length(self.directed_cycle,0,4,weight='weight'),4)
+
+
+ def test_single_source_shortest_path(self):
+ p=nx.single_source_shortest_path(self.cycle,0)
+ assert_equal(p[3],[0,1,2,3])
+ p=nx.single_source_shortest_path(self.cycle,0, cutoff=0)
+ assert_equal(p,{0 : [0]})
+
+ def test_single_source_shortest_path_length(self):
+ assert_equal(nx.single_source_shortest_path_length(self.cycle,0),
+ {0:0,1:1,2:2,3:3,4:3,5:2,6:1})
+
+ def test_all_pairs_shortest_path(self):
+ p=nx.all_pairs_shortest_path(self.cycle)
+ assert_equal(p[0][3],[0,1,2,3])
+ p=nx.all_pairs_shortest_path(self.grid)
+ assert_equal(p[1][12],[1, 2, 3, 4, 8, 12])
+
+ def test_all_pairs_shortest_path_length(self):
+ l=nx.all_pairs_shortest_path_length(self.cycle)
+ assert_equal(l[0],{0:0,1:1,2:2,3:3,4:3,5:2,6:1})
+ l=nx.all_pairs_shortest_path_length(self.grid)
+ assert_equal(l[1][16],6)
+
+ def test_predecessor(self):
+ G=nx.path_graph(4)
+ assert_equal(nx.predecessor(G,0),{0: [], 1: [0], 2: [1], 3: [2]})
+ assert_equal(nx.predecessor(G,0,3),[2])
+ G=nx.grid_2d_graph(2,2)
+ assert_equal(sorted(nx.predecessor(G,(0,0)).items()),
+ [((0, 0), []), ((0, 1), [(0, 0)]),
+ ((1, 0), [(0, 0)]), ((1, 1), [(0, 1), (1, 0)])])
+
+ def test_predecessor_cutoff(self):
+ G=nx.path_graph(4)
+ p = nx.predecessor(G,0,3)
+ assert_false(4 in p)
+
+ def test_predecessor_target(self):
+ G=nx.path_graph(4)
+ p = nx.predecessor(G,0,3)
+ assert_equal(p,[2])
+ p = nx.predecessor(G,0,3,cutoff=2)
+ assert_equal(p,[])
+ p,s = nx.predecessor(G,0,3,return_seen=True)
+ assert_equal(p,[2])
+ assert_equal(s,3)
+ p,s = nx.predecessor(G,0,3,cutoff=2,return_seen=True)
+ assert_equal(p,[])
+ assert_equal(s,-1)
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/tests/test_weighted.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/tests/test_weighted.py
new file mode 100644
index 0000000..c3998d4
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/tests/test_weighted.py
@@ -0,0 +1,246 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx as nx
+
+class TestWeightedPath:
+
+ def setUp(self):
+ from networkx import convert_node_labels_to_integers as cnlti
+ self.grid=cnlti(nx.grid_2d_graph(4,4),first_label=1,ordering="sorted")
+ self.cycle=nx.cycle_graph(7)
+ self.directed_cycle=nx.cycle_graph(7,create_using=nx.DiGraph())
+ self.XG=nx.DiGraph()
+ self.XG.add_weighted_edges_from([('s','u',10) ,('s','x',5) ,
+ ('u','v',1) ,('u','x',2) ,
+ ('v','y',1) ,('x','u',3) ,
+ ('x','v',5) ,('x','y',2) ,
+ ('y','s',7) ,('y','v',6)])
+ self.MXG=nx.MultiDiGraph(self.XG)
+ self.MXG.add_edge('s','u',weight=15)
+ self.XG2=nx.DiGraph()
+ self.XG2.add_weighted_edges_from([[1,4,1],[4,5,1],
+ [5,6,1],[6,3,1],
+ [1,3,50],[1,2,100],[2,3,100]])
+
+ self.XG3=nx.Graph()
+ self.XG3.add_weighted_edges_from([ [0,1,2],[1,2,12],
+ [2,3,1],[3,4,5],
+ [4,5,1],[5,0,10] ])
+
+ self.XG4=nx.Graph()
+ self.XG4.add_weighted_edges_from([ [0,1,2],[1,2,2],
+ [2,3,1],[3,4,1],
+ [4,5,1],[5,6,1],
+ [6,7,1],[7,0,1] ])
+ self.MXG4=nx.MultiGraph(self.XG4)
+ self.MXG4.add_edge(0,1,weight=3)
+ self.G=nx.DiGraph() # no weights
+ self.G.add_edges_from([('s','u'), ('s','x'),
+ ('u','v'), ('u','x'),
+ ('v','y'), ('x','u'),
+ ('x','v'), ('x','y'),
+ ('y','s'), ('y','v')])
+
+ def test_dijkstra(self):
+ (D,P)= nx.single_source_dijkstra(self.XG,'s')
+ assert_equal(P['v'], ['s', 'x', 'u', 'v'])
+ assert_equal(D['v'],9)
+
+ assert_equal(nx.single_source_dijkstra_path(self.XG,'s')['v'],
+ ['s', 'x', 'u', 'v'])
+ assert_equal(nx.single_source_dijkstra_path_length(self.XG,'s')['v'],9)
+
+ assert_equal(nx.single_source_dijkstra(self.XG,'s')[1]['v'],
+ ['s', 'x', 'u', 'v'])
+
+ assert_equal(nx.single_source_dijkstra_path(self.MXG,'s')['v'],
+ ['s', 'x', 'u', 'v'])
+
+ GG=self.XG.to_undirected()
+ # make sure we get lower weight
+ # to_undirected might choose either edge with weight 2 or weight 3
+ GG['u']['x']['weight']=2
+ (D,P)= nx.single_source_dijkstra(GG,'s')
+ assert_equal(P['v'] , ['s', 'x', 'u', 'v'])
+ assert_equal(D['v'],8) # uses lower weight of 2 on u<->x edge
+ assert_equal(nx.dijkstra_path(GG,'s','v'), ['s', 'x', 'u', 'v'])
+ assert_equal(nx.dijkstra_path_length(GG,'s','v'),8)
+
+ assert_equal(nx.dijkstra_path(self.XG2,1,3), [1, 4, 5, 6, 3])
+ assert_equal(nx.dijkstra_path(self.XG3,0,3), [0, 1, 2, 3])
+ assert_equal(nx.dijkstra_path_length(self.XG3,0,3),15)
+ assert_equal(nx.dijkstra_path(self.XG4,0,2), [0, 1, 2])
+ assert_equal(nx.dijkstra_path_length(self.XG4,0,2), 4)
+ assert_equal(nx.dijkstra_path(self.MXG4,0,2), [0, 1, 2])
+ assert_equal(nx.single_source_dijkstra(self.G,'s','v')[1]['v'],
+ ['s', 'u', 'v'])
+ assert_equal(nx.single_source_dijkstra(self.G,'s')[1]['v'],
+ ['s', 'u', 'v'])
+
+ assert_equal(nx.dijkstra_path(self.G,'s','v'), ['s', 'u', 'v'])
+ assert_equal(nx.dijkstra_path_length(self.G,'s','v'), 2)
+
+ # NetworkXError: node s not reachable from moon
+ assert_raises(nx.NetworkXNoPath,nx.dijkstra_path,self.G,'s','moon')
+ assert_raises(nx.NetworkXNoPath,nx.dijkstra_path_length,self.G,'s','moon')
+
+ assert_equal(nx.dijkstra_path(self.cycle,0,3),[0, 1, 2, 3])
+ assert_equal(nx.dijkstra_path(self.cycle,0,4), [0, 6, 5, 4])
+
+ assert_equal(nx.single_source_dijkstra(self.cycle,0,0),({0:0}, {0:[0]}) )
+
+ def test_bidirectional_dijkstra(self):
+ assert_equal(nx.bidirectional_dijkstra(self.XG, 's', 'v'),
+ (9, ['s', 'x', 'u', 'v']))
+ (dist,path) = nx.bidirectional_dijkstra(self.G,'s','v')
+ assert_equal(dist,2)
+ # skip this test, correct path could also be ['s','u','v']
+# assert_equal(nx.bidirectional_dijkstra(self.G,'s','v'),
+# (2, ['s', 'x', 'v']))
+ assert_equal(nx.bidirectional_dijkstra(self.cycle,0,3),
+ (3, [0, 1, 2, 3]))
+ assert_equal(nx.bidirectional_dijkstra(self.cycle,0,4),
+ (3, [0, 6, 5, 4]))
+ assert_equal(nx.bidirectional_dijkstra(self.XG3,0,3),
+ (15, [0, 1, 2, 3]))
+ assert_equal(nx.bidirectional_dijkstra(self.XG4,0,2),
+ (4, [0, 1, 2]))
+
+ # need more tests here
+ assert_equal(nx.dijkstra_path(self.XG,'s','v'),
+ nx.single_source_dijkstra_path(self.XG,'s')['v'])
+
+
+ @raises(nx.NetworkXNoPath)
+ def test_bidirectional_dijkstra_no_path(self):
+ G = nx.Graph()
+ G.add_path([1,2,3])
+ G.add_path([4,5,6])
+ path = nx.bidirectional_dijkstra(G,1,6)
+
+ def test_dijkstra_predecessor(self):
+ G=nx.path_graph(4)
+ assert_equal(nx.dijkstra_predecessor_and_distance(G,0),
+ ({0: [], 1: [0], 2: [1], 3: [2]}, {0: 0, 1: 1, 2: 2, 3: 3}))
+ G=nx.grid_2d_graph(2,2)
+ pred,dist=nx.dijkstra_predecessor_and_distance(G,(0,0))
+ assert_equal(sorted(pred.items()),
+ [((0, 0), []), ((0, 1), [(0, 0)]),
+ ((1, 0), [(0, 0)]), ((1, 1), [(0, 1), (1, 0)])])
+ assert_equal(sorted(dist.items()),
+ [((0, 0), 0), ((0, 1), 1), ((1, 0), 1), ((1, 1), 2)])
+
+ XG=nx.DiGraph()
+ XG.add_weighted_edges_from([('s','u',10) ,('s','x',5) ,
+ ('u','v',1) ,('u','x',2) ,
+ ('v','y',1) ,('x','u',3) ,
+ ('x','v',5) ,('x','y',2) ,
+ ('y','s',7) ,('y','v',6)])
+ (P,D)= nx.dijkstra_predecessor_and_distance(XG,'s')
+ assert_equal(P['v'],['u'])
+ assert_equal(D['v'],9)
+ (P,D)= nx.dijkstra_predecessor_and_distance(XG,'s',cutoff=8)
+ assert_false('v' in D)
+
+ def test_single_source_dijkstra_path_length(self):
+ pl = nx.single_source_dijkstra_path_length
+ assert_equal(pl(self.MXG4,0)[2], 4)
+ spl = pl(self.MXG4,0,cutoff=2)
+ assert_false(2 in spl)
+
+ def test_bidirectional_dijkstra_multigraph(self):
+ G = nx.MultiGraph()
+ G.add_edge('a', 'b', weight=10)
+ G.add_edge('a', 'b', weight=100)
+ dp= nx.bidirectional_dijkstra(G, 'a', 'b')
+ assert_equal(dp,(10, ['a', 'b']))
+
+
+ def test_dijkstra_pred_distance_multigraph(self):
+ G = nx.MultiGraph()
+ G.add_edge('a', 'b', key='short',foo=5, weight=100)
+ G.add_edge('a', 'b', key='long',bar=1, weight=110)
+ p,d= nx.dijkstra_predecessor_and_distance(G, 'a')
+ assert_equal(p,{'a': [], 'b': ['a']})
+ assert_equal(d,{'a': 0, 'b': 100})
+
+ def test_negative_edge_cycle(self):
+ G = nx.cycle_graph(5, create_using = nx.DiGraph())
+ assert_equal(nx.negative_edge_cycle(G), False)
+ G.add_edge(8, 9, weight = -7)
+ G.add_edge(9, 8, weight = 3)
+ assert_equal(nx.negative_edge_cycle(G), True)
+ assert_raises(ValueError,nx.single_source_dijkstra_path_length,G,8)
+ assert_raises(ValueError,nx.single_source_dijkstra,G,8)
+ assert_raises(ValueError,nx.dijkstra_predecessor_and_distance,G,8)
+ G.add_edge(9,10)
+ assert_raises(ValueError,nx.bidirectional_dijkstra,G,8,10)
+
+ def test_bellman_ford(self):
+ # single node graph
+ G = nx.DiGraph()
+ G.add_node(0)
+ assert_equal(nx.bellman_ford(G, 0), ({0: None}, {0: 0}))
+ assert_raises(KeyError, nx.bellman_ford, G, 1)
+
+ # negative weight cycle
+ G = nx.cycle_graph(5, create_using = nx.DiGraph())
+ G.add_edge(1, 2, weight = -7)
+ for i in range(5):
+ assert_raises(nx.NetworkXUnbounded, nx.bellman_ford, G, i)
+ G = nx.cycle_graph(5) # undirected Graph
+ G.add_edge(1, 2, weight = -3)
+ for i in range(5):
+ assert_raises(nx.NetworkXUnbounded, nx.bellman_ford, G, i)
+ # no negative cycle but negative weight
+ G = nx.cycle_graph(5, create_using = nx.DiGraph())
+ G.add_edge(1, 2, weight = -3)
+ assert_equal(nx.bellman_ford(G, 0),
+ ({0: None, 1: 0, 2: 1, 3: 2, 4: 3},
+ {0: 0, 1: 1, 2: -2, 3: -1, 4: 0}))
+
+ # not connected
+ G = nx.complete_graph(6)
+ G.add_edge(10, 11)
+ G.add_edge(10, 12)
+ assert_equal(nx.bellman_ford(G, 0),
+ ({0: None, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0},
+ {0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1}))
+
+ # not connected, with a component not containing the source that
+ # contains a negative cost cycle.
+ G = nx.complete_graph(6)
+ G.add_edges_from([('A', 'B', {'load': 3}),
+ ('B', 'C', {'load': -10}),
+ ('C', 'A', {'load': 2})])
+ assert_equal(nx.bellman_ford(G, 0, weight = 'load'),
+ ({0: None, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0},
+ {0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1}))
+
+ # multigraph
+ P, D = nx.bellman_ford(self.MXG,'s')
+ assert_equal(P['v'], 'u')
+ assert_equal(D['v'], 9)
+ P, D = nx.bellman_ford(self.MXG4, 0)
+ assert_equal(P[2], 1)
+ assert_equal(D[2], 4)
+
+ # other tests
+ (P,D)= nx.bellman_ford(self.XG,'s')
+ assert_equal(P['v'], 'u')
+ assert_equal(D['v'], 9)
+
+ G=nx.path_graph(4)
+ assert_equal(nx.bellman_ford(G,0),
+ ({0: None, 1: 0, 2: 1, 3: 2}, {0: 0, 1: 1, 2: 2, 3: 3}))
+ assert_equal(nx.bellman_ford(G, 3),
+ ({0: 1, 1: 2, 2: 3, 3: None}, {0: 3, 1: 2, 2: 1, 3: 0}))
+
+ G=nx.grid_2d_graph(2,2)
+ pred,dist=nx.bellman_ford(G,(0,0))
+ assert_equal(sorted(pred.items()),
+ [((0, 0), None), ((0, 1), (0, 0)),
+ ((1, 0), (0, 0)), ((1, 1), (0, 1))])
+ assert_equal(sorted(dist.items()),
+ [((0, 0), 0), ((0, 1), 1), ((1, 0), 1), ((1, 1), 2)])
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/unweighted.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/unweighted.py
new file mode 100644
index 0000000..b7ce18a
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/unweighted.py
@@ -0,0 +1,359 @@
+# -*- coding: utf-8 -*-
+"""
+Shortest path algorithms for unweighted graphs.
+"""
+__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
+# Copyright (C) 2004-2010 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+
+__all__ = ['bidirectional_shortest_path',
+ 'single_source_shortest_path',
+ 'single_source_shortest_path_length',
+ 'all_pairs_shortest_path',
+ 'all_pairs_shortest_path_length',
+ 'predecessor']
+
+
+import networkx as nx
+
+def single_source_shortest_path_length(G,source,cutoff=None):
+ """Compute the shortest path lengths from source to all reachable nodes.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ source : node
+ Starting node for path
+
+ cutoff : integer, optional
+ Depth to stop the search. Only paths of length <= cutoff are returned.
+
+ Returns
+ -------
+ lengths : dictionary
+ Dictionary of shortest path lengths keyed by target.
+
+ Examples
+ --------
+ >>> G=nx.path_graph(5)
+ >>> length=nx.single_source_shortest_path_length(G,0)
+ >>> length[4]
+ 4
+ >>> print(length)
+ {0: 0, 1: 1, 2: 2, 3: 3, 4: 4}
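+
+    With a cutoff, only nodes within that many hops are reported:
+
+    >>> nx.single_source_shortest_path_length(G,0,cutoff=2)
+    {0: 0, 1: 1, 2: 2}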
+
+ See Also
+ --------
+ shortest_path_length
+ """
+ seen={} # level (number of hops) when seen in BFS
+ level=0 # the current level
+ nextlevel={source:1} # dict of nodes to check at next level
+ while nextlevel:
+ thislevel=nextlevel # advance to next level
+        nextlevel={}         # and start a new dict (the fringe)
+ for v in thislevel:
+ if v not in seen:
+ seen[v]=level # set the level of vertex v
+ nextlevel.update(G[v]) # add neighbors of v
+ if (cutoff is not None and cutoff <= level): break
+ level=level+1
+ return seen # return all path lengths as dictionary
+
+
+def all_pairs_shortest_path_length(G,cutoff=None):
+ """ Compute the shortest path lengths between all nodes in G.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ cutoff : integer, optional
+ depth to stop the search. Only paths of length <= cutoff are returned.
+
+ Returns
+ -------
+ lengths : dictionary
+ Dictionary of shortest path lengths keyed by source and target.
+
+ Notes
+ -----
+ The dictionary returned only has keys for reachable node pairs.
+
+ Examples
+ --------
+ >>> G=nx.path_graph(5)
+ >>> length=nx.all_pairs_shortest_path_length(G)
+ >>> print(length[1][4])
+ 3
+ >>> length[1]
+ {0: 1, 1: 0, 2: 1, 3: 2, 4: 3}
+
+ """
+ paths={}
+ for n in G:
+ paths[n]=single_source_shortest_path_length(G,n,cutoff=cutoff)
+ return paths
+
+
+
+
+def bidirectional_shortest_path(G,source,target):
+ """Return a list of nodes in a shortest path between source and target.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ source : node label
+ starting node for path
+
+ target : node label
+ ending node for path
+
+ Returns
+ -------
+ path: list
+ List of nodes in a path from source to target.
+
+ Raises
+ ------
+ NetworkXNoPath
+ If no path exists between source and target.
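+
+    Examples
+    --------
+    On a path graph the only shortest path is the path itself:
+
+    >>> G=nx.path_graph(5)
+    >>> nx.bidirectional_shortest_path(G,0,4)
+    [0, 1, 2, 3, 4]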
+
+ See Also
+ --------
+ shortest_path
+
+ Notes
+ -----
+ This algorithm is used by shortest_path(G,source,target).
+ """
+ # call helper to do the real work
+ results=_bidirectional_pred_succ(G,source,target)
+ pred,succ,w=results
+
+ # build path from pred+w+succ
+ path=[]
+ # from w to target
+ while w is not None:
+ path.append(w)
+ w=succ[w]
+ # from source to w
+ w=pred[path[0]]
+ while w is not None:
+ path.insert(0,w)
+ w=pred[w]
+
+ return path
+
+def _bidirectional_pred_succ(G, source, target):
+ """Bidirectional shortest path helper.
+
+ Returns (pred,succ,w) where
+ pred is a dictionary of predecessors from w to the source, and
+ succ is a dictionary of successors from w to the target.
+ """
+ # does BFS from both source and target and meets in the middle
+ if target == source:
+ return ({target:None},{source:None},source)
+
+ # handle either directed or undirected
+ if G.is_directed():
+ Gpred=G.predecessors_iter
+ Gsucc=G.successors_iter
+ else:
+ Gpred=G.neighbors_iter
+ Gsucc=G.neighbors_iter
+
+    # predecessor and successor dicts for the two searches
+ pred={source:None}
+ succ={target:None}
+
+ # initialize fringes, start with forward
+ forward_fringe=[source]
+ reverse_fringe=[target]
+
+ while forward_fringe and reverse_fringe:
+ if len(forward_fringe) <= len(reverse_fringe):
+ this_level=forward_fringe
+ forward_fringe=[]
+ for v in this_level:
+ for w in Gsucc(v):
+ if w not in pred:
+ forward_fringe.append(w)
+ pred[w]=v
+ if w in succ: return pred,succ,w # found path
+ else:
+ this_level=reverse_fringe
+ reverse_fringe=[]
+ for v in this_level:
+ for w in Gpred(v):
+ if w not in succ:
+ succ[w]=v
+ reverse_fringe.append(w)
+ if w in pred: return pred,succ,w # found path
+
+ raise nx.NetworkXNoPath("No path between %s and %s." % (source, target))
+
+
+def single_source_shortest_path(G,source,cutoff=None):
+ """Compute shortest path between source
+ and all other nodes reachable from source.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ source : node label
+ Starting node for path
+
+ cutoff : integer, optional
+ Depth to stop the search. Only paths of length <= cutoff are returned.
+
+ Returns
+ -------
+    paths : dictionary
+ Dictionary, keyed by target, of shortest paths.
+
+ Examples
+ --------
+ >>> G=nx.path_graph(5)
+ >>> path=nx.single_source_shortest_path(G,0)
+ >>> path[4]
+ [0, 1, 2, 3, 4]
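+
+    With a cutoff, only targets within that many hops keep a path:
+
+    >>> sorted(nx.single_source_shortest_path(G,0,cutoff=2))
+    [0, 1, 2]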
+
+ Notes
+ -----
+ The shortest path is not necessarily unique. So there can be multiple
+ paths between the source and each target node, all of which have the
+ same 'shortest' length. For each target node, this function returns
+ only one of those paths.
+
+ See Also
+ --------
+ shortest_path
+ """
+ level=0 # the current level
+    nextlevel={source:1}     # dict of nodes to check at next level
+    paths={source:[source]}  # dictionary of paths, keyed by target node
+ if cutoff==0:
+ return paths
+ while nextlevel:
+ thislevel=nextlevel
+ nextlevel={}
+ for v in thislevel:
+ for w in G[v]:
+ if w not in paths:
+ paths[w]=paths[v]+[w]
+ nextlevel[w]=1
+ level=level+1
+ if (cutoff is not None and cutoff <= level): break
+ return paths
+
+
+def all_pairs_shortest_path(G,cutoff=None):
+ """ Compute shortest paths between all nodes.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ cutoff : integer, optional
+ Depth to stop the search. Only paths of length <= cutoff are returned.
+
+ Returns
+ -------
+    paths : dictionary
+ Dictionary, keyed by source and target, of shortest paths.
+
+ Examples
+ --------
+ >>> G=nx.path_graph(5)
+ >>> path=nx.all_pairs_shortest_path(G)
+ >>> print(path[0][4])
+ [0, 1, 2, 3, 4]
+
+ See Also
+ --------
+ floyd_warshall()
+
+ """
+ paths={}
+ for n in G:
+ paths[n]=single_source_shortest_path(G,n,cutoff=cutoff)
+ return paths
+
+
+
+
+def predecessor(G,source,target=None,cutoff=None,return_seen=None):
+ """ Returns dictionary of predecessors for the path from source to all nodes in G.
+
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ source : node label
+ Starting node for path
+
+    target : node label, optional
+        Ending node for path. If provided, only the predecessors between
+        source and target are returned.
+
+    cutoff : integer, optional
+        Depth to stop the search. Only paths of length <= cutoff are returned.
+
+    return_seen : bool, optional
+        If True, return a tuple (pred, seen) where seen maps each node
+        to the level (number of hops from the source) at which it was
+        first reached.
+
+
+ Returns
+ -------
+ pred : dictionary
+ Dictionary, keyed by node, of predecessors in the shortest path.
+
+ Examples
+ --------
+ >>> G=nx.path_graph(4)
+ >>> print(G.nodes())
+ [0, 1, 2, 3]
+ >>> nx.predecessor(G,0)
+ {0: [], 1: [0], 2: [1], 3: [2]}
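+
+    Given a target, only that node's predecessor list is returned;
+    return_seen adds the hop count at which the target was reached:
+
+    >>> nx.predecessor(G,0,3)
+    [2]
+    >>> nx.predecessor(G,0,3,return_seen=True)
+    ([2], 3)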
+
+ """
+ level=0 # the current level
+ nextlevel=[source] # list of nodes to check at next level
+ seen={source:level} # level (number of hops) when seen in BFS
+ pred={source:[]} # predecessor dictionary
+ while nextlevel:
+ level=level+1
+ thislevel=nextlevel
+ nextlevel=[]
+ for v in thislevel:
+ for w in G[v]:
+ if w not in seen:
+ pred[w]=[v]
+ seen[w]=level
+ nextlevel.append(w)
+ elif (seen[w]==level):# add v to predecessor list if it
+ pred[w].append(v) # is at the correct level
+ if (cutoff and cutoff <= level):
+ break
+
+ if target is not None:
+ if return_seen:
+            if target not in pred: return ([],-1) # No predecessor
+ return (pred[target],seen[target])
+ else:
+            if target not in pred: return [] # No predecessor
+ return pred[target]
+ else:
+ if return_seen:
+ return (pred,seen)
+ else:
+ return pred
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/weighted.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/weighted.py
new file mode 100644
index 0000000..41757f9
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/shortest_paths/weighted.py
@@ -0,0 +1,765 @@
+# -*- coding: utf-8 -*-
+"""
+Shortest path algorithms for weighted graphs.
+"""
+__author__ = """\n""".join(['Aric Hagberg <hagberg@lanl.gov>',
+ 'Loïc Séguin-C. <loicseguin@gmail.com>',
+ 'Dan Schult <dschult@colgate.edu>'])
+# Copyright (C) 2004-2011 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+
+__all__ = ['dijkstra_path',
+ 'dijkstra_path_length',
+ 'bidirectional_dijkstra',
+ 'single_source_dijkstra',
+ 'single_source_dijkstra_path',
+ 'single_source_dijkstra_path_length',
+ 'all_pairs_dijkstra_path',
+ 'all_pairs_dijkstra_path_length',
+ 'dijkstra_predecessor_and_distance',
+ 'bellman_ford','negative_edge_cycle']
+
+import heapq
+import networkx as nx
+from networkx.utils import generate_unique_node
+
+def dijkstra_path(G, source, target, weight='weight'):
+ """Returns the shortest path from source to target in a weighted graph G.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ source : node
+ Starting node
+
+ target : node
+ Ending node
+
+ weight: string, optional (default='weight')
+ Edge data key corresponding to the edge weight
+
+ Returns
+ -------
+ path : list
+ List of nodes in a shortest path.
+
+ Raises
+ ------
+ NetworkXNoPath
+ If no path exists between source and target.
+
+ Examples
+ --------
+ >>> G=nx.path_graph(5)
+ >>> print(nx.dijkstra_path(G,0,4))
+ [0, 1, 2, 3, 4]
+
+ Notes
+    -----
+ Edge weight attributes must be numerical.
+ Distances are calculated as sums of weighted edges traversed.
+
+ See Also
+ --------
+ bidirectional_dijkstra()
+ """
+ (length,path)=single_source_dijkstra(G, source, target=target,
+ weight=weight)
+ try:
+ return path[target]
+ except KeyError:
+        raise nx.NetworkXNoPath("node %s not reachable from %s"%(target,source))
+
+
+def dijkstra_path_length(G, source, target, weight='weight'):
+ """Returns the shortest path length from source to target
+ in a weighted graph.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ source : node label
+ starting node for path
+
+ target : node label
+ ending node for path
+
+ weight: string, optional (default='weight')
+ Edge data key corresponding to the edge weight
+
+ Returns
+ -------
+ length : number
+ Shortest path length.
+
+ Raises
+ ------
+ NetworkXNoPath
+ If no path exists between source and target.
+
+ Examples
+ --------
+ >>> G=nx.path_graph(5)
+ >>> print(nx.dijkstra_path_length(G,0,4))
+ 4
+
+ Notes
+ -----
+ Edge weight attributes must be numerical.
+ Distances are calculated as sums of weighted edges traversed.
+
+ See Also
+ --------
+ bidirectional_dijkstra()
+ """
+ length=single_source_dijkstra_path_length(G, source, weight=weight)
+ try:
+ return length[target]
+ except KeyError:
+        raise nx.NetworkXNoPath("node %s not reachable from %s"%(target,source))
+
+
+def single_source_dijkstra_path(G,source, cutoff=None, weight='weight'):
+ """Compute shortest path between source and all other reachable
+ nodes for a weighted graph.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ source : node
+ Starting node for path.
+
+ weight: string, optional (default='weight')
+ Edge data key corresponding to the edge weight
+
+ cutoff : integer or float, optional
+ Depth to stop the search. Only paths of length <= cutoff are returned.
+
+ Returns
+ -------
+ paths : dictionary
+        Dictionary of shortest paths keyed by target.
+
+ Examples
+ --------
+ >>> G=nx.path_graph(5)
+ >>> path=nx.single_source_dijkstra_path(G,0)
+ >>> path[4]
+ [0, 1, 2, 3, 4]
+
+ Notes
+ -----
+ Edge weight attributes must be numerical.
+ Distances are calculated as sums of weighted edges traversed.
+
+ See Also
+ --------
+ single_source_dijkstra()
+
+ """
+ (length,path)=single_source_dijkstra(G,source, cutoff = cutoff, weight = weight)
+ return path
+
+
+def single_source_dijkstra_path_length(G, source, cutoff= None,
+ weight= 'weight'):
+ """Compute the shortest path length between source and all other
+ reachable nodes for a weighted graph.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ source : node label
+ Starting node for path
+
+ weight: string, optional (default='weight')
+ Edge data key corresponding to the edge weight.
+
+ cutoff : integer or float, optional
+ Depth to stop the search. Only paths of length <= cutoff are returned.
+
+ Returns
+ -------
+ length : dictionary
+        Dictionary of shortest path lengths keyed by target.
+
+ Examples
+ --------
+ >>> G=nx.path_graph(5)
+ >>> length=nx.single_source_dijkstra_path_length(G,0)
+ >>> length[4]
+ 4
+ >>> print(length)
+ {0: 0, 1: 1, 2: 2, 3: 3, 4: 4}
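+
+    Here the cutoff bounds the weighted distance, not the hop count:
+
+    >>> nx.single_source_dijkstra_path_length(G,0,cutoff=2)
+    {0: 0, 1: 1, 2: 2}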
+
+ Notes
+ -----
+ Edge weight attributes must be numerical.
+ Distances are calculated as sums of weighted edges traversed.
+
+ See Also
+ --------
+ single_source_dijkstra()
+
+ """
+ dist = {} # dictionary of final distances
+ seen = {source:0}
+ fringe=[] # use heapq with (distance,label) tuples
+ heapq.heappush(fringe,(0,source))
+ while fringe:
+ (d,v)=heapq.heappop(fringe)
+ if v in dist:
+ continue # already searched this node.
+ dist[v] = d
+ #for ignore,w,edgedata in G.edges_iter(v,data=True):
+ #is about 30% slower than the following
+ if G.is_multigraph():
+ edata=[]
+ for w,keydata in G[v].items():
+ minweight=min((dd.get(weight,1)
+ for k,dd in keydata.items()))
+ edata.append((w,{weight:minweight}))
+ else:
+ edata=iter(G[v].items())
+
+ for w,edgedata in edata:
+ vw_dist = dist[v] + edgedata.get(weight,1)
+ if cutoff is not None:
+ if vw_dist>cutoff:
+ continue
+ if w in dist:
+ if vw_dist < dist[w]:
+ raise ValueError('Contradictory paths found:',
+ 'negative weights?')
+ elif w not in seen or vw_dist < seen[w]:
+ seen[w] = vw_dist
+ heapq.heappush(fringe,(vw_dist,w))
+ return dist
+
+
+def single_source_dijkstra(G,source,target=None,cutoff=None,weight='weight'):
+ """Compute shortest paths and lengths in a weighted graph G.
+
+ Uses Dijkstra's algorithm for shortest paths.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ source : node label
+ Starting node for path
+
+ target : node label, optional
+ Ending node for path
+
+    cutoff : integer or float, optional
+        Depth to stop the search. Only paths of length <= cutoff are returned.
+
+    weight: string, optional (default='weight')
+        Edge data key corresponding to the edge weight
+
+ Returns
+ -------
+ distance,path : dictionaries
+ Returns a tuple of two dictionaries keyed by node.
+ The first dictionary stores distance from the source.
+ The second stores the path from the source to that node.
+
+
+ Examples
+ --------
+ >>> G=nx.path_graph(5)
+ >>> length,path=nx.single_source_dijkstra(G,0)
+ >>> print(length[4])
+ 4
+ >>> print(length)
+ {0: 0, 1: 1, 2: 2, 3: 3, 4: 4}
+ >>> path[4]
+ [0, 1, 2, 3, 4]
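+
+    Giving a target lets the search stop as soon as that node is
+    settled, so nodes beyond it may be missing from the result:
+
+    >>> length,path=nx.single_source_dijkstra(G,0,2)
+    >>> length[2],path[2]
+    (2, [0, 1, 2])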
+
+ Notes
+    -----
+ Edge weight attributes must be numerical.
+ Distances are calculated as sums of weighted edges traversed.
+
+ Based on the Python cookbook recipe (119466) at
+ http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/119466
+
+ This algorithm is not guaranteed to work if edge weights
+ are negative or are floating point numbers
+ (overflows and roundoff errors can cause problems).
+
+ See Also
+ --------
+ single_source_dijkstra_path()
+ single_source_dijkstra_path_length()
+ """
+ if source==target:
+ return ({source:0}, {source:[source]})
+ dist = {} # dictionary of final distances
+ paths = {source:[source]} # dictionary of paths
+ seen = {source:0}
+ fringe=[] # use heapq with (distance,label) tuples
+ heapq.heappush(fringe,(0,source))
+ while fringe:
+ (d,v)=heapq.heappop(fringe)
+ if v in dist:
+ continue # already searched this node.
+ dist[v] = d
+ if v == target:
+ break
+ #for ignore,w,edgedata in G.edges_iter(v,data=True):
+ #is about 30% slower than the following
+ if G.is_multigraph():
+ edata=[]
+ for w,keydata in G[v].items():
+ minweight=min((dd.get(weight,1)
+ for k,dd in keydata.items()))
+ edata.append((w,{weight:minweight}))
+ else:
+ edata=iter(G[v].items())
+
+ for w,edgedata in edata:
+ vw_dist = dist[v] + edgedata.get(weight,1)
+ if cutoff is not None:
+ if vw_dist>cutoff:
+ continue
+ if w in dist:
+ if vw_dist < dist[w]:
+ raise ValueError('Contradictory paths found:',
+ 'negative weights?')
+ elif w not in seen or vw_dist < seen[w]:
+ seen[w] = vw_dist
+ heapq.heappush(fringe,(vw_dist,w))
+ paths[w] = paths[v]+[w]
+ return (dist,paths)
+
+
+def dijkstra_predecessor_and_distance(G,source, cutoff=None, weight='weight'):
+ """Compute shortest path length and predecessors on shortest paths
+ in weighted graphs.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ source : node label
+ Starting node for path
+
+ weight: string, optional (default='weight')
+ Edge data key corresponding to the edge weight
+
+ cutoff : integer or float, optional
+ Depth to stop the search. Only paths of length <= cutoff are returned.
+
+ Returns
+ -------
+ pred,distance : dictionaries
+ Returns two dictionaries representing a list of predecessors
+ of a node and the distance to each node.
+
+ Notes
+ -----
+ Edge weight attributes must be numerical.
+ Distances are calculated as sums of weighted edges traversed.
+
+ The list of predecessors contains more than one element only when
+    there is more than one shortest path to the key node.
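+
+    Examples
+    --------
+    A path graph shows the parallel structure of the two dictionaries:
+
+    >>> G=nx.path_graph(4)
+    >>> pred,dist=nx.dijkstra_predecessor_and_distance(G,0)
+    >>> pred
+    {0: [], 1: [0], 2: [1], 3: [2]}
+    >>> dist
+    {0: 0, 1: 1, 2: 2, 3: 3}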
+ """
+ push=heapq.heappush
+ pop=heapq.heappop
+ dist = {} # dictionary of final distances
+ pred = {source:[]} # dictionary of predecessors
+ seen = {source:0}
+ fringe=[] # use heapq with (distance,label) tuples
+ push(fringe,(0,source))
+ while fringe:
+ (d,v)=pop(fringe)
+ if v in dist: continue # already searched this node.
+ dist[v] = d
+ if G.is_multigraph():
+ edata=[]
+ for w,keydata in G[v].items():
+ minweight=min((dd.get(weight,1)
+ for k,dd in keydata.items()))
+ edata.append((w,{weight:minweight}))
+ else:
+ edata=iter(G[v].items())
+ for w,edgedata in edata:
+ vw_dist = dist[v] + edgedata.get(weight,1)
+ if cutoff is not None:
+ if vw_dist>cutoff:
+ continue
+ if w in dist:
+ if vw_dist < dist[w]:
+ raise ValueError('Contradictory paths found:',
+ 'negative weights?')
+ elif w not in seen or vw_dist < seen[w]:
+ seen[w] = vw_dist
+ push(fringe,(vw_dist,w))
+ pred[w] = [v]
+ elif vw_dist==seen[w]:
+ pred[w].append(v)
+ return (pred,dist)
+
+
+def all_pairs_dijkstra_path_length(G, cutoff=None, weight='weight'):
+ """ Compute shortest path lengths between all nodes in a weighted graph.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ weight: string, optional (default='weight')
+ Edge data key corresponding to the edge weight
+
+ cutoff : integer or float, optional
+ Depth to stop the search. Only paths of length <= cutoff are returned.
+
+ Returns
+ -------
+ distance : dictionary
+ Dictionary, keyed by source and target, of shortest path lengths.
+
+ Examples
+ --------
+ >>> G=nx.path_graph(5)
+ >>> length=nx.all_pairs_dijkstra_path_length(G)
+ >>> print(length[1][4])
+ 3
+ >>> length[1]
+ {0: 1, 1: 0, 2: 1, 3: 2, 4: 3}
+
+ Notes
+ -----
+ Edge weight attributes must be numerical.
+ Distances are calculated as sums of weighted edges traversed.
+
+ The dictionary returned only has keys for reachable node pairs.
+ """
+ paths={}
+ for n in G:
+ paths[n]=single_source_dijkstra_path_length(G,n, cutoff=cutoff,
+ weight=weight)
+ return paths
+
+def all_pairs_dijkstra_path(G, cutoff=None, weight='weight'):
+ """ Compute shortest paths between all nodes in a weighted graph.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ weight: string, optional (default='weight')
+ Edge data key corresponding to the edge weight
+
+ cutoff : integer or float, optional
+ Depth to stop the search. Only paths of length <= cutoff are returned.
+
+ Returns
+ -------
+    paths : dictionary
+ Dictionary, keyed by source and target, of shortest paths.
+
+ Examples
+ --------
+ >>> G=nx.path_graph(5)
+ >>> path=nx.all_pairs_dijkstra_path(G)
+ >>> print(path[0][4])
+ [0, 1, 2, 3, 4]
+
+ Notes
+ -----
+ Edge weight attributes must be numerical.
+ Distances are calculated as sums of weighted edges traversed.
+
+ See Also
+ --------
+ floyd_warshall()
+
+ """
+ paths={}
+ for n in G:
+ paths[n]=single_source_dijkstra_path(G, n, cutoff=cutoff,
+ weight=weight)
+ return paths
+
+def bellman_ford(G, source, weight = 'weight'):
+ """Compute shortest path lengths and predecessors on shortest paths
+ in weighted graphs.
+
+ The algorithm has a running time of O(mn) where n is the number of
+ nodes and m is the number of edges. It is slower than Dijkstra but
+ can handle negative edge weights.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ The algorithm works for all types of graphs, including directed
+ graphs and multigraphs.
+
+ source: node label
+ Starting node for path
+
+ weight: string, optional (default='weight')
+ Edge data key corresponding to the edge weight
+
+ Returns
+ -------
+ pred, dist : dictionaries
+ Returns two dictionaries keyed by node to predecessor in the
+ path and to the distance from the source respectively.
+
+ Raises
+ ------
+ NetworkXUnbounded
+ If the (di)graph contains a negative cost (di)cycle, the
+ algorithm raises an exception to indicate the presence of the
+ negative cost (di)cycle. Note: any negative weight edge in an
+ undirected graph is a negative cost cycle.
+
+ Examples
+ --------
+ >>> import networkx as nx
+ >>> G = nx.path_graph(5, create_using = nx.DiGraph())
+ >>> pred, dist = nx.bellman_ford(G, 0)
+ >>> pred
+ {0: None, 1: 0, 2: 1, 3: 2, 4: 3}
+ >>> dist
+ {0: 0, 1: 1, 2: 2, 3: 3, 4: 4}
+
+ >>> from nose.tools import assert_raises
+ >>> G = nx.cycle_graph(5, create_using = nx.DiGraph())
+ >>> G[1][2]['weight'] = -7
+ >>> assert_raises(nx.NetworkXUnbounded, nx.bellman_ford, G, 0)
+
+ Notes
+ -----
+ Edge weight attributes must be numerical.
+ Distances are calculated as sums of weighted edges traversed.
+
+ The dictionaries returned only have keys for nodes reachable from
+ the source.
+
+ In the case where the (di)graph is not connected, if a component
+ not containing the source contains a negative cost (di)cycle, it
+ will not be detected.
+
+ """
+ if source not in G:
+ raise KeyError("Node %s is not found in the graph"%source)
+ numb_nodes = len(G)
+
+ dist = {source: 0}
+ pred = {source: None}
+
+ if numb_nodes == 1:
+ return pred, dist
+
+ if G.is_multigraph():
+ def get_weight(edge_dict):
+ return min([eattr.get(weight,1) for eattr in edge_dict.values()])
+ else:
+ def get_weight(edge_dict):
+ return edge_dict.get(weight,1)
+
+ for i in range(numb_nodes):
+ no_changes=True
+ # Only need edges from nodes in dist b/c all others have dist==inf
+ for u, dist_u in list(dist.items()): # get all edges from nodes in dist
+ for v, edict in G[u].items(): # double loop handles undirected too
+ dist_v = dist_u + get_weight(edict)
+ if v not in dist or dist[v] > dist_v:
+ dist[v] = dist_v
+ pred[v] = u
+ no_changes = False
+        if no_changes:
+            break
+    else:
+        # this for/else clause runs only when no early break occurred,
+        # i.e. distances kept improving for n full passes: a negative cycle
+        raise nx.NetworkXUnbounded("Negative cost cycle detected.")
+ return pred, dist
+
+def negative_edge_cycle(G, weight = 'weight'):
+ """Return True if there exists a negative edge cycle anywhere in G.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ weight: string, optional (default='weight')
+ Edge data key corresponding to the edge weight
+
+ Returns
+ -------
+ negative_cycle : bool
+ True if a negative edge cycle exists, otherwise False.
+
+ Examples
+ --------
+ >>> import networkx as nx
+ >>> G = nx.cycle_graph(5, create_using = nx.DiGraph())
+ >>> print(nx.negative_edge_cycle(G))
+ False
+ >>> G[1][2]['weight'] = -7
+ >>> print(nx.negative_edge_cycle(G))
+ True
+
+ Notes
+ -----
+ Edge weight attributes must be numerical.
+ Distances are calculated as sums of weighted edges traversed.
+
+ This algorithm uses bellman_ford() but finds negative cycles
+ on any component by first adding a new node connected to
+ every node, and starting bellman_ford on that node. It then
+ removes that extra node.
+ """
+    # attach a temporary node to every node so a single bellman_ford
+    # call reaches every component of G
+    newnode = generate_unique_node()
+    G.add_edges_from([ (newnode,n) for n in G])
+
+ try:
+ bellman_ford(G, newnode, weight)
+ except nx.NetworkXUnbounded:
+ G.remove_node(newnode)
+ return True
+ G.remove_node(newnode)
+ return False
+
+
+def bidirectional_dijkstra(G, source, target, weight = 'weight'):
+ """Dijkstra's algorithm for shortest paths using bidirectional search.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ source : node
+ Starting node.
+
+ target : node
+ Ending node.
+
+ weight: string, optional (default='weight')
+ Edge data key corresponding to the edge weight
+
+ Returns
+ -------
+    length, path : number and list
+        A tuple containing the shortest path length from source to
+        target and the list of nodes on that shortest path.
+
+ Raises
+ ------
+ NetworkXNoPath
+ If no path exists between source and target.
+
+ Examples
+ --------
+ >>> G=nx.path_graph(5)
+ >>> length,path=nx.bidirectional_dijkstra(G,0,4)
+ >>> print(length)
+ 4
+ >>> print(path)
+ [0, 1, 2, 3, 4]
+
+ Notes
+ -----
+ Edge weight attributes must be numerical.
+ Distances are calculated as sums of weighted edges traversed.
+
+ In practice bidirectional Dijkstra is much more than twice as fast as
+ ordinary Dijkstra.
+
+ Ordinary Dijkstra expands nodes in a sphere-like manner from the
+ source. The radius of this sphere will eventually be the length
+ of the shortest path. Bidirectional Dijkstra will expand nodes
+ from both the source and the target, making two spheres of half
+    this radius. In the plane, the single circle has area pi*r*r while
+    the two half-radius circles together have 2*pi*(r/2)*(r/2), half
+    that area, so roughly half as many nodes are expanded.
+
+ This algorithm is not guaranteed to work if edge weights
+ are negative or are floating point numbers
+ (overflows and roundoff errors can cause problems).
+
+ See Also
+ --------
+ shortest_path
+ shortest_path_length
+ """
+ if source == target: return (0, [source])
+ #Init: Forward Backward
+ dists = [{}, {}]# dictionary of final distances
+ paths = [{source:[source]}, {target:[target]}] # dictionary of paths
+ fringe = [[], []] #heap of (distance, node) tuples for extracting next node to expand
+ seen = [{source:0}, {target:0} ]#dictionary of distances to nodes seen
+ #initialize fringe heap
+ heapq.heappush(fringe[0], (0, source))
+ heapq.heappush(fringe[1], (0, target))
+ #neighs for extracting correct neighbor information
+ if G.is_directed():
+ neighs = [G.successors_iter, G.predecessors_iter]
+ else:
+ neighs = [G.neighbors_iter, G.neighbors_iter]
+ #variables to hold shortest discovered path
+ #finaldist = 1e30000
+ finalpath = []
+ dir = 1
+ while fringe[0] and fringe[1]:
+ # choose direction
+ # dir == 0 is forward direction and dir == 1 is back
+ dir = 1-dir
+ # extract closest to expand
+ (dist, v )= heapq.heappop(fringe[dir])
+ if v in dists[dir]:
+ # Shortest path to v has already been found
+ continue
+ # update distance
+ dists[dir][v] = dist #equal to seen[dir][v]
+ if v in dists[1-dir]:
+ # if we have scanned v in both directions we are done
+ # we have now discovered the shortest path
+ return (finaldist,finalpath)
+
+ for w in neighs[dir](v):
+ if(dir==0): #forward
+ if G.is_multigraph():
+ minweight=min((dd.get(weight,1)
+ for k,dd in G[v][w].items()))
+ else:
+ minweight=G[v][w].get(weight,1)
+ vwLength = dists[dir][v] + minweight #G[v][w].get(weight,1)
+ else: #back, must remember to change v,w->w,v
+ if G.is_multigraph():
+ minweight=min((dd.get(weight,1)
+ for k,dd in G[w][v].items()))
+ else:
+ minweight=G[w][v].get(weight,1)
+ vwLength = dists[dir][v] + minweight #G[w][v].get(weight,1)
+
+ if w in dists[dir]:
+ if vwLength < dists[dir][w]:
+ raise ValueError("Contradictory paths found: negative weights?")
+ elif w not in seen[dir] or vwLength < seen[dir][w]:
+ # relaxing
+ seen[dir][w] = vwLength
+ heapq.heappush(fringe[dir], (vwLength,w))
+ paths[dir][w] = paths[dir][v]+[w]
+ if w in seen[0] and w in seen[1]:
+                        #see if this path is better than the already
+ #discovered shortest path
+ totaldist = seen[0][w] + seen[1][w]
+ if finalpath == [] or finaldist > totaldist:
+ finaldist = totaldist
+ revpath = paths[1][w][:]
+ revpath.reverse()
+ finalpath = paths[0][w] + revpath[1:]
+ raise nx.NetworkXNoPath("No path between %s and %s." % (source, target))
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/simple_paths.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/simple_paths.py
new file mode 100644
index 0000000..f72d4d2
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/simple_paths.py
@@ -0,0 +1,124 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2012 by
+# Sergio Nery Simoes <sergionery@gmail.com>
+# All rights reserved.
+# BSD license.
+import networkx as nx
+__author__ = """\n""".join(['Sérgio Nery Simões <sergionery@gmail.com>',
+ 'Aric Hagberg <aric.hagberg@gmail.com>'])
+__all__ = ['all_simple_paths']
+
+def all_simple_paths(G, source, target, cutoff=None):
+ """Generate all simple paths in the graph G from source to target.
+
+ A simple path is a path with no repeated nodes.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ source : node
+ Starting node for path
+
+ target : node
+ Ending node for path
+
+ cutoff : integer, optional
+ Depth to stop the search. Only paths of length <= cutoff are returned.
+
+ Returns
+ -------
+ path_generator: generator
+ A generator that produces lists of simple paths. If there are no paths
+ between the source and target within the given cutoff the generator
+ produces no output.
+
+ Examples
+ --------
+ >>> G = nx.complete_graph(4)
+ >>> for path in nx.all_simple_paths(G, source=0, target=3):
+ ... print(path)
+ ...
+ [0, 1, 2, 3]
+ [0, 1, 3]
+ [0, 2, 1, 3]
+ [0, 2, 3]
+ [0, 3]
+ >>> paths = nx.all_simple_paths(G, source=0, target=3, cutoff=2)
+ >>> print(list(paths))
+ [[0, 1, 3], [0, 2, 3], [0, 3]]
+
+ Notes
+ -----
+ This algorithm uses a modified depth-first search to generate the
+ paths [1]_. A single path can be found in `O(V+E)` time but the
+ number of simple paths in a graph can be very large, e.g. `O(n!)` in
+ the complete graph of order n.
+
+ References
+ ----------
+ .. [1] R. Sedgewick, "Algorithms in C, Part 5: Graph Algorithms",
+ Addison Wesley Professional, 3rd ed., 2001.
+
+ See Also
+ --------
+ all_shortest_paths, shortest_path
+ """
+ if source not in G:
+ raise nx.NetworkXError('source node %s not in graph'%source)
+ if target not in G:
+ raise nx.NetworkXError('target node %s not in graph'%target)
+ if cutoff is None:
+ cutoff = len(G)-1
+ if G.is_multigraph():
+ return _all_simple_paths_multigraph(G, source, target, cutoff=cutoff)
+ else:
+ return _all_simple_paths_graph(G, source, target, cutoff=cutoff)
+
+def _all_simple_paths_graph(G, source, target, cutoff=None):
+ if cutoff < 1:
+ return
+    visited = [source]           # nodes on the current path (a stack)
+    stack = [iter(G[source])]    # one neighbor iterator per path node
+ while stack:
+ children = stack[-1]
+ child = next(children, None)
+ if child is None:
+ stack.pop()
+ visited.pop()
+ elif len(visited) < cutoff:
+ if child == target:
+ yield visited + [target]
+ elif child not in visited:
+ visited.append(child)
+ stack.append(iter(G[child]))
+ else: #len(visited) == cutoff:
+ if child == target or target in children:
+ yield visited + [target]
+ stack.pop()
+ visited.pop()
+
+
+def _all_simple_paths_multigraph(G, source, target, cutoff=None):
+ if cutoff < 1:
+ return
+    visited = [source]                        # nodes on the current path
+    stack = [(v for u,v in G.edges(source))]  # neighbor generator per node
+ while stack:
+ children = stack[-1]
+ child = next(children, None)
+ if child is None:
+ stack.pop()
+ visited.pop()
+ elif len(visited) < cutoff:
+ if child == target:
+ yield visited + [target]
+ elif child not in visited:
+ visited.append(child)
+ stack.append((v for u,v in G.edges(child)))
+ else: #len(visited) == cutoff:
+ count = ([child]+list(children)).count(target)
+ for i in range(count):
+ yield visited + [target]
+ stack.pop()
+ visited.pop()
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/smetric.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/smetric.py
new file mode 100644
index 0000000..0e801bf
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/smetric.py
@@ -0,0 +1,37 @@
+import networkx as nx
+#from networkx.generators.smax import li_smax_graph
+
+def s_metric(G, normalized=True):
+ """Return the s-metric of graph.
+
+ The s-metric is defined as the sum of the products deg(u)*deg(v)
+    for every edge (u,v) in G. If normalized is true, construct the
+    s-max graph, compute its s-metric, and return the normalized s value
+    (normalization is not yet implemented and currently raises an error).
+
+ Parameters
+ ----------
+ G : graph
+ The graph used to compute the s-metric.
+ normalized : bool (optional)
+ Normalize the value.
+
+ Returns
+ -------
+ s : float
+ The s-metric of the graph.
+
+ References
+ ----------
+ .. [1] Lun Li, David Alderson, John C. Doyle, and Walter Willinger,
+ Towards a Theory of Scale-Free Graphs:
+ Definition, Properties, and Implications (Extended Version), 2005.
+ http://arxiv.org/abs/cond-mat/0501169
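+
+    Examples
+    --------
+    For the path graph on four nodes the degree products along its
+    three edges are 1*2, 2*2 and 2*1:
+
+    >>> G=nx.path_graph(4)
+    >>> nx.s_metric(G,normalized=False)
+    8.0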
+ """
+ if normalized:
+ raise nx.NetworkXError("Normalization not implemented")
+# Gmax = li_smax_graph(list(G.degree().values()))
+# return s_metric(G,normalized=False)/s_metric(Gmax,normalized=False)
+# else:
+ return float(sum([G.degree(u)*G.degree(v) for (u,v) in G.edges_iter()]))
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/swap.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/swap.py
new file mode 100644
index 0000000..33d882d
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/swap.py
@@ -0,0 +1,185 @@
+# -*- coding: utf-8 -*-
+"""Swap edges in a graph.
+"""
+# Copyright (C) 2004-2012 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+import math
+import random
+import networkx as nx
+__author__ = "\n".join(['Aric Hagberg (hagberg@lanl.gov)',
+ 'Pieter Swart (swart@lanl.gov)',
+                        'Dan Schult (dschult@colgate.edu)',
+                        'Joel Miller (joel.c.miller.research@gmail.com)',
+ 'Ben Edwards'])
+
+__all__ = ['double_edge_swap',
+ 'connected_double_edge_swap']
+
+
+def double_edge_swap(G, nswap=1, max_tries=100):
+ """Swap two edges in the graph while keeping the node degrees fixed.
+
+ A double-edge swap removes two randomly chosen edges u-v and x-y
+ and creates the new edges u-x and v-y::
+
+ u--v u v
+ becomes | |
+ x--y x y
+
+    If either the edge u-x or v-y already exists, no swap is performed
+ and another attempt is made to find a suitable edge pair.
+
+ Parameters
+ ----------
+ G : graph
+ An undirected graph
+
+ nswap : integer (optional, default=1)
+ Number of double-edge swaps to perform
+
+ max_tries : integer (optional)
+ Maximum number of attempts to swap edges
+
+ Returns
+ -------
+ G : graph
+ The graph after double edge swaps.
+
+ Notes
+ -----
+ Does not enforce any connectivity constraints.
+
+ The graph G is modified in place.
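+
+    Examples
+    --------
+    The chosen swap is random, so a doctest can only check the
+    invariant that node degrees are unchanged:
+
+    >>> G=nx.cycle_graph(4)
+    >>> G=nx.double_edge_swap(G,nswap=1,max_tries=100)
+    >>> sorted(G.degree().values())
+    [2, 2, 2, 2]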
+ """
+ if G.is_directed():
+ raise nx.NetworkXError(\
+ "double_edge_swap() not defined for directed graphs.")
+ if nswap>max_tries:
+ raise nx.NetworkXError("Number of swaps > number of tries allowed.")
+ if len(G) < 4:
+ raise nx.NetworkXError("Graph has less than four nodes.")
+ # Instead of choosing uniformly at random from a generated edge list,
+ # this algorithm chooses nonuniformly from the set of nodes with
+ # probability weighted by degree.
+ n=0
+ swapcount=0
+ keys,degrees=zip(*G.degree().items()) # keys, degree
+ cdf=nx.utils.cumulative_distribution(degrees) # cdf of degree
+ while swapcount < nswap:
+# if random.random() < 0.5: continue # trick to avoid periodicities?
+ # pick two random edges without creating edge list
+ # choose source node indices from discrete distribution
+ (ui,xi)=nx.utils.discrete_sequence(2,cdistribution=cdf)
+ if ui==xi:
+ continue # same source, skip
+ u=keys[ui] # convert index to label
+ x=keys[xi]
+ # choose target uniformly from neighbors
+ v=random.choice(list(G[u]))
+ y=random.choice(list(G[x]))
+ if v==y:
+ continue # same target, skip
+ if (x not in G[u]) and (y not in G[v]): # don't create parallel edges
+ G.add_edge(u,x)
+ G.add_edge(v,y)
+ G.remove_edge(u,v)
+ G.remove_edge(x,y)
+ swapcount+=1
+ if n >= max_tries:
+ e=('Maximum number of swap attempts (%s) exceeded '%n +
+ 'before desired swaps achieved (%s).'%nswap)
+ raise nx.NetworkXAlgorithmError(e)
+ n+=1
+ return G
+
+def connected_double_edge_swap(G, nswap=1):
+ """Attempt nswap double-edge swaps in the graph G.
+
+ A double-edge swap removes two randomly chosen edges u-v and x-y
+ and creates the new edges u-x and v-y::
+
+ u--v u v
+ becomes | |
+ x--y x y
+
+    If either the edge u-x or v-y already exists, no swap is performed,
+    so the actual count of swapped edges is always <= nswap.
+
+ Parameters
+ ----------
+ G : graph
+ An undirected graph
+
+ nswap : integer (optional, default=1)
+ Number of double-edge swaps to perform
+
+ Returns
+ -------
+    swapcount : int
+ The number of successful swaps
+
+ Notes
+ -----
+ The initial graph G must be connected, and the resulting graph is connected.
+ The graph G is modified in place.
+
+ References
+ ----------
+ .. [1] C. Gkantsidis and M. Mihail and E. Zegura,
+ The Markov chain simulation method for generating connected
+ power law random graphs, 2003.
+ http://citeseer.ist.psu.edu/gkantsidis03markov.html
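+
+    Examples
+    --------
+    On a 4-cycle every legal swap preserves connectivity, so a single
+    requested swap succeeds:
+
+    >>> G=nx.cycle_graph(4)
+    >>> nx.connected_double_edge_swap(G,1)
+    1
+    >>> nx.is_connected(G)
+    True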
+ """
+ if not nx.is_connected(G):
+ raise nx.NetworkXError("Graph not connected")
+ if len(G) < 4:
+ raise nx.NetworkXError("Graph has less than four nodes.")
+ n=0
+ swapcount=0
+ deg=G.degree()
+ dk=list(deg.keys()) # Label key for nodes
+ cdf=nx.utils.cumulative_distribution(list(G.degree().values()))
+ window=1
+ while n < nswap:
+ wcount=0
+ swapped=[]
+ while wcount < window and n < nswap:
+ # Pick two random edges without creating edge list
+ # Choose source nodes from discrete degree distribution
+ (ui,xi)=nx.utils.discrete_sequence(2,cdistribution=cdf)
+ if ui==xi:
+ continue # same source, skip
+ u=dk[ui] # convert index to label
+ x=dk[xi]
+ # Choose targets uniformly from neighbors
+ v=random.choice(G.neighbors(u))
+            y=random.choice(G.neighbors(x))
+ if v==y: continue # same target, skip
+ if (not G.has_edge(u,x)) and (not G.has_edge(v,y)):
+ G.remove_edge(u,v)
+ G.remove_edge(x,y)
+ G.add_edge(u,x)
+ G.add_edge(v,y)
+ swapped.append((u,v,x,y))
+ swapcount+=1
+ n+=1
+ wcount+=1
+ if nx.is_connected(G):
+ window+=1
+ else:
+ # not connected, undo changes from previous window, decrease window
+ while swapped:
+ (u,v,x,y)=swapped.pop()
+ G.add_edge(u,v)
+ G.add_edge(x,y)
+ G.remove_edge(u,x)
+ G.remove_edge(v,y)
+ swapcount-=1
+ window = int(math.ceil(float(window)/2))
+ return swapcount
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_block.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_block.py
new file mode 100644
index 0000000..281e385
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_block.py
@@ -0,0 +1,103 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx
+
+class TestBlock:
+
+ def test_path(self):
+ G=networkx.path_graph(6)
+ partition=[[0,1],[2,3],[4,5]]
+ M=networkx.blockmodel(G,partition)
+ assert_equal(sorted(M.nodes()),[0,1,2])
+ assert_equal(sorted(M.edges()),[(0,1),(1,2)])
+ for n in M.nodes():
+ assert_equal(M.node[n]['nedges'],1)
+ assert_equal(M.node[n]['nnodes'],2)
+ assert_equal(M.node[n]['density'],1.0)
+
+ def test_multigraph_path(self):
+ G=networkx.MultiGraph(networkx.path_graph(6))
+ partition=[[0,1],[2,3],[4,5]]
+ M=networkx.blockmodel(G,partition,multigraph=True)
+ assert_equal(sorted(M.nodes()),[0,1,2])
+ assert_equal(sorted(M.edges()),[(0,1),(1,2)])
+ for n in M.nodes():
+ assert_equal(M.node[n]['nedges'],1)
+ assert_equal(M.node[n]['nnodes'],2)
+ assert_equal(M.node[n]['density'],1.0)
+
+ def test_directed_path(self):
+ G = networkx.DiGraph()
+ G.add_path(list(range(6)))
+ partition=[[0,1],[2,3],[4,5]]
+ M=networkx.blockmodel(G,partition)
+ assert_equal(sorted(M.nodes()),[0,1,2])
+ assert_equal(sorted(M.edges()),[(0,1),(1,2)])
+ for n in M.nodes():
+ assert_equal(M.node[n]['nedges'],1)
+ assert_equal(M.node[n]['nnodes'],2)
+ assert_equal(M.node[n]['density'],0.5)
+
+ def test_directed_multigraph_path(self):
+ G = networkx.MultiDiGraph()
+ G.add_path(list(range(6)))
+ partition=[[0,1],[2,3],[4,5]]
+ M=networkx.blockmodel(G,partition,multigraph=True)
+ assert_equal(sorted(M.nodes()),[0,1,2])
+ assert_equal(sorted(M.edges()),[(0,1),(1,2)])
+ for n in M.nodes():
+ assert_equal(M.node[n]['nedges'],1)
+ assert_equal(M.node[n]['nnodes'],2)
+ assert_equal(M.node[n]['density'],0.5)
+
+ @raises(networkx.NetworkXException)
+ def test_overlapping(self):
+ G=networkx.path_graph(6)
+ partition=[[0,1,2],[2,3],[4,5]]
+ M=networkx.blockmodel(G,partition)
+
+ def test_weighted_path(self):
+ G=networkx.path_graph(6)
+ G[0][1]['weight']=1
+ G[1][2]['weight']=2
+ G[2][3]['weight']=3
+ G[3][4]['weight']=4
+ G[4][5]['weight']=5
+ partition=[[0,1],[2,3],[4,5]]
+ M=networkx.blockmodel(G,partition)
+ assert_equal(sorted(M.nodes()),[0,1,2])
+ assert_equal(sorted(M.edges()),[(0,1),(1,2)])
+ assert_equal(M[0][1]['weight'],2)
+ assert_equal(M[1][2]['weight'],4)
+ for n in M.nodes():
+ assert_equal(M.node[n]['nedges'],1)
+ assert_equal(M.node[n]['nnodes'],2)
+ assert_equal(M.node[n]['density'],1.0)
+
+
+ def test_barbell(self):
+ G=networkx.barbell_graph(3,0)
+ partition=[[0,1,2],[3,4,5]]
+ M=networkx.blockmodel(G,partition)
+ assert_equal(sorted(M.nodes()),[0,1])
+ assert_equal(sorted(M.edges()),[(0,1)])
+ for n in M.nodes():
+ assert_equal(M.node[n]['nedges'],3)
+ assert_equal(M.node[n]['nnodes'],3)
+ assert_equal(M.node[n]['density'],1.0)
+
+ def test_barbell_plus(self):
+ G=networkx.barbell_graph(3,0)
+ G.add_edge(0,5) # add extra edge between bells
+ partition=[[0,1,2],[3,4,5]]
+ M=networkx.blockmodel(G,partition)
+ assert_equal(sorted(M.nodes()),[0,1])
+ assert_equal(sorted(M.edges()),[(0,1)])
+ assert_equal(M[0][1]['weight'],2)
+ for n in M.nodes():
+ assert_equal(M.node[n]['nedges'],3)
+ assert_equal(M.node[n]['nnodes'],3)
+ assert_equal(M.node[n]['density'],1.0)
+
+
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_boundary.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_boundary.py
new file mode 100644
index 0000000..2b9e714
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_boundary.py
@@ -0,0 +1,104 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx as nx
+from networkx import convert_node_labels_to_integers as cnlti
+
+class TestBoundary:
+
+ def setUp(self):
+ self.null=nx.null_graph()
+ self.P10=cnlti(nx.path_graph(10),first_label=1)
+ self.K10=cnlti(nx.complete_graph(10),first_label=1)
+
+ def test_null_node_boundary(self):
+ """null graph has empty node boundaries"""
+ null=self.null
+ assert_equal(nx.node_boundary(null,[]),[])
+ assert_equal(nx.node_boundary(null,[],[]),[])
+ assert_equal(nx.node_boundary(null,[1,2,3]),[])
+ assert_equal(nx.node_boundary(null,[1,2,3],[4,5,6]),[])
+ assert_equal(nx.node_boundary(null,[1,2,3],[3,4,5]),[])
+
+ def test_null_edge_boundary(self):
+ """null graph has empty edge boundaries"""
+ null=self.null
+ assert_equal(nx.edge_boundary(null,[]),[])
+ assert_equal(nx.edge_boundary(null,[],[]),[])
+ assert_equal(nx.edge_boundary(null,[1,2,3]),[])
+ assert_equal(nx.edge_boundary(null,[1,2,3],[4,5,6]),[])
+ assert_equal(nx.edge_boundary(null,[1,2,3],[3,4,5]),[])
+
+ def test_path_node_boundary(self):
+ """Check node boundaries in path graph."""
+ P10=self.P10
+ assert_equal(nx.node_boundary(P10,[]),[])
+ assert_equal(nx.node_boundary(P10,[],[]),[])
+ assert_equal(nx.node_boundary(P10,[1,2,3]),[4])
+ assert_equal(sorted(nx.node_boundary(P10,[4,5,6])),[3, 7])
+ assert_equal(sorted(nx.node_boundary(P10,[3,4,5,6,7])),[2, 8])
+ assert_equal(nx.node_boundary(P10,[8,9,10]),[7])
+ assert_equal(sorted(nx.node_boundary(P10,[4,5,6],[9,10])),[])
+
+ def test_path_edge_boundary(self):
+ """Check edge boundaries in path graph."""
+ P10=self.P10
+
+ assert_equal(nx.edge_boundary(P10,[]),[])
+ assert_equal(nx.edge_boundary(P10,[],[]),[])
+ assert_equal(nx.edge_boundary(P10,[1,2,3]),[(3, 4)])
+ assert_equal(sorted(nx.edge_boundary(P10,[4,5,6])),[(4, 3), (6, 7)])
+ assert_equal(sorted(nx.edge_boundary(P10,[3,4,5,6,7])),[(3, 2), (7, 8)])
+ assert_equal(nx.edge_boundary(P10,[8,9,10]),[(8, 7)])
+ assert_equal(sorted(nx.edge_boundary(P10,[4,5,6],[9,10])),[])
+ assert_equal(nx.edge_boundary(P10,[1,2,3],[3,4,5]) ,[(2, 3), (3, 4)])
+
+
+ def test_k10_node_boundary(self):
+ """Check node boundaries in K10"""
+ K10=self.K10
+
+ assert_equal(nx.node_boundary(K10,[]),[])
+ assert_equal(nx.node_boundary(K10,[],[]),[])
+ assert_equal(sorted(nx.node_boundary(K10,[1,2,3])),
+ [4, 5, 6, 7, 8, 9, 10])
+ assert_equal(sorted(nx.node_boundary(K10,[4,5,6])),
+ [1, 2, 3, 7, 8, 9, 10])
+ assert_equal(sorted(nx.node_boundary(K10,[3,4,5,6,7])),
+ [1, 2, 8, 9, 10])
+ assert_equal(nx.node_boundary(K10,[4,5,6],[]),[])
+ assert_equal(nx.node_boundary(K10,K10),[])
+ assert_equal(nx.node_boundary(K10,[1,2,3],[3,4,5]),[4, 5])
+
+ def test_k10_edge_boundary(self):
+ """Check edge boundaries in K10"""
+ K10=self.K10
+
+ assert_equal(nx.edge_boundary(K10,[]),[])
+ assert_equal(nx.edge_boundary(K10,[],[]),[])
+ assert_equal(len(nx.edge_boundary(K10,[1,2,3])),21)
+ assert_equal(len(nx.edge_boundary(K10,[4,5,6,7])),24)
+ assert_equal(len(nx.edge_boundary(K10,[3,4,5,6,7])),25)
+ assert_equal(len(nx.edge_boundary(K10,[8,9,10])),21)
+ assert_equal(sorted(nx.edge_boundary(K10,[4,5,6],[9,10])),
+ [(4, 9), (4, 10), (5, 9), (5, 10), (6, 9), (6, 10)])
+ assert_equal(nx.edge_boundary(K10,[1,2,3],[3,4,5]),
+ [(1, 3), (1, 4), (1, 5), (2, 3), (2, 4),
+ (2, 5), (3, 4), (3, 5)])
+
+
+ def test_petersen(self):
+ """Check boundaries in the petersen graph
+
+ cheeger(G,k)=min(|bdy(S)|/|S| for |S|=k, 0<k<=|V(G)|/2)
+ """
+ from random import sample
+ P=nx.petersen_graph()
+ def cheeger(G,k):
+ return min([float(len(nx.node_boundary(G,sample(G.nodes(),k))))/k
+ for n in range(100)])
+
+ assert_almost_equals(cheeger(P,1),3.00,places=2)
+ assert_almost_equals(cheeger(P,2),2.00,places=2)
+ assert_almost_equals(cheeger(P,3),1.67,places=2)
+ assert_almost_equals(cheeger(P,4),1.00,places=2)
+ assert_almost_equals(cheeger(P,5),0.80,places=2)
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_clique.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_clique.py
new file mode 100644
index 0000000..892a67d
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_clique.py
@@ -0,0 +1,114 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx as nx
+from networkx import convert_node_labels_to_integers as cnlti
+
+class TestCliques:
+
+ def setUp(self):
+ z=[3,4,3,4,2,4,2,1,1,1,1]
+ self.G=cnlti(nx.generators.havel_hakimi_graph(z),first_label=1)
+ self.cl=list(nx.find_cliques(self.G))
+ H=nx.complete_graph(6)
+ H=nx.relabel_nodes(H,dict( [(i,i+1) for i in range(6)]))
+ H.remove_edges_from([(2,6),(2,5),(2,4),(1,3),(5,3)])
+ self.H=H
+
+ def test_find_cliques1(self):
+ cl=list(nx.find_cliques(self.G))
+ rcl=nx.find_cliques_recursive(self.G)
+ assert_equal(sorted(map(sorted,cl)), sorted(map(sorted,rcl)))
+ assert_equal(cl,
+ [[2, 6, 1, 3], [2, 6, 4], [5, 4, 7], [8, 9], [10, 11]])
+
+ def test_selfloops(self):
+ self.G.add_edge(1,1)
+ cl=list(nx.find_cliques(self.G))
+ rcl=nx.find_cliques_recursive(self.G)
+ assert_equal(sorted(map(sorted,cl)), sorted(map(sorted,rcl)))
+ assert_equal(cl,
+ [[2, 6, 1, 3], [2, 6, 4], [5, 4, 7], [8, 9], [10, 11]])
+
+ def test_find_cliques2(self):
+ hcl=list(nx.find_cliques(self.H))
+ assert_equal(sorted(map(sorted,hcl)),
+ [[1, 2], [1, 4, 5, 6], [2, 3], [3, 4, 6]])
+
+ def test_clique_number(self):
+ G=self.G
+ assert_equal(nx.graph_clique_number(G),4)
+ assert_equal(nx.graph_clique_number(G,cliques=self.cl),4)
+
+ def test_number_of_cliques(self):
+ G=self.G
+ assert_equal(nx.graph_number_of_cliques(G),5)
+ assert_equal(nx.graph_number_of_cliques(G,cliques=self.cl),5)
+ assert_equal(nx.number_of_cliques(G,1),1)
+ assert_equal(list(nx.number_of_cliques(G,[1]).values()),[1])
+ assert_equal(list(nx.number_of_cliques(G,[1,2]).values()),[1, 2])
+ assert_equal(nx.number_of_cliques(G,[1,2]),{1: 1, 2: 2})
+ assert_equal(nx.number_of_cliques(G,2),2)
+ assert_equal(nx.number_of_cliques(G),
+ {1: 1, 2: 2, 3: 1, 4: 2, 5: 1,
+ 6: 2, 7: 1, 8: 1, 9: 1, 10: 1, 11: 1})
+ assert_equal(nx.number_of_cliques(G,nodes=G.nodes()),
+ {1: 1, 2: 2, 3: 1, 4: 2, 5: 1,
+ 6: 2, 7: 1, 8: 1, 9: 1, 10: 1, 11: 1})
+ assert_equal(nx.number_of_cliques(G,nodes=[2,3,4]),
+ {2: 2, 3: 1, 4: 2})
+ assert_equal(nx.number_of_cliques(G,cliques=self.cl),
+ {1: 1, 2: 2, 3: 1, 4: 2, 5: 1,
+ 6: 2, 7: 1, 8: 1, 9: 1, 10: 1, 11: 1})
+ assert_equal(nx.number_of_cliques(G,G.nodes(),cliques=self.cl),
+ {1: 1, 2: 2, 3: 1, 4: 2, 5: 1,
+ 6: 2, 7: 1, 8: 1, 9: 1, 10: 1, 11: 1})
+
+
+
+ def test_node_clique_number(self):
+ G=self.G
+ assert_equal(nx.node_clique_number(G,1),4)
+ assert_equal(list(nx.node_clique_number(G,[1]).values()),[4])
+ assert_equal(list(nx.node_clique_number(G,[1,2]).values()),[4, 4])
+ assert_equal(nx.node_clique_number(G,[1,2]),{1: 4, 2: 4})
+ assert_equal(nx.node_clique_number(G,1),4)
+ assert_equal(nx.node_clique_number(G),
+ {1: 4, 2: 4, 3: 4, 4: 3, 5: 3, 6: 4,
+ 7: 3, 8: 2, 9: 2, 10: 2, 11: 2})
+ assert_equal(nx.node_clique_number(G,cliques=self.cl),
+ {1: 4, 2: 4, 3: 4, 4: 3, 5: 3, 6: 4,
+ 7: 3, 8: 2, 9: 2, 10: 2, 11: 2})
+
+ def test_cliques_containing_node(self):
+ G=self.G
+ assert_equal(nx.cliques_containing_node(G,1),
+ [[2, 6, 1, 3]])
+ assert_equal(list(nx.cliques_containing_node(G,[1]).values()),
+ [[[2, 6, 1, 3]]])
+ assert_equal(list(nx.cliques_containing_node(G,[1,2]).values()),
+ [[[2, 6, 1, 3]], [[2, 6, 1, 3], [2, 6, 4]]])
+ assert_equal(nx.cliques_containing_node(G,[1,2]),
+ {1: [[2, 6, 1, 3]], 2: [[2, 6, 1, 3], [2, 6, 4]]})
+ assert_equal(nx.cliques_containing_node(G,1),
+ [[2, 6, 1, 3]])
+ assert_equal(nx.cliques_containing_node(G,2),
+ [[2, 6, 1, 3], [2, 6, 4]])
+ assert_equal(nx.cliques_containing_node(G,2,cliques=self.cl),
+ [[2, 6, 1, 3], [2, 6, 4]])
+ assert_equal(len(nx.cliques_containing_node(G)),11)
+
+ def test_make_clique_bipartite(self):
+ G=self.G
+ B=nx.make_clique_bipartite(G)
+ assert_equal(sorted(B.nodes()),
+ [-5, -4, -3, -2, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
+ H=nx.project_down(B)
+ assert_equal(H.adj,G.adj)
+ H1=nx.project_up(B)
+ assert_equal(H1.nodes(),[1, 2, 3, 4, 5])
+ H2=nx.make_max_clique_graph(G)
+ assert_equal(H1.adj,H2.adj)
+
+ @raises(nx.NetworkXNotImplemented)
+ def test_directed(self):
+ cliques=nx.find_cliques(nx.DiGraph())
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_cluster.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_cluster.py
new file mode 100644
index 0000000..19c00ae
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_cluster.py
@@ -0,0 +1,195 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx as nx
+
+class TestTriangles:
+
+ def test_empty(self):
+ G = nx.Graph()
+ assert_equal(list(nx.triangles(G).values()),[])
+
+ def test_path(self):
+ G = nx.path_graph(10)
+ assert_equal(list(nx.triangles(G).values()),
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
+ assert_equal(nx.triangles(G),
+ {0: 0, 1: 0, 2: 0, 3: 0, 4: 0,
+ 5: 0, 6: 0, 7: 0, 8: 0, 9: 0})
+
+ def test_cubical(self):
+ G = nx.cubical_graph()
+ assert_equal(list(nx.triangles(G).values()),
+ [0, 0, 0, 0, 0, 0, 0, 0])
+ assert_equal(nx.triangles(G,1),0)
+ assert_equal(list(nx.triangles(G,[1,2]).values()),[0, 0])
+ assert_equal(nx.triangles(G,1),0)
+ assert_equal(nx.triangles(G,[1,2]),{1: 0, 2: 0})
+
+ def test_k5(self):
+ G = nx.complete_graph(5)
+ assert_equal(list(nx.triangles(G).values()),[6, 6, 6, 6, 6])
+ assert_equal(sum(nx.triangles(G).values())/3.0,10)
+ assert_equal(nx.triangles(G,1),6)
+ G.remove_edge(1,2)
+ assert_equal(list(nx.triangles(G).values()),[5, 3, 3, 5, 5])
+ assert_equal(nx.triangles(G,1),3)
+
+
+class TestWeightedClustering:
+
+ def test_clustering(self):
+ G = nx.Graph()
+ assert_equal(list(nx.clustering(G,weight='weight').values()),[])
+ assert_equal(nx.clustering(G),{})
+
+ def test_path(self):
+ G = nx.path_graph(10)
+ assert_equal(list(nx.clustering(G,weight='weight').values()),
+ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
+ assert_equal(nx.clustering(G,weight='weight'),
+ {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0,
+ 5: 0.0, 6: 0.0, 7: 0.0, 8: 0.0, 9: 0.0})
+
+ def test_cubical(self):
+ G = nx.cubical_graph()
+ assert_equal(list(nx.clustering(G,weight='weight').values()),
+ [0, 0, 0, 0, 0, 0, 0, 0])
+ assert_equal(nx.clustering(G,1),0)
+ assert_equal(list(nx.clustering(G,[1,2],weight='weight').values()),[0, 0])
+ assert_equal(nx.clustering(G,1,weight='weight'),0)
+ assert_equal(nx.clustering(G,[1,2],weight='weight'),{1: 0, 2: 0})
+
+ def test_k5(self):
+ G = nx.complete_graph(5)
+ assert_equal(list(nx.clustering(G,weight='weight').values()),[1, 1, 1, 1, 1])
+ assert_equal(nx.average_clustering(G,weight='weight'),1)
+ G.remove_edge(1,2)
+ assert_equal(list(nx.clustering(G,weight='weight').values()),
+ [5./6., 1.0, 1.0, 5./6., 5./6.])
+ assert_equal(nx.clustering(G,[1,4],weight='weight'),{1: 1.0, 4: 0.83333333333333337})
+
+
+ def test_triangle_and_edge(self):
+ G=nx.Graph()
+ G.add_cycle([0,1,2])
+ G.add_edge(0,4,weight=2)
+ assert_equal(nx.clustering(G)[0],1.0/3.0)
+ assert_equal(nx.clustering(G,weight='weight')[0],1.0/6.0)
+
+class TestClustering:
+
+ def test_clustering(self):
+ G = nx.Graph()
+ assert_equal(list(nx.clustering(G).values()),[])
+ assert_equal(nx.clustering(G),{})
+
+ def test_path(self):
+ G = nx.path_graph(10)
+ assert_equal(list(nx.clustering(G).values()),
+ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
+ assert_equal(nx.clustering(G),
+ {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0,
+ 5: 0.0, 6: 0.0, 7: 0.0, 8: 0.0, 9: 0.0})
+
+ def test_cubical(self):
+ G = nx.cubical_graph()
+ assert_equal(list(nx.clustering(G).values()),
+ [0, 0, 0, 0, 0, 0, 0, 0])
+ assert_equal(nx.clustering(G,1),0)
+ assert_equal(list(nx.clustering(G,[1,2]).values()),[0, 0])
+ assert_equal(nx.clustering(G,1),0)
+ assert_equal(nx.clustering(G,[1,2]),{1: 0, 2: 0})
+
+ def test_k5(self):
+ G = nx.complete_graph(5)
+ assert_equal(list(nx.clustering(G).values()),[1, 1, 1, 1, 1])
+ assert_equal(nx.average_clustering(G),1)
+ G.remove_edge(1,2)
+ assert_equal(list(nx.clustering(G).values()),
+ [5./6., 1.0, 1.0, 5./6., 5./6.])
+ assert_equal(nx.clustering(G,[1,4]),{1: 1.0, 4: 0.83333333333333337})
+
+
+
+class TestTransitivity:
+
+ def test_transitivity(self):
+ G = nx.Graph()
+ assert_equal(nx.transitivity(G),0.0)
+
+ def test_path(self):
+ G = nx.path_graph(10)
+ assert_equal(nx.transitivity(G),0.0)
+
+ def test_cubical(self):
+ G = nx.cubical_graph()
+ assert_equal(nx.transitivity(G),0.0)
+
+ def test_k5(self):
+ G = nx.complete_graph(5)
+ assert_equal(nx.transitivity(G),1.0)
+ G.remove_edge(1,2)
+ assert_equal(nx.transitivity(G),0.875)
+
+ # def test_clustering_transitivity(self):
+ # # check that weighted average of clustering is transitivity
+ # G = nx.complete_graph(5)
+ # G.remove_edge(1,2)
+ # t1=nx.transitivity(G)
+ # (cluster_d2,weights)=nx.clustering(G,weights=True)
+ # trans=[]
+ # for v in G.nodes():
+ # trans.append(cluster_d2[v]*weights[v])
+ # t2=sum(trans)
+ # assert_almost_equal(abs(t1-t2),0)
+
+class TestSquareClustering:
+
+ def test_clustering(self):
+ G = nx.Graph()
+ assert_equal(list(nx.square_clustering(G).values()),[])
+ assert_equal(nx.square_clustering(G),{})
+
+ def test_path(self):
+ G = nx.path_graph(10)
+ assert_equal(list(nx.square_clustering(G).values()),
+ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
+ assert_equal(nx.square_clustering(G),
+ {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0,
+ 5: 0.0, 6: 0.0, 7: 0.0, 8: 0.0, 9: 0.0})
+
+ def test_cubical(self):
+ G = nx.cubical_graph()
+ assert_equal(list(nx.square_clustering(G).values()),
+ [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5])
+ assert_equal(list(nx.square_clustering(G,[1,2]).values()),[0.5, 0.5])
+ assert_equal(nx.square_clustering(G,[1])[1],0.5)
+ assert_equal(nx.square_clustering(G,[1,2]),{1: 0.5, 2: 0.5})
+
+ def test_k5(self):
+ G = nx.complete_graph(5)
+ assert_equal(list(nx.square_clustering(G).values()),[1, 1, 1, 1, 1])
+
+ def test_bipartite_k5(self):
+ G = nx.complete_bipartite_graph(5,5)
+ assert_equal(list(nx.square_clustering(G).values()),
+ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
+
+ def test_lind_square_clustering(self):
+ """Test C4 for figure 1 Lind et al (2005)"""
+ G = nx.Graph([(1,2),(1,3),(1,6),(1,7),(2,4),(2,5),
+ (3,4),(3,5),(6,7),(7,8),(6,8),(7,9),
+ (7,10),(6,11),(6,12),(2,13),(2,14),(3,15),(3,16)])
+ G1 = G.subgraph([1,2,3,4,5,13,14,15,16])
+ G2 = G.subgraph([1,6,7,8,9,10,11,12])
+ assert_equal(nx.square_clustering(G, [1])[1], 3/75.0)
+ assert_equal(nx.square_clustering(G1, [1])[1], 2/6.0)
+ assert_equal(nx.square_clustering(G2, [1])[1], 1/5.0)
+
+
+def test_average_clustering():
+ G=nx.cycle_graph(3)
+ G.add_edge(2,3)
+ assert_equal(nx.average_clustering(G),(1+1+1/3.0)/4.0)
+ assert_equal(nx.average_clustering(G,count_zeros=True),(1+1+1/3.0)/4.0)
+ assert_equal(nx.average_clustering(G,count_zeros=False),(1+1+1/3.0)/3.0)
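For orientation, a short sketch of the clustering/transitivity relationship these tests rely on, under the same networkx 1.x API; the graph is hypothetical:

    import networkx as nx

    G = nx.complete_graph(4)
    G.remove_edge(0, 1)
    print(nx.clustering(G))          # {0: 1.0, 1: 1.0, 2: 0.666..., 3: 0.666...}
    print(nx.transitivity(G))        # 0.75 = 3 * (2 triangles) / (8 connected triples)
    print(nx.average_clustering(G))  # 0.8333... = (1 + 1 + 2/3 + 2/3) / 4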
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_core.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_core.py
new file mode 100644
index 0000000..48399ae
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_core.py
@@ -0,0 +1,114 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx as nx
+
+class TestCore:
+
+ def setUp(self):
+ # G is the example graph in Figure 1 from Batagelj and
+ # Zaversnik's paper titled An O(m) Algorithm for Cores
+ # Decomposition of Networks, 2003,
+ # http://arXiv.org/abs/cs/0310049. With nodes labeled as
+ # shown, the 3-core is given by nodes 1-8, the 2-core by nodes
+ # 9-16, the 1-core by nodes 17-20 and node 21 is in the
+ # 0-core.
+ t1=nx.convert_node_labels_to_integers(nx.tetrahedral_graph(),1)
+ t2=nx.convert_node_labels_to_integers(t1,5)
+ G=nx.union(t1,t2)
+ G.add_edges_from( [(3,7), (2,11), (11,5), (11,12), (5,12), (12,19),
+ (12,18), (3,9), (7,9), (7,10), (9,10), (9,20),
+ (17,13), (13,14), (14,15), (15,16), (16,13)])
+ G.add_node(21)
+ self.G=G
+
+ # Create the graph H resulting from the degree sequence
+ # [0,1,2,2,2,2,3] when using the Havel-Hakimi algorithm.
+
+ degseq=[0,1,2,2,2,2,3]
+ H = nx.havel_hakimi_graph(degseq)
+ mapping = {6:0, 0:1, 4:3, 5:6, 3:4, 1:2, 2:5 }
+ self.H = nx.relabel_nodes(H, mapping)
+
+ def test_trivial(self):
+ """Empty graph"""
+ G = nx.Graph()
+ assert_equal(nx.find_cores(G),{})
+
+ def test_find_cores(self):
+ cores=nx.find_cores(self.G)
+ nodes_by_core=[]
+ for val in [0,1,2,3]:
+ nodes_by_core.append( sorted([k for k in cores if cores[k]==val]))
+ assert_equal(nodes_by_core[0],[21])
+ assert_equal(nodes_by_core[1],[17, 18, 19, 20])
+ assert_equal(nodes_by_core[2],[9, 10, 11, 12, 13, 14, 15, 16])
+ assert_equal(nodes_by_core[3], [1, 2, 3, 4, 5, 6, 7, 8])
+
+ def test_core_number(self):
+        # smoke test for core_number, the documented name (find_cores is an alias)
+ cores=nx.core_number(self.G)
+
+ def test_find_cores2(self):
+ cores=nx.find_cores(self.H)
+ nodes_by_core=[]
+ for val in [0,1,2]:
+ nodes_by_core.append( sorted([k for k in cores if cores[k]==val]))
+ assert_equal(nodes_by_core[0],[0])
+ assert_equal(nodes_by_core[1],[1, 3])
+ assert_equal(nodes_by_core[2],[2, 4, 5, 6])
+
+ def test_main_core(self):
+ main_core_subgraph=nx.k_core(self.H)
+ assert_equal(sorted(main_core_subgraph.nodes()),[2,4,5,6])
+
+ def test_k_core(self):
+ # k=0
+ k_core_subgraph=nx.k_core(self.H,k=0)
+ assert_equal(sorted(k_core_subgraph.nodes()),sorted(self.H.nodes()))
+ # k=1
+ k_core_subgraph=nx.k_core(self.H,k=1)
+ assert_equal(sorted(k_core_subgraph.nodes()),[1,2,3,4,5,6])
+ # k=2
+ k_core_subgraph=nx.k_core(self.H,k=2)
+ assert_equal(sorted(k_core_subgraph.nodes()),[2,4,5,6])
+
+ def test_main_crust(self):
+ main_crust_subgraph=nx.k_crust(self.H)
+ assert_equal(sorted(main_crust_subgraph.nodes()),[0,1,3])
+
+ def test_k_crust(self):
+        # k=2
+ k_crust_subgraph=nx.k_crust(self.H,k=2)
+ assert_equal(sorted(k_crust_subgraph.nodes()),sorted(self.H.nodes()))
+ # k=1
+ k_crust_subgraph=nx.k_crust(self.H,k=1)
+ assert_equal(sorted(k_crust_subgraph.nodes()),[0,1,3])
+        # k=0
+ k_crust_subgraph=nx.k_crust(self.H,k=0)
+ assert_equal(sorted(k_crust_subgraph.nodes()),[0])
+
+ def test_main_shell(self):
+ main_shell_subgraph=nx.k_shell(self.H)
+ assert_equal(sorted(main_shell_subgraph.nodes()),[2,4,5,6])
+
+ def test_k_shell(self):
+        # k=2
+ k_shell_subgraph=nx.k_shell(self.H,k=2)
+ assert_equal(sorted(k_shell_subgraph.nodes()),[2,4,5,6])
+ # k=1
+ k_shell_subgraph=nx.k_shell(self.H,k=1)
+ assert_equal(sorted(k_shell_subgraph.nodes()),[1,3])
+        # k=0
+ k_shell_subgraph=nx.k_shell(self.H,k=0)
+ assert_equal(sorted(k_shell_subgraph.nodes()),[0])
+
+ def test_k_corona(self):
+        # k=2
+ k_corona_subgraph=nx.k_corona(self.H,k=2)
+ assert_equal(sorted(k_corona_subgraph.nodes()),[2,4,5,6])
+ # k=1
+ k_corona_subgraph=nx.k_corona(self.H,k=1)
+ assert_equal(sorted(k_corona_subgraph.nodes()),[1])
+        # k=0
+ k_corona_subgraph=nx.k_corona(self.H,k=0)
+ assert_equal(sorted(k_corona_subgraph.nodes()),[0])
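A minimal example of the core decomposition functions tested above (networkx 1.x API): core_number assigns each node the largest k such that it lies in the k-core, and k_core/k_shell slice the graph by those numbers. The input graph is hypothetical:

    import networkx as nx

    G = nx.Graph([(1, 2), (2, 3), (3, 1), (3, 4)])  # triangle plus a pendant node

    print(nx.core_number(G))           # {1: 2, 2: 2, 3: 2, 4: 1}
    print(sorted(nx.k_core(G, k=2)))   # [1, 2, 3]: the 2-core drops the pendant
    print(sorted(nx.k_shell(G, k=1)))  # [4]: nodes whose core number is exactly 1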
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_cycles.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_cycles.py
new file mode 100644
index 0000000..e39bb87
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_cycles.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx
+import networkx as nx
+
+class TestCycles:
+ def setUp(self):
+ G=networkx.Graph()
+ G.add_cycle([0,1,2,3])
+ G.add_cycle([0,3,4,5])
+ G.add_cycle([0,1,6,7,8])
+ G.add_edge(8,9)
+ self.G=G
+
+ def is_cyclic_permutation(self,a,b):
+ n=len(a)
+ if len(b)!=n:
+ return False
+ l=a+a
+        return any(l[i:i+n]==b for i in range(n+1))
+
+ def test_cycle_basis(self):
+ G=self.G
+ cy=networkx.cycle_basis(G,0)
+ sort_cy= sorted( sorted(c) for c in cy )
+ assert_equal(sort_cy, [[0,1,2,3],[0,1,6,7,8],[0,3,4,5]])
+ cy=networkx.cycle_basis(G,1)
+ sort_cy= sorted( sorted(c) for c in cy )
+ assert_equal(sort_cy, [[0,1,2,3],[0,1,6,7,8],[0,3,4,5]])
+ cy=networkx.cycle_basis(G,9)
+ sort_cy= sorted( sorted(c) for c in cy )
+ assert_equal(sort_cy, [[0,1,2,3],[0,1,6,7,8],[0,3,4,5]])
+ # test disconnected graphs
+ G.add_cycle(list("ABC"))
+ cy=networkx.cycle_basis(G,9)
+ sort_cy= sorted(sorted(c) for c in cy[:-1]) + [sorted(cy[-1])]
+ assert_equal(sort_cy, [[0,1,2,3],[0,1,6,7,8],[0,3,4,5],['A','B','C']])
+
+ @raises(nx.NetworkXNotImplemented)
+    def test_cycle_basis_digraph(self):
+ G=nx.DiGraph()
+ cy=networkx.cycle_basis(G,0)
+
+ @raises(nx.NetworkXNotImplemented)
+    def test_cycle_basis_multigraph(self):
+ G=nx.MultiGraph()
+ cy=networkx.cycle_basis(G,0)
+
+ def test_simple_cycles(self):
+ G = nx.DiGraph([(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)])
+ cc=sorted(nx.simple_cycles(G))
+ ca=[[0], [0, 1, 2], [0, 2], [1, 2], [2]]
+ for c in cc:
+ assert_true(any(self.is_cyclic_permutation(c,rc) for rc in ca))
+
+ @raises(nx.NetworkXNotImplemented)
+ def test_simple_cycles_graph(self):
+ G = nx.Graph()
+ c = sorted(nx.simple_cycles(G))
+
+ def test_unsortable(self):
+ # TODO What does this test do? das 6/2013
+ G=nx.DiGraph()
+ G.add_cycle(['a',1])
+ c=list(nx.simple_cycles(G))
+
+ def test_simple_cycles_small(self):
+ G = nx.DiGraph()
+ G.add_cycle([1,2,3])
+ c=sorted(nx.simple_cycles(G))
+ assert_equal(len(c),1)
+ assert_true(self.is_cyclic_permutation(c[0],[1,2,3]))
+ G.add_cycle([10,20,30])
+ cc=sorted(nx.simple_cycles(G))
+ ca=[[1,2,3],[10,20,30]]
+ for c in cc:
+ assert_true(any(self.is_cyclic_permutation(c,rc) for rc in ca))
+
+ def test_simple_cycles_empty(self):
+ G = nx.DiGraph()
+ assert_equal(list(nx.simple_cycles(G)),[])
+
+ def test_complete_directed_graph(self):
+ # see table 2 in Johnson's paper
+ ncircuits=[1,5,20,84,409,2365,16064]
+ for n,c in zip(range(2,9),ncircuits):
+ G=nx.DiGraph(nx.complete_graph(n))
+ assert_equal(len(list(nx.simple_cycles(G))),c)
+
+ def worst_case_graph(self,k):
+ # see figure 1 in Johnson's paper
+        # this graph has exactly 3k simple cycles
+ G=nx.DiGraph()
+ for n in range(2,k+2):
+ G.add_edge(1,n)
+ G.add_edge(n,k+2)
+ G.add_edge(2*k+1,1)
+ for n in range(k+2,2*k+2):
+ G.add_edge(n,2*k+2)
+ G.add_edge(n,n+1)
+ G.add_edge(2*k+3,k+2)
+ for n in range(2*k+3,3*k+3):
+ G.add_edge(2*k+2,n)
+ G.add_edge(n,3*k+3)
+ G.add_edge(3*k+3,2*k+2)
+ return G
+
+ def test_worst_case_graph(self):
+ # see figure 1 in Johnson's paper
+ for k in range(3,10):
+ G=self.worst_case_graph(k)
+ l=len(list(nx.simple_cycles(G)))
+ assert_equal(l,3*k)
+
+ def test_recursive_simple_and_not(self):
+ for k in range(2,10):
+ G=self.worst_case_graph(k)
+ cc=sorted(nx.simple_cycles(G))
+ rcc=sorted(nx.recursive_simple_cycles(G))
+ assert_equal(len(cc),len(rcc))
+ for c in cc:
+ assert_true(any(self.is_cyclic_permutation(c,rc) for rc in rcc))
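A small usage sketch for the cycle enumeration tested above; simple_cycles yields each elementary circuit once, up to rotation, so the order and starting node of each cycle may vary:

    import networkx as nx

    G = nx.DiGraph([(1, 2), (2, 3), (3, 1), (3, 4), (4, 3)])
    print(sorted(nx.simple_cycles(G)))  # [[1, 2, 3], [3, 4]], each up to rotation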
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_dag.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_dag.py
new file mode 100644
index 0000000..eb368af
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_dag.py
@@ -0,0 +1,163 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx as nx
+
+class TestDAG:
+
+ def setUp(self):
+ pass
+
+ def test_topological_sort1(self):
+ DG=nx.DiGraph()
+ DG.add_edges_from([(1,2),(1,3),(2,3)])
+ assert_equal(nx.topological_sort(DG),[1, 2, 3])
+ assert_equal(nx.topological_sort_recursive(DG),[1, 2, 3])
+
+ DG.add_edge(3,2)
+ assert_raises(nx.NetworkXUnfeasible, nx.topological_sort, DG)
+ assert_raises(nx.NetworkXUnfeasible, nx.topological_sort_recursive, DG)
+
+ DG.remove_edge(2,3)
+ assert_equal(nx.topological_sort(DG),[1, 3, 2])
+ assert_equal(nx.topological_sort_recursive(DG),[1, 3, 2])
+
+ def test_is_directed_acyclic_graph(self):
+ G = nx.generators.complete_graph(2)
+ assert_false(nx.is_directed_acyclic_graph(G))
+ assert_false(nx.is_directed_acyclic_graph(G.to_directed()))
+ assert_false(nx.is_directed_acyclic_graph(nx.Graph([(3, 4), (4, 5)])))
+ assert_true(nx.is_directed_acyclic_graph(nx.DiGraph([(3, 4), (4, 5)])))
+
+ def test_topological_sort2(self):
+ DG=nx.DiGraph({1:[2],2:[3],3:[4],
+ 4:[5],5:[1],11:[12],
+ 12:[13],13:[14],14:[15]})
+ assert_raises(nx.NetworkXUnfeasible, nx.topological_sort, DG)
+ assert_raises(nx.NetworkXUnfeasible, nx.topological_sort_recursive, DG)
+
+ assert_false(nx.is_directed_acyclic_graph(DG))
+
+ DG.remove_edge(1,2)
+ assert_equal(nx.topological_sort_recursive(DG),
+ [11, 12, 13, 14, 15, 2, 3, 4, 5, 1])
+ assert_equal(nx.topological_sort(DG),
+ [11, 12, 13, 14, 15, 2, 3, 4, 5, 1])
+ assert_true(nx.is_directed_acyclic_graph(DG))
+
+ def test_topological_sort3(self):
+ DG=nx.DiGraph()
+ DG.add_edges_from([(1,i) for i in range(2,5)])
+ DG.add_edges_from([(2,i) for i in range(5,9)])
+ DG.add_edges_from([(6,i) for i in range(9,12)])
+ DG.add_edges_from([(4,i) for i in range(12,15)])
+ assert_equal(nx.topological_sort_recursive(DG),
+ [1, 4, 14, 13, 12, 3, 2, 7, 6, 11, 10, 9, 5, 8])
+ assert_equal(nx.topological_sort(DG),
+ [1, 2, 8, 5, 6, 9, 10, 11, 7, 3, 4, 12, 13, 14])
+
+ DG.add_edge(14,1)
+ assert_raises(nx.NetworkXUnfeasible, nx.topological_sort, DG)
+ assert_raises(nx.NetworkXUnfeasible, nx.topological_sort_recursive, DG)
+
+ def test_topological_sort4(self):
+ G=nx.Graph()
+ G.add_edge(1,2)
+ assert_raises(nx.NetworkXError, nx.topological_sort, G)
+ assert_raises(nx.NetworkXError, nx.topological_sort_recursive, G)
+
+ def test_topological_sort5(self):
+ G=nx.DiGraph()
+ G.add_edge(0,1)
+ assert_equal(nx.topological_sort_recursive(G), [0,1])
+ assert_equal(nx.topological_sort(G), [0,1])
+
+ def test_nbunch_argument(self):
+ G=nx.DiGraph()
+ G.add_edges_from([(1,2), (2,3), (1,4), (1,5), (2,6)])
+ assert_equal(nx.topological_sort(G), [1, 2, 3, 6, 4, 5])
+ assert_equal(nx.topological_sort_recursive(G), [1, 5, 4, 2, 6, 3])
+ assert_equal(nx.topological_sort(G,[1]), [1, 2, 3, 6, 4, 5])
+ assert_equal(nx.topological_sort_recursive(G,[1]), [1, 5, 4, 2, 6, 3])
+ assert_equal(nx.topological_sort(G,[5]), [5])
+ assert_equal(nx.topological_sort_recursive(G,[5]), [5])
+
+ def test_ancestors(self):
+ G=nx.DiGraph()
+ ancestors = nx.algorithms.dag.ancestors
+ G.add_edges_from([
+ (1, 2), (1, 3), (4, 2), (4, 3), (4, 5), (2, 6), (5, 6)])
+ assert_equal(ancestors(G, 6), set([1, 2, 4, 5]))
+ assert_equal(ancestors(G, 3), set([1, 4]))
+ assert_equal(ancestors(G, 1), set())
+ assert_raises(nx.NetworkXError, ancestors, G, 8)
+
+ def test_descendants(self):
+ G=nx.DiGraph()
+ descendants = nx.algorithms.dag.descendants
+ G.add_edges_from([
+ (1, 2), (1, 3), (4, 2), (4, 3), (4, 5), (2, 6), (5, 6)])
+ assert_equal(descendants(G, 1), set([2, 3, 6]))
+ assert_equal(descendants(G, 4), set([2, 3, 5, 6]))
+ assert_equal(descendants(G, 3), set())
+ assert_raises(nx.NetworkXError, descendants, G, 8)
+
+
+def test_is_aperiodic_cycle():
+ G=nx.DiGraph()
+ G.add_cycle([1,2,3,4])
+ assert_false(nx.is_aperiodic(G))
+
+def test_is_aperiodic_cycle2():
+ G=nx.DiGraph()
+ G.add_cycle([1,2,3,4])
+ G.add_cycle([3,4,5,6,7])
+ assert_true(nx.is_aperiodic(G))
+
+def test_is_aperiodic_cycle3():
+ G=nx.DiGraph()
+ G.add_cycle([1,2,3,4])
+ G.add_cycle([3,4,5,6])
+ assert_false(nx.is_aperiodic(G))
+
+def test_is_aperiodic_cycle4():
+ G = nx.DiGraph()
+ G.add_cycle([1,2,3,4])
+ G.add_edge(1,3)
+ assert_true(nx.is_aperiodic(G))
+
+def test_is_aperiodic_selfloop():
+ G = nx.DiGraph()
+ G.add_cycle([1,2,3,4])
+ G.add_edge(1,1)
+ assert_true(nx.is_aperiodic(G))
+
+def test_is_aperiodic_raise():
+ G = nx.Graph()
+ assert_raises(nx.NetworkXError,
+ nx.is_aperiodic,
+ G)
+
+def test_is_aperiodic_bipartite():
+ #Bipartite graph
+ G = nx.DiGraph(nx.davis_southern_women_graph())
+ assert_false(nx.is_aperiodic(G))
+
+def test_is_aperiodic_rary_tree():
+ G = nx.full_rary_tree(3,27,create_using=nx.DiGraph())
+ assert_false(nx.is_aperiodic(G))
+
+def test_is_aperiodic_disconnected():
+ #disconnected graph
+ G = nx.DiGraph()
+ G.add_cycle([1,2,3,4])
+ G.add_cycle([5,6,7,8])
+ assert_false(nx.is_aperiodic(G))
+ G.add_edge(1,3)
+ G.add_edge(5,7)
+ assert_true(nx.is_aperiodic(G))
+
+def test_is_aperiodic_disconnected2():
+ G = nx.DiGraph()
+ G.add_cycle([0,1,2])
+ G.add_edge(3,3)
+ assert_false(nx.is_aperiodic(G))
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_distance_measures.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_distance_measures.py
new file mode 100644
index 0000000..ff3530c
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_distance_measures.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx
+
+class TestDistance:
+
+ def setUp(self):
+ G=networkx.Graph()
+ from networkx import convert_node_labels_to_integers as cnlti
+ G=cnlti(networkx.grid_2d_graph(4,4),first_label=1,ordering="sorted")
+ self.G=G
+
+ def test_eccentricity(self):
+ assert_equal(networkx.eccentricity(self.G,1),6)
+ e=networkx.eccentricity(self.G)
+ assert_equal(e[1],6)
+ sp=networkx.shortest_path_length(self.G)
+ e=networkx.eccentricity(self.G,sp=sp)
+ assert_equal(e[1],6)
+ e=networkx.eccentricity(self.G,v=1)
+ assert_equal(e,6)
+ e=networkx.eccentricity(self.G,v=[1,1]) #This behavior changed in version 1.8 (ticket #739)
+ assert_equal(e[1],6)
+ e=networkx.eccentricity(self.G,v=[1,2])
+ assert_equal(e[1],6)
+ # test against graph with one node
+ G=networkx.path_graph(1)
+ e=networkx.eccentricity(G)
+ assert_equal(e[0],0)
+ e=networkx.eccentricity(G,v=0)
+ assert_equal(e,0)
+ assert_raises(networkx.NetworkXError, networkx.eccentricity, G, 1)
+ # test against empty graph
+ G=networkx.empty_graph()
+ e=networkx.eccentricity(G)
+ assert_equal(e,{})
+
+
+
+
+ def test_diameter(self):
+ assert_equal(networkx.diameter(self.G),6)
+
+ def test_radius(self):
+ assert_equal(networkx.radius(self.G),4)
+
+ def test_periphery(self):
+ assert_equal(set(networkx.periphery(self.G)),set([1, 4, 13, 16]))
+
+ def test_center(self):
+ assert_equal(set(networkx.center(self.G)),set([6, 7, 10, 11]))
+
+ def test_radius_exception(self):
+ G=networkx.Graph()
+ G.add_edge(1,2)
+ G.add_edge(3,4)
+ assert_raises(networkx.NetworkXError, networkx.diameter, G)
+
+ @raises(networkx.NetworkXError)
+ def test_eccentricity_infinite(self):
+ G=networkx.Graph([(1,2),(3,4)])
+ e = networkx.eccentricity(G)
+
+ @raises(networkx.NetworkXError)
+ def test_eccentricity_invalid(self):
+ G=networkx.Graph([(1,2),(3,4)])
+ e = networkx.eccentricity(G,sp=1)
+
+
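The distance measures above compose as in this minimal sketch (same 1.x API, hypothetical graph):

    import networkx as nx

    G = nx.path_graph(5)       # 0-1-2-3-4
    print(nx.eccentricity(G))  # {0: 4, 1: 3, 2: 2, 3: 3, 4: 4}
    print(nx.diameter(G))      # 4: maximum eccentricity
    print(nx.radius(G))        # 2: minimum eccentricity
    print(nx.center(G))        # [2]: nodes at eccentricity == radius
    print(nx.periphery(G))     # [0, 4]: nodes at eccentricity == diameter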
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_distance_regular.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_distance_regular.py
new file mode 100644
index 0000000..068a81f
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_distance_regular.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx as nx
+
+class TestDistanceRegular:
+
+ def test_is_distance_regular(self):
+ assert_true(nx.is_distance_regular(nx.icosahedral_graph()))
+ assert_true(nx.is_distance_regular(nx.petersen_graph()))
+ assert_true(nx.is_distance_regular(nx.cubical_graph()))
+ assert_true(nx.is_distance_regular(nx.complete_bipartite_graph(3,3)))
+ assert_true(nx.is_distance_regular(nx.tetrahedral_graph()))
+ assert_true(nx.is_distance_regular(nx.dodecahedral_graph()))
+ assert_true(nx.is_distance_regular(nx.pappus_graph()))
+ assert_true(nx.is_distance_regular(nx.heawood_graph()))
+ assert_true(nx.is_distance_regular(nx.cycle_graph(3)))
+        # not distance-regular
+ assert_false(nx.is_distance_regular(nx.path_graph(4)))
+
+ def test_not_connected(self):
+ G=nx.cycle_graph(4)
+ G.add_cycle([5,6,7])
+ assert_false(nx.is_distance_regular(G))
+
+
+ def test_global_parameters(self):
+ b,c=nx.intersection_array(nx.cycle_graph(5))
+ g=nx.global_parameters(b,c)
+ assert_equal(list(g),[(0, 0, 2), (1, 0, 1), (1, 1, 0)])
+ b,c=nx.intersection_array(nx.cycle_graph(3))
+ g=nx.global_parameters(b,c)
+ assert_equal(list(g),[(0, 0, 2), (1, 1, 0)])
+
+
+ def test_intersection_array(self):
+ b,c=nx.intersection_array(nx.cycle_graph(5))
+ assert_equal(b,[2, 1])
+ assert_equal(c,[1, 1])
+ b,c=nx.intersection_array(nx.dodecahedral_graph())
+ assert_equal(b,[3, 2, 1, 1, 1])
+ assert_equal(c,[1, 1, 1, 2, 3])
+ b,c=nx.intersection_array(nx.icosahedral_graph())
+ assert_equal(b,[5, 2, 1])
+ assert_equal(c,[1, 2, 5])
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_euler.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_euler.py
new file mode 100644
index 0000000..0b55f1f
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_euler.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python
+# run with nose: nosetests -v test_euler.py
+
+from nose.tools import *
+import networkx as nx
+from networkx import is_eulerian,eulerian_circuit
+
+class TestEuler:
+
+ def test_is_eulerian(self):
+ assert_true(is_eulerian(nx.complete_graph(5)))
+ assert_true(is_eulerian(nx.complete_graph(7)))
+ assert_true(is_eulerian(nx.hypercube_graph(4)))
+ assert_true(is_eulerian(nx.hypercube_graph(6)))
+
+ assert_false(is_eulerian(nx.complete_graph(4)))
+ assert_false(is_eulerian(nx.complete_graph(6)))
+ assert_false(is_eulerian(nx.hypercube_graph(3)))
+ assert_false(is_eulerian(nx.hypercube_graph(5)))
+
+ assert_false(is_eulerian(nx.petersen_graph()))
+ assert_false(is_eulerian(nx.path_graph(4)))
+
+ def test_is_eulerian2(self):
+ # not connected
+ G = nx.Graph()
+ G.add_nodes_from([1,2,3])
+ assert_false(is_eulerian(G))
+ # not strongly connected
+ G = nx.DiGraph()
+ G.add_nodes_from([1,2,3])
+ assert_false(is_eulerian(G))
+ G = nx.MultiDiGraph()
+ G.add_edge(1,2)
+ G.add_edge(2,3)
+ G.add_edge(2,3)
+ G.add_edge(3,1)
+ assert_false(is_eulerian(G))
+
+
+
+ def test_eulerian_circuit_cycle(self):
+ G=nx.cycle_graph(4)
+
+ edges=list(eulerian_circuit(G,source=0))
+ nodes=[u for u,v in edges]
+ assert_equal(nodes,[0,1,2,3])
+ assert_equal(edges,[(0,1),(1,2),(2,3),(3,0)])
+
+ edges=list(eulerian_circuit(G,source=1))
+ nodes=[u for u,v in edges]
+ assert_equal(nodes,[1,0,3,2])
+ assert_equal(edges,[(1,0),(0,3),(3,2),(2,1)])
+
+
+ def test_eulerian_circuit_digraph(self):
+ G=nx.DiGraph()
+ G.add_cycle([0,1,2,3])
+
+ edges=list(eulerian_circuit(G,source=0))
+ nodes=[u for u,v in edges]
+ assert_equal(nodes,[0,1,2,3])
+ assert_equal(edges,[(0,1),(1,2),(2,3),(3,0)])
+
+ edges=list(eulerian_circuit(G,source=1))
+ nodes=[u for u,v in edges]
+ assert_equal(nodes,[1,2,3,0])
+ assert_equal(edges,[(1,2),(2,3),(3,0),(0,1)])
+
+
+ def test_eulerian_circuit_multigraph(self):
+ G=nx.MultiGraph()
+ G.add_cycle([0,1,2,3])
+ G.add_edge(1,2)
+ G.add_edge(1,2)
+ edges=list(eulerian_circuit(G,source=0))
+ nodes=[u for u,v in edges]
+ assert_equal(nodes,[0,1,2,1,2,3])
+ assert_equal(edges,[(0,1),(1,2),(2,1),(1,2),(2,3),(3,0)])
+
+
+ @raises(nx.NetworkXError)
+ def test_not_eulerian(self):
+ f=list(eulerian_circuit(nx.complete_graph(4)))
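A minimal sketch of the Euler-circuit API the tests above walk through; eulerian_circuit yields the circuit edge by edge:

    import networkx as nx

    G = nx.cycle_graph(4)  # every vertex has even degree, so G is Eulerian
    if nx.is_eulerian(G):
        print(list(nx.eulerian_circuit(G, source=0)))
        # [(0, 1), (1, 2), (2, 3), (3, 0)]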
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_graphical.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_graphical.py
new file mode 100644
index 0000000..9609d25
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_graphical.py
@@ -0,0 +1,114 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx as nx
+
+def test_valid_degree_sequence1():
+ n = 100
+ p = .3
+ for i in range(10):
+ G = nx.erdos_renyi_graph(n,p)
+ deg = list(G.degree().values())
+ assert_true( nx.is_valid_degree_sequence(deg, method='eg') )
+ assert_true( nx.is_valid_degree_sequence(deg, method='hh') )
+
+def test_valid_degree_sequence2():
+ n = 100
+ for i in range(10):
+ G = nx.barabasi_albert_graph(n,1)
+ deg = list(G.degree().values())
+ assert_true( nx.is_valid_degree_sequence(deg, method='eg') )
+ assert_true( nx.is_valid_degree_sequence(deg, method='hh') )
+
+@raises(nx.NetworkXException)
+def test_string_input():
+ a = nx.is_valid_degree_sequence([],'foo')
+
+def test_negative_input():
+ assert_false(nx.is_valid_degree_sequence([-1],'hh'))
+ assert_false(nx.is_valid_degree_sequence([-1],'eg'))
+ assert_false(nx.is_valid_degree_sequence([72.5],'eg'))
+
+
+def test_atlas():
+ for graph in nx.graph_atlas_g():
+ deg = list(graph.degree().values())
+ assert_true( nx.is_valid_degree_sequence(deg, method='eg') )
+ assert_true( nx.is_valid_degree_sequence(deg, method='hh') )
+
+def test_small_graph_true():
+ z=[5,3,3,3,3,2,2,2,1,1,1]
+ assert_true(nx.is_valid_degree_sequence(z, method='hh'))
+ assert_true(nx.is_valid_degree_sequence(z, method='eg'))
+ z=[10,3,3,3,3,2,2,2,2,2,2]
+ assert_true(nx.is_valid_degree_sequence(z, method='hh'))
+ assert_true(nx.is_valid_degree_sequence(z, method='eg'))
+ z=[1, 1, 1, 1, 1, 2, 2, 2, 3, 4]
+ assert_true(nx.is_valid_degree_sequence(z, method='hh'))
+ assert_true(nx.is_valid_degree_sequence(z, method='eg'))
+
+
+
+def test_small_graph_false():
+ z=[1000,3,3,3,3,2,2,2,1,1,1]
+ assert_false(nx.is_valid_degree_sequence(z, method='hh'))
+ assert_false(nx.is_valid_degree_sequence(z, method='eg'))
+ z=[6,5,4,4,2,1,1,1]
+ assert_false(nx.is_valid_degree_sequence(z, method='hh'))
+ assert_false(nx.is_valid_degree_sequence(z, method='eg'))
+ z=[1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 4]
+ assert_false(nx.is_valid_degree_sequence(z, method='hh'))
+ assert_false(nx.is_valid_degree_sequence(z, method='eg'))
+
+def test_directed_degree_sequence():
+ # Test a range of valid directed degree sequences
+ n, r = 100, 10
+ p = 1.0 / r
+ for i in range(r):
+ G = nx.erdos_renyi_graph(n,p*(i+1),None,True)
+ din = list(G.in_degree().values())
+ dout = list(G.out_degree().values())
+ assert_true(nx.is_digraphical(din, dout))
+
+def test_small_directed_sequences():
+ dout=[5,3,3,3,3,2,2,2,1,1,1]
+ din=[3,3,3,3,3,2,2,2,2,2,1]
+ assert_true(nx.is_digraphical(din, dout))
+ # Test nongraphical directed sequence
+ dout = [1000,3,3,3,3,2,2,2,1,1,1]
+ din=[103,102,102,102,102,102,102,102,102,102]
+ assert_false(nx.is_digraphical(din, dout))
+ # Test digraphical small sequence
+ dout=[1, 1, 1, 1, 1, 2, 2, 2, 3, 4]
+ din=[2, 2, 2, 2, 2, 2, 2, 2, 1, 1]
+ assert_true(nx.is_digraphical(din, dout))
+ # Test nonmatching sum
+ din=[2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1]
+ assert_false(nx.is_digraphical(din, dout))
+ # Test for negative integer in sequence
+ din=[2, 2, 2, -2, 2, 2, 2, 2, 1, 1, 4]
+ assert_false(nx.is_digraphical(din, dout))
+
+def test_multi_sequence():
+ # Test nongraphical multi sequence
+ seq=[1000,3,3,3,3,2,2,2,1,1]
+ assert_false(nx.is_multigraphical(seq))
+ # Test small graphical multi sequence
+ seq=[6,5,4,4,2,1,1,1]
+ assert_true(nx.is_multigraphical(seq))
+ # Test for negative integer in sequence
+ seq=[6,5,4,-4,2,1,1,1]
+ assert_false(nx.is_multigraphical(seq))
+ # Test for sequence with odd sum
+ seq=[1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 4]
+ assert_false(nx.is_multigraphical(seq))
+
+def test_pseudo_sequence():
+ # Test small valid pseudo sequence
+ seq=[1000,3,3,3,3,2,2,2,1,1]
+ assert_true(nx.is_pseudographical(seq))
+ # Test for sequence with odd sum
+ seq=[1000,3,3,3,3,2,2,2,1,1,1]
+ assert_false(nx.is_pseudographical(seq))
+ # Test for negative integer in sequence
+ seq=[1000,3,3,3,3,2,2,-2,1,1]
+ assert_false(nx.is_pseudographical(seq))
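For reference, a tiny sketch of the degree-sequence checks ('hh' = Havel-Hakimi, 'eg' = Erdos-Gallai) under the same API; the sequences are hypothetical:

    import networkx as nx

    print(nx.is_valid_degree_sequence([3, 3, 2, 2, 1, 1], method='hh'))  # True
    print(nx.is_valid_degree_sequence([4, 1, 1], method='eg'))  # False: degree 4 > n - 1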
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_hierarchy.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_hierarchy.py
new file mode 100644
index 0000000..6f2907e
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_hierarchy.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx as nx
+
+def test_hierarchy_exception():
+ G = nx.cycle_graph(5)
+ assert_raises(nx.NetworkXError,nx.flow_hierarchy,G)
+
+def test_hierarchy_cycle():
+ G = nx.cycle_graph(5,create_using=nx.DiGraph())
+ assert_equal(nx.flow_hierarchy(G),0.0)
+
+def test_hierarchy_tree():
+ G = nx.full_rary_tree(2,16,create_using=nx.DiGraph())
+ assert_equal(nx.flow_hierarchy(G),1.0)
+
+def test_hierarchy_1():
+ G = nx.DiGraph()
+ G.add_edges_from([(0,1),(1,2),(2,3),(3,1),(3,4),(0,4)])
+ assert_equal(nx.flow_hierarchy(G),0.5)
+
+def test_hierarchy_weight():
+ G = nx.DiGraph()
+ G.add_edges_from([(0,1,{'weight':.3}),
+ (1,2,{'weight':.1}),
+ (2,3,{'weight':.1}),
+ (3,1,{'weight':.1}),
+ (3,4,{'weight':.3}),
+ (0,4,{'weight':.3})])
+    assert_equal(nx.flow_hierarchy(G,weight='weight'),.75)
\ No newline at end of file
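flow_hierarchy measures the fraction of edges not participating in any cycle (consistent with test_hierarchy_1 above). A quick sketch with a hypothetical graph:

    import networkx as nx

    G = nx.DiGraph([(0, 1), (1, 2), (2, 0), (2, 3)])  # one 3-cycle plus a tree edge
    print(nx.flow_hierarchy(G))  # 0.25: only (2, 3) lies outside every cycle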
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_matching.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_matching.py
new file mode 100644
index 0000000..5509e2d
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_matching.py
@@ -0,0 +1,247 @@
+#!/usr/bin/env python
+import math
+from nose.tools import *
+import networkx as nx
+
+class TestMatching:
+
+ def setUp(self):
+ pass
+
+ def test_trivial1(self):
+ """Empty graph"""
+ G = nx.Graph()
+ assert_equal(nx.max_weight_matching(G),{})
+
+ def test_trivial2(self):
+ """Self loop"""
+ G = nx.Graph()
+ G.add_edge(0, 0, weight=100)
+ assert_equal(nx.max_weight_matching(G),{})
+
+ def test_trivial3(self):
+ """Single edge"""
+ G = nx.Graph()
+ G.add_edge(0, 1)
+ assert_equal(nx.max_weight_matching(G),
+ {0: 1, 1: 0})
+
+ def test_trivial4(self):
+ """Small graph"""
+ G = nx.Graph()
+ G.add_edge('one', 'two', weight=10)
+ G.add_edge('two', 'three', weight=11)
+ assert_equal(nx.max_weight_matching(G),
+ {'three': 'two', 'two': 'three'})
+
+ def test_trivial5(self):
+ """Path"""
+ G = nx.Graph()
+ G.add_edge(1, 2, weight=5)
+ G.add_edge(2, 3, weight=11)
+ G.add_edge(3, 4, weight=5)
+ assert_equal(nx.max_weight_matching(G),
+ {2: 3, 3: 2})
+ assert_equal(nx.max_weight_matching(G, 1),
+ {1: 2, 2: 1, 3: 4, 4: 3})
+
+
+ def test_floating_point_weights(self):
+ """Floating point weights"""
+ G = nx.Graph()
+ G.add_edge(1, 2, weight=math.pi)
+ G.add_edge(2, 3, weight=math.exp(1))
+ G.add_edge(1, 3, weight=3.0)
+ G.add_edge(1, 4, weight=math.sqrt(2.0))
+ assert_equal(nx.max_weight_matching(G),
+ {1: 4, 2: 3, 3: 2, 4: 1})
+
+ def test_negative_weights(self):
+ """Negative weights"""
+ G = nx.Graph()
+ G.add_edge(1, 2, weight=2)
+ G.add_edge(1, 3, weight=-2)
+ G.add_edge(2, 3, weight=1)
+ G.add_edge(2, 4, weight=-1)
+ G.add_edge(3, 4, weight=-6)
+ assert_equal(nx.max_weight_matching(G),
+ {1: 2, 2: 1})
+ assert_equal(nx.max_weight_matching(G, 1),
+ {1: 3, 2: 4, 3: 1, 4: 2})
+
+ def test_s_blossom(self):
+ """Create S-blossom and use it for augmentation:"""
+ G = nx.Graph()
+ G.add_weighted_edges_from([ (1, 2, 8), (1, 3, 9),
+ (2, 3, 10), (3, 4, 7) ])
+ assert_equal(nx.max_weight_matching(G),
+ {1: 2, 2: 1, 3: 4, 4: 3})
+
+ G.add_weighted_edges_from([ (1, 6, 5), (4, 5, 6) ])
+ assert_equal(nx.max_weight_matching(G),
+ {1: 6, 2: 3, 3: 2, 4: 5, 5: 4, 6: 1})
+
+ def test_s_t_blossom(self):
+ """Create S-blossom, relabel as T-blossom, use for augmentation:"""
+ G = nx.Graph()
+ G.add_weighted_edges_from([ (1, 2, 9), (1, 3, 8), (2, 3, 10),
+ (1, 4, 5), (4, 5, 4), (1, 6, 3) ])
+ assert_equal(nx.max_weight_matching(G),
+ {1: 6, 2: 3, 3: 2, 4: 5, 5: 4, 6: 1})
+ G.add_edge(4, 5, weight=3)
+ G.add_edge(1, 6, weight=4)
+ assert_equal(nx.max_weight_matching(G),
+ {1: 6, 2: 3, 3: 2, 4: 5, 5: 4, 6: 1})
+ G.remove_edge(1, 6)
+ G.add_edge(3, 6, weight=4)
+ assert_equal(nx.max_weight_matching(G),
+ {1: 2, 2: 1, 3: 6, 4: 5, 5: 4, 6: 3})
+
+ def test_nested_s_blossom(self):
+ """Create nested S-blossom, use for augmentation:"""
+
+ G = nx.Graph()
+ G.add_weighted_edges_from([ (1, 2, 9), (1, 3, 9), (2, 3, 10),
+ (2, 4, 8), (3, 5, 8), (4, 5, 10),
+ (5, 6, 6) ])
+ assert_equal(nx.max_weight_matching(G),
+ {1: 3, 2: 4, 3: 1, 4: 2, 5: 6, 6: 5})
+
+ def test_nested_s_blossom_relabel(self):
+ """Create S-blossom, relabel as S, include in nested S-blossom:"""
+ G = nx.Graph()
+ G.add_weighted_edges_from([ (1, 2, 10), (1, 7, 10), (2, 3, 12),
+ (3, 4, 20), (3, 5, 20), (4, 5, 25),
+ (5, 6, 10), (6, 7, 10), (7, 8, 8) ])
+ assert_equal(nx.max_weight_matching(G),
+ {1: 2, 2: 1, 3: 4, 4: 3, 5: 6, 6: 5, 7: 8, 8: 7})
+
+ def test_nested_s_blossom_expand(self):
+ """Create nested S-blossom, augment, expand recursively:"""
+ G = nx.Graph()
+ G.add_weighted_edges_from([ (1, 2, 8), (1, 3, 8), (2, 3, 10),
+ (2, 4, 12),(3, 5, 12), (4, 5, 14),
+ (4, 6, 12), (5, 7, 12), (6, 7, 14),
+ (7, 8, 12) ])
+ assert_equal(nx.max_weight_matching(G),
+ {1: 2, 2: 1, 3: 5, 4: 6, 5: 3, 6: 4, 7: 8, 8: 7})
+
+
+ def test_s_blossom_relabel_expand(self):
+ """Create S-blossom, relabel as T, expand:"""
+ G = nx.Graph()
+ G.add_weighted_edges_from([ (1, 2, 23), (1, 5, 22), (1, 6, 15),
+ (2, 3, 25), (3, 4, 22), (4, 5, 25),
+ (4, 8, 14), (5, 7, 13) ])
+ assert_equal(nx.max_weight_matching(G),
+ {1: 6, 2: 3, 3: 2, 4: 8, 5: 7, 6: 1, 7: 5, 8: 4})
+
+ def test_nested_s_blossom_relabel_expand(self):
+ """Create nested S-blossom, relabel as T, expand:"""
+ G = nx.Graph()
+ G.add_weighted_edges_from([ (1, 2, 19), (1, 3, 20), (1, 8, 8),
+ (2, 3, 25), (2, 4, 18), (3, 5, 18),
+ (4, 5, 13), (4, 7, 7), (5, 6, 7) ])
+ assert_equal(nx.max_weight_matching(G),
+ {1: 8, 2: 3, 3: 2, 4: 7, 5: 6, 6: 5, 7: 4, 8: 1})
+
+
+ def test_nasty_blossom1(self):
+ """Create blossom, relabel as T in more than one way, expand,
+ augment:
+ """
+ G = nx.Graph()
+ G.add_weighted_edges_from([ (1, 2, 45), (1, 5, 45), (2, 3, 50),
+ (3, 4, 45), (4, 5, 50), (1, 6, 30),
+ (3, 9, 35), (4, 8, 35), (5, 7, 26),
+ (9, 10, 5) ])
+ assert_equal(nx.max_weight_matching(G),
+ {1: 6, 2: 3, 3: 2, 4: 8, 5: 7,
+ 6: 1, 7: 5, 8: 4, 9: 10, 10: 9})
+
+ def test_nasty_blossom2(self):
+ """Again but slightly different:"""
+ G = nx.Graph()
+ G.add_weighted_edges_from([ (1, 2, 45), (1, 5, 45), (2, 3, 50),
+ (3, 4, 45), (4, 5, 50), (1, 6, 30),
+ (3, 9, 35), (4, 8, 26), (5, 7, 40),
+ (9, 10, 5) ])
+ assert_equal(nx.max_weight_matching(G),
+ {1: 6, 2: 3, 3: 2, 4: 8, 5: 7,
+ 6: 1, 7: 5, 8: 4, 9: 10, 10: 9})
+
+ def test_nasty_blossom_least_slack(self):
+ """Create blossom, relabel as T, expand such that a new
+        least-slack S-to-free edge is produced, augment:
+ """
+ G = nx.Graph()
+ G.add_weighted_edges_from([ (1, 2, 45), (1, 5, 45), (2, 3, 50),
+ (3, 4, 45), (4, 5, 50), (1, 6, 30),
+ (3, 9, 35), (4, 8, 28), (5, 7, 26),
+ (9, 10, 5) ])
+ assert_equal(nx.max_weight_matching(G),
+ {1: 6, 2: 3, 3: 2, 4: 8, 5: 7,
+ 6: 1, 7: 5, 8: 4, 9: 10, 10: 9})
+
+ def test_nasty_blossom_augmenting(self):
+ """Create nested blossom, relabel as T in more than one way"""
+ # expand outer blossom such that inner blossom ends up on an
+ # augmenting path:
+ G = nx.Graph()
+ G.add_weighted_edges_from([ (1, 2, 45), (1, 7, 45), (2, 3, 50),
+ (3, 4, 45), (4, 5, 95), (4, 6, 94),
+ (5, 6, 94), (6, 7, 50), (1, 8, 30),
+ (3, 11, 35), (5, 9, 36), (7, 10, 26),
+ (11, 12, 5) ])
+ assert_equal(nx.max_weight_matching(G),
+ {1: 8, 2: 3, 3: 2, 4: 6, 5: 9, 6: 4,
+ 7: 10, 8: 1, 9: 5, 10: 7, 11: 12, 12: 11})
+
+ def test_nasty_blossom_expand_recursively(self):
+ """Create nested S-blossom, relabel as S, expand recursively:"""
+ G = nx.Graph()
+ G.add_weighted_edges_from([ (1, 2, 40), (1, 3, 40), (2, 3, 60),
+ (2, 4, 55), (3, 5, 55), (4, 5, 50),
+ (1, 8, 15), (5, 7, 30), (7, 6, 10),
+ (8, 10, 10), (4, 9, 30) ])
+ assert_equal(nx.max_weight_matching(G),
+ {1: 2, 2: 1, 3: 5, 4: 9, 5: 3,
+ 6: 7, 7: 6, 8: 10, 9: 4, 10: 8})
+
+def test_maximal_matching():
+ graph = nx.Graph()
+ graph.add_edge(0, 1)
+ graph.add_edge(0, 2)
+ graph.add_edge(0, 3)
+ graph.add_edge(0, 4)
+ graph.add_edge(0, 5)
+ graph.add_edge(1, 2)
+ matching = nx.maximal_matching(graph)
+
+ vset = set(u for u, v in matching)
+ vset = vset | set(v for u, v in matching)
+
+ for edge in graph.edges_iter():
+ u, v = edge
+ ok_(len(set([v]) & vset) > 0 or len(set([u]) & vset) > 0, \
+ "not a proper matching!")
+
+ eq_(1, len(matching), "matching not length 1!")
+ graph = nx.Graph()
+ graph.add_edge(1, 2)
+ graph.add_edge(1, 5)
+ graph.add_edge(2, 3)
+ graph.add_edge(2, 5)
+ graph.add_edge(3, 4)
+ graph.add_edge(3, 6)
+ graph.add_edge(5, 6)
+
+ matching = nx.maximal_matching(graph)
+ vset = set(u for u, v in matching)
+ vset = vset | set(v for u, v in matching)
+
+ for edge in graph.edges_iter():
+ u, v = edge
+ ok_(len(set([v]) & vset) > 0 or len(set([u]) & vset) > 0, \
+ "not a proper matching!")
+
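The matching tests above pin down the 1.x return convention: max_weight_matching returns a dict mapping each matched vertex to its mate, and a truthy second argument (maxcardinality, passed positionally as 1 in test_trivial5) requests a maximum-cardinality matching. A minimal sketch:

    import networkx as nx

    G = nx.Graph()
    G.add_edge(1, 2, weight=5)
    G.add_edge(2, 3, weight=11)
    G.add_edge(3, 4, weight=5)

    print(nx.max_weight_matching(G))        # {2: 3, 3: 2}: weight 11 beats 5 + 5 = 10
    print(nx.max_weight_matching(G, True))  # {1: 2, 2: 1, 3: 4, 4: 3}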
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_mis.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_mis.py
new file mode 100644
index 0000000..01092ef
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_mis.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# $Id: test_maximal_independent_set.py 577 2011-03-01 06:07:53Z lleeoo $
+"""
+Tests for maximal (not maximum) independent sets.
+
+"""
+# Copyright (C) 2004-2010 by
+# Leo Lopes <leo.lopes@monash.edu>
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+
+__author__ = """Leo Lopes (leo.lopes@monash.edu)"""
+
+from nose.tools import *
+import networkx as nx
+import random
+
+class TestMaximalIndependentSet(object):
+ def setup(self):
+ self.florentine = nx.Graph()
+ self.florentine.add_edge('Acciaiuoli','Medici')
+ self.florentine.add_edge('Castellani','Peruzzi')
+ self.florentine.add_edge('Castellani','Strozzi')
+ self.florentine.add_edge('Castellani','Barbadori')
+ self.florentine.add_edge('Medici','Barbadori')
+ self.florentine.add_edge('Medici','Ridolfi')
+ self.florentine.add_edge('Medici','Tornabuoni')
+ self.florentine.add_edge('Medici','Albizzi')
+ self.florentine.add_edge('Medici','Salviati')
+ self.florentine.add_edge('Salviati','Pazzi')
+ self.florentine.add_edge('Peruzzi','Strozzi')
+ self.florentine.add_edge('Peruzzi','Bischeri')
+ self.florentine.add_edge('Strozzi','Ridolfi')
+ self.florentine.add_edge('Strozzi','Bischeri')
+ self.florentine.add_edge('Ridolfi','Tornabuoni')
+ self.florentine.add_edge('Tornabuoni','Guadagni')
+ self.florentine.add_edge('Albizzi','Ginori')
+ self.florentine.add_edge('Albizzi','Guadagni')
+ self.florentine.add_edge('Bischeri','Guadagni')
+ self.florentine.add_edge('Guadagni','Lamberteschi')
+
+ def test_K5(self):
+ """Maximal independent set: K5"""
+ G = nx.complete_graph(5)
+ for node in G:
+ assert_equal(nx.maximal_independent_set(G, [node]), [node])
+
+ def test_K55(self):
+ """Maximal independent set: K55"""
+ G = nx.complete_graph(55)
+ for node in G:
+ assert_equal(nx.maximal_independent_set(G, [node]), [node])
+
+ def test_exception(self):
+ """Bad input should raise exception."""
+ G = self.florentine
+ assert_raises(nx.NetworkXUnfeasible,
+ nx.maximal_independent_set, G, ["Smith"])
+ assert_raises(nx.NetworkXUnfeasible,
+ nx.maximal_independent_set, G, ["Salviati", "Pazzi"])
+
+ def test_florentine_family(self):
+ G = self.florentine
+ indep = nx.maximal_independent_set(G, ["Medici", "Bischeri"])
+ assert_equal(sorted(indep),
+ sorted(["Medici", "Bischeri", "Castellani", "Pazzi",
+ "Ginori", "Lamberteschi"]))
+
+ def test_bipartite(self):
+ G = nx.complete_bipartite_graph(12, 34)
+ indep = nx.maximal_independent_set(G, [4, 5, 9, 10])
+ assert_equal(sorted(indep), list(range(12)))
+
+
+ def test_random_graphs(self):
+ """Generate 50 random graphs of different types and sizes and
+ make sure that all sets are independent and maximal."""
+ for i in range(0, 50, 10):
+ G = nx.random_graphs.erdos_renyi_graph(i*10+1, random.random())
+ IS = nx.maximal_independent_set(G)
+ assert_false(G.subgraph(IS).edges())
+ neighbors_of_MIS = set.union(*(set(G.neighbors(v)) for v in IS))
+ for v in set(G.nodes()).difference(IS):
+ assert_true(v in neighbors_of_MIS)
+
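A tiny sketch of maximal_independent_set on a 4-cycle; seeding with node 0 forces the deterministic answer, since nodes 1 and 3 are both neighbors of 0:

    import networkx as nx

    G = nx.cycle_graph(4)  # 0-1-2-3-0
    print(sorted(nx.maximal_independent_set(G, [0])))  # [0, 2]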
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_mst.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_mst.py
new file mode 100644
index 0000000..5716506
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_mst.py
@@ -0,0 +1,133 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx as nx
+
+class TestMST:
+
+ def setUp(self):
+ # example from Wikipedia: http://en.wikipedia.org/wiki/Kruskal's_algorithm
+ G=nx.Graph()
+ edgelist = [(0,3,[('weight',5)]),
+ (0,1,[('weight',7)]),
+ (1,3,[('weight',9)]),
+ (1,2,[('weight',8)]),
+ (1,4,[('weight',7)]),
+ (3,4,[('weight',15)]),
+ (3,5,[('weight',6)]),
+ (2,4,[('weight',5)]),
+ (4,5,[('weight',8)]),
+ (4,6,[('weight',9)]),
+ (5,6,[('weight',11)])]
+
+
+ G.add_edges_from(edgelist)
+ self.G=G
+ tree_edgelist = [(0,1,{'weight':7}),
+ (0,3,{'weight':5}),
+ (3,5,{'weight':6}),
+ (1,4,{'weight':7}),
+ (4,2,{'weight':5}),
+ (4,6,{'weight':9})]
+ self.tree_edgelist=sorted((sorted((u, v))[0], sorted((u, v))[1], d)
+ for u,v,d in tree_edgelist)
+
+ def test_mst(self):
+ T=nx.minimum_spanning_tree(self.G)
+ assert_equal(T.edges(data=True),self.tree_edgelist)
+
+ def test_mst_edges(self):
+ edgelist=sorted(nx.minimum_spanning_edges(self.G))
+ assert_equal(edgelist,self.tree_edgelist)
+
+ def test_mst_disconnected(self):
+ G=nx.Graph()
+ G.add_path([1,2])
+ G.add_path([10,20])
+ T=nx.minimum_spanning_tree(G)
+ assert_equal(sorted(T.edges()),[(1, 2), (20, 10)])
+ assert_equal(sorted(T.nodes()),[1, 2, 10, 20])
+
+ def test_mst_isolate(self):
+ G=nx.Graph()
+ G.add_nodes_from([1,2])
+ T=nx.minimum_spanning_tree(G)
+ assert_equal(sorted(T.nodes()),[1, 2])
+ assert_equal(sorted(T.edges()),[])
+
+ def test_mst_attributes(self):
+ G=nx.Graph()
+ G.add_edge(1,2,weight=1,color='red',distance=7)
+ G.add_edge(2,3,weight=1,color='green',distance=2)
+ G.add_edge(1,3,weight=10,color='blue',distance=1)
+ G.add_node(13,color='purple')
+ G.graph['foo']='bar'
+ T=nx.minimum_spanning_tree(G)
+ assert_equal(T.graph,G.graph)
+ assert_equal(T.node[13],G.node[13])
+ assert_equal(T.edge[1][2],G.edge[1][2])
+
+ def test_mst_edges_specify_weight(self):
+ G=nx.Graph()
+ G.add_edge(1,2,weight=1,color='red',distance=7)
+ G.add_edge(1,3,weight=30,color='blue',distance=1)
+ G.add_edge(2,3,weight=1,color='green',distance=1)
+ G.add_node(13,color='purple')
+ G.graph['foo']='bar'
+ T=nx.minimum_spanning_tree(G)
+ assert_equal(sorted(T.nodes()),[1,2,3,13])
+ assert_equal(sorted(T.edges()),[(1,2),(2,3)])
+ T=nx.minimum_spanning_tree(G,weight='distance')
+ assert_equal(sorted(T.edges()),[(1,3),(2,3)])
+ assert_equal(sorted(T.nodes()),[1,2,3,13])
+
+ def test_prim_mst(self):
+ T=nx.prim_mst(self.G)
+ assert_equal(T.edges(data=True),self.tree_edgelist)
+
+ def test_prim_mst_edges(self):
+ edgelist=sorted(nx.prim_mst_edges(self.G))
+ edgelist=sorted((sorted((u, v))[0], sorted((u, v))[1], d)
+ for u,v,d in edgelist)
+ assert_equal(edgelist,self.tree_edgelist)
+
+ def test_prim_mst_disconnected(self):
+ G=nx.Graph()
+ G.add_path([1,2])
+ G.add_path([10,20])
+ T=nx.prim_mst(G)
+ assert_equal(sorted(T.edges()),[(1, 2), (20, 10)])
+ assert_equal(sorted(T.nodes()),[1, 2, 10, 20])
+
+ def test_prim_mst_isolate(self):
+ G=nx.Graph()
+ G.add_nodes_from([1,2])
+ T=nx.prim_mst(G)
+ assert_equal(sorted(T.nodes()),[1, 2])
+ assert_equal(sorted(T.edges()),[])
+
+ def test_prim_mst_attributes(self):
+ G=nx.Graph()
+ G.add_edge(1,2,weight=1,color='red',distance=7)
+ G.add_edge(2,3,weight=1,color='green',distance=2)
+ G.add_edge(1,3,weight=10,color='blue',distance=1)
+ G.add_node(13,color='purple')
+ G.graph['foo']='bar'
+ T=nx.prim_mst(G)
+ assert_equal(T.graph,G.graph)
+ assert_equal(T.node[13],G.node[13])
+ assert_equal(T.edge[1][2],G.edge[1][2])
+
+ def test_prim_mst_edges_specify_weight(self):
+ G=nx.Graph()
+ G.add_edge(1,2,weight=1,color='red',distance=7)
+ G.add_edge(1,3,weight=30,color='blue',distance=1)
+ G.add_edge(2,3,weight=1,color='green',distance=1)
+ G.add_node(13,color='purple')
+ G.graph['foo']='bar'
+ T=nx.prim_mst(G)
+ assert_equal(sorted(T.nodes()),[1,2,3,13])
+ assert_equal(sorted(T.edges()),[(1,2),(2,3)])
+ T=nx.prim_mst(G,weight='distance')
+ assert_equal(sorted(T.edges()),[(1,3),(2,3)])
+ assert_equal(sorted(T.nodes()),[1,2,3,13])
+
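A minimal spanning-tree sketch under the same API; minimum_spanning_tree returns a new Graph containing only the tree edges. The weights are hypothetical:

    import networkx as nx

    G = nx.Graph()
    G.add_edge(1, 2, weight=1)
    G.add_edge(2, 3, weight=2)
    G.add_edge(1, 3, weight=10)

    T = nx.minimum_spanning_tree(G)
    print(sorted(T.edges()))  # [(1, 2), (2, 3)]: the weight-10 edge is dropped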
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_richclub.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_richclub.py
new file mode 100644
index 0000000..8114d91
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_richclub.py
@@ -0,0 +1,30 @@
+import networkx as nx
+from nose.tools import *
+
+
+def test_richclub():
+ G = nx.Graph([(0,1),(0,2),(1,2),(1,3),(1,4),(4,5)])
+ rc = nx.richclub.rich_club_coefficient(G,normalized=False)
+ assert_equal(rc,{0: 12.0/30,1:8.0/12})
+
+ # test single value
+ rc0 = nx.richclub.rich_club_coefficient(G,normalized=False)[0]
+ assert_equal(rc0,12.0/30.0)
+
+def test_richclub_normalized():
+ G = nx.Graph([(0,1),(0,2),(1,2),(1,3),(1,4),(4,5)])
+ rcNorm = nx.richclub.rich_club_coefficient(G,Q=2)
+ assert_equal(rcNorm,{0:1.0,1:1.0})
+
+
+def test_richclub2():
+ T = nx.balanced_tree(2,10)
+ rc = nx.richclub.rich_club_coefficient(T,normalized=False)
+ assert_equal(rc,{0:4092/(2047*2046.0),
+ 1:(2044.0/(1023*1022)),
+ 2:(2040.0/(1022*1021))})
+
+#def test_richclub2_normalized():
+# T = nx.balanced_tree(2,10)
+# rcNorm = nx.richclub.rich_club_coefficient(T,Q=2)
+# assert_true(rcNorm[0] ==1.0 and rcNorm[1] < 0.9 and rcNorm[2] < 0.9)
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_simple_paths.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_simple_paths.py
new file mode 100644
index 0000000..268a890
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_simple_paths.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx as nx
+
+def test_all_simple_paths():
+ G = nx.path_graph(4)
+ paths = nx.all_simple_paths(G,0,3)
+ assert_equal(list(list(p) for p in paths),[[0,1,2,3]])
+
+def test_all_simple_paths_cutoff():
+ G = nx.complete_graph(4)
+ paths = nx.all_simple_paths(G,0,1,cutoff=1)
+ assert_equal(list(list(p) for p in paths),[[0,1]])
+ paths = nx.all_simple_paths(G,0,1,cutoff=2)
+ assert_equal(list(list(p) for p in paths),[[0,1],[0,2,1],[0,3,1]])
+
+def test_all_simple_paths_multigraph():
+ G = nx.MultiGraph([(1,2),(1,2)])
+ paths = nx.all_simple_paths(G,1,2)
+ assert_equal(list(list(p) for p in paths),[[1,2],[1,2]])
+
+def test_all_simple_paths_multigraph_with_cutoff():
+ G = nx.MultiGraph([(1,2),(1,2),(1,10),(10,2)])
+ paths = nx.all_simple_paths(G,1,2, cutoff=1)
+ assert_equal(list(list(p) for p in paths),[[1,2],[1,2]])
+
+
+def test_all_simple_paths_directed():
+ G = nx.DiGraph()
+ G.add_path([1,2,3])
+ G.add_path([3,2,1])
+ paths = nx.all_simple_paths(G,1,3)
+ assert_equal(list(list(p) for p in paths),[[1,2,3]])
+
+def test_all_simple_paths_empty():
+ G = nx.path_graph(4)
+ paths = nx.all_simple_paths(G,0,3,cutoff=2)
+ assert_equal(list(list(p) for p in paths),[])
+
+def hamiltonian_path(G,source):
+    # yield every hamiltonian path in G that starts at source
+ neighbors = set(G[source])-set([source])
+ n = len(G)
+ for target in neighbors:
+ for path in nx.all_simple_paths(G,source,target):
+ if len(path) == n:
+ yield path
+
+def test_hamiltonian_path():
+ from itertools import permutations
+ G=nx.complete_graph(4)
+ paths = [list(p) for p in hamiltonian_path(G,0)]
+ exact = [[0]+list(p) for p in permutations([1,2,3],3) ]
+ assert_equal(sorted(paths),sorted(exact))
+
+def test_cutoff_zero():
+ G = nx.complete_graph(4)
+ paths = nx.all_simple_paths(G,0,3,cutoff=0)
+ assert_equal(list(list(p) for p in paths),[])
+ paths = nx.all_simple_paths(nx.MultiGraph(G),0,3,cutoff=0)
+ assert_equal(list(list(p) for p in paths),[])
+
+@raises(nx.NetworkXError)
+def test_source_missing():
+ G = nx.Graph()
+ G.add_path([1,2,3])
+ paths = list(nx.all_simple_paths(nx.MultiGraph(G),0,3))
+
+@raises(nx.NetworkXError)
+def test_target_missing():
+ G = nx.Graph()
+ G.add_path([1,2,3])
+ paths = list(nx.all_simple_paths(nx.MultiGraph(G),1,4))
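A short sketch of all_simple_paths with a cutoff, matching the generator semantics tested above:

    import networkx as nx

    G = nx.complete_graph(4)
    paths = nx.all_simple_paths(G, source=0, target=3, cutoff=2)
    print(sorted(paths))  # [[0, 1, 3], [0, 2, 3], [0, 3]]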
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_smetric.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_smetric.py
new file mode 100644
index 0000000..3fb288b
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_smetric.py
@@ -0,0 +1,19 @@
+
+from nose.tools import assert_equal,raises
+
+import networkx as nx
+
+def test_smetric():
+ g = nx.Graph()
+ g.add_edge(1,2)
+ g.add_edge(2,3)
+ g.add_edge(2,4)
+ g.add_edge(1,4)
+ sm = nx.s_metric(g,normalized=False)
+ assert_equal(sm, 19.0)
+# smNorm = nx.s_metric(g,normalized=True)
+# assert_equal(smNorm, 0.95)
+
+@raises(nx.NetworkXError)
+def test_normalized():
+ sm = nx.s_metric(nx.Graph(),normalized=True)
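s_metric sums deg(u) * deg(v) over every edge, which is what produces 19.0 in the test above (2*3 + 3*1 + 3*2 + 2*2). A quick sketch:

    import networkx as nx

    G = nx.path_graph(3)                     # degrees 1, 2, 1
    print(nx.s_metric(G, normalized=False))  # 4.0 = 1*2 + 2*1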
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_swap.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_swap.py
new file mode 100644
index 0000000..afa2355
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_swap.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+from nose.tools import *
+from networkx import *
+import networkx as nx
+
+def test_double_edge_swap():
+ graph = barabasi_albert_graph(200,1)
+ degrees = sorted(graph.degree().values())
+ G = double_edge_swap(graph, 40)
+ assert_equal(degrees, sorted(graph.degree().values()))
+
+def test_connected_double_edge_swap():
+ graph = barabasi_albert_graph(200,1)
+ degrees = sorted(graph.degree().values())
+ G = connected_double_edge_swap(graph, 40)
+ assert_true(is_connected(graph))
+ assert_equal(degrees, sorted(graph.degree().values()))
+
+@raises(NetworkXError)
+def test_double_edge_swap_small():
+ G = nx.double_edge_swap(nx.path_graph(3))
+
+@raises(NetworkXError)
+def test_double_edge_swap_tries():
+ G = nx.double_edge_swap(nx.path_graph(10),nswap=1,max_tries=0)
+
+@raises(NetworkXError)
+def test_connected_double_edge_swap_small():
+ G = nx.connected_double_edge_swap(nx.path_graph(3))
+
+@raises(NetworkXError)
+def test_connected_double_edge_swap_not_connected():
+ G = nx.path_graph(3)
+ G.add_path([10,11,12])
+ G = nx.connected_double_edge_swap(G)
+
+
+def test_degree_seq_c4():
+ G = cycle_graph(4)
+ degrees = sorted(G.degree().values())
+ G = double_edge_swap(G,1,100)
+ assert_equal(degrees, sorted(G.degree().values()))
+
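Both swap routines rewire edges in place while preserving the degree sequence, which is what the assertions above check. A sketch:

    import networkx as nx

    G = nx.barabasi_albert_graph(100, 2)
    before = sorted(G.degree().values())
    nx.double_edge_swap(G, nswap=10, max_tries=1000)
    assert before == sorted(G.degree().values())  # degree sequence unchanged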
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_vitality.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_vitality.py
new file mode 100644
index 0000000..06f09fe
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/tests/test_vitality.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx as nx
+
+class TestVitality:
+
+ def test_closeness_vitality_unweighted(self):
+ G=nx.cycle_graph(3)
+ v=nx.closeness_vitality(G)
+ assert_equal(v,{0:4.0, 1:4.0, 2:4.0})
+ assert_equal(v[0],4.0)
+
+ def test_closeness_vitality_weighted(self):
+ G=nx.Graph()
+ G.add_cycle([0,1,2],weight=2)
+ v=nx.closeness_vitality(G,weight='weight')
+ assert_equal(v,{0:8.0, 1:8.0, 2:8.0})
+
+ def test_closeness_vitality_unweighted_digraph(self):
+ G=nx.DiGraph()
+ G.add_cycle([0,1,2])
+ v=nx.closeness_vitality(G)
+ assert_equal(v,{0:8.0, 1:8.0, 2:8.0})
+
+ def test_closeness_vitality_weighted_digraph(self):
+ G=nx.DiGraph()
+ G.add_cycle([0,1,2],weight=2)
+ v=nx.closeness_vitality(G,weight='weight')
+ assert_equal(v,{0:16.0, 1:16.0, 2:16.0})
+
+ def test_closeness_vitality_weighted_multidigraph(self):
+ G=nx.MultiDiGraph()
+ G.add_cycle([0,1,2],weight=2)
+ v=nx.closeness_vitality(G,weight='weight')
+ assert_equal(v,{0:16.0, 1:16.0, 2:16.0})
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/traversal/__init__.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/traversal/__init__.py
new file mode 100644
index 0000000..de558ba
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/traversal/__init__.py
@@ -0,0 +1,4 @@
+import networkx.algorithms.traversal.depth_first_search
+from networkx.algorithms.traversal.depth_first_search import *
+import networkx.algorithms.traversal.breadth_first_search
+from networkx.algorithms.traversal.breadth_first_search import *
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/traversal/breadth_first_search.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/traversal/breadth_first_search.py
new file mode 100644
index 0000000..6ef2985
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/traversal/breadth_first_search.py
@@ -0,0 +1,53 @@
+"""
+====================
+Breadth-first search
+====================
+
+Basic algorithms for breadth-first searching.
+"""
+__author__ = """\n""".join(['Aric Hagberg <hagberg@lanl.gov>'])
+
+__all__ = ['bfs_edges', 'bfs_tree',
+ 'bfs_predecessors', 'bfs_successors']
+
+import networkx as nx
+from collections import defaultdict, deque
+
+def bfs_edges(G, source, reverse=False):
+ """Produce edges in a breadth-first-search starting at source."""
+ # Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py
+ # by D. Eppstein, July 2004.
+ if reverse and isinstance(G, nx.DiGraph):
+ neighbors = G.predecessors_iter
+ else:
+ neighbors = G.neighbors_iter
+ visited=set([source])
+ queue = deque([(source, neighbors(source))])
+ while queue:
+ parent, children = queue[0]
+ try:
+ child = next(children)
+ if child not in visited:
+ yield parent, child
+ visited.add(child)
+ queue.append((child, neighbors(child)))
+ except StopIteration:
+ queue.popleft()
+
+def bfs_tree(G, source, reverse=False):
+ """Return directed tree of breadth-first-search from source."""
+ T = nx.DiGraph()
+ T.add_node(source)
+ T.add_edges_from(bfs_edges(G,source,reverse=reverse))
+ return T
+
+def bfs_predecessors(G, source):
+ """Return dictionary of predecessors in breadth-first-search from source."""
+ return dict((t,s) for s,t in bfs_edges(G,source))
+
+def bfs_successors(G, source):
+ """Return dictionary of successors in breadth-first-search from source."""
+ d=defaultdict(list)
+ for s,t in bfs_edges(G,source):
+ d[s].append(t)
+ return dict(d)
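+
+# Putting these together on the same path graph 0-1-2: the BFS tree from 0
+# contains exactly the tree edges, and the predecessor/successor dicts are
+# its two orientations:
+#
+#     >>> G = nx.Graph([(0, 1), (1, 2)])
+#     >>> sorted(bfs_tree(G, 0).edges())
+#     [(0, 1), (1, 2)]
+#     >>> bfs_predecessors(G, 0)
+#     {1: 0, 2: 1}
+#     >>> bfs_successors(G, 0)
+#     {0: [1], 1: [2]}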
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/traversal/depth_first_search.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/traversal/depth_first_search.py
new file mode 100644
index 0000000..feb6713
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/traversal/depth_first_search.py
@@ -0,0 +1,124 @@
+"""
+==================
+Depth-first search
+==================
+
+Basic algorithms for depth-first searching.
+
+Based on http://www.ics.uci.edu/~eppstein/PADS/DFS.py
+by D. Eppstein, July 2004.
+"""
+__author__ = """\n""".join(['Aric Hagberg <hagberg@lanl.gov>'])
+
+__all__ = ['dfs_edges', 'dfs_tree',
+ 'dfs_predecessors', 'dfs_successors',
+ 'dfs_preorder_nodes','dfs_postorder_nodes',
+ 'dfs_labeled_edges']
+
+import networkx as nx
+from collections import defaultdict
+
+def dfs_edges(G,source=None):
+ """Produce edges in a depth-first-search starting at source."""
+ # Based on http://www.ics.uci.edu/~eppstein/PADS/DFS.py
+ # by D. Eppstein, July 2004.
+ if source is None:
+ # produce edges for all components
+ nodes=G
+ else:
+ # produce edges for components with source
+ nodes=[source]
+ visited=set()
+ for start in nodes:
+ if start in visited:
+ continue
+ visited.add(start)
+ stack = [(start,iter(G[start]))]
+ while stack:
+ parent,children = stack[-1]
+ try:
+ child = next(children)
+ if child not in visited:
+ yield parent,child
+ visited.add(child)
+ stack.append((child,iter(G[child])))
+ except StopIteration:
+ stack.pop()
+
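+# For example, a DFS from node 0 of the path graph 0-1-2 should walk to the
+# end of the path before backtracking:
+#
+#     >>> list(dfs_edges(nx.Graph([(0, 1), (1, 2)]), 0))
+#     [(0, 1), (1, 2)]
+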
+def dfs_tree(G, source=None):
+ """Return directed tree of depth-first-search from source."""
+ T = nx.DiGraph()
+ if source is None:
+ T.add_nodes_from(G)
+ else:
+ T.add_node(source)
+ T.add_edges_from(dfs_edges(G,source))
+ return T
+
+def dfs_predecessors(G, source=None):
+ """Return dictionary of predecessors in depth-first-search from source."""
+ return dict((t,s) for s,t in dfs_edges(G,source=source))
+
+
+def dfs_successors(G, source=None):
+ """Return dictionary of successors in depth-first-search from source."""
+ d=defaultdict(list)
+ for s,t in dfs_edges(G,source=source):
+ d[s].append(t)
+ return dict(d)
+
+
+def dfs_postorder_nodes(G,source=None):
+ """Produce nodes in a depth-first-search post-ordering starting
+ from source.
+ """
+ post=(v for u,v,d in nx.dfs_labeled_edges(G,source=source)
+ if d['dir']=='reverse')
+ return post
+
+
+def dfs_preorder_nodes(G,source=None):
+ """Produce nodes in a depth-first-search pre-ordering starting at source."""
+ pre=(v for u,v,d in nx.dfs_labeled_edges(G,source=source)
+ if d['dir']=='forward')
+ return pre
+
+
+def dfs_labeled_edges(G,source=None):
+ """Produce edges in a depth-first-search starting at source and
+ labeled by direction type (forward, reverse, nontree).
+ """
+ # Based on http://www.ics.uci.edu/~eppstein/PADS/DFS.py
+ # by D. Eppstein, July 2004.
+ if source is None:
+ # produce edges for all components
+ nodes=G
+ else:
+ # produce edges for components with source
+ nodes=[source]
+ visited=set()
+ for start in nodes:
+ if start in visited:
+ continue
+ yield start,start,{'dir':'forward'}
+ visited.add(start)
+ stack = [(start,iter(G[start]))]
+ while stack:
+ parent,children = stack[-1]
+ try:
+ child = next(children)
+ if child in visited:
+ yield parent,child,{'dir':'nontree'}
+ else:
+ yield parent,child,{'dir':'forward'}
+ visited.add(child)
+ stack.append((child,iter(G[child])))
+ except StopIteration:
+ stack.pop()
+ if stack:
+ yield stack[-1][0],parent,{'dir':'reverse'}
+ yield start,start,{'dir':'reverse'}
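+
+# The 'dir' labels distinguish tree edges from cycle-closing edges: on the
+# triangle 0-1-2, a DFS from 0 should label edges leading back to
+# already-visited nodes as 'nontree':
+#
+#     >>> edges = dfs_labeled_edges(nx.Graph([(0, 1), (1, 2), (2, 0)]), 0)
+#     >>> [(u, v) for u, v, d in edges if d['dir'] == 'nontree']
+#     [(1, 0), (2, 0), (2, 1)]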
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/traversal/tests/test_bfs.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/traversal/tests/test_bfs.py
new file mode 100644
index 0000000..bae2fac
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/traversal/tests/test_bfs.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx as nx
+
+class TestBFS:
+
+ def setUp(self):
+ # simple graph
+ G=nx.Graph()
+ G.add_edges_from([(0,1),(1,2),(1,3),(2,4),(3,4)])
+ self.G=G
+
+ def test_successor(self):
+ assert_equal(nx.bfs_successors(self.G,source=0),
+ {0: [1], 1: [2,3], 2:[4]})
+
+ def test_predecessor(self):
+ assert_equal(nx.bfs_predecessors(self.G,source=0),
+ {1: 0, 2: 1, 3: 1, 4: 2})
+
+ def test_bfs_tree(self):
+ T=nx.bfs_tree(self.G,source=0)
+ assert_equal(sorted(T.nodes()),sorted(self.G.nodes()))
+ assert_equal(sorted(T.edges()),[(0, 1), (1, 2), (1, 3), (2, 4)])
+
+ def test_bfs_edges(self):
+ edges=nx.bfs_edges(self.G,source=0)
+ assert_equal(list(edges),[(0, 1), (1, 2), (1, 3), (2, 4)])
+
+ def test_bfs_tree_isolates(self):
+ G = nx.Graph()
+ G.add_node(1)
+ G.add_node(2)
+ T=nx.bfs_tree(G,source=1)
+ assert_equal(sorted(T.nodes()),[1])
+ assert_equal(sorted(T.edges()),[])
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/traversal/tests/test_dfs.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/traversal/tests/test_dfs.py
new file mode 100644
index 0000000..9fad985
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/traversal/tests/test_dfs.py
@@ -0,0 +1,68 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx as nx
+
+class TestDFS:
+
+ def setUp(self):
+ # simple graph
+ G=nx.Graph()
+ G.add_edges_from([(0,1),(1,2),(1,3),(2,4),(3,4)])
+ self.G=G
+ # simple graph, disconnected
+ D=nx.Graph()
+ D.add_edges_from([(0,1),(2,3)])
+ self.D=D
+
+
+ def test_preorder_nodes(self):
+ assert_equal(list(nx.dfs_preorder_nodes(self.G,source=0)),
+ [0, 1, 2, 4, 3])
+ assert_equal(list(nx.dfs_preorder_nodes(self.D)),[0, 1, 2, 3])
+
+ def test_postorder_nodes(self):
+ assert_equal(list(nx.dfs_postorder_nodes(self.G,source=0)),
+ [3, 4, 2, 1, 0])
+ assert_equal(list(nx.dfs_postorder_nodes(self.D)),[1, 0, 3, 2])
+
+ def test_successor(self):
+ assert_equal(nx.dfs_successors(self.G,source=0),
+ {0: [1], 1: [2], 2: [4], 4: [3]})
+ assert_equal(nx.dfs_successors(self.D), {0: [1], 2: [3]})
+
+ def test_predecessor(self):
+ assert_equal(nx.dfs_predecessors(self.G,source=0),
+ {1: 0, 2: 1, 3: 4, 4: 2})
+ assert_equal(nx.dfs_predecessors(self.D), {1: 0, 3: 2})
+
+ def test_dfs_tree(self):
+ T=nx.dfs_tree(self.G,source=0)
+ assert_equal(sorted(T.nodes()),sorted(self.G.nodes()))
+ assert_equal(sorted(T.edges()),[(0, 1), (1, 2), (2, 4), (4, 3)])
+
+ def test_dfs_edges(self):
+ edges=nx.dfs_edges(self.G,source=0)
+ assert_equal(list(edges),[(0, 1), (1, 2), (2, 4), (4, 3)])
+ edges=nx.dfs_edges(self.D)
+ assert_equal(list(edges),[(0, 1), (2, 3)])
+
+ def test_dfs_labeled_edges(self):
+ edges=list(nx.dfs_labeled_edges(self.G,source=0))
+ forward=[(u,v) for (u,v,d) in edges if d['dir']=='forward']
+ assert_equal(forward,[(0,0), (0, 1), (1, 2), (2, 4), (4, 3)])
+
+ def test_dfs_labeled_disconnected_edges(self):
+ edges=list(nx.dfs_labeled_edges(self.D))
+ forward=[(u,v) for (u,v,d) in edges if d['dir']=='forward']
+ assert_equal(forward,[(0, 0), (0, 1), (2, 2), (2, 3)])
+
+ def test_dfs_tree_isolates(self):
+ G = nx.Graph()
+ G.add_node(1)
+ G.add_node(2)
+ T=nx.dfs_tree(G,source=1)
+ assert_equal(sorted(T.nodes()),[1])
+ assert_equal(sorted(T.edges()),[])
+ T=nx.dfs_tree(G,source=None)
+ assert_equal(sorted(T.nodes()),[1, 2])
+ assert_equal(sorted(T.edges()),[])
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/vitality.py b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/vitality.py
new file mode 100644
index 0000000..c4db32e
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/algorithms/vitality.py
@@ -0,0 +1,84 @@
+"""
+Vitality measures.
+"""
+# Copyright (C) 2012 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+import networkx as nx
+__author__ = "\n".join(['Aric Hagberg (hagberg@lanl.gov)',
+ 'Renato Fabbri'])
+__all__ = ['closeness_vitality']
+
+def wiener_index(G, weight=None):
+    # compute sum of distances between all node pairs
+    # (with optional weights)
+    wiener=0.0
+    if weight is None:
+        for n in G:
+            path_length=nx.single_source_shortest_path_length(G,n)
+            wiener+=sum(path_length.values())
+    else:
+        for n in G:
+            path_length=nx.single_source_dijkstra_path_length(G,
+                    n,weight=weight)
+            wiener+=sum(path_length.values())
+    return wiener
+
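+# For example, in the unweighted cycle graph C3 every ordered pair of
+# distinct nodes is at distance 1, so wiener_index(nx.cycle_graph(3)) should
+# equal 6.0; closeness_vitality() below subtracts the index of the graph
+# with a node's edges removed from this total.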
+
+def closeness_vitality(G, weight=None):
+ """Compute closeness vitality for nodes.
+
+ Closeness vitality of a node is the change in the sum of distances
+ between all node pairs when excluding that node.
+
+ Parameters
+ ----------
+ G : graph
+
+ weight : None or string (optional)
+ The name of the edge attribute used as weight. If None the edge
+ weights are ignored.
+
+ Returns
+ -------
+ nodes : dictionary
+ Dictionary with nodes as keys and closeness vitality as the value.
+
+ Examples
+ --------
+ >>> G=nx.cycle_graph(3)
+ >>> nx.closeness_vitality(G)
+ {0: 4.0, 1: 4.0, 2: 4.0}
+
+ See Also
+ --------
+ closeness_centrality()
+
+ References
+ ----------
+ .. [1] Ulrik Brandes, Sec. 3.6.2 in
+ Network Analysis: Methodological Foundations, Springer, 2005.
+ http://books.google.com/books?id=TTNhSm7HYrIC
+ """
+ multigraph = G.is_multigraph()
+    wig = wiener_index(G,weight)
+ closeness_vitality = {}
+ for n in G:
+        # remove the edges connected to node n, keeping a list of the
+        # edges with their data; node n itself can stay, since an
+        # isolated node contributes nothing to the index
+ if multigraph:
+ edges = G.edges(n,data=True,keys=True)
+ if G.is_directed():
+ edges += G.in_edges(n,data=True,keys=True)
+ else:
+ edges = G.edges(n,data=True)
+ if G.is_directed():
+ edges += G.in_edges(n,data=True)
+ G.remove_edges_from(edges)
+        closeness_vitality[n] = wig - wiener_index(G,weight)
+ # add edges and data back to graph
+ G.add_edges_from(edges)
+ return closeness_vitality
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/classes/__init__.py b/lib/python2.7/site-packages/setoolsgui/networkx/classes/__init__.py
new file mode 100644
index 0000000..fa97851
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/classes/__init__.py
@@ -0,0 +1,5 @@
+from networkx.classes.graph import Graph
+from networkx.classes.digraph import DiGraph
+from networkx.classes.multigraph import MultiGraph
+from networkx.classes.multidigraph import MultiDiGraph
+from networkx.classes.function import *
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/classes/digraph.py b/lib/python2.7/site-packages/setoolsgui/networkx/classes/digraph.py
new file mode 100644
index 0000000..37b9f9d
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/classes/digraph.py
@@ -0,0 +1,1236 @@
+"""Base class for directed graphs."""
+# Copyright (C) 2004-2011 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+from copy import deepcopy
+import networkx as nx
+from networkx.classes.graph import Graph
+from networkx.exception import NetworkXError
+import networkx.convert as convert
+__author__ = """\n""".join(['Aric Hagberg (hagberg@lanl.gov)',
+ 'Pieter Swart (swart@lanl.gov)',
+ 'Dan Schult(dschult@colgate.edu)'])
+
+class DiGraph(Graph):
+ """
+ Base class for directed graphs.
+
+ A DiGraph stores nodes and edges with optional data, or attributes.
+
+ DiGraphs hold directed edges. Self loops are allowed but multiple
+ (parallel) edges are not.
+
+ Nodes can be arbitrary (hashable) Python objects with optional
+ key/value attributes.
+
+ Edges are represented as links between nodes with optional
+ key/value attributes.
+
+ Parameters
+ ----------
+ data : input graph
+ Data to initialize graph. If data=None (default) an empty
+ graph is created. The data can be an edge list, or any
+ NetworkX graph object. If the corresponding optional Python
+ packages are installed the data can also be a NumPy matrix
+ or 2d ndarray, a SciPy sparse matrix, or a PyGraphviz graph.
+ attr : keyword arguments, optional (default= no attributes)
+ Attributes to add to graph as key=value pairs.
+
+ See Also
+ --------
+ Graph
+ MultiGraph
+ MultiDiGraph
+
+ Examples
+ --------
+ Create an empty graph structure (a "null graph") with no nodes and
+ no edges.
+
+ >>> G = nx.DiGraph()
+
+ G can be grown in several ways.
+
+ **Nodes:**
+
+ Add one node at a time:
+
+ >>> G.add_node(1)
+
+ Add the nodes from any container (a list, dict, set or
+ even the lines from a file or the nodes from another graph).
+
+ >>> G.add_nodes_from([2,3])
+ >>> G.add_nodes_from(range(100,110))
+ >>> H=nx.Graph()
+ >>> H.add_path([0,1,2,3,4,5,6,7,8,9])
+ >>> G.add_nodes_from(H)
+
+ In addition to strings and integers any hashable Python object
+ (except None) can represent a node, e.g. a customized node object,
+ or even another Graph.
+
+ >>> G.add_node(H)
+
+ **Edges:**
+
+ G can also be grown by adding edges.
+
+ Add one edge,
+
+ >>> G.add_edge(1, 2)
+
+ a list of edges,
+
+ >>> G.add_edges_from([(1,2),(1,3)])
+
+ or a collection of edges,
+
+ >>> G.add_edges_from(H.edges())
+
+ If some edges connect nodes not yet in the graph, the nodes
+ are added automatically. There are no errors when adding
+ nodes or edges that already exist.
+
+ **Attributes:**
+
+ Each graph, node, and edge can hold key/value attribute pairs
+ in an associated attribute dictionary (the keys must be hashable).
+ By default these are empty, but can be added or changed using
+ add_edge, add_node or direct manipulation of the attribute
+ dictionaries named graph, node and edge respectively.
+
+ >>> G = nx.DiGraph(day="Friday")
+ >>> G.graph
+ {'day': 'Friday'}
+
+ Add node attributes using add_node(), add_nodes_from() or G.node
+
+ >>> G.add_node(1, time='5pm')
+ >>> G.add_nodes_from([3], time='2pm')
+ >>> G.node[1]
+ {'time': '5pm'}
+ >>> G.node[1]['room'] = 714
+ >>> del G.node[1]['room'] # remove attribute
+ >>> G.nodes(data=True)
+ [(1, {'time': '5pm'}), (3, {'time': '2pm'})]
+
+ Warning: adding a node to G.node does not add it to the graph.
+
+ Add edge attributes using add_edge(), add_edges_from(), subscript
+ notation, or G.edge.
+
+ >>> G.add_edge(1, 2, weight=4.7 )
+ >>> G.add_edges_from([(3,4),(4,5)], color='red')
+ >>> G.add_edges_from([(1,2,{'color':'blue'}), (2,3,{'weight':8})])
+ >>> G[1][2]['weight'] = 4.7
+ >>> G.edge[1][2]['weight'] = 4
+
+ **Shortcuts:**
+
+    Many common graph features can be queried with concise Python syntax.
+
+ >>> 1 in G # check if node in graph
+ True
+ >>> [n for n in G if n<3] # iterate through nodes
+ [1, 2]
+ >>> len(G) # number of nodes in graph
+ 5
+
+ The fastest way to traverse all edges of a graph is via
+ adjacency_iter(), but the edges() method is often more convenient.
+
+ >>> for n,nbrsdict in G.adjacency_iter():
+ ... for nbr,eattr in nbrsdict.items():
+ ... if 'weight' in eattr:
+ ... (n,nbr,eattr['weight'])
+ (1, 2, 4)
+ (2, 3, 8)
+ >>> [ (u,v,edata['weight']) for u,v,edata in G.edges(data=True) if 'weight' in edata ]
+ [(1, 2, 4), (2, 3, 8)]
+
+ **Reporting:**
+
+ Simple graph information is obtained using methods.
+ Iterator versions of many reporting methods exist for efficiency.
+ Methods exist for reporting nodes(), edges(), neighbors() and degree()
+ as well as the number of nodes and edges.
+
+ For details on these and other miscellaneous methods, see below.
+ """
+ def __init__(self, data=None, **attr):
+ """Initialize a graph with edges, name, graph attributes.
+
+ Parameters
+ ----------
+ data : input graph
+ Data to initialize graph. If data=None (default) an empty
+ graph is created. The data can be an edge list, or any
+ NetworkX graph object. If the corresponding optional Python
+ packages are installed the data can also be a NumPy matrix
+ or 2d ndarray, a SciPy sparse matrix, or a PyGraphviz graph.
+ name : string, optional (default='')
+ An optional name for the graph.
+ attr : keyword arguments, optional (default= no attributes)
+ Attributes to add to graph as key=value pairs.
+
+ See Also
+ --------
+ convert
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G = nx.Graph(name='my graph')
+ >>> e = [(1,2),(2,3),(3,4)] # list of edges
+ >>> G = nx.Graph(e)
+
+ Arbitrary graph attribute pairs (key=value) may be assigned
+
+ >>> G=nx.Graph(e, day="Friday")
+ >>> G.graph
+ {'day': 'Friday'}
+
+ """
+ self.graph = {} # dictionary for graph attributes
+ self.node = {} # dictionary for node attributes
+ # We store two adjacency lists:
+ # the predecessors of node n are stored in the dict self.pred
+ # the successors of node n are stored in the dict self.succ=self.adj
+ self.adj = {} # empty adjacency dictionary
+ self.pred = {} # predecessor
+ self.succ = self.adj # successor
+
+ # attempt to load graph with data
+ if data is not None:
+ convert.to_networkx_graph(data,create_using=self)
+ # load graph attributes (must be after convert)
+ self.graph.update(attr)
+ self.edge=self.adj
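+        # Illustration: after G.add_edge(1, 2) the shared edge-data dict
+        # appears in both views,
+        #     G.succ == {1: {2: {}}, 2: {}}
+        #     G.pred == {1: {}, 2: {1: {}}}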
+
+
+ def add_node(self, n, attr_dict=None, **attr):
+ """Add a single node n and update node attributes.
+
+ Parameters
+ ----------
+ n : node
+ A node can be any hashable Python object except None.
+ attr_dict : dictionary, optional (default= no attributes)
+ Dictionary of node attributes. Key/value pairs will
+ update existing data associated with the node.
+ attr : keyword arguments, optional
+ Set or change attributes using key=value.
+
+ See Also
+ --------
+ add_nodes_from
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_node(1)
+ >>> G.add_node('Hello')
+ >>> K3 = nx.Graph([(0,1),(1,2),(2,0)])
+ >>> G.add_node(K3)
+ >>> G.number_of_nodes()
+ 3
+
+ Use keywords set/change node attributes:
+
+ >>> G.add_node(1,size=10)
+ >>> G.add_node(3,weight=0.4,UTM=('13S',382871,3972649))
+
+ Notes
+ -----
+ A hashable object is one that can be used as a key in a Python
+ dictionary. This includes strings, numbers, tuples of strings
+ and numbers, etc.
+
+ On many platforms hashable items also include mutables such as
+ NetworkX Graphs, though one should be careful that the hash
+ doesn't change on mutables.
+ """
+ # set up attribute dict
+ if attr_dict is None:
+ attr_dict=attr
+ else:
+ try:
+ attr_dict.update(attr)
+ except AttributeError:
+ raise NetworkXError(\
+ "The attr_dict argument must be a dictionary.")
+ if n not in self.succ:
+ self.succ[n] = {}
+ self.pred[n] = {}
+ self.node[n] = attr_dict
+ else: # update attr even if node already exists
+ self.node[n].update(attr_dict)
+
+
+ def add_nodes_from(self, nodes, **attr):
+ """Add multiple nodes.
+
+ Parameters
+ ----------
+ nodes : iterable container
+ A container of nodes (list, dict, set, etc.).
+ OR
+ A container of (node, attribute dict) tuples.
+ Node attributes are updated using the attribute dict.
+ attr : keyword arguments, optional (default= no attributes)
+ Update attributes for all nodes in nodes.
+ Node attributes specified in nodes as a tuple
+ take precedence over attributes specified generally.
+
+ See Also
+ --------
+ add_node
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_nodes_from('Hello')
+ >>> K3 = nx.Graph([(0,1),(1,2),(2,0)])
+ >>> G.add_nodes_from(K3)
+ >>> sorted(G.nodes(),key=str)
+ [0, 1, 2, 'H', 'e', 'l', 'o']
+
+ Use keywords to update specific node attributes for every node.
+
+ >>> G.add_nodes_from([1,2], size=10)
+ >>> G.add_nodes_from([3,4], weight=0.4)
+
+ Use (node, attrdict) tuples to update attributes for specific
+ nodes.
+
+ >>> G.add_nodes_from([(1,dict(size=11)), (2,{'color':'blue'})])
+ >>> G.node[1]['size']
+ 11
+ >>> H = nx.Graph()
+ >>> H.add_nodes_from(G.nodes(data=True))
+ >>> H.node[1]['size']
+ 11
+
+ """
+ for n in nodes:
+ try:
+ newnode=n not in self.succ
+ except TypeError:
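+                # n is unhashable here, so treat it as a (node, attrdict)
+                # 2-tuple, as documented above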
+ nn,ndict = n
+ if nn not in self.succ:
+ self.succ[nn] = {}
+ self.pred[nn] = {}
+ newdict = attr.copy()
+ newdict.update(ndict)
+ self.node[nn] = newdict
+ else:
+ olddict = self.node[nn]
+ olddict.update(attr)
+ olddict.update(ndict)
+ continue
+ if newnode:
+ self.succ[n] = {}
+ self.pred[n] = {}
+ self.node[n] = attr.copy()
+ else:
+ self.node[n].update(attr)
+
+ def remove_node(self, n):
+ """Remove node n.
+
+ Removes the node n and all adjacent edges.
+ Attempting to remove a non-existent node will raise an exception.
+
+ Parameters
+ ----------
+ n : node
+ A node in the graph
+
+ Raises
+ -------
+ NetworkXError
+ If n is not in the graph.
+
+ See Also
+ --------
+ remove_nodes_from
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_path([0,1,2])
+ >>> G.edges()
+ [(0, 1), (1, 2)]
+ >>> G.remove_node(1)
+ >>> G.edges()
+ []
+
+ """
+ try:
+ nbrs=self.succ[n]
+ del self.node[n]
+ except KeyError: # NetworkXError if n not in self
+ raise NetworkXError("The node %s is not in the digraph."%(n,))
+ for u in nbrs:
+ del self.pred[u][n] # remove all edges n-u in digraph
+ del self.succ[n] # remove node from succ
+ for u in self.pred[n]:
+ del self.succ[u][n] # remove all edges n-u in digraph
+ del self.pred[n] # remove node from pred
+
+
+ def remove_nodes_from(self, nbunch):
+ """Remove multiple nodes.
+
+ Parameters
+ ----------
+ nodes : iterable container
+ A container of nodes (list, dict, set, etc.). If a node
+ in the container is not in the graph it is silently
+ ignored.
+
+ See Also
+ --------
+ remove_node
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_path([0,1,2])
+ >>> e = G.nodes()
+ >>> e
+ [0, 1, 2]
+ >>> G.remove_nodes_from(e)
+ >>> G.nodes()
+ []
+
+ """
+ for n in nbunch:
+ try:
+ succs=self.succ[n]
+ del self.node[n]
+ for u in succs:
+ del self.pred[u][n] # remove all edges n-u in digraph
+ del self.succ[n] # now remove node
+ for u in self.pred[n]:
+ del self.succ[u][n] # remove all edges n-u in digraph
+ del self.pred[n] # now remove node
+ except KeyError:
+ pass # silent failure on remove
+
+
+ def add_edge(self, u, v, attr_dict=None, **attr):
+ """Add an edge between u and v.
+
+ The nodes u and v will be automatically added if they are
+ not already in the graph.
+
+ Edge attributes can be specified with keywords or by providing
+ a dictionary with key/value pairs. See examples below.
+
+ Parameters
+ ----------
+ u,v : nodes
+ Nodes can be, for example, strings or numbers.
+ Nodes must be hashable (and not None) Python objects.
+ attr_dict : dictionary, optional (default= no attributes)
+ Dictionary of edge attributes. Key/value pairs will
+ update existing data associated with the edge.
+ attr : keyword arguments, optional
+ Edge data (or labels or objects) can be assigned using
+ keyword arguments.
+
+ See Also
+ --------
+ add_edges_from : add a collection of edges
+
+ Notes
+ -----
+ Adding an edge that already exists updates the edge data.
+
+ Many NetworkX algorithms designed for weighted graphs use as
+ the edge weight a numerical value assigned to a keyword
+ which by default is 'weight'.
+
+ Examples
+ --------
+ The following all add the edge e=(1,2) to graph G:
+
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> e = (1,2)
+ >>> G.add_edge(1, 2) # explicit two-node form
+ >>> G.add_edge(*e) # single edge as tuple of two nodes
+ >>> G.add_edges_from( [(1,2)] ) # add edges from iterable container
+
+ Associate data to edges using keywords:
+
+ >>> G.add_edge(1, 2, weight=3)
+ >>> G.add_edge(1, 3, weight=7, capacity=15, length=342.7)
+ """
+ # set up attribute dict
+ if attr_dict is None:
+ attr_dict=attr
+ else:
+ try:
+ attr_dict.update(attr)
+ except AttributeError:
+ raise NetworkXError(\
+ "The attr_dict argument must be a dictionary.")
+ # add nodes
+ if u not in self.succ:
+ self.succ[u]={}
+ self.pred[u]={}
+ self.node[u] = {}
+ if v not in self.succ:
+ self.succ[v]={}
+ self.pred[v]={}
+ self.node[v] = {}
+ # add the edge
+ datadict=self.adj[u].get(v,{})
+ datadict.update(attr_dict)
+ self.succ[u][v]=datadict
+ self.pred[v][u]=datadict
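+        # both views reference the same datadict object, so attribute
+        # updates made through succ or pred stay in sync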
+
+ def add_edges_from(self, ebunch, attr_dict=None, **attr):
+ """Add all the edges in ebunch.
+
+ Parameters
+ ----------
+ ebunch : container of edges
+ Each edge given in the container will be added to the
+            graph. The edges must be given as 2-tuples (u,v) or
+ 3-tuples (u,v,d) where d is a dictionary containing edge
+ data.
+ attr_dict : dictionary, optional (default= no attributes)
+ Dictionary of edge attributes. Key/value pairs will
+ update existing data associated with each edge.
+ attr : keyword arguments, optional
+ Edge data (or labels or objects) can be assigned using
+ keyword arguments.
+
+
+ See Also
+ --------
+ add_edge : add a single edge
+ add_weighted_edges_from : convenient way to add weighted edges
+
+ Notes
+ -----
+ Adding the same edge twice has no effect but any edge data
+ will be updated when each duplicate edge is added.
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_edges_from([(0,1),(1,2)]) # using a list of edge tuples
+ >>> e = zip(range(0,3),range(1,4))
+ >>> G.add_edges_from(e) # Add the path graph 0-1-2-3
+
+ Associate data to edges
+
+ >>> G.add_edges_from([(1,2),(2,3)], weight=3)
+ >>> G.add_edges_from([(3,4),(1,4)], label='WN2898')
+ """
+ # set up attribute dict
+ if attr_dict is None:
+ attr_dict=attr
+ else:
+ try:
+ attr_dict.update(attr)
+ except AttributeError:
+ raise NetworkXError(\
+ "The attr_dict argument must be a dict.")
+ # process ebunch
+ for e in ebunch:
+ ne = len(e)
+ if ne==3:
+ u,v,dd = e
+ assert hasattr(dd,"update")
+ elif ne==2:
+ u,v = e
+ dd = {}
+ else:
+ raise NetworkXError(\
+ "Edge tuple %s must be a 2-tuple or 3-tuple."%(e,))
+ if u not in self.succ:
+ self.succ[u] = {}
+ self.pred[u] = {}
+ self.node[u] = {}
+ if v not in self.succ:
+ self.succ[v] = {}
+ self.pred[v] = {}
+ self.node[v] = {}
+ datadict=self.adj[u].get(v,{})
+ datadict.update(attr_dict)
+ datadict.update(dd)
+ self.succ[u][v] = datadict
+ self.pred[v][u] = datadict
+
+
+ def remove_edge(self, u, v):
+ """Remove the edge between u and v.
+
+ Parameters
+ ----------
+ u,v: nodes
+ Remove the edge between nodes u and v.
+
+ Raises
+ ------
+ NetworkXError
+ If there is not an edge between u and v.
+
+ See Also
+ --------
+ remove_edges_from : remove a collection of edges
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, etc
+ >>> G.add_path([0,1,2,3])
+ >>> G.remove_edge(0,1)
+ >>> e = (1,2)
+ >>> G.remove_edge(*e) # unpacks e from an edge tuple
+ >>> e = (2,3,{'weight':7}) # an edge with attribute data
+ >>> G.remove_edge(*e[:2]) # select first part of edge tuple
+ """
+ try:
+ del self.succ[u][v]
+ del self.pred[v][u]
+ except KeyError:
+            raise NetworkXError("The edge %s-%s is not in the graph."%(u,v))
+
+
+ def remove_edges_from(self, ebunch):
+ """Remove all edges specified in ebunch.
+
+ Parameters
+ ----------
+ ebunch: list or container of edge tuples
+ Each edge given in the list or container will be removed
+ from the graph. The edges can be:
+
+ - 2-tuples (u,v) edge between u and v.
+ - 3-tuples (u,v,k) where k is ignored.
+
+ See Also
+ --------
+ remove_edge : remove a single edge
+
+ Notes
+ -----
+ Will fail silently if an edge in ebunch is not in the graph.
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_path([0,1,2,3])
+ >>> ebunch=[(1,2),(2,3)]
+ >>> G.remove_edges_from(ebunch)
+ """
+ for e in ebunch:
+ (u,v)=e[:2] # ignore edge data
+ if u in self.succ and v in self.succ[u]:
+ del self.succ[u][v]
+ del self.pred[v][u]
+
+
+ def has_successor(self, u, v):
+ """Return True if node u has successor v.
+
+ This is true if graph has the edge u->v.
+ """
+ return (u in self.succ and v in self.succ[u])
+
+ def has_predecessor(self, u, v):
+ """Return True if node u has predecessor v.
+
+ This is true if graph has the edge u<-v.
+ """
+ return (u in self.pred and v in self.pred[u])
+
+ def successors_iter(self,n):
+ """Return an iterator over successor nodes of n.
+
+ neighbors_iter() and successors_iter() are the same.
+ """
+ try:
+ return iter(self.succ[n])
+ except KeyError:
+ raise NetworkXError("The node %s is not in the digraph."%(n,))
+
+ def predecessors_iter(self,n):
+ """Return an iterator over predecessor nodes of n."""
+ try:
+ return iter(self.pred[n])
+ except KeyError:
+ raise NetworkXError("The node %s is not in the digraph."%(n,))
+
+ def successors(self, n):
+ """Return a list of successor nodes of n.
+
+ neighbors() and successors() are the same function.
+ """
+ return list(self.successors_iter(n))
+
+ def predecessors(self, n):
+ """Return a list of predecessor nodes of n."""
+ return list(self.predecessors_iter(n))
+
+
+ # digraph definitions
+ neighbors = successors
+ neighbors_iter = successors_iter
+
+ def edges_iter(self, nbunch=None, data=False):
+ """Return an iterator over the edges.
+
+ Edges are returned as tuples with optional data
+ in the order (node, neighbor, data).
+
+ Parameters
+ ----------
+ nbunch : iterable container, optional (default= all nodes)
+ A container of nodes. The container will be iterated
+ through once.
+ data : bool, optional (default=False)
+ If True, return edge attribute dict in 3-tuple (u,v,data).
+
+ Returns
+ -------
+ edge_iter : iterator
+ An iterator of (u,v) or (u,v,d) tuples of edges.
+
+ See Also
+ --------
+ edges : return a list of edges
+
+ Notes
+ -----
+ Nodes in nbunch that are not in the graph will be (quietly) ignored.
+ For directed graphs this returns the out-edges.
+
+ Examples
+ --------
+ >>> G = nx.DiGraph() # or MultiDiGraph, etc
+ >>> G.add_path([0,1,2,3])
+ >>> [e for e in G.edges_iter()]
+ [(0, 1), (1, 2), (2, 3)]
+ >>> list(G.edges_iter(data=True)) # default data is {} (empty dict)
+ [(0, 1, {}), (1, 2, {}), (2, 3, {})]
+ >>> list(G.edges_iter([0,2]))
+ [(0, 1), (2, 3)]
+ >>> list(G.edges_iter(0))
+ [(0, 1)]
+
+ """
+ if nbunch is None:
+ nodes_nbrs=self.adj.items()
+ else:
+ nodes_nbrs=((n,self.adj[n]) for n in self.nbunch_iter(nbunch))
+ if data:
+ for n,nbrs in nodes_nbrs:
+ for nbr,data in nbrs.items():
+ yield (n,nbr,data)
+ else:
+ for n,nbrs in nodes_nbrs:
+ for nbr in nbrs:
+ yield (n,nbr)
+
+ # alias out_edges to edges
+ out_edges_iter=edges_iter
+ out_edges=Graph.edges
+
+ def in_edges_iter(self, nbunch=None, data=False):
+ """Return an iterator over the incoming edges.
+
+ Parameters
+ ----------
+ nbunch : iterable container, optional (default= all nodes)
+ A container of nodes. The container will be iterated
+ through once.
+ data : bool, optional (default=False)
+ If True, return edge attribute dict in 3-tuple (u,v,data).
+
+ Returns
+ -------
+ in_edge_iter : iterator
+ An iterator of (u,v) or (u,v,d) tuples of incoming edges.
+
+ See Also
+ --------
+ edges_iter : return an iterator of edges
+ """
+ if nbunch is None:
+ nodes_nbrs=self.pred.items()
+ else:
+ nodes_nbrs=((n,self.pred[n]) for n in self.nbunch_iter(nbunch))
+ if data:
+ for n,nbrs in nodes_nbrs:
+ for nbr,data in nbrs.items():
+ yield (nbr,n,data)
+ else:
+ for n,nbrs in nodes_nbrs:
+ for nbr in nbrs:
+ yield (nbr,n)
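+        # e.g. with the single edge 0->1, list(G.in_edges_iter(1)) should
+        # give [(0, 1)]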
+
+ def in_edges(self, nbunch=None, data=False):
+ """Return a list of the incoming edges.
+
+ See Also
+ --------
+ edges : return a list of edges
+ """
+ return list(self.in_edges_iter(nbunch, data))
+
+ def degree_iter(self, nbunch=None, weight=None):
+ """Return an iterator for (node, degree).
+
+ The node degree is the number of edges adjacent to the node.
+
+ Parameters
+ ----------
+ nbunch : iterable container, optional (default=all nodes)
+ A container of nodes. The container will be iterated
+ through once.
+
+ weight : string or None, optional (default=None)
+ The edge attribute that holds the numerical value used
+ as a weight. If None, then each edge has weight 1.
+ The degree is the sum of the edge weights adjacent to the node.
+
+ Returns
+ -------
+ nd_iter : an iterator
+ The iterator returns two-tuples of (node, degree).
+
+ See Also
+ --------
+ degree, in_degree, out_degree, in_degree_iter, out_degree_iter
+
+ Examples
+ --------
+ >>> G = nx.DiGraph() # or MultiDiGraph
+ >>> G.add_path([0,1,2,3])
+ >>> list(G.degree_iter(0)) # node 0 with degree 1
+ [(0, 1)]
+ >>> list(G.degree_iter([0,1]))
+ [(0, 1), (1, 2)]
+
+ """
+ if nbunch is None:
+ nodes_nbrs=zip(iter(self.succ.items()),iter(self.pred.items()))
+ else:
+ nodes_nbrs=zip(
+ ((n,self.succ[n]) for n in self.nbunch_iter(nbunch)),
+ ((n,self.pred[n]) for n in self.nbunch_iter(nbunch)))
+
+ if weight is None:
+ for (n,succ),(n2,pred) in nodes_nbrs:
+ yield (n,len(succ)+len(pred))
+ else:
+ # edge weighted graph - degree is sum of edge weights
+ for (n,succ),(n2,pred) in nodes_nbrs:
+ yield (n,
+ sum((succ[nbr].get(weight,1) for nbr in succ))+
+ sum((pred[nbr].get(weight,1) for nbr in pred)))
+
+
+ def in_degree_iter(self, nbunch=None, weight=None):
+ """Return an iterator for (node, in-degree).
+
+ The node in-degree is the number of edges pointing in to the node.
+
+ Parameters
+ ----------
+ nbunch : iterable container, optional (default=all nodes)
+ A container of nodes. The container will be iterated
+ through once.
+
+ weight : string or None, optional (default=None)
+ The edge attribute that holds the numerical value used
+ as a weight. If None, then each edge has weight 1.
+ The degree is the sum of the edge weights adjacent to the node.
+
+ Returns
+ -------
+ nd_iter : an iterator
+ The iterator returns two-tuples of (node, in-degree).
+
+ See Also
+ --------
+ degree, in_degree, out_degree, out_degree_iter
+
+ Examples
+ --------
+ >>> G = nx.DiGraph()
+ >>> G.add_path([0,1,2,3])
+ >>> list(G.in_degree_iter(0)) # node 0 with degree 0
+ [(0, 0)]
+ >>> list(G.in_degree_iter([0,1]))
+ [(0, 0), (1, 1)]
+
+ """
+ if nbunch is None:
+ nodes_nbrs=self.pred.items()
+ else:
+ nodes_nbrs=((n,self.pred[n]) for n in self.nbunch_iter(nbunch))
+
+ if weight is None:
+ for n,nbrs in nodes_nbrs:
+ yield (n,len(nbrs))
+ else:
+ # edge weighted graph - degree is sum of edge weights
+ for n,nbrs in nodes_nbrs:
+ yield (n, sum(data.get(weight,1) for data in nbrs.values()))
+
+
+ def out_degree_iter(self, nbunch=None, weight=None):
+ """Return an iterator for (node, out-degree).
+
+ The node out-degree is the number of edges pointing out of the node.
+
+ Parameters
+ ----------
+ nbunch : iterable container, optional (default=all nodes)
+ A container of nodes. The container will be iterated
+ through once.
+
+ weight : string or None, optional (default=None)
+ The edge attribute that holds the numerical value used
+ as a weight. If None, then each edge has weight 1.
+ The degree is the sum of the edge weights adjacent to the node.
+
+ Returns
+ -------
+ nd_iter : an iterator
+ The iterator returns two-tuples of (node, out-degree).
+
+ See Also
+ --------
+ degree, in_degree, out_degree, in_degree_iter
+
+ Examples
+ --------
+ >>> G = nx.DiGraph()
+ >>> G.add_path([0,1,2,3])
+ >>> list(G.out_degree_iter(0)) # node 0 with degree 1
+ [(0, 1)]
+ >>> list(G.out_degree_iter([0,1]))
+ [(0, 1), (1, 1)]
+
+ """
+ if nbunch is None:
+ nodes_nbrs=self.succ.items()
+ else:
+ nodes_nbrs=((n,self.succ[n]) for n in self.nbunch_iter(nbunch))
+
+ if weight is None:
+ for n,nbrs in nodes_nbrs:
+ yield (n,len(nbrs))
+ else:
+ # edge weighted graph - degree is sum of edge weights
+ for n,nbrs in nodes_nbrs:
+ yield (n, sum(data.get(weight,1) for data in nbrs.values()))
+
+
+ def in_degree(self, nbunch=None, weight=None):
+ """Return the in-degree of a node or nodes.
+
+ The node in-degree is the number of edges pointing in to the node.
+
+ Parameters
+ ----------
+ nbunch : iterable container, optional (default=all nodes)
+ A container of nodes. The container will be iterated
+ through once.
+
+ weight : string or None, optional (default=None)
+ The edge attribute that holds the numerical value used
+ as a weight. If None, then each edge has weight 1.
+ The degree is the sum of the edge weights adjacent to the node.
+
+ Returns
+ -------
+ nd : dictionary, or number
+ A dictionary with nodes as keys and in-degree as values or
+ a number if a single node is specified.
+
+ See Also
+ --------
+ degree, out_degree, in_degree_iter
+
+ Examples
+ --------
+ >>> G = nx.DiGraph() # or MultiDiGraph
+ >>> G.add_path([0,1,2,3])
+ >>> G.in_degree(0)
+ 0
+ >>> G.in_degree([0,1])
+ {0: 0, 1: 1}
+ >>> list(G.in_degree([0,1]).values())
+ [0, 1]
+ """
+ if nbunch in self: # return a single node
+ return next(self.in_degree_iter(nbunch,weight))[1]
+ else: # return a dict
+ return dict(self.in_degree_iter(nbunch,weight))
+
+ def out_degree(self, nbunch=None, weight=None):
+ """Return the out-degree of a node or nodes.
+
+ The node out-degree is the number of edges pointing out of the node.
+
+ Parameters
+ ----------
+ nbunch : iterable container, optional (default=all nodes)
+ A container of nodes. The container will be iterated
+ through once.
+
+ weight : string or None, optional (default=None)
+ The edge attribute that holds the numerical value used
+ as a weight. If None, then each edge has weight 1.
+ The degree is the sum of the edge weights adjacent to the node.
+
+ Returns
+ -------
+ nd : dictionary, or number
+ A dictionary with nodes as keys and out-degree as values or
+ a number if a single node is specified.
+
+ Examples
+ --------
+ >>> G = nx.DiGraph() # or MultiDiGraph
+ >>> G.add_path([0,1,2,3])
+ >>> G.out_degree(0)
+ 1
+ >>> G.out_degree([0,1])
+ {0: 1, 1: 1}
+ >>> list(G.out_degree([0,1]).values())
+ [1, 1]
+
+
+ """
+ if nbunch in self: # return a single node
+ return next(self.out_degree_iter(nbunch,weight))[1]
+ else: # return a dict
+ return dict(self.out_degree_iter(nbunch,weight))
+
+ def clear(self):
+ """Remove all nodes and edges from the graph.
+
+ This also removes the name, and all graph, node, and edge attributes.
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_path([0,1,2,3])
+ >>> G.clear()
+ >>> G.nodes()
+ []
+ >>> G.edges()
+ []
+
+ """
+ self.succ.clear()
+ self.pred.clear()
+ self.node.clear()
+ self.graph.clear()
+
+
+ def is_multigraph(self):
+ """Return True if graph is a multigraph, False otherwise."""
+ return False
+
+
+ def is_directed(self):
+ """Return True if graph is directed, False otherwise."""
+ return True
+
+ def to_directed(self):
+ """Return a directed copy of the graph.
+
+ Returns
+ -------
+ G : DiGraph
+ A deepcopy of the graph.
+
+ Notes
+ -----
+ This returns a "deepcopy" of the edge, node, and
+ graph attributes which attempts to completely copy
+ all of the data and references.
+
+ This is in contrast to the similar D=DiGraph(G) which returns a
+ shallow copy of the data.
+
+ See the Python copy module for more information on shallow
+ and deep copies, http://docs.python.org/library/copy.html.
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or MultiGraph, etc
+ >>> G.add_path([0,1])
+ >>> H = G.to_directed()
+ >>> H.edges()
+ [(0, 1), (1, 0)]
+
+ If already directed, return a (deep) copy
+
+ >>> G = nx.DiGraph() # or MultiDiGraph, etc
+ >>> G.add_path([0,1])
+ >>> H = G.to_directed()
+ >>> H.edges()
+ [(0, 1)]
+ """
+ return deepcopy(self)
+
+ def to_undirected(self, reciprocal=False):
+ """Return an undirected representation of the digraph.
+
+ Parameters
+ ----------
+ reciprocal : bool (optional)
+            If True, only keep edges that appear in both directions
+ in the original digraph.
+
+ Returns
+ -------
+ G : Graph
+ An undirected graph with the same name and nodes and
+ with edge (u,v,data) if either (u,v,data) or (v,u,data)
+ is in the digraph. If both edges exist in digraph and
+ their edge data is different, only one edge is created
+ with an arbitrary choice of which edge data to use.
+ You must check and correct for this manually if desired.
+
+ Notes
+ -----
+ If edges in both directions (u,v) and (v,u) exist in the
+ graph, attributes for the new undirected edge will be a combination of
+ the attributes of the directed edges. The edge data is updated
+ in the (arbitrary) order that the edges are encountered. For
+ more customized control of the edge attributes use add_edge().
+
+ This returns a "deepcopy" of the edge, node, and
+ graph attributes which attempts to completely copy
+ all of the data and references.
+
+ This is in contrast to the similar G=DiGraph(D) which returns a
+ shallow copy of the data.
+
+ See the Python copy module for more information on shallow
+ and deep copies, http://docs.python.org/library/copy.html.
+ """
+ H=Graph()
+ H.name=self.name
+ H.add_nodes_from(self)
+ if reciprocal is True:
+ H.add_edges_from( (u,v,deepcopy(d))
+ for u,nbrs in self.adjacency_iter()
+ for v,d in nbrs.items()
+ if v in self.pred[u])
+ else:
+ H.add_edges_from( (u,v,deepcopy(d))
+ for u,nbrs in self.adjacency_iter()
+ for v,d in nbrs.items() )
+ H.graph=deepcopy(self.graph)
+ H.node=deepcopy(self.node)
+ return H
+
+
+ def reverse(self, copy=True):
+ """Return the reverse of the graph.
+
+ The reverse is a graph with the same nodes and edges
+ but with the directions of the edges reversed.
+
+ Parameters
+ ----------
+ copy : bool optional (default=True)
+ If True, return a new DiGraph holding the reversed edges.
+            If False, the graph is reversed in place by swapping its
+            predecessor and successor maps (this changes the original graph).
+ """
+ if copy:
+ H = self.__class__(name="Reverse of (%s)"%self.name)
+ H.add_nodes_from(self)
+ H.add_edges_from( (v,u,deepcopy(d)) for u,v,d
+ in self.edges(data=True) )
+ H.graph=deepcopy(self.graph)
+ H.node=deepcopy(self.node)
+ else:
+ self.pred,self.succ=self.succ,self.pred
+ self.adj=self.succ
+ H=self
+ return H
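+
+    # For example, reversing the two-edge path 0->1->2 gives the edges
+    # (1,0) and (2,1); list order should follow dict iteration order:
+    #     >>> nx.DiGraph([(0, 1), (1, 2)]).reverse().edges()
+    #     [(1, 0), (2, 1)]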
+
+
+ def subgraph(self, nbunch):
+ """Return the subgraph induced on nodes in nbunch.
+
+ The induced subgraph of the graph contains the nodes in nbunch
+ and the edges between those nodes.
+
+ Parameters
+ ----------
+ nbunch : list, iterable
+ A container of nodes which will be iterated through once.
+
+ Returns
+ -------
+ G : Graph
+ A subgraph of the graph with the same edge attributes.
+
+ Notes
+ -----
+ The graph, edge or node attributes just point to the original graph.
+ So changes to the node or edge structure will not be reflected in
+ the original graph while changes to the attributes will.
+
+ To create a subgraph with its own copy of the edge/node attributes use:
+ nx.Graph(G.subgraph(nbunch))
+
+ If edge attributes are containers, a deep copy can be obtained using:
+ G.subgraph(nbunch).copy()
+
+        For an in-place reduction of a graph to a subgraph you can remove nodes:
+        G.remove_nodes_from([n for n in G if n not in set(nbunch)])
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_path([0,1,2,3])
+ >>> H = G.subgraph([0,1,2])
+ >>> H.edges()
+ [(0, 1), (1, 2)]
+ """
+ bunch = self.nbunch_iter(nbunch)
+ # create new graph and copy subgraph into it
+ H = self.__class__()
+ # copy node and attribute dictionaries
+ for n in bunch:
+ H.node[n]=self.node[n]
+ # namespace shortcuts for speed
+ H_succ=H.succ
+ H_pred=H.pred
+ self_succ=self.succ
+ # add nodes
+ for n in H:
+ H_succ[n]={}
+ H_pred[n]={}
+ # add edges
+ for u in H_succ:
+ Hnbrs=H_succ[u]
+ for v,datadict in self_succ[u].items():
+ if v in H_succ:
+ # add both representations of edge: u-v and v-u
+ Hnbrs[v]=datadict
+ H_pred[v][u]=datadict
+ H.graph=self.graph
+ return H
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/classes/function.py b/lib/python2.7/site-packages/setoolsgui/networkx/classes/function.py
new file mode 100644
index 0000000..0c5e208
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/classes/function.py
@@ -0,0 +1,423 @@
+"""Functional interface to graph methods and assorted utilities.
+"""
+# Copyright (C) 2004-2012 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+#
+import networkx as nx
+import itertools
+__author__ = """\n""".join(['Aric Hagberg (hagberg@lanl.gov)',
+ 'Pieter Swart (swart@lanl.gov)',
+ 'Dan Schult(dschult@colgate.edu)'])
+__all__ = ['nodes', 'edges', 'degree', 'degree_histogram', 'neighbors',
+ 'number_of_nodes', 'number_of_edges', 'density',
+ 'nodes_iter', 'edges_iter', 'is_directed','info',
+ 'freeze','is_frozen','subgraph','create_empty_copy',
+ 'set_node_attributes','get_node_attributes',
+ 'set_edge_attributes','get_edge_attributes',
+ 'all_neighbors','non_neighbors']
+
+def nodes(G):
+ """Return a copy of the graph nodes in a list."""
+ return G.nodes()
+
+def nodes_iter(G):
+ """Return an iterator over the graph nodes."""
+ return G.nodes_iter()
+
+def edges(G,nbunch=None):
+ """Return list of edges adjacent to nodes in nbunch.
+
+ Return all edges if nbunch is unspecified or nbunch=None.
+
+ For digraphs, edges=out_edges
+ """
+ return G.edges(nbunch)
+
+def edges_iter(G,nbunch=None):
+ """Return iterator over edges adjacent to nodes in nbunch.
+
+ Return all edges if nbunch is unspecified or nbunch=None.
+
+ For digraphs, edges=out_edges
+ """
+ return G.edges_iter(nbunch)
+
+def degree(G,nbunch=None,weight=None):
+ """Return degree of single node or of nbunch of nodes.
+    If nbunch is omitted, then return degrees of *all* nodes.
+ """
+ return G.degree(nbunch,weight)
+
+def neighbors(G,n):
+ """Return a list of nodes connected to node n. """
+ return G.neighbors(n)
+
+def number_of_nodes(G):
+ """Return the number of nodes in the graph."""
+ return G.number_of_nodes()
+
+def number_of_edges(G):
+ """Return the number of edges in the graph. """
+ return G.number_of_edges()
+
+def density(G):
+ r"""Return the density of a graph.
+
+ The density for undirected graphs is
+
+ .. math::
+
+ d = \frac{2m}{n(n-1)},
+
+ and for directed graphs is
+
+ .. math::
+
+ d = \frac{m}{n(n-1)},
+
+ where `n` is the number of nodes and `m` is the number of edges in `G`.
+
+ Notes
+ -----
+ The density is 0 for a graph without edges and 1 for a complete graph.
+ The density of multigraphs can be higher than 1.
+
+ Self loops are counted in the total number of edges so graphs with self
+ loops can have density higher than 1.
+ """
+ n=number_of_nodes(G)
+ m=number_of_edges(G)
+ if m==0 or n <= 1:
+ d=0.0
+ else:
+ if G.is_directed():
+ d=m/float(n*(n-1))
+ else:
+ d= m*2.0/float(n*(n-1))
+ return d
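+
+# For example, the path graph on three nodes has n=3 and m=2, so its density
+# should be d = 2*2/(3*2) = 2/3:
+#     >>> round(density(nx.path_graph(3)), 3)
+#     0.667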
+
+def degree_histogram(G):
+ """Return a list of the frequency of each degree value.
+
+ Parameters
+ ----------
+ G : Networkx graph
+ A graph
+
+ Returns
+ -------
+ hist : list
+ A list of frequencies of degrees.
+ The degree values are the index in the list.
+
+ Notes
+ -----
+ Note: the bins are width one, hence len(list) can be large
+ (Order(number_of_edges))
+ """
+ degseq=list(G.degree().values())
+ dmax=max(degseq)+1
+ freq= [ 0 for d in range(dmax) ]
+ for d in degseq:
+ freq[d] += 1
+ return freq
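+
+# For example, the path graph 0-1-2 has degree sequence [1, 2, 1], so the
+# histogram should be [0, 2, 1] (no nodes of degree 0, two of degree 1,
+# one of degree 2):
+#     >>> degree_histogram(nx.path_graph(3))
+#     [0, 2, 1]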
+
+def is_directed(G):
+ """ Return True if graph is directed."""
+ return G.is_directed()
+
+
+def freeze(G):
+ """Modify graph to prevent further change by adding or removing
+ nodes or edges.
+
+ Node and edge data can still be modified.
+
+ Parameters
+ -----------
+ G : graph
+ A NetworkX graph
+
+ Examples
+ --------
+ >>> G=nx.Graph()
+ >>> G.add_path([0,1,2,3])
+ >>> G=nx.freeze(G)
+ >>> try:
+ ... G.add_edge(4,5)
+ ... except nx.NetworkXError as e:
+ ... print(str(e))
+ Frozen graph can't be modified
+
+ Notes
+ -----
+ To "unfreeze" a graph you must make a copy by creating a new graph object:
+
+ >>> graph = nx.path_graph(4)
+ >>> frozen_graph = nx.freeze(graph)
+ >>> unfrozen_graph = nx.Graph(frozen_graph)
+ >>> nx.is_frozen(unfrozen_graph)
+ False
+
+ See Also
+ --------
+ is_frozen
+ """
+ def frozen(*args):
+ raise nx.NetworkXError("Frozen graph can't be modified")
+ G.add_node=frozen
+ G.add_nodes_from=frozen
+ G.remove_node=frozen
+ G.remove_nodes_from=frozen
+ G.add_edge=frozen
+ G.add_edges_from=frozen
+ G.remove_edge=frozen
+ G.remove_edges_from=frozen
+ G.clear=frozen
+ G.frozen=True
+ return G
+
+def is_frozen(G):
+ """Return True if graph is frozen.
+
+ Parameters
+ -----------
+ G : graph
+ A NetworkX graph
+
+ See Also
+ --------
+ freeze
+ """
+ try:
+ return G.frozen
+ except AttributeError:
+ return False
+
+def subgraph(G, nbunch):
+ """Return the subgraph induced on nodes in nbunch.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph
+
+ nbunch : list, iterable
+ A container of nodes that will be iterated through once (thus
+ it should be an iterator or be iterable). Each element of the
+ container should be a valid node type: any hashable type except
+        None. If nbunch is None, the subgraph contains all nodes in G.
+ Nodes in nbunch that are not in the graph will be (quietly)
+ ignored.
+
+ Notes
+ -----
+ subgraph(G) calls G.subgraph()
+ """
+ return G.subgraph(nbunch)
+
+def create_empty_copy(G,with_nodes=True):
+ """Return a copy of the graph G with all of the edges removed.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph
+
+ with_nodes : bool (default=True)
+ Include nodes.
+
+ Notes
+ -----
+ Graph, node, and edge data is not propagated to the new graph.
+ """
+ H=G.__class__()
+ if with_nodes:
+ H.add_nodes_from(G)
+ return H
+
+
+def info(G, n=None):
+ """Print short summary of information for the graph G or the node n.
+
+ Parameters
+ ----------
+ G : Networkx graph
+ A graph
+ n : node (any hashable)
+ A node in the graph G
+ """
+ info='' # append this all to a string
+ if n is None:
+ info+="Name: %s\n"%G.name
+ type_name = [type(G).__name__]
+ info+="Type: %s\n"%",".join(type_name)
+ info+="Number of nodes: %d\n"%G.number_of_nodes()
+ info+="Number of edges: %d\n"%G.number_of_edges()
+ nnodes=G.number_of_nodes()
+ if len(G) > 0:
+ if G.is_directed():
+ info+="Average in degree: %8.4f\n"%\
+ (sum(G.in_degree().values())/float(nnodes))
+ info+="Average out degree: %8.4f"%\
+ (sum(G.out_degree().values())/float(nnodes))
+ else:
+ s=sum(G.degree().values())
+ info+="Average degree: %8.4f"%\
+ (float(s)/float(nnodes))
+
+ else:
+ if n not in G:
+ raise nx.NetworkXError("node %s not in graph"%(n,))
+        info+="Node %s has the following properties:\n"%n
+ info+="Degree: %d\n"%G.degree(n)
+ info+="Neighbors: "
+ info+=' '.join(str(nbr) for nbr in G.neighbors(n))
+ return info
+
+def set_node_attributes(G,name,attributes):
+ """Set node attributes from dictionary of nodes and values
+
+ Parameters
+ ----------
+ G : NetworkX Graph
+
+ name : string
+ Attribute name
+
+ attributes: dict
+ Dictionary of attributes keyed by node.
+
+ Examples
+ --------
+ >>> G=nx.path_graph(3)
+ >>> bb=nx.betweenness_centrality(G)
+ >>> nx.set_node_attributes(G,'betweenness',bb)
+ >>> G.node[1]['betweenness']
+ 1.0
+ """
+ for node,value in attributes.items():
+ G.node[node][name]=value
+
+def get_node_attributes(G,name):
+ """Get node attributes from graph
+
+ Parameters
+ ----------
+ G : NetworkX Graph
+
+ name : string
+ Attribute name
+
+ Returns
+ -------
+ Dictionary of attributes keyed by node.
+
+ Examples
+ --------
+ >>> G=nx.Graph()
+ >>> G.add_nodes_from([1,2,3],color='red')
+ >>> color=nx.get_node_attributes(G,'color')
+ >>> color[1]
+ 'red'
+ """
+ return dict( (n,d[name]) for n,d in G.node.items() if name in d)
+
+
+def set_edge_attributes(G,name,attributes):
+ """Set edge attributes from dictionary of edge tuples and values
+
+ Parameters
+ ----------
+ G : NetworkX Graph
+
+ name : string
+ Attribute name
+
+ attributes: dict
+ Dictionary of attributes keyed by edge (tuple).
+
+ Examples
+ --------
+ >>> G=nx.path_graph(3)
+ >>> bb=nx.edge_betweenness_centrality(G, normalized=False)
+ >>> nx.set_edge_attributes(G,'betweenness',bb)
+ >>> G[1][2]['betweenness']
+ 2.0
+ """
+ for (u,v),value in attributes.items():
+ G[u][v][name]=value
+
+def get_edge_attributes(G,name):
+ """Get edge attributes from graph
+
+ Parameters
+ ----------
+ G : NetworkX Graph
+
+ name : string
+ Attribute name
+
+ Returns
+ -------
+    Dictionary of attributes keyed by edge.
+
+ Examples
+ --------
+ >>> G=nx.Graph()
+ >>> G.add_path([1,2,3],color='red')
+ >>> color=nx.get_edge_attributes(G,'color')
+ >>> color[(1,2)]
+ 'red'
+ """
+ return dict( ((u,v),d[name]) for u,v,d in G.edges(data=True) if name in d)
+
+
+def all_neighbors(graph, node):
+    """Return all of the neighbors of a node in the graph.
+
+    If the graph is directed, this returns predecessors as well as successors.
+
+ Parameters
+ ----------
+ graph : NetworkX graph
+        The graph in which to find neighbors.
+
+ node : node
+ The node whose neighbors will be returned.
+
+ Returns
+ -------
+ neighbors : iterator
+ Iterator of neighbors
+ """
+ if graph.is_directed():
+ values = itertools.chain.from_iterable([graph.predecessors_iter(node),
+ graph.successors_iter(node)])
+ else:
+ values = graph.neighbors_iter(node)
+
+ return values
+
+def non_neighbors(graph, node):
+ """Returns the non-neighbors of the node in the graph.
+
+ Parameters
+ ----------
+ graph : NetworkX graph
+        The graph in which to find neighbors.
+
+ node : node
+        The node whose non-neighbors will be returned.
+
+ Returns
+ -------
+ non_neighbors : iterator
+ Iterator of nodes in the graph that are not neighbors of the node.
+ """
+ nbors = set(neighbors(graph, node)) | set([node])
+ return (nnode for nnode in graph if nnode not in nbors)
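+
+# For example, on the path graph 0-1-2-3 node 1 touches nodes 0 and 2, so
+# node 3 is its only non-neighbor:
+#     >>> G = nx.path_graph(4)
+#     >>> sorted(all_neighbors(G, 1))
+#     [0, 2]
+#     >>> sorted(non_neighbors(G, 1))
+#     [3]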
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/classes/graph.py b/lib/python2.7/site-packages/setoolsgui/networkx/classes/graph.py
new file mode 100644
index 0000000..9ef7c23
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/classes/graph.py
@@ -0,0 +1,1816 @@
+"""Base class for undirected graphs.
+
+The Graph class allows any hashable object as a node
+and can associate key/value attribute pairs with each undirected edge.
+
+Self-loops are allowed but multiple edges are not (see MultiGraph).
+
+For directed graphs see DiGraph and MultiDiGraph.
+"""
+# Copyright (C) 2004-2011 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+from copy import deepcopy
+import networkx as nx
+from networkx.exception import NetworkXError
+import networkx.convert as convert
+
+__author__ = """\n""".join(['Aric Hagberg (hagberg@lanl.gov)',
+ 'Pieter Swart (swart@lanl.gov)',
+ 'Dan Schult(dschult@colgate.edu)'])
+
+class Graph(object):
+ """
+ Base class for undirected graphs.
+
+ A Graph stores nodes and edges with optional data, or attributes.
+
+ Graphs hold undirected edges. Self loops are allowed but multiple
+ (parallel) edges are not.
+
+ Nodes can be arbitrary (hashable) Python objects with optional
+ key/value attributes.
+
+ Edges are represented as links between nodes with optional
+ key/value attributes.
+
+ Parameters
+ ----------
+ data : input graph
+ Data to initialize graph. If data=None (default) an empty
+ graph is created. The data can be an edge list, or any
+ NetworkX graph object. If the corresponding optional Python
+ packages are installed the data can also be a NumPy matrix
+ or 2d ndarray, a SciPy sparse matrix, or a PyGraphviz graph.
+ attr : keyword arguments, optional (default= no attributes)
+ Attributes to add to graph as key=value pairs.
+
+ See Also
+ --------
+ DiGraph
+ MultiGraph
+ MultiDiGraph
+
+ Examples
+ --------
+ Create an empty graph structure (a "null graph") with no nodes and
+ no edges.
+
+ >>> G = nx.Graph()
+
+ G can be grown in several ways.
+
+ **Nodes:**
+
+ Add one node at a time:
+
+ >>> G.add_node(1)
+
+ Add the nodes from any container (a list, dict, set or
+ even the lines from a file or the nodes from another graph).
+
+ >>> G.add_nodes_from([2,3])
+ >>> G.add_nodes_from(range(100,110))
+ >>> H=nx.Graph()
+ >>> H.add_path([0,1,2,3,4,5,6,7,8,9])
+ >>> G.add_nodes_from(H)
+
+ In addition to strings and integers any hashable Python object
+ (except None) can represent a node, e.g. a customized node object,
+ or even another Graph.
+
+ >>> G.add_node(H)
+
+ **Edges:**
+
+ G can also be grown by adding edges.
+
+ Add one edge,
+
+ >>> G.add_edge(1, 2)
+
+ a list of edges,
+
+ >>> G.add_edges_from([(1,2),(1,3)])
+
+ or a collection of edges,
+
+ >>> G.add_edges_from(H.edges())
+
+ If some edges connect nodes not yet in the graph, the nodes
+ are added automatically. There are no errors when adding
+ nodes or edges that already exist.
+
+ **Attributes:**
+
+ Each graph, node, and edge can hold key/value attribute pairs
+ in an associated attribute dictionary (the keys must be hashable).
+ By default these are empty, but can be added or changed using
+ add_edge, add_node or direct manipulation of the attribute
+ dictionaries named graph, node and edge respectively.
+
+ >>> G = nx.Graph(day="Friday")
+ >>> G.graph
+ {'day': 'Friday'}
+
+ Add node attributes using add_node(), add_nodes_from() or G.node
+
+ >>> G.add_node(1, time='5pm')
+ >>> G.add_nodes_from([3], time='2pm')
+ >>> G.node[1]
+ {'time': '5pm'}
+ >>> G.node[1]['room'] = 714
+ >>> del G.node[1]['room'] # remove attribute
+ >>> G.nodes(data=True)
+ [(1, {'time': '5pm'}), (3, {'time': '2pm'})]
+
+ Warning: adding a node to G.node does not add it to the graph.
+
+ Add edge attributes using add_edge(), add_edges_from(), subscript
+ notation, or G.edge.
+
+ >>> G.add_edge(1, 2, weight=4.7 )
+ >>> G.add_edges_from([(3,4),(4,5)], color='red')
+ >>> G.add_edges_from([(1,2,{'color':'blue'}), (2,3,{'weight':8})])
+ >>> G[1][2]['weight'] = 4.7
+ >>> G.edge[1][2]['weight'] = 4
+
+ **Shortcuts:**
+
+ Many common graph features allow python syntax to speed reporting.
+
+ >>> 1 in G # check if node in graph
+ True
+ >>> [n for n in G if n<3] # iterate through nodes
+ [1, 2]
+ >>> len(G) # number of nodes in graph
+ 5
+
+ The fastest way to traverse all edges of a graph is via
+ adjacency_iter(), but the edges() method is often more convenient.
+
+ >>> for n,nbrsdict in G.adjacency_iter():
+ ... for nbr,eattr in nbrsdict.items():
+ ... if 'weight' in eattr:
+ ... (n,nbr,eattr['weight'])
+ (1, 2, 4)
+ (2, 1, 4)
+ (2, 3, 8)
+ (3, 2, 8)
+ >>> [ (u,v,edata['weight']) for u,v,edata in G.edges(data=True) if 'weight' in edata ]
+ [(1, 2, 4), (2, 3, 8)]
+
+ **Reporting:**
+
+ Simple graph information is obtained using methods.
+ Iterator versions of many reporting methods exist for efficiency.
+ Methods exist for reporting nodes(), edges(), neighbors() and degree()
+ as well as the number of nodes and edges.
+
+ For details on these and other miscellaneous methods, see below.
+ """
+ def __init__(self, data=None, **attr):
+ """Initialize a graph with edges, name, graph attributes.
+
+ Parameters
+ ----------
+ data : input graph
+ Data to initialize graph. If data=None (default) an empty
+ graph is created. The data can be an edge list, or any
+ NetworkX graph object. If the corresponding optional Python
+ packages are installed the data can also be a NumPy matrix
+ or 2d ndarray, a SciPy sparse matrix, or a PyGraphviz graph.
+ name : string, optional (default='')
+ An optional name for the graph.
+ attr : keyword arguments, optional (default= no attributes)
+ Attributes to add to graph as key=value pairs.
+
+ See Also
+ --------
+ convert
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G = nx.Graph(name='my graph')
+ >>> e = [(1,2),(2,3),(3,4)] # list of edges
+ >>> G = nx.Graph(e)
+
+ Arbitrary graph attribute pairs (key=value) may be assigned
+
+ >>> G=nx.Graph(e, day="Friday")
+ >>> G.graph
+ {'day': 'Friday'}
+
+ """
+ self.graph = {} # dictionary for graph attributes
+ self.node = {} # empty node dict (created before convert)
+ self.adj = {} # empty adjacency dict
+ # attempt to load graph with data
+ if data is not None:
+ convert.to_networkx_graph(data,create_using=self)
+ # load graph attributes (must be after convert)
+ self.graph.update(attr)
+ self.edge = self.adj
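+ # Illustrative sketch of the resulting structure: after
+ # G = Graph(); G.add_edge('a', 'b', weight=7)
+ # the dicts read G.adj == {'a': {'b': {'weight': 7}},
+ # 'b': {'a': {'weight': 7}}}, G.node == {'a': {}, 'b': {}}, G.graph == {}.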
+
+ @property
+ def name(self):
+ return self.graph.get('name','')
+ @name.setter
+ def name(self, s):
+ self.graph['name']=s
+
+ def __str__(self):
+ """Return the graph name.
+
+ Returns
+ -------
+ name : string
+ The name of the graph.
+
+ Examples
+ --------
+ >>> G = nx.Graph(name='foo')
+ >>> str(G)
+ 'foo'
+ """
+ return self.name
+
+ def __iter__(self):
+ """Iterate over the nodes. Use the expression 'for n in G'.
+
+ Returns
+ -------
+ niter : iterator
+ An iterator over all nodes in the graph.
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_path([0,1,2,3])
+ """
+ return iter(self.node)
+
+ def __contains__(self,n):
+ """Return True if n is a node, False otherwise. Use the expression
+ 'n in G'.
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_path([0,1,2,3])
+ >>> 1 in G
+ True
+ """
+ try:
+ return n in self.node
+ except TypeError:
+ return False
+
+ def __len__(self):
+ """Return the number of nodes. Use the expression 'len(G)'.
+
+ Returns
+ -------
+ nnodes : int
+ The number of nodes in the graph.
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_path([0,1,2,3])
+ >>> len(G)
+ 4
+
+ """
+ return len(self.node)
+
+ def __getitem__(self, n):
+ """Return a dict of neighbors of node n. Use the expression 'G[n]'.
+
+ Parameters
+ ----------
+ n : node
+ A node in the graph.
+
+ Returns
+ -------
+ adj_dict : dictionary
+ The adjacency dictionary for nodes connected to n.
+
+ Notes
+ -----
+ G[n] is similar to G.neighbors(n) but the internal data dictionary
+ is returned instead of a list.
+
+ Assigning G[n] will corrupt the internal graph data structure.
+ Use G[n] for reading data only.
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_path([0,1,2,3])
+ >>> G[0]
+ {1: {}}
+ """
+ return self.adj[n]
+
+
+ def add_node(self, n, attr_dict=None, **attr):
+ """Add a single node n and update node attributes.
+
+ Parameters
+ ----------
+ n : node
+ A node can be any hashable Python object except None.
+ attr_dict : dictionary, optional (default= no attributes)
+ Dictionary of node attributes. Key/value pairs will
+ update existing data associated with the node.
+ attr : keyword arguments, optional
+ Set or change attributes using key=value.
+
+ See Also
+ --------
+ add_nodes_from
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_node(1)
+ >>> G.add_node('Hello')
+ >>> K3 = nx.Graph([(0,1),(1,2),(2,0)])
+ >>> G.add_node(K3)
+ >>> G.number_of_nodes()
+ 3
+
+ Use keywords set/change node attributes:
+
+ >>> G.add_node(1,size=10)
+ >>> G.add_node(3,weight=0.4,UTM=('13S',382871,3972649))
+
+ Notes
+ -----
+ A hashable object is one that can be used as a key in a Python
+ dictionary. This includes strings, numbers, tuples of strings
+ and numbers, etc.
+
+ On many platforms hashable items also include mutables such as
+ NetworkX Graphs, though one should be careful that the hash
+ doesn't change on mutables.
+ """
+ # set up attribute dict
+ if attr_dict is None:
+ attr_dict=attr
+ else:
+ try:
+ attr_dict.update(attr)
+ except AttributeError:
+ raise NetworkXError(\
+ "The attr_dict argument must be a dictionary.")
+ if n not in self.node:
+ self.adj[n] = {}
+ self.node[n] = attr_dict
+ else: # update attr even if node already exists
+ self.node[n].update(attr_dict)
+
+
+ def add_nodes_from(self, nodes, **attr):
+ """Add multiple nodes.
+
+ Parameters
+ ----------
+ nodes : iterable container
+ A container of nodes (list, dict, set, etc.).
+ OR
+ A container of (node, attribute dict) tuples.
+ Node attributes are updated using the attribute dict.
+ attr : keyword arguments, optional (default= no attributes)
+ Update attributes for all nodes in nodes.
+ Node attributes specified in nodes as a tuple
+ take precedence over attributes specified generally.
+
+ See Also
+ --------
+ add_node
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_nodes_from('Hello')
+ >>> K3 = nx.Graph([(0,1),(1,2),(2,0)])
+ >>> G.add_nodes_from(K3)
+ >>> sorted(G.nodes(),key=str)
+ [0, 1, 2, 'H', 'e', 'l', 'o']
+
+ Use keywords to update specific node attributes for every node.
+
+ >>> G.add_nodes_from([1,2], size=10)
+ >>> G.add_nodes_from([3,4], weight=0.4)
+
+ Use (node, attrdict) tuples to update attributes for specific
+ nodes.
+
+ >>> G.add_nodes_from([(1,dict(size=11)), (2,{'color':'blue'})])
+ >>> G.node[1]['size']
+ 11
+ >>> H = nx.Graph()
+ >>> H.add_nodes_from(G.nodes(data=True))
+ >>> H.node[1]['size']
+ 11
+
+ """
+ for n in nodes:
+ try:
+ newnode=n not in self.node
+ except TypeError:
+ nn,ndict = n
+ if nn not in self.node:
+ self.adj[nn] = {}
+ newdict = attr.copy()
+ newdict.update(ndict)
+ self.node[nn] = newdict
+ else:
+ olddict = self.node[nn]
+ olddict.update(attr)
+ olddict.update(ndict)
+ continue
+ if newnode:
+ self.adj[n] = {}
+ self.node[n] = attr.copy()
+ else:
+ self.node[n].update(attr)
+
+ def remove_node(self,n):
+ """Remove node n.
+
+ Removes the node n and all adjacent edges.
+ Attempting to remove a non-existent node will raise an exception.
+
+ Parameters
+ ----------
+ n : node
+ A node in the graph
+
+ Raises
+ ------
+ NetworkXError
+ If n is not in the graph.
+
+ See Also
+ --------
+ remove_nodes_from
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_path([0,1,2])
+ >>> G.edges()
+ [(0, 1), (1, 2)]
+ >>> G.remove_node(1)
+ >>> G.edges()
+ []
+
+ """
+ adj = self.adj
+ try:
+ nbrs = list(adj[n].keys()) # keys handles self-loops (allow mutation later)
+ del self.node[n]
+ except KeyError: # NetworkXError if n not in self
+ raise NetworkXError("The node %s is not in the graph."%(n,))
+ for u in nbrs:
+ del adj[u][n] # remove all edges n-u in graph
+ del adj[n] # now remove node
+
+
+ def remove_nodes_from(self, nodes):
+ """Remove multiple nodes.
+
+ Parameters
+ ----------
+ nodes : iterable container
+ A container of nodes (list, dict, set, etc.). If a node
+ in the container is not in the graph it is silently
+ ignored.
+
+ See Also
+ --------
+ remove_node
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_path([0,1,2])
+ >>> e = G.nodes()
+ >>> e
+ [0, 1, 2]
+ >>> G.remove_nodes_from(e)
+ >>> G.nodes()
+ []
+
+ """
+ adj = self.adj
+ for n in nodes:
+ try:
+ del self.node[n]
+ for u in list(adj[n].keys()): # keys() handles self-loops
+ del adj[u][n] #(allows mutation of dict in loop)
+ del adj[n]
+ except KeyError:
+ pass
+
+
+ def nodes_iter(self, data=False):
+ """Return an iterator over the nodes.
+
+ Parameters
+ ----------
+ data : boolean, optional (default=False)
+ If False the iterator returns nodes. If True
+ return a two-tuple of node and node data dictionary
+
+ Returns
+ -------
+ niter : iterator
+ An iterator over nodes. If data=True the iterator gives
+ two-tuples containing (node, node data dictionary)
+
+ Notes
+ -----
+ If the node data is not required it is simpler and equivalent
+ to use the expression 'for n in G'.
+
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_path([0,1,2])
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_path([0,1,2])
+
+ >>> [d for n,d in G.nodes_iter(data=True)]
+ [{}, {}, {}]
+ """
+ if data:
+ return iter(self.node.items())
+ return iter(self.node)
+
+ def nodes(self, data=False):
+ """Return a list of the nodes in the graph.
+
+ Parameters
+ ----------
+ data : boolean, optional (default=False)
+ If False return a list of nodes. If True return a
+ two-tuple of node and node data dictionary
+
+ Returns
+ -------
+ nlist : list
+ A list of nodes. If data=True a list of two-tuples containing
+ (node, node data dictionary).
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_path([0,1,2])
+ >>> G.nodes()
+ [0, 1, 2]
+ >>> G.add_node(1, time='5pm')
+ >>> G.nodes(data=True)
+ [(0, {}), (1, {'time': '5pm'}), (2, {})]
+ """
+ return list(self.nodes_iter(data=data))
+
+ def number_of_nodes(self):
+ """Return the number of nodes in the graph.
+
+ Returns
+ -------
+ nnodes : int
+ The number of nodes in the graph.
+
+ See Also
+ --------
+ order, __len__ which are identical
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_path([0,1,2])
+ >>> len(G)
+ 3
+ """
+ return len(self.node)
+
+ def order(self):
+ """Return the number of nodes in the graph.
+
+ Returns
+ -------
+ nnodes : int
+ The number of nodes in the graph.
+
+ See Also
+ --------
+ number_of_nodes, __len__ which are identical
+
+ """
+ return len(self.node)
+
+ def has_node(self, n):
+ """Return True if the graph contains the node n.
+
+ Parameters
+ ----------
+ n : node
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_path([0,1,2])
+ >>> G.has_node(0)
+ True
+
+ It is more readable and simpler to use
+
+ >>> 0 in G
+ True
+
+ """
+ try:
+ return n in self.node
+ except TypeError:
+ return False
+
+ def add_edge(self, u, v, attr_dict=None, **attr):
+ """Add an edge between u and v.
+
+ The nodes u and v will be automatically added if they are
+ not already in the graph.
+
+ Edge attributes can be specified with keywords or by providing
+ a dictionary with key/value pairs. See examples below.
+
+ Parameters
+ ----------
+ u,v : nodes
+ Nodes can be, for example, strings or numbers.
+ Nodes must be hashable (and not None) Python objects.
+ attr_dict : dictionary, optional (default= no attributes)
+ Dictionary of edge attributes. Key/value pairs will
+ update existing data associated with the edge.
+ attr : keyword arguments, optional
+ Edge data (or labels or objects) can be assigned using
+ keyword arguments.
+
+ See Also
+ --------
+ add_edges_from : add a collection of edges
+
+ Notes
+ -----
+ Adding an edge that already exists updates the edge data.
+
+ Many NetworkX algorithms designed for weighted graphs use as
+ the edge weight a numerical value assigned to a keyword
+ which by default is 'weight'.
+
+ Examples
+ --------
+ The following all add the edge e=(1,2) to graph G:
+
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> e = (1,2)
+ >>> G.add_edge(1, 2) # explicit two-node form
+ >>> G.add_edge(*e) # single edge as tuple of two nodes
+ >>> G.add_edges_from( [(1,2)] ) # add edges from iterable container
+
+ Associate data to edges using keywords:
+
+ >>> G.add_edge(1, 2, weight=3)
+ >>> G.add_edge(1, 3, weight=7, capacity=15, length=342.7)
+ """
+ # set up attribute dictionary
+ if attr_dict is None:
+ attr_dict=attr
+ else:
+ try:
+ attr_dict.update(attr)
+ except AttributeError:
+ raise NetworkXError(\
+ "The attr_dict argument must be a dictionary.")
+ # add nodes
+ if u not in self.node:
+ self.adj[u] = {}
+ self.node[u] = {}
+ if v not in self.node:
+ self.adj[v] = {}
+ self.node[v] = {}
+ # add the edge
+ datadict=self.adj[u].get(v,{})
+ datadict.update(attr_dict)
+ self.adj[u][v] = datadict
+ self.adj[v][u] = datadict
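+ # Illustrative note: both orientations share one attribute dict, so
+ # G[u][v] is G[v][u] and an update through either view is seen by both:
+ # G.add_edge(1, 2, weight=3); G[1][2]['weight'] = 7
+ # G[2][1]['weight'] # -> 7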
+
+
+ def add_edges_from(self, ebunch, attr_dict=None, **attr):
+ """Add all the edges in ebunch.
+
+ Parameters
+ ----------
+ ebunch : container of edges
+ Each edge given in the container will be added to the
+ graph. The edges must be given as 2-tuples (u,v) or
+ 3-tuples (u,v,d) where d is a dictionary containing edge
+ data.
+ attr_dict : dictionary, optional (default= no attributes)
+ Dictionary of edge attributes. Key/value pairs will
+ update existing data associated with each edge.
+ attr : keyword arguments, optional
+ Edge data (or labels or objects) can be assigned using
+ keyword arguments.
+
+
+ See Also
+ --------
+ add_edge : add a single edge
+ add_weighted_edges_from : convenient way to add weighted edges
+
+ Notes
+ -----
+ Adding the same edge twice has no effect but any edge data
+ will be updated when each duplicate edge is added.
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_edges_from([(0,1),(1,2)]) # using a list of edge tuples
+ >>> e = zip(range(0,3),range(1,4))
+ >>> G.add_edges_from(e) # Add the path graph 0-1-2-3
+
+ Associate data to edges
+
+ >>> G.add_edges_from([(1,2),(2,3)], weight=3)
+ >>> G.add_edges_from([(3,4),(1,4)], label='WN2898')
+ """
+ # set up attribute dict
+ if attr_dict is None:
+ attr_dict=attr
+ else:
+ try:
+ attr_dict.update(attr)
+ except AttributeError:
+ raise NetworkXError(\
+ "The attr_dict argument must be a dictionary.")
+ # process ebunch
+ for e in ebunch:
+ ne=len(e)
+ if ne==3:
+ u,v,dd = e
+ elif ne==2:
+ u,v = e
+ dd = {}
+ else:
+ raise NetworkXError(\
+ "Edge tuple %s must be a 2-tuple or 3-tuple."%(e,))
+ if u not in self.node:
+ self.adj[u] = {}
+ self.node[u] = {}
+ if v not in self.node:
+ self.adj[v] = {}
+ self.node[v] = {}
+ datadict=self.adj[u].get(v,{})
+ datadict.update(attr_dict)
+ datadict.update(dd)
+ self.adj[u][v] = datadict
+ self.adj[v][u] = datadict
+
+
+ def add_weighted_edges_from(self, ebunch, weight='weight', **attr):
+ """Add all the edges in ebunch as weighted edges with specified
+ weights.
+
+ Parameters
+ ----------
+ ebunch : container of edges
+ Each edge given in the list or container will be added
+ to the graph. The edges must be given as 3-tuples (u,v,w)
+ where w is a number.
+ weight : string, optional (default= 'weight')
+ The attribute name for the edge weights to be added.
+ attr : keyword arguments, optional (default= no attributes)
+ Edge attributes to add/update for all edges.
+
+ See Also
+ --------
+ add_edge : add a single edge
+ add_edges_from : add multiple edges
+
+ Notes
+ -----
+ Adding the same edge twice for Graph/DiGraph simply updates
+ the edge data. For MultiGraph/MultiDiGraph, duplicate edges
+ are stored.
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_weighted_edges_from([(0,1,3.0),(1,2,7.5)])
+ """
+ self.add_edges_from(((u,v,{weight:d}) for u,v,d in ebunch),**attr)
+
+ def remove_edge(self, u, v):
+ """Remove the edge between u and v.
+
+ Parameters
+ ----------
+ u,v: nodes
+ Remove the edge between nodes u and v.
+
+ Raises
+ ------
+ NetworkXError
+ If there is not an edge between u and v.
+
+ See Also
+ --------
+ remove_edges_from : remove a collection of edges
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, etc
+ >>> G.add_path([0,1,2,3])
+ >>> G.remove_edge(0,1)
+ >>> e = (1,2)
+ >>> G.remove_edge(*e) # unpacks e from an edge tuple
+ >>> e = (2,3,{'weight':7}) # an edge with attribute data
+ >>> G.remove_edge(*e[:2]) # select first part of edge tuple
+ """
+ try:
+ del self.adj[u][v]
+ if u != v: # self-loop needs only one entry removed
+ del self.adj[v][u]
+ except KeyError:
+ raise NetworkXError("The edge %s-%s is not in the graph"%(u,v))
+
+
+
+ def remove_edges_from(self, ebunch):
+ """Remove all edges specified in ebunch.
+
+ Parameters
+ ----------
+ ebunch: list or container of edge tuples
+ Each edge given in the list or container will be removed
+ from the graph. The edges can be:
+
+ - 2-tuples (u,v) edge between u and v.
+ - 3-tuples (u,v,k) where k is ignored.
+
+ See Also
+ --------
+ remove_edge : remove a single edge
+
+ Notes
+ -----
+ Will fail silently if an edge in ebunch is not in the graph.
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_path([0,1,2,3])
+ >>> ebunch=[(1,2),(2,3)]
+ >>> G.remove_edges_from(ebunch)
+ """
+ adj=self.adj
+ for e in ebunch:
+ u,v = e[:2] # ignore edge data if present
+ if u in adj and v in adj[u]:
+ del adj[u][v]
+ if u != v: # self loop needs only one entry removed
+ del adj[v][u]
+
+
+ def has_edge(self, u, v):
+ """Return True if the edge (u,v) is in the graph.
+
+ Parameters
+ ----------
+ u,v : nodes
+ Nodes can be, for example, strings or numbers.
+ Nodes must be hashable (and not None) Python objects.
+
+ Returns
+ -------
+ edge_ind : bool
+ True if edge is in the graph, False otherwise.
+
+ Examples
+ --------
+ The edge can be given either as two nodes u,v or as an edge tuple (u,v)
+
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_path([0,1,2,3])
+ >>> G.has_edge(0,1) # using two nodes
+ True
+ >>> e = (0,1)
+ >>> G.has_edge(*e) # e is a 2-tuple (u,v)
+ True
+ >>> e = (0,1,{'weight':7})
+ >>> G.has_edge(*e[:2]) # e is a 3-tuple (u,v,data_dictionary)
+ True
+
+ The following are all equivalent:
+
+ >>> G.has_edge(0,1)
+ True
+ >>> 1 in G[0] # though this gives KeyError if 0 not in G
+ True
+
+ """
+ try:
+ return v in self.adj[u]
+ except KeyError:
+ return False
+
+
+ def neighbors(self, n):
+ """Return a list of the nodes connected to the node n.
+
+ Parameters
+ ----------
+ n : node
+ A node in the graph
+
+ Returns
+ -------
+ nlist : list
+ A list of nodes that are adjacent to n.
+
+ Raises
+ ------
+ NetworkXError
+ If the node n is not in the graph.
+
+ Notes
+ -----
+ It is usually more convenient (and faster) to access the
+ adjacency dictionary as G[n]:
+
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_edge('a','b',weight=7)
+ >>> G['a']
+ {'b': {'weight': 7}}
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_path([0,1,2,3])
+ >>> G.neighbors(0)
+ [1]
+
+ """
+ try:
+ return list(self.adj[n])
+ except KeyError:
+ raise NetworkXError("The node %s is not in the graph."%(n,))
+
+ def neighbors_iter(self, n):
+ """Return an iterator over all neighbors of node n.
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_path([0,1,2,3])
+ >>> [n for n in G.neighbors_iter(0)]
+ [1]
+
+ Notes
+ -----
+ It is faster to use the idiom "in G[0]", e.g.
+
+ >>> G = nx.path_graph(4)
+ >>> [n for n in G[0]]
+ [1]
+ """
+ try:
+ return iter(self.adj[n])
+ except KeyError:
+ raise NetworkXError("The node %s is not in the graph."%(n,))
+
+ def edges(self, nbunch=None, data=False):
+ """Return a list of edges.
+
+ Edges are returned as tuples with optional data
+ in the order (node, neighbor, data).
+
+ Parameters
+ ----------
+ nbunch : iterable container, optional (default= all nodes)
+ A container of nodes. The container will be iterated
+ through once.
+ data : bool, optional (default=False)
+ Return 2-tuples (u,v) (False) or 3-tuples (u,v,data) (True).
+
+ Returns
+ -------
+ edge_list: list of edge tuples
+ Edges that are adjacent to any node in nbunch, or a list
+ of all edges if nbunch is not specified.
+
+ See Also
+ --------
+ edges_iter : return an iterator over the edges
+
+ Notes
+ -----
+ Nodes in nbunch that are not in the graph will be (quietly) ignored.
+ For directed graphs this returns the out-edges.
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_path([0,1,2,3])
+ >>> G.edges()
+ [(0, 1), (1, 2), (2, 3)]
+ >>> G.edges(data=True) # default edge data is {} (empty dictionary)
+ [(0, 1, {}), (1, 2, {}), (2, 3, {})]
+ >>> G.edges([0,3])
+ [(0, 1), (3, 2)]
+ >>> G.edges(0)
+ [(0, 1)]
+
+ """
+ return list(self.edges_iter(nbunch, data))
+
+ def edges_iter(self, nbunch=None, data=False):
+ """Return an iterator over the edges.
+
+ Edges are returned as tuples with optional data
+ in the order (node, neighbor, data).
+
+ Parameters
+ ----------
+ nbunch : iterable container, optional (default= all nodes)
+ A container of nodes. The container will be iterated
+ through once.
+ data : bool, optional (default=False)
+ If True, return edge attribute dict in 3-tuple (u,v,data).
+
+ Returns
+ -------
+ edge_iter : iterator
+ An iterator of (u,v) or (u,v,d) tuples of edges.
+
+ See Also
+ --------
+ edges : return a list of edges
+
+ Notes
+ -----
+ Nodes in nbunch that are not in the graph will be (quietly) ignored.
+ For directed graphs this returns the out-edges.
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or MultiGraph, etc
+ >>> G.add_path([0,1,2,3])
+ >>> [e for e in G.edges_iter()]
+ [(0, 1), (1, 2), (2, 3)]
+ >>> list(G.edges_iter(data=True)) # default data is {} (empty dict)
+ [(0, 1, {}), (1, 2, {}), (2, 3, {})]
+ >>> list(G.edges_iter([0,3]))
+ [(0, 1), (3, 2)]
+ >>> list(G.edges_iter(0))
+ [(0, 1)]
+
+ """
+ seen={} # helper dict to keep track of multiply stored edges
+ if nbunch is None:
+ nodes_nbrs = self.adj.items()
+ else:
+ nodes_nbrs=((n,self.adj[n]) for n in self.nbunch_iter(nbunch))
+ if data:
+ for n,nbrs in nodes_nbrs:
+ for nbr,data in nbrs.items():
+ if nbr not in seen:
+ yield (n,nbr,data)
+ seen[n]=1
+ else:
+ for n,nbrs in nodes_nbrs:
+ for nbr in nbrs:
+ if nbr not in seen:
+ yield (n,nbr)
+ seen[n] = 1
+ del seen
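+ # How the de-duplication above works: once a node's adjacency has been
+ # emitted, the node is recorded in 'seen', so each undirected edge is
+ # yielded exactly once. For the path 0-1-2, node 0 yields (0,1); node 1
+ # then skips 0 (already seen) and yields only (1,2).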
+
+
+ def get_edge_data(self, u, v, default=None):
+ """Return the attribute dictionary associated with edge (u,v).
+
+ Parameters
+ ----------
+ u,v : nodes
+ default: any Python object (default=None)
+ Value to return if the edge (u,v) is not found.
+
+ Returns
+ -------
+ edge_dict : dictionary
+ The edge attribute dictionary.
+
+ Notes
+ -----
+ It is faster to use G[u][v].
+
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_path([0,1,2,3])
+ >>> G[0][1]
+ {}
+
+ Warning: Assigning G[u][v] corrupts the graph data structure.
+ But it is safe to assign attributes to that dictionary,
+
+ >>> G[0][1]['weight'] = 7
+ >>> G[0][1]['weight']
+ 7
+ >>> G[1][0]['weight']
+ 7
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_path([0,1,2,3])
+ >>> G.get_edge_data(0,1) # default edge data is {}
+ {}
+ >>> e = (0,1)
+ >>> G.get_edge_data(*e) # tuple form
+ {}
+ >>> G.get_edge_data('a','b',default=0) # edge not in graph, return 0
+ 0
+ """
+ try:
+ return self.adj[u][v]
+ except KeyError:
+ return default
+
+ def adjacency_list(self):
+ """Return an adjacency list representation of the graph.
+
+ The output adjacency list is in the order of G.nodes().
+ For directed graphs, only outgoing adjacencies are included.
+
+ Returns
+ -------
+ adj_list : lists of lists
+ The adjacency structure of the graph as a list of lists.
+
+ See Also
+ --------
+ adjacency_iter
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_path([0,1,2,3])
+ >>> G.adjacency_list() # in order given by G.nodes()
+ [[1], [0, 2], [1, 3], [2]]
+
+ """
+ return list(map(list,iter(self.adj.values())))
+
+ def adjacency_iter(self):
+ """Return an iterator of (node, adjacency dict) tuples for all nodes.
+
+ This is the fastest way to look at every edge.
+ For directed graphs, only outgoing adjacencies are included.
+
+ Returns
+ -------
+ adj_iter : iterator
+ An iterator of (node, adjacency dictionary) for all nodes in
+ the graph.
+
+ See Also
+ --------
+ adjacency_list
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_path([0,1,2,3])
+ >>> [(n,nbrdict) for n,nbrdict in G.adjacency_iter()]
+ [(0, {1: {}}), (1, {0: {}, 2: {}}), (2, {1: {}, 3: {}}), (3, {2: {}})]
+
+ """
+ return iter(self.adj.items())
+
+ def degree(self, nbunch=None, weight=None):
+ """Return the degree of a node or nodes.
+
+ The node degree is the number of edges adjacent to that node.
+
+ Parameters
+ ----------
+ nbunch : iterable container, optional (default=all nodes)
+ A container of nodes. The container will be iterated
+ through once.
+
+ weight : string or None, optional (default=None)
+ The edge attribute that holds the numerical value used
+ as a weight. If None, then each edge has weight 1.
+ The degree is the sum of the edge weights adjacent to the node.
+
+ Returns
+ -------
+ nd : dictionary, or number
+ A dictionary with nodes as keys and degree as values or
+ a number if a single node is specified.
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_path([0,1,2,3])
+ >>> G.degree(0)
+ 1
+ >>> G.degree([0,1])
+ {0: 1, 1: 2}
+ >>> list(G.degree([0,1]).values())
+ [1, 2]
+
+ """
+ if nbunch in self: # return a single node
+ return next(self.degree_iter(nbunch,weight))[1]
+ else: # return a dict
+ return dict(self.degree_iter(nbunch,weight))
+
+ def degree_iter(self, nbunch=None, weight=None):
+ """Return an iterator for (node, degree).
+
+ The node degree is the number of edges adjacent to the node.
+
+ Parameters
+ ----------
+ nbunch : iterable container, optional (default=all nodes)
+ A container of nodes. The container will be iterated
+ through once.
+
+ weight : string or None, optional (default=None)
+ The edge attribute that holds the numerical value used
+ as a weight. If None, then each edge has weight 1.
+ The degree is the sum of the edge weights adjacent to the node.
+
+ Returns
+ -------
+ nd_iter : an iterator
+ The iterator returns two-tuples of (node, degree).
+
+ See Also
+ --------
+ degree
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_path([0,1,2,3])
+ >>> list(G.degree_iter(0)) # node 0 with degree 1
+ [(0, 1)]
+ >>> list(G.degree_iter([0,1]))
+ [(0, 1), (1, 2)]
+
+ """
+ if nbunch is None:
+ nodes_nbrs = self.adj.items()
+ else:
+ nodes_nbrs=((n,self.adj[n]) for n in self.nbunch_iter(nbunch))
+
+ if weight is None:
+ for n,nbrs in nodes_nbrs:
+ yield (n,len(nbrs)+(n in nbrs)) # return tuple (n,degree)
+ else:
+ # edge weighted graph - degree is sum of nbr edge weights
+ for n,nbrs in nodes_nbrs:
+ yield (n, sum((nbrs[nbr].get(weight,1) for nbr in nbrs)) +
+ (n in nbrs and nbrs[n].get(weight,1)))
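+ # Self-loop sketch: a self-loop contributes 2 to the degree. The
+ # '(n in nbrs)' term supplies the extra 1 in the unweighted branch and
+ # 'nbrs[n].get(weight,1)' the extra weight in the weighted branch, e.g.
+ # G.add_edge(1, 1); G.degree(1) # -> 2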
+
+
+ def clear(self):
+ """Remove all nodes and edges from the graph.
+
+ This also removes the name, and all graph, node, and edge attributes.
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_path([0,1,2,3])
+ >>> G.clear()
+ >>> G.nodes()
+ []
+ >>> G.edges()
+ []
+
+ """
+ self.name = ''
+ self.adj.clear()
+ self.node.clear()
+ self.graph.clear()
+
+ def copy(self):
+ """Return a copy of the graph.
+
+ Returns
+ -------
+ G : Graph
+ A copy of the graph.
+
+ See Also
+ --------
+ to_directed: return a directed copy of the graph.
+
+ Notes
+ -----
+ This makes a complete copy of the graph including all of the
+ node or edge attributes.
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_path([0,1,2,3])
+ >>> H = G.copy()
+
+ """
+ return deepcopy(self)
+
+ def is_multigraph(self):
+ """Return True if graph is a multigraph, False otherwise."""
+ return False
+
+
+ def is_directed(self):
+ """Return True if graph is directed, False otherwise."""
+ return False
+
+ def to_directed(self):
+ """Return a directed representation of the graph.
+
+ Returns
+ -------
+ G : DiGraph
+ A directed graph with the same name, same nodes, and with
+ each edge (u,v,data) replaced by two directed edges
+ (u,v,data) and (v,u,data).
+
+ Notes
+ -----
+ This returns a "deepcopy" of the edge, node, and
+ graph attributes which attempts to completely copy
+ all of the data and references.
+
+ This is in contrast to the similar D=DiGraph(G) which returns a
+ shallow copy of the data.
+
+ See the Python copy module for more information on shallow
+ and deep copies, http://docs.python.org/library/copy.html.
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or MultiGraph, etc
+ >>> G.add_path([0,1])
+ >>> H = G.to_directed()
+ >>> H.edges()
+ [(0, 1), (1, 0)]
+
+ If already directed, return a (deep) copy
+
+ >>> G = nx.DiGraph() # or MultiDiGraph, etc
+ >>> G.add_path([0,1])
+ >>> H = G.to_directed()
+ >>> H.edges()
+ [(0, 1)]
+ """
+ from networkx import DiGraph
+ G=DiGraph()
+ G.name=self.name
+ G.add_nodes_from(self)
+ G.add_edges_from( ((u,v,deepcopy(data))
+ for u,nbrs in self.adjacency_iter()
+ for v,data in nbrs.items()) )
+ G.graph=deepcopy(self.graph)
+ G.node=deepcopy(self.node)
+ return G
+
+ def to_undirected(self):
+ """Return an undirected copy of the graph.
+
+ Returns
+ -------
+ G : Graph/MultiGraph
+ A deepcopy of the graph.
+
+ See Also
+ --------
+ copy, add_edge, add_edges_from
+
+ Notes
+ -----
+ This returns a "deepcopy" of the edge, node, and
+ graph attributes which attempts to completely copy
+ all of the data and references.
+
+ This is in contrast to the similar G=DiGraph(D) which returns a
+ shallow copy of the data.
+
+ See the Python copy module for more information on shallow
+ and deep copies, http://docs.python.org/library/copy.html.
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or MultiGraph, etc
+ >>> G.add_path([0,1])
+ >>> H = G.to_directed()
+ >>> H.edges()
+ [(0, 1), (1, 0)]
+ >>> G2 = H.to_undirected()
+ >>> G2.edges()
+ [(0, 1)]
+ """
+ return deepcopy(self)
+
+ def subgraph(self, nbunch):
+ """Return the subgraph induced on nodes in nbunch.
+
+ The induced subgraph of the graph contains the nodes in nbunch
+ and the edges between those nodes.
+
+ Parameters
+ ----------
+ nbunch : list, iterable
+ A container of nodes which will be iterated through once.
+
+ Returns
+ -------
+ G : Graph
+ A subgraph of the graph with the same edge attributes.
+
+ Notes
+ -----
+ The graph, edge or node attributes just point to the original graph.
+ So changes to the node or edge structure will not be reflected in
+ the original graph while changes to the attributes will.
+
+ To create a subgraph with its own copy of the edge/node attributes use:
+ nx.Graph(G.subgraph(nbunch))
+
+ If edge attributes are containers, a deep copy can be obtained using:
+ G.subgraph(nbunch).copy()
+
+ For an in-place reduction of a graph to a subgraph you can remove nodes:
+ G.remove_nodes_from([n for n in G if n not in set(nbunch)])
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_path([0,1,2,3])
+ >>> H = G.subgraph([0,1,2])
+ >>> H.edges()
+ [(0, 1), (1, 2)]
+ """
+ bunch =self.nbunch_iter(nbunch)
+ # create new graph and copy subgraph into it
+ H = self.__class__()
+ # copy node and attribute dictionaries
+ for n in bunch:
+ H.node[n]=self.node[n]
+ # namespace shortcuts for speed
+ H_adj=H.adj
+ self_adj=self.adj
+ # add nodes and edges (undirected method)
+ for n in H.node:
+ Hnbrs={}
+ H_adj[n]=Hnbrs
+ for nbr,d in self_adj[n].items():
+ if nbr in H_adj:
+ # add both representations of edge: n-nbr and nbr-n
+ Hnbrs[nbr]=d
+ H_adj[nbr][n]=d
+ H.graph=self.graph
+ return H
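+ # Caveat (illustrative): attribute dicts are shared with the original
+ # graph, so H = G.subgraph([0, 1]); H.node[0]['color'] = 'red' also
+ # colors G.node[0], while structural changes such as H.remove_node(0)
+ # leave G untouched.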
+
+
+ def nodes_with_selfloops(self):
+ """Return a list of nodes with self loops.
+
+ A node with a self loop has an edge with both ends adjacent
+ to that node.
+
+ Returns
+ -------
+ nodelist : list
+ A list of nodes with self loops.
+
+ See Also
+ --------
+ selfloop_edges, number_of_selfloops
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_edge(1,1)
+ >>> G.add_edge(1,2)
+ >>> G.nodes_with_selfloops()
+ [1]
+ """
+ return [ n for n,nbrs in self.adj.items() if n in nbrs ]
+
+ def selfloop_edges(self, data=False):
+ """Return a list of selfloop edges.
+
+ A selfloop edge has the same node at both ends.
+
+ Parameters
+ ----------
+ data : bool, optional (default=False)
+ Return selfloop edges as 2-tuples (u,v) (data=False)
+ or 3-tuples (u,v,data) (data=True)
+
+ Returns
+ -------
+ edgelist : list of edge tuples
+ A list of all selfloop edges.
+
+ See Also
+ --------
+ nodes_with_selfloops, number_of_selfloops
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_edge(1,1)
+ >>> G.add_edge(1,2)
+ >>> G.selfloop_edges()
+ [(1, 1)]
+ >>> G.selfloop_edges(data=True)
+ [(1, 1, {})]
+ """
+ if data:
+ return [ (n,n,nbrs[n])
+ for n,nbrs in self.adj.items() if n in nbrs ]
+ else:
+ return [ (n,n)
+ for n,nbrs in self.adj.items() if n in nbrs ]
+
+
+ def number_of_selfloops(self):
+ """Return the number of selfloop edges.
+
+ A selfloop edge has the same node at both ends.
+
+ Returns
+ -------
+ nloops : int
+ The number of selfloops.
+
+ See Also
+ --------
+ nodes_with_selfloops, selfloop_edges
+
+ Examples
+ --------
+ >>> G=nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_edge(1,1)
+ >>> G.add_edge(1,2)
+ >>> G.number_of_selfloops()
+ 1
+ """
+ return len(self.selfloop_edges())
+
+
+ def size(self, weight=None):
+ """Return the number of edges.
+
+ Parameters
+ ----------
+ weight : string or None, optional (default=None)
+ The edge attribute that holds the numerical value used
+ as a weight. If None, then each edge has weight 1.
+
+ Returns
+ -------
+ nedges : int
+ The number of edges or the sum of edge weights in the graph.
+
+ See Also
+ --------
+ number_of_edges
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_path([0,1,2,3])
+ >>> G.size()
+ 3
+
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_edge('a','b',weight=2)
+ >>> G.add_edge('b','c',weight=4)
+ >>> G.size()
+ 2
+ >>> G.size(weight='weight')
+ 6.0
+ """
+ s=sum(self.degree(weight=weight).values())/2
+ if weight is None:
+ return int(s)
+ else:
+ return float(s)
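+ # Why the halving works: summing degrees counts every edge at both
+ # endpoints, and a self-loop adds 2 to its node's degree, so
+ # sum(degrees)/2 counts each edge, self-loops included, exactly once.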
+
+ def number_of_edges(self, u=None, v=None):
+ """Return the number of edges between two nodes.
+
+ Parameters
+ ----------
+ u,v : nodes, optional (default=all edges)
+ If u and v are specified, return the number of edges between
+ u and v. Otherwise return the total number of all edges.
+
+ Returns
+ -------
+ nedges : int
+ The number of edges in the graph. If nodes u and v are specified
+ return the number of edges between those nodes.
+
+ See Also
+ --------
+ size
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_path([0,1,2,3])
+ >>> G.number_of_edges()
+ 3
+ >>> G.number_of_edges(0,1)
+ 1
+ >>> e = (0,1)
+ >>> G.number_of_edges(*e)
+ 1
+ """
+ if u is None: return int(self.size())
+ if v in self.adj[u]:
+ return 1
+ else:
+ return 0
+
+
+ def add_star(self, nodes, **attr):
+ """Add a star.
+
+ The first node in nodes is the middle of the star. It is connected
+ to all other nodes.
+
+ Parameters
+ ----------
+ nodes : iterable container
+ A container of nodes.
+ attr : keyword arguments, optional (default= no attributes)
+ Attributes to add to every edge in star.
+
+ See Also
+ --------
+ add_path, add_cycle
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_star([0,1,2,3])
+ >>> G.add_star([10,11,12],weight=2)
+
+ """
+ nlist = list(nodes)
+ v=nlist[0]
+ edges=((v,n) for n in nlist[1:])
+ self.add_edges_from(edges, **attr)
+
+ def add_path(self, nodes, **attr):
+ """Add a path.
+
+ Parameters
+ ----------
+ nodes : iterable container
+ A container of nodes. A path will be constructed from
+ the nodes (in order) and added to the graph.
+ attr : keyword arguments, optional (default= no attributes)
+ Attributes to add to every edge in path.
+
+ See Also
+ --------
+ add_star, add_cycle
+
+ Examples
+ --------
+ >>> G=nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_path([0,1,2,3])
+ >>> G.add_path([10,11,12],weight=7)
+
+ """
+ nlist = list(nodes)
+ edges=zip(nlist[:-1],nlist[1:])
+ self.add_edges_from(edges, **attr)
+
+ def add_cycle(self, nodes, **attr):
+ """Add a cycle.
+
+ Parameters
+ ----------
+ nodes: iterable container
+ A container of nodes. A cycle will be constructed from
+ the nodes (in order) and added to the graph.
+ attr : keyword arguments, optional (default= no attributes)
+ Attributes to add to every edge in cycle.
+
+ See Also
+ --------
+ add_path, add_star
+
+ Examples
+ --------
+ >>> G=nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_cycle([0,1,2,3])
+ >>> G.add_cycle([10,11,12],weight=7)
+
+ """
+ nlist = list(nodes)
+ edges=zip(nlist,nlist[1:]+[nlist[0]])
+ self.add_edges_from(edges, **attr)
+
+
+ def nbunch_iter(self, nbunch=None):
+ """Return an iterator of nodes contained in nbunch that are
+ also in the graph.
+
+ The nodes in nbunch are checked for membership in the graph
+ and if not are silently ignored.
+
+ Parameters
+ ----------
+ nbunch : iterable container, optional (default=all nodes)
+ A container of nodes. The container will be iterated
+ through once.
+
+ Returns
+ -------
+ niter : iterator
+ An iterator over nodes in nbunch that are also in the graph.
+ If nbunch is None, iterate over all nodes in the graph.
+
+ Raises
+ ------
+ NetworkXError
+ If nbunch is not a node or a sequence of nodes.
+ If a node in nbunch is not hashable.
+
+ See Also
+ --------
+ Graph.__iter__
+
+ Notes
+ -----
+ When nbunch is an iterator, the returned iterator yields values
+ directly from nbunch, becoming exhausted when nbunch is exhausted.
+
+ To test whether nbunch is a single node, one can use
+ "if nbunch in self:", even after processing with this routine.
+
+ If nbunch is not a node or a (possibly empty) sequence/iterator
+ or None, a NetworkXError is raised. Also, if any object in
+ nbunch is not hashable, a NetworkXError is raised.
+ """
+ if nbunch is None: # include all nodes via iterator
+ bunch=iter(self.adj.keys())
+ elif nbunch in self: # if nbunch is a single node
+ bunch=iter([nbunch])
+ else: # if nbunch is a sequence of nodes
+ def bunch_iter(nlist,adj):
+ try:
+ for n in nlist:
+ if n in adj:
+ yield n
+ except TypeError as e:
+ message=e.args[0]
+ import sys
+ sys.stdout.write(message)
+ # capture error for non-sequence/iterator nbunch.
+ if 'iter' in message:
+ raise NetworkXError(\
+ "nbunch is not a node or a sequence of nodes.")
+ # capture error for unhashable node.
+ elif 'hashable' in message:
+ raise NetworkXError(\
+ "Node %s in the sequence nbunch is not a valid node."%n)
+ else:
+ raise
+ bunch=bunch_iter(nbunch,self.adj)
+ return bunch
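+ # Behaviour sketch: nbunch=None iterates all nodes, a single node is
+ # wrapped in a one-element iterator, and any other container is
+ # filtered by membership, e.g. list(G.nbunch_iter([0, 99])) == [0]
+ # when 0 is a node and 99 is not; an unhashable entry raises
+ # NetworkXError.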
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/classes/multidigraph.py b/lib/python2.7/site-packages/setoolsgui/networkx/classes/multidigraph.py
new file mode 100644
index 0000000..392db89
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/classes/multidigraph.py
@@ -0,0 +1,851 @@
+"""Base class for MultiDiGraph."""
+# Copyright (C) 2004-2011 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+from copy import deepcopy
+import networkx as nx
+from networkx.classes.graph import Graph # for doctests
+from networkx.classes.digraph import DiGraph
+from networkx.classes.multigraph import MultiGraph
+from networkx.exception import NetworkXError
+__author__ = """\n""".join(['Aric Hagberg (hagberg@lanl.gov)',
+ 'Pieter Swart (swart@lanl.gov)',
+ 'Dan Schult (dschult@colgate.edu)'])
+
+class MultiDiGraph(MultiGraph,DiGraph):
+ """A directed graph class that can store multiedges.
+
+ Multiedges are multiple edges between two nodes. Each edge
+ can hold optional data or attributes.
+
+ A MultiDiGraph holds directed edges. Self loops are allowed.
+
+ Nodes can be arbitrary (hashable) Python objects with optional
+ key/value attributes.
+
+ Edges are represented as links between nodes with optional
+ key/value attributes.
+
+ Parameters
+ ----------
+ data : input graph
+ Data to initialize graph. If data=None (default) an empty
+ graph is created. The data can be an edge list, or any
+ NetworkX graph object. If the corresponding optional Python
+ packages are installed the data can also be a NumPy matrix
+ or 2d ndarray, a SciPy sparse matrix, or a PyGraphviz graph.
+ attr : keyword arguments, optional (default= no attributes)
+ Attributes to add to graph as key=value pairs.
+
+ See Also
+ --------
+ Graph
+ DiGraph
+ MultiGraph
+
+ Examples
+ --------
+ Create an empty graph structure (a "null graph") with no nodes and
+ no edges.
+
+ >>> G = nx.MultiDiGraph()
+
+ G can be grown in several ways.
+
+ **Nodes:**
+
+ Add one node at a time:
+
+ >>> G.add_node(1)
+
+ Add the nodes from any container (a list, dict, set or
+ even the lines from a file or the nodes from another graph).
+
+ >>> G.add_nodes_from([2,3])
+ >>> G.add_nodes_from(range(100,110))
+ >>> H=nx.Graph()
+ >>> H.add_path([0,1,2,3,4,5,6,7,8,9])
+ >>> G.add_nodes_from(H)
+
+ In addition to strings and integers any hashable Python object
+ (except None) can represent a node, e.g. a customized node object,
+ or even another Graph.
+
+ >>> G.add_node(H)
+
+ **Edges:**
+
+ G can also be grown by adding edges.
+
+ Add one edge,
+
+ >>> G.add_edge(1, 2)
+
+ a list of edges,
+
+ >>> G.add_edges_from([(1,2),(1,3)])
+
+ or a collection of edges,
+
+ >>> G.add_edges_from(H.edges())
+
+ If some edges connect nodes not yet in the graph, the nodes
+ are added automatically. If an edge already exists, an additional
+ edge is created and stored using a key to identify the edge.
+ By default the key is the lowest unused integer.
+
+ >>> G.add_edges_from([(4,5,dict(route=282)), (4,5,dict(route=37))])
+ >>> G[4]
+ {5: {0: {}, 1: {'route': 282}, 2: {'route': 37}}}
+
+ **Attributes:**
+
+ Each graph, node, and edge can hold key/value attribute pairs
+ in an associated attribute dictionary (the keys must be hashable).
+ By default these are empty, but can be added or changed using
+ add_edge, add_node or direct manipulation of the attribute
+ dictionaries named graph, node and edge respectively.
+
+ >>> G = nx.MultiDiGraph(day="Friday")
+ >>> G.graph
+ {'day': 'Friday'}
+
+ Add node attributes using add_node(), add_nodes_from() or G.node
+
+ >>> G.add_node(1, time='5pm')
+ >>> G.add_nodes_from([3], time='2pm')
+ >>> G.node[1]
+ {'time': '5pm'}
+ >>> G.node[1]['room'] = 714
+ >>> del G.node[1]['room'] # remove attribute
+ >>> G.nodes(data=True)
+ [(1, {'time': '5pm'}), (3, {'time': '2pm'})]
+
+ Warning: adding a node to G.node does not add it to the graph.
+
+ Add edge attributes using add_edge(), add_edges_from(), subscript
+ notation, or G.edge.
+
+ >>> G.add_edge(1, 2, weight=4.7 )
+ >>> G.add_edges_from([(3,4),(4,5)], color='red')
+ >>> G.add_edges_from([(1,2,{'color':'blue'}), (2,3,{'weight':8})])
+ >>> G[1][2][0]['weight'] = 4.7
+ >>> G.edge[1][2][0]['weight'] = 4
+
+ **Shortcuts:**
+
+ Many common graph features allow python syntax to speed reporting.
+
+ >>> 1 in G # check if node in graph
+ True
+ >>> [n for n in G if n<3] # iterate through nodes
+ [1, 2]
+ >>> len(G) # number of nodes in graph
+ 5
+ >>> G[1] # adjacency dict keyed by neighbor to edge attributes
+ ... # Note: you should not change this dict manually!
+ {2: {0: {'weight': 4}, 1: {'color': 'blue'}}}
+
+ The fastest way to traverse all edges of a graph is via
+ adjacency_iter(), but the edges() method is often more convenient.
+
+ >>> for n,nbrsdict in G.adjacency_iter():
+ ... for nbr,keydict in nbrsdict.items():
+ ... for key,eattr in keydict.items():
+ ... if 'weight' in eattr:
+ ... (n,nbr,eattr['weight'])
+ (1, 2, 4)
+ (2, 3, 8)
+ >>> [ (u,v,edata['weight']) for u,v,edata in G.edges(data=True) if 'weight' in edata ]
+ [(1, 2, 4), (2, 3, 8)]
+
+ **Reporting:**
+
+ Simple graph information is obtained using methods.
+ Iterator versions of many reporting methods exist for efficiency.
+ Methods exist for reporting nodes(), edges(), neighbors() and degree()
+ as well as the number of nodes and edges.
+
+ For details on these and other miscellaneous methods, see below.
+ """
+ def add_edge(self, u, v, key=None, attr_dict=None, **attr):
+ """Add an edge between u and v.
+
+ The nodes u and v will be automatically added if they are
+ not already in the graph.
+
+ Edge attributes can be specified with keywords or by providing
+ a dictionary with key/value pairs. See examples below.
+
+ Parameters
+ ----------
+ u,v : nodes
+ Nodes can be, for example, strings or numbers.
+ Nodes must be hashable (and not None) Python objects.
+ key : hashable identifier, optional (default=lowest unused integer)
+ Used to distinguish multiedges between a pair of nodes.
+ attr_dict : dictionary, optional (default= no attributes)
+ Dictionary of edge attributes. Key/value pairs will
+ update existing data associated with the edge.
+ attr : keyword arguments, optional
+ Edge data (or labels or objects) can be assigned using
+ keyword arguments.
+
+ See Also
+ --------
+ add_edges_from : add a collection of edges
+
+ Notes
+ -----
+ To replace/update edge data, use the optional key argument
+ to identify a unique edge. Otherwise a new edge will be created.
+
+ NetworkX algorithms designed for weighted graphs cannot use
+ multigraphs directly because it is not clear how to handle
+ multiedge weights. Convert to Graph using edge attribute
+ 'weight' to enable weighted graph algorithms.
+
+ Examples
+ --------
+ The following all add the edge e=(1,2) to graph G:
+
+ >>> G = nx.MultiDiGraph()
+ >>> e = (1,2)
+ >>> G.add_edge(1, 2) # explicit two-node form
+ >>> G.add_edge(*e) # single edge as tuple of two nodes
+ >>> G.add_edges_from( [(1,2)] ) # add edges from iterable container
+
+ Associate data to edges using keywords:
+
+ >>> G.add_edge(1, 2, weight=3)
+ >>> G.add_edge(1, 2, key=0, weight=4) # update data for key=0
+ >>> G.add_edge(1, 3, weight=7, capacity=15, length=342.7)
+ """
+ # set up attribute dict
+ if attr_dict is None:
+ attr_dict=attr
+ else:
+ try:
+ attr_dict.update(attr)
+ except AttributeError:
+ raise NetworkXError(\
+ "The attr_dict argument must be a dictionary.")
+ # add nodes
+ if u not in self.succ:
+ self.succ[u] = {}
+ self.pred[u] = {}
+ self.node[u] = {}
+ if v not in self.succ:
+ self.succ[v] = {}
+ self.pred[v] = {}
+ self.node[v] = {}
+ if v in self.succ[u]:
+ keydict=self.adj[u][v]
+ if key is None:
+ # find a unique integer key
+ # other methods might be better here?
+ key=len(keydict)
+ while key in keydict:
+ key+=1
+ datadict=keydict.get(key,{})
+ datadict.update(attr_dict)
+ keydict[key]=datadict
+ else:
+ # selfloops work this way without special treatment
+ if key is None:
+ key=0
+ datadict={}
+ datadict.update(attr_dict)
+ keydict={key:datadict}
+ self.succ[u][v] = keydict
+ self.pred[v][u] = keydict
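+ # Illustration: parallel edges live under distinct keys in one keydict
+ # shared by succ[u][v] and pred[v][u]. On a fresh MultiDiGraph,
+ # G.add_edge(1, 2); G.add_edge(1, 2, weight=3)
+ # leaves G.succ[1][2] == {0: {}, 1: {'weight': 3}}.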
+
+ def remove_edge(self, u, v, key=None):
+ """Remove an edge between u and v.
+
+ Parameters
+ ----------
+ u,v: nodes
+ Remove an edge between nodes u and v.
+ key : hashable identifier, optional (default=None)
+ Used to distinguish multiple edges between a pair of nodes.
+ If None, remove a single (arbitrary) edge between u and v.
+
+ Raises
+ ------
+ NetworkXError
+ If there is not an edge between u and v, or
+ if there is no edge with the specified key.
+
+ See Also
+ --------
+ remove_edges_from : remove a collection of edges
+
+ Examples
+ --------
+ >>> G = nx.MultiDiGraph()
+ >>> G.add_path([0,1,2,3])
+ >>> G.remove_edge(0,1)
+ >>> e = (1,2)
+ >>> G.remove_edge(*e) # unpacks e from an edge tuple
+
+ For multiple edges
+
+ >>> G = nx.MultiDiGraph()
+ >>> G.add_edges_from([(1,2),(1,2),(1,2)])
+ >>> G.remove_edge(1,2) # remove a single (arbitrary) edge
+
+ For edges with keys
+
+ >>> G = nx.MultiDiGraph()
+ >>> G.add_edge(1,2,key='first')
+ >>> G.add_edge(1,2,key='second')
+ >>> G.remove_edge(1,2,key='second')
+
+ """
+ try:
+ d=self.adj[u][v]
+ except (KeyError):
+ raise NetworkXError(
+ "The edge %s-%s is not in the graph."%(u,v))
+ # remove the edge with specified data
+ if key is None:
+ d.popitem()
+ else:
+ try:
+ del d[key]
+ except (KeyError):
+ raise NetworkXError(
+ "The edge %s-%s with key %s is not in the graph."%(u,v,key))
+ if len(d)==0:
+ # remove the key entries if last edge
+ del self.succ[u][v]
+ del self.pred[v][u]
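+ # Note: with key=None, d.popitem() drops one arbitrary parallel edge;
+ # the u->v entry itself is removed only once its keydict is empty.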
+
+
+ def edges_iter(self, nbunch=None, data=False, keys=False):
+ """Return an iterator over the edges.
+
+ Edges are returned as tuples with optional data and keys
+ in the order (node, neighbor, key, data).
+
+ Parameters
+ ----------
+ nbunch : iterable container, optional (default= all nodes)
+ A container of nodes. The container will be iterated
+ through once.
+ data : bool, optional (default=False)
+ If True, return edge attribute dict with each edge.
+ keys : bool, optional (default=False)
+ If True, return edge keys with each edge.
+
+ Returns
+ -------
+ edge_iter : iterator
+ An iterator of (u,v), (u,v,d) or (u,v,key,d) tuples of edges.
+
+ See Also
+ --------
+ edges : return a list of edges
+
+ Notes
+ -----
+ Nodes in nbunch that are not in the graph will be (quietly) ignored.
+ For directed graphs this returns the out-edges.
+
+ Examples
+ --------
+ >>> G = nx.MultiDiGraph()
+ >>> G.add_path([0,1,2,3])
+ >>> [e for e in G.edges_iter()]
+ [(0, 1), (1, 2), (2, 3)]
+ >>> list(G.edges_iter(data=True)) # default data is {} (empty dict)
+ [(0, 1, {}), (1, 2, {}), (2, 3, {})]
+ >>> list(G.edges_iter([0,2]))
+ [(0, 1), (2, 3)]
+ >>> list(G.edges_iter(0))
+ [(0, 1)]
+
+ """
+ if nbunch is None:
+ nodes_nbrs = self.adj.items()
+ else:
+ nodes_nbrs=((n,self.adj[n]) for n in self.nbunch_iter(nbunch))
+ if data:
+ for n,nbrs in nodes_nbrs:
+ for nbr,keydict in nbrs.items():
+ for key,data in keydict.items():
+ if keys:
+ yield (n,nbr,key,data)
+ else:
+ yield (n,nbr,data)
+ else:
+ for n,nbrs in nodes_nbrs:
+ for nbr,keydict in nbrs.items():
+ for key,data in keydict.items():
+ if keys:
+ yield (n,nbr,key)
+ else:
+ yield (n,nbr)
+
+ # alias out_edges_iter to edges_iter
+ out_edges_iter=edges_iter
+
+ def out_edges(self, nbunch=None, keys=False, data=False):
+ """Return a list of the outgoing edges.
+
+ Edges are returned as tuples with optional data and keys
+ in the order (node, neighbor, key, data).
+
+ Parameters
+ ----------
+ nbunch : iterable container, optional (default= all nodes)
+ A container of nodes. The container will be iterated
+ through once.
+ data : bool, optional (default=False)
+ If True, return edge attribute dict with each edge.
+ keys : bool, optional (default=False)
+ If True, return edge keys with each edge.
+
+ Returns
+ -------
+ out_edges : list
+ A list of (u,v), (u,v,d) or (u,v,key,d) tuples of edges.
+
+ Notes
+ -----
+ Nodes in nbunch that are not in the graph will be (quietly) ignored.
+ For directed graphs edges() is the same as out_edges().
+
+ See Also
+ --------
+ in_edges: return a list of incoming edges
+ """
+ return list(self.out_edges_iter(nbunch, keys=keys, data=data))
+
+
+ def in_edges_iter(self, nbunch=None, data=False, keys=False):
+ """Return an iterator over the incoming edges.
+
+ Parameters
+ ----------
+ nbunch : iterable container, optional (default= all nodes)
+ A container of nodes. The container will be iterated
+ through once.
+ data : bool, optional (default=False)
+ If True, return edge attribute dict with each edge.
+ keys : bool, optional (default=False)
+ If True, return edge keys with each edge.
+
+ Returns
+ -------
+ in_edge_iter : iterator
+ An iterator of (u,v), (u,v,d) or (u,v,key,d) tuples of edges.
+
+ See Also
+ --------
+ edges_iter : return an iterator of edges
+ """
+ if nbunch is None:
+ nodes_nbrs=self.pred.items()
+ else:
+ nodes_nbrs=((n,self.pred[n]) for n in self.nbunch_iter(nbunch))
+ if data:
+ for n,nbrs in nodes_nbrs:
+ for nbr,keydict in nbrs.items():
+ for key,data in keydict.items():
+ if keys:
+ yield (nbr,n,key,data)
+ else:
+ yield (nbr,n,data)
+ else:
+ for n,nbrs in nodes_nbrs:
+ for nbr,keydict in nbrs.items():
+ for key,data in keydict.items():
+ if keys:
+ yield (nbr,n,key)
+ else:
+ yield (nbr,n)
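+ # This mirrors edges_iter but walks self.pred and yields tuples as
+ # (predecessor, node, ...), so each reported edge still runs from
+ # source to target.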
+
+ def in_edges(self, nbunch=None, keys=False, data=False):
+ """Return a list of the incoming edges.
+
+ Parameters
+ ----------
+ nbunch : iterable container, optional (default= all nodes)
+ A container of nodes. The container will be iterated
+ through once.
+ data : bool, optional (default=False)
+ If True, return edge attribute dict with each edge.
+ keys : bool, optional (default=False)
+ If True, return edge keys with each edge.
+
+ Returns
+ -------
+ in_edges : list
+ A list of (u,v), (u,v,d) or (u,v,key,d) tuples of edges.
+
+ See Also
+ --------
+ out_edges: return a list of outgoing edges
+ """
+ return list(self.in_edges_iter(nbunch, keys=keys, data=data))
+
+
+ def degree_iter(self, nbunch=None, weight=None):
+ """Return an iterator for (node, degree).
+
+ The node degree is the number of edges adjacent to the node.
+
+ Parameters
+ ----------
+ nbunch : iterable container, optional (default=all nodes)
+ A container of nodes. The container will be iterated
+ through once.
+
+ weight : string or None, optional (default=None)
+ The edge attribute that holds the numerical value used
+ as a weight. If None, then each edge has weight 1.
+ The degree is the sum of the edge weights.
+
+ Returns
+ -------
+ nd_iter : an iterator
+ The iterator returns two-tuples of (node, degree).
+
+ See Also
+ --------
+ degree
+
+ Examples
+ --------
+ >>> G = nx.MultiDiGraph()
+ >>> G.add_path([0,1,2,3])
+ >>> list(G.degree_iter(0)) # node 0 with degree 1
+ [(0, 1)]
+ >>> list(G.degree_iter([0,1]))
+ [(0, 1), (1, 2)]
+
+ """
+ if nbunch is None:
+ nodes_nbrs=zip(iter(self.succ.items()),iter(self.pred.items()))
+ else:
+ nodes_nbrs=zip(
+ ((n,self.succ[n]) for n in self.nbunch_iter(nbunch)),
+ ((n,self.pred[n]) for n in self.nbunch_iter(nbunch)))
+
+ if weight is None:
+ for (n,succ),(n2,pred) in nodes_nbrs:
+ indeg = sum([len(data) for data in pred.values()])
+ outdeg = sum([len(data) for data in succ.values()])
+ yield (n, indeg + outdeg)
+ else:
+ # edge weighted graph - degree is sum of nbr edge weights
+ for (n,succ),(n2,pred) in nodes_nbrs:
+ deg = sum([d.get(weight,1)
+ for data in pred.values()
+ for d in data.values()])
+ deg += sum([d.get(weight,1)
+ for data in succ.values()
+ for d in data.values()])
+ yield (n, deg)
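+ # Multiedge sketch: every parallel edge counts separately, e.g. after
+ # G.add_edge(1, 2); G.add_edge(1, 2) on a fresh MultiDiGraph,
+ # dict(G.degree_iter())[1] == 2 (two out-edges at node 1).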
+
+
+ def in_degree_iter(self, nbunch=None, weight=None):
+ """Return an iterator for (node, in-degree).
+
+ The node in-degree is the number of edges pointing in to the node.
+
+ Parameters
+ ----------
+ nbunch : iterable container, optional (default=all nodes)
+ A container of nodes. The container will be iterated
+ through once.
+
+ weight : string or None, optional (default=None)
+ The edge attribute that holds the numerical value used
+ as a weight. If None, then each edge has weight 1.
+ The degree is the sum of the edge weights adjacent to the node.
+
+ Returns
+ -------
+ nd_iter : an iterator
+ The iterator returns two-tuples of (node, in-degree).
+
+ See Also
+ --------
+ degree, in_degree, out_degree, out_degree_iter
+
+ Examples
+ --------
+ >>> G = nx.MultiDiGraph()
+ >>> G.add_path([0,1,2,3])
+        >>> list(G.in_degree_iter(0)) # node 0 with in-degree 0
+ [(0, 0)]
+ >>> list(G.in_degree_iter([0,1]))
+ [(0, 0), (1, 1)]
+
+ """
+ if nbunch is None:
+ nodes_nbrs=self.pred.items()
+ else:
+ nodes_nbrs=((n,self.pred[n]) for n in self.nbunch_iter(nbunch))
+
+ if weight is None:
+ for n,nbrs in nodes_nbrs:
+ yield (n, sum([len(data) for data in nbrs.values()]) )
+ else:
+ # edge weighted graph - degree is sum of nbr edge weights
+ for n,pred in nodes_nbrs:
+ deg = sum([d.get(weight,1)
+ for data in pred.values()
+ for d in data.values()])
+ yield (n, deg)
+
+
+ def out_degree_iter(self, nbunch=None, weight=None):
+ """Return an iterator for (node, out-degree).
+
+ The node out-degree is the number of edges pointing out of the node.
+
+ Parameters
+ ----------
+ nbunch : iterable container, optional (default=all nodes)
+ A container of nodes. The container will be iterated
+ through once.
+
+ weight : string or None, optional (default=None)
+ The edge attribute that holds the numerical value used
+ as a weight. If None, then each edge has weight 1.
+ The degree is the sum of the edge weights.
+
+ Returns
+ -------
+ nd_iter : an iterator
+ The iterator returns two-tuples of (node, out-degree).
+
+ See Also
+ --------
+ degree, in_degree, out_degree, in_degree_iter
+
+ Examples
+ --------
+ >>> G = nx.MultiDiGraph()
+ >>> G.add_path([0,1,2,3])
+        >>> list(G.out_degree_iter(0)) # node 0 with out-degree 1
+ [(0, 1)]
+ >>> list(G.out_degree_iter([0,1]))
+ [(0, 1), (1, 1)]
+
+ """
+ if nbunch is None:
+ nodes_nbrs=self.succ.items()
+ else:
+ nodes_nbrs=((n,self.succ[n]) for n in self.nbunch_iter(nbunch))
+
+ if weight is None:
+ for n,nbrs in nodes_nbrs:
+ yield (n, sum([len(data) for data in nbrs.values()]) )
+ else:
+ for n,succ in nodes_nbrs:
+ deg = sum([d.get(weight,1)
+ for data in succ.values()
+ for d in data.values()])
+ yield (n, deg)
+
+ def is_multigraph(self):
+ """Return True if graph is a multigraph, False otherwise."""
+ return True
+
+ def is_directed(self):
+ """Return True if graph is directed, False otherwise."""
+ return True
+
+ def to_directed(self):
+ """Return a directed copy of the graph.
+
+ Returns
+ -------
+ G : MultiDiGraph
+ A deepcopy of the graph.
+
+ Notes
+ -----
+        This returns a "deepcopy" of the edge, node, and
+        graph attributes which attempts to completely copy
+        all of the data and references.
+
+        This is in contrast to the similar G=MultiDiGraph(D) which
+        returns a shallow copy of the data.
+
+ See the Python copy module for more information on shallow
+ and deep copies, http://docs.python.org/library/copy.html.
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or MultiGraph, etc
+ >>> G.add_path([0,1])
+ >>> H = G.to_directed()
+ >>> H.edges()
+ [(0, 1), (1, 0)]
+
+ If already directed, return a (deep) copy
+
+ >>> G = nx.MultiDiGraph()
+ >>> G.add_path([0,1])
+ >>> H = G.to_directed()
+ >>> H.edges()
+ [(0, 1)]
+ """
+ return deepcopy(self)
+
+ def to_undirected(self, reciprocal=False):
+ """Return an undirected representation of the digraph.
+
+ Parameters
+ ----------
+ reciprocal : bool (optional)
+ If True only keep edges that appear in both directions
+ in the original digraph.
+
+ Returns
+ -------
+ G : MultiGraph
+ An undirected graph with the same name and nodes and
+ with edge (u,v,data) if either (u,v,data) or (v,u,data)
+ is in the digraph. If both edges exist in digraph and
+ their edge data is different, only one edge is created
+ with an arbitrary choice of which edge data to use.
+ You must check and correct for this manually if desired.
+
+ Notes
+ -----
+ This returns a "deepcopy" of the edge, node, and
+ graph attributes which attempts to completely copy
+ all of the data and references.
+
+ This is in contrast to the similar D=DiGraph(G) which returns a
+ shallow copy of the data.
+
+ See the Python copy module for more information on shallow
+ and deep copies, http://docs.python.org/library/copy.html.
+ """
+ H=MultiGraph()
+ H.name=self.name
+ H.add_nodes_from(self)
+ if reciprocal is True:
+ H.add_edges_from( (u,v,key,deepcopy(data))
+ for u,nbrs in self.adjacency_iter()
+ for v,keydict in nbrs.items()
+ for key,data in keydict.items()
+ if self.has_edge(v,u,key))
+ else:
+ H.add_edges_from( (u,v,key,deepcopy(data))
+ for u,nbrs in self.adjacency_iter()
+ for v,keydict in nbrs.items()
+ for key,data in keydict.items())
+ H.graph=deepcopy(self.graph)
+ H.node=deepcopy(self.node)
+ return H
+
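+    # A minimal sketch of reciprocal=True above: only edges present in both
+    # directions (with the same key) survive the conversion:
+    #   >>> G = nx.MultiDiGraph([(0, 1), (1, 0), (1, 2)])
+    #   >>> sorted(G.to_undirected(reciprocal=True).edges())
+    #   [(0, 1)]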
+ def subgraph(self, nbunch):
+ """Return the subgraph induced on nodes in nbunch.
+
+ The induced subgraph of the graph contains the nodes in nbunch
+ and the edges between those nodes.
+
+ Parameters
+ ----------
+ nbunch : list, iterable
+ A container of nodes which will be iterated through once.
+
+ Returns
+ -------
+ G : Graph
+ A subgraph of the graph with the same edge attributes.
+
+ Notes
+ -----
+ The graph, edge or node attributes just point to the original graph.
+ So changes to the node or edge structure will not be reflected in
+ the original graph while changes to the attributes will.
+
+ To create a subgraph with its own copy of the edge/node attributes use:
+ nx.Graph(G.subgraph(nbunch))
+
+ If edge attributes are containers, a deep copy can be obtained using:
+ G.subgraph(nbunch).copy()
+
+ For an inplace reduction of a graph to a subgraph you can remove nodes:
+        G.remove_nodes_from([n for n in G if n not in set(nbunch)])
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_path([0,1,2,3])
+ >>> H = G.subgraph([0,1,2])
+ >>> H.edges()
+ [(0, 1), (1, 2)]
+ """
+ bunch = self.nbunch_iter(nbunch)
+ # create new graph and copy subgraph into it
+ H = self.__class__()
+ # copy node and attribute dictionaries
+ for n in bunch:
+ H.node[n]=self.node[n]
+ # namespace shortcuts for speed
+ H_succ=H.succ
+ H_pred=H.pred
+ self_succ=self.succ
+ self_pred=self.pred
+ # add nodes
+ for n in H:
+ H_succ[n]={}
+ H_pred[n]={}
+ # add edges
+ for u in H_succ:
+ Hnbrs=H_succ[u]
+ for v,edgedict in self_succ[u].items():
+ if v in H_succ:
+ # add both representations of edge: u-v and v-u
+ # they share the same edgedict
+ ed=edgedict.copy()
+ Hnbrs[v]=ed
+ H_pred[v][u]=ed
+ H.graph=self.graph
+ return H
+
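+    # A minimal sketch of the shared edgedict noted above: subgraph edge
+    # attributes point back at the parent graph's data:
+    #   >>> G = nx.MultiDiGraph()
+    #   >>> G.add_edge(0, 1, weight=1)
+    #   >>> H = G.subgraph([0, 1])
+    #   >>> H[0][1][0]['weight'] = 9
+    #   >>> G[0][1][0]['weight']
+    #   9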
+ def reverse(self, copy=True):
+ """Return the reverse of the graph.
+
+ The reverse is a graph with the same nodes and edges
+ but with the directions of the edges reversed.
+
+ Parameters
+ ----------
+        copy : bool, optional (default=True)
+            If True, return a new MultiDiGraph holding the reversed edges.
+            If False, the reverse graph is created in place using
+            the original graph (this changes the original graph).
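+
+        Examples
+        --------
+        A minimal sketch (assumes the default integer keys):
+
+        >>> G = nx.MultiDiGraph([(0, 1), (1, 2)])
+        >>> sorted(G.reverse().edges())
+        [(1, 0), (2, 1)]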
+ """
+ if copy:
+ H = self.__class__(name="Reverse of (%s)"%self.name)
+ H.add_nodes_from(self)
+ H.add_edges_from( (v,u,k,deepcopy(d)) for u,v,k,d
+ in self.edges(keys=True, data=True) )
+ H.graph=deepcopy(self.graph)
+ H.node=deepcopy(self.node)
+ else:
+ self.pred,self.succ=self.succ,self.pred
+ self.adj=self.succ
+ H=self
+ return H
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/classes/multigraph.py b/lib/python2.7/site-packages/setoolsgui/networkx/classes/multigraph.py
new file mode 100644
index 0000000..63bdf0f
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/classes/multigraph.py
@@ -0,0 +1,966 @@
+"""Base class for MultiGraph."""
+# Copyright (C) 2004-2011 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+from copy import deepcopy
+import networkx as nx
+from networkx.classes.graph import Graph
+from networkx import NetworkXError
+__author__ = """\n""".join(['Aric Hagberg (hagberg@lanl.gov)',
+ 'Pieter Swart (swart@lanl.gov)',
+                            'Dan Schult (dschult@colgate.edu)'])
+
+class MultiGraph(Graph):
+ """
+ An undirected graph class that can store multiedges.
+
+ Multiedges are multiple edges between two nodes. Each edge
+ can hold optional data or attributes.
+
+ A MultiGraph holds undirected edges. Self loops are allowed.
+
+ Nodes can be arbitrary (hashable) Python objects with optional
+ key/value attributes.
+
+ Edges are represented as links between nodes with optional
+ key/value attributes.
+
+ Parameters
+ ----------
+ data : input graph
+ Data to initialize graph. If data=None (default) an empty
+ graph is created. The data can be an edge list, or any
+ NetworkX graph object. If the corresponding optional Python
+ packages are installed the data can also be a NumPy matrix
+ or 2d ndarray, a SciPy sparse matrix, or a PyGraphviz graph.
+ attr : keyword arguments, optional (default= no attributes)
+ Attributes to add to graph as key=value pairs.
+
+ See Also
+ --------
+ Graph
+ DiGraph
+ MultiDiGraph
+
+ Examples
+ --------
+ Create an empty graph structure (a "null graph") with no nodes and
+ no edges.
+
+ >>> G = nx.MultiGraph()
+
+ G can be grown in several ways.
+
+ **Nodes:**
+
+ Add one node at a time:
+
+ >>> G.add_node(1)
+
+ Add the nodes from any container (a list, dict, set or
+ even the lines from a file or the nodes from another graph).
+
+ >>> G.add_nodes_from([2,3])
+ >>> G.add_nodes_from(range(100,110))
+ >>> H=nx.Graph()
+ >>> H.add_path([0,1,2,3,4,5,6,7,8,9])
+ >>> G.add_nodes_from(H)
+
+ In addition to strings and integers any hashable Python object
+ (except None) can represent a node, e.g. a customized node object,
+ or even another Graph.
+
+ >>> G.add_node(H)
+
+ **Edges:**
+
+ G can also be grown by adding edges.
+
+ Add one edge,
+
+ >>> G.add_edge(1, 2)
+
+ a list of edges,
+
+ >>> G.add_edges_from([(1,2),(1,3)])
+
+ or a collection of edges,
+
+ >>> G.add_edges_from(H.edges())
+
+ If some edges connect nodes not yet in the graph, the nodes
+ are added automatically. If an edge already exists, an additional
+ edge is created and stored using a key to identify the edge.
+ By default the key is the lowest unused integer.
+
+ >>> G.add_edges_from([(4,5,dict(route=282)), (4,5,dict(route=37))])
+ >>> G[4]
+ {3: {0: {}}, 5: {0: {}, 1: {'route': 282}, 2: {'route': 37}}}
+
+ **Attributes:**
+
+ Each graph, node, and edge can hold key/value attribute pairs
+ in an associated attribute dictionary (the keys must be hashable).
+ By default these are empty, but can be added or changed using
+ add_edge, add_node or direct manipulation of the attribute
+ dictionaries named graph, node and edge respectively.
+
+ >>> G = nx.MultiGraph(day="Friday")
+ >>> G.graph
+ {'day': 'Friday'}
+
+ Add node attributes using add_node(), add_nodes_from() or G.node
+
+ >>> G.add_node(1, time='5pm')
+ >>> G.add_nodes_from([3], time='2pm')
+ >>> G.node[1]
+ {'time': '5pm'}
+ >>> G.node[1]['room'] = 714
+ >>> del G.node[1]['room'] # remove attribute
+ >>> G.nodes(data=True)
+ [(1, {'time': '5pm'}), (3, {'time': '2pm'})]
+
+ Warning: adding a node to G.node does not add it to the graph.
+
+ Add edge attributes using add_edge(), add_edges_from(), subscript
+ notation, or G.edge.
+
+ >>> G.add_edge(1, 2, weight=4.7 )
+ >>> G.add_edges_from([(3,4),(4,5)], color='red')
+ >>> G.add_edges_from([(1,2,{'color':'blue'}), (2,3,{'weight':8})])
+ >>> G[1][2][0]['weight'] = 4.7
+ >>> G.edge[1][2][0]['weight'] = 4
+
+ **Shortcuts:**
+
+ Many common graph features allow python syntax to speed reporting.
+
+ >>> 1 in G # check if node in graph
+ True
+ >>> [n for n in G if n<3] # iterate through nodes
+ [1, 2]
+ >>> len(G) # number of nodes in graph
+ 5
+ >>> G[1] # adjacency dict keyed by neighbor to edge attributes
+ ... # Note: you should not change this dict manually!
+ {2: {0: {'weight': 4}, 1: {'color': 'blue'}}}
+
+ The fastest way to traverse all edges of a graph is via
+ adjacency_iter(), but the edges() method is often more convenient.
+
+ >>> for n,nbrsdict in G.adjacency_iter():
+ ... for nbr,keydict in nbrsdict.items():
+ ... for key,eattr in keydict.items():
+ ... if 'weight' in eattr:
+ ... (n,nbr,eattr['weight'])
+ (1, 2, 4)
+ (2, 1, 4)
+ (2, 3, 8)
+ (3, 2, 8)
+ >>> [ (u,v,edata['weight']) for u,v,edata in G.edges(data=True) if 'weight' in edata ]
+ [(1, 2, 4), (2, 3, 8)]
+
+ **Reporting:**
+
+ Simple graph information is obtained using methods.
+ Iterator versions of many reporting methods exist for efficiency.
+ Methods exist for reporting nodes(), edges(), neighbors() and degree()
+ as well as the number of nodes and edges.
+
+ For details on these and other miscellaneous methods, see below.
+ """
+ def add_edge(self, u, v, key=None, attr_dict=None, **attr):
+ """Add an edge between u and v.
+
+ The nodes u and v will be automatically added if they are
+ not already in the graph.
+
+ Edge attributes can be specified with keywords or by providing
+ a dictionary with key/value pairs. See examples below.
+
+ Parameters
+ ----------
+ u,v : nodes
+ Nodes can be, for example, strings or numbers.
+ Nodes must be hashable (and not None) Python objects.
+ key : hashable identifier, optional (default=lowest unused integer)
+ Used to distinguish multiedges between a pair of nodes.
+ attr_dict : dictionary, optional (default= no attributes)
+ Dictionary of edge attributes. Key/value pairs will
+ update existing data associated with the edge.
+ attr : keyword arguments, optional
+ Edge data (or labels or objects) can be assigned using
+ keyword arguments.
+
+ See Also
+ --------
+ add_edges_from : add a collection of edges
+
+ Notes
+ -----
+ To replace/update edge data, use the optional key argument
+ to identify a unique edge. Otherwise a new edge will be created.
+
+ NetworkX algorithms designed for weighted graphs cannot use
+ multigraphs directly because it is not clear how to handle
+ multiedge weights. Convert to Graph using edge attribute
+ 'weight' to enable weighted graph algorithms.
+
+ Examples
+ --------
+ The following all add the edge e=(1,2) to graph G:
+
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> e = (1,2)
+ >>> G.add_edge(1, 2) # explicit two-node form
+ >>> G.add_edge(*e) # single edge as tuple of two nodes
+ >>> G.add_edges_from( [(1,2)] ) # add edges from iterable container
+
+ Associate data to edges using keywords:
+
+ >>> G.add_edge(1, 2, weight=3)
+ >>> G.add_edge(1, 2, key=0, weight=4) # update data for key=0
+ >>> G.add_edge(1, 3, weight=7, capacity=15, length=342.7)
+ """
+ # set up attribute dict
+ if attr_dict is None:
+ attr_dict=attr
+ else:
+ try:
+ attr_dict.update(attr)
+ except AttributeError:
+ raise NetworkXError(\
+ "The attr_dict argument must be a dictionary.")
+ # add nodes
+ if u not in self.adj:
+ self.adj[u] = {}
+ self.node[u] = {}
+ if v not in self.adj:
+ self.adj[v] = {}
+ self.node[v] = {}
+ if v in self.adj[u]:
+ keydict=self.adj[u][v]
+ if key is None:
+ # find a unique integer key
+ # other methods might be better here?
+ key=len(keydict)
+ while key in keydict:
+ key+=1
+ datadict=keydict.get(key,{})
+ datadict.update(attr_dict)
+ keydict[key]=datadict
+ else:
+ # selfloops work this way without special treatment
+ if key is None:
+ key=0
+ datadict={}
+ datadict.update(attr_dict)
+ keydict={key:datadict}
+ self.adj[u][v] = keydict
+ self.adj[v][u] = keydict
+
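+    # A minimal sketch of the key auto-selection above (default integer keys):
+    #   >>> G = nx.MultiGraph()
+    #   >>> G.add_edge(1, 2)          # stored under key 0
+    #   >>> G.add_edge(1, 2)          # lowest unused integer gives key 1
+    #   >>> G.add_edge(1, 2, key='x')
+    #   >>> sorted(G[1][2].keys())
+    #   [0, 1, 'x']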
+
+ def add_edges_from(self, ebunch, attr_dict=None, **attr):
+ """Add all the edges in ebunch.
+
+ Parameters
+ ----------
+ ebunch : container of edges
+ Each edge given in the container will be added to the
+ graph. The edges can be:
+
+ - 2-tuples (u,v) or
+ - 3-tuples (u,v,d) for an edge attribute dict d, or
+ - 4-tuples (u,v,k,d) for an edge identified by key k
+
+ attr_dict : dictionary, optional (default= no attributes)
+ Dictionary of edge attributes. Key/value pairs will
+ update existing data associated with each edge.
+ attr : keyword arguments, optional
+ Edge data (or labels or objects) can be assigned using
+ keyword arguments.
+
+
+ See Also
+ --------
+ add_edge : add a single edge
+ add_weighted_edges_from : convenient way to add weighted edges
+
+ Notes
+ -----
+ Adding the same edge twice has no effect but any edge data
+ will be updated when each duplicate edge is added.
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_edges_from([(0,1),(1,2)]) # using a list of edge tuples
+ >>> e = zip(range(0,3),range(1,4))
+ >>> G.add_edges_from(e) # Add the path graph 0-1-2-3
+
+ Associate data to edges
+
+ >>> G.add_edges_from([(1,2),(2,3)], weight=3)
+ >>> G.add_edges_from([(3,4),(1,4)], label='WN2898')
+ """
+ # set up attribute dict
+ if attr_dict is None:
+ attr_dict=attr
+ else:
+ try:
+ attr_dict.update(attr)
+ except AttributeError:
+ raise NetworkXError(\
+ "The attr_dict argument must be a dictionary.")
+ # process ebunch
+ for e in ebunch:
+ ne=len(e)
+ if ne==4:
+ u,v,key,dd = e
+ elif ne==3:
+ u,v,dd = e
+ key=None
+ elif ne==2:
+ u,v = e
+ dd = {}
+ key=None
+ else:
+ raise NetworkXError(\
+ "Edge tuple %s must be a 2-tuple, 3-tuple or 4-tuple."%(e,))
+ if u in self.adj:
+ keydict=self.adj[u].get(v,{})
+ else:
+ keydict={}
+ if key is None:
+ # find a unique integer key
+ # other methods might be better here?
+ key=len(keydict)
+ while key in keydict:
+ key+=1
+ datadict=keydict.get(key,{})
+ datadict.update(attr_dict)
+ datadict.update(dd)
+ self.add_edge(u,v,key=key,attr_dict=datadict)
+
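+    # A minimal sketch of the tuple forms accepted above:
+    #   >>> G = nx.MultiGraph()
+    #   >>> G.add_edges_from([(0, 1), (0, 1, {'w': 2}), (0, 1, 'k', {'w': 3})])
+    #   >>> sorted(G[0][1].keys())
+    #   [0, 1, 'k']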
+
+ def remove_edge(self, u, v, key=None):
+ """Remove an edge between u and v.
+
+ Parameters
+ ----------
+ u,v: nodes
+ Remove an edge between nodes u and v.
+ key : hashable identifier, optional (default=None)
+ Used to distinguish multiple edges between a pair of nodes.
+            If None remove a single (arbitrary) edge between u and v.
+
+ Raises
+ ------
+ NetworkXError
+ If there is not an edge between u and v, or
+ if there is no edge with the specified key.
+
+ See Also
+ --------
+ remove_edges_from : remove a collection of edges
+
+ Examples
+ --------
+ >>> G = nx.MultiGraph()
+ >>> G.add_path([0,1,2,3])
+ >>> G.remove_edge(0,1)
+ >>> e = (1,2)
+ >>> G.remove_edge(*e) # unpacks e from an edge tuple
+
+ For multiple edges
+
+ >>> G = nx.MultiGraph() # or MultiDiGraph, etc
+ >>> G.add_edges_from([(1,2),(1,2),(1,2)])
+ >>> G.remove_edge(1,2) # remove a single (arbitrary) edge
+
+ For edges with keys
+
+ >>> G = nx.MultiGraph() # or MultiDiGraph, etc
+ >>> G.add_edge(1,2,key='first')
+ >>> G.add_edge(1,2,key='second')
+ >>> G.remove_edge(1,2,key='second')
+
+ """
+ try:
+ d=self.adj[u][v]
+ except (KeyError):
+ raise NetworkXError(
+ "The edge %s-%s is not in the graph."%(u,v))
+ # remove the edge with specified data
+ if key is None:
+ d.popitem()
+ else:
+ try:
+ del d[key]
+ except (KeyError):
+ raise NetworkXError(
+ "The edge %s-%s with key %s is not in the graph."%(u,v,key))
+ if len(d)==0:
+ # remove the key entries if last edge
+ del self.adj[u][v]
+ if u!=v: # check for selfloop
+ del self.adj[v][u]
+
+
+ def remove_edges_from(self, ebunch):
+ """Remove all edges specified in ebunch.
+
+ Parameters
+ ----------
+ ebunch: list or container of edge tuples
+ Each edge given in the list or container will be removed
+ from the graph. The edges can be:
+
+                - 2-tuples (u,v): all edges between u and v are removed.
+                - 3-tuples (u,v,key): the edge identified by key is removed.
+                - 4-tuples (u,v,key,data): data is ignored.
+
+ See Also
+ --------
+ remove_edge : remove a single edge
+
+ Notes
+ -----
+ Will fail silently if an edge in ebunch is not in the graph.
+
+ Examples
+ --------
+ >>> G = nx.MultiGraph() # or MultiDiGraph
+ >>> G.add_path([0,1,2,3])
+ >>> ebunch=[(1,2),(2,3)]
+ >>> G.remove_edges_from(ebunch)
+
+ Removing multiple copies of edges
+
+ >>> G = nx.MultiGraph()
+ >>> G.add_edges_from([(1,2),(1,2),(1,2)])
+ >>> G.remove_edges_from([(1,2),(1,2)])
+ >>> G.edges()
+ [(1, 2)]
+ >>> G.remove_edges_from([(1,2),(1,2)]) # silently ignore extra copy
+ >>> G.edges() # now empty graph
+ []
+ """
+ for e in ebunch:
+ try:
+ self.remove_edge(*e[:3])
+ except NetworkXError:
+ pass
+
+
+ def has_edge(self, u, v, key=None):
+ """Return True if the graph has an edge between nodes u and v.
+
+ Parameters
+ ----------
+ u,v : nodes
+ Nodes can be, for example, strings or numbers.
+
+ key : hashable identifier, optional (default=None)
+ If specified return True only if the edge with
+ key is found.
+
+ Returns
+ -------
+ edge_ind : bool
+ True if edge is in the graph, False otherwise.
+
+ Examples
+ --------
+ Can be called either using two nodes u,v, an edge tuple (u,v),
+ or an edge tuple (u,v,key).
+
+ >>> G = nx.MultiGraph() # or MultiDiGraph
+ >>> G.add_path([0,1,2,3])
+ >>> G.has_edge(0,1) # using two nodes
+ True
+ >>> e = (0,1)
+ >>> G.has_edge(*e) # e is a 2-tuple (u,v)
+ True
+ >>> G.add_edge(0,1,key='a')
+ >>> G.has_edge(0,1,key='a') # specify key
+ True
+ >>> e=(0,1,'a')
+ >>> G.has_edge(*e) # e is a 3-tuple (u,v,'a')
+ True
+
+        The following syntaxes are equivalent:
+
+ >>> G.has_edge(0,1)
+ True
+ >>> 1 in G[0] # though this gives KeyError if 0 not in G
+ True
+
+ """
+ try:
+ if key is None:
+ return v in self.adj[u]
+ else:
+ return key in self.adj[u][v]
+ except KeyError:
+ return False
+
+ def edges(self, nbunch=None, data=False, keys=False):
+ """Return a list of edges.
+
+ Edges are returned as tuples with optional data and keys
+ in the order (node, neighbor, key, data).
+
+ Parameters
+ ----------
+ nbunch : iterable container, optional (default= all nodes)
+ A container of nodes. The container will be iterated
+ through once.
+        data : bool, optional (default=False)
+            If True, return edge attribute dict with each edge.
+        keys : bool, optional (default=False)
+            If True, return edge keys with each edge.
+
+ Returns
+        -------
+ edge_list: list of edge tuples
+ Edges that are adjacent to any node in nbunch, or a list
+ of all edges if nbunch is not specified.
+
+ See Also
+ --------
+ edges_iter : return an iterator over the edges
+
+ Notes
+ -----
+ Nodes in nbunch that are not in the graph will be (quietly) ignored.
+ For directed graphs this returns the out-edges.
+
+ Examples
+ --------
+ >>> G = nx.MultiGraph() # or MultiDiGraph
+ >>> G.add_path([0,1,2,3])
+ >>> G.edges()
+ [(0, 1), (1, 2), (2, 3)]
+ >>> G.edges(data=True) # default edge data is {} (empty dictionary)
+ [(0, 1, {}), (1, 2, {}), (2, 3, {})]
+ >>> G.edges(keys=True) # default keys are integers
+ [(0, 1, 0), (1, 2, 0), (2, 3, 0)]
+ >>> G.edges(data=True,keys=True) # default keys are integers
+ [(0, 1, 0, {}), (1, 2, 0, {}), (2, 3, 0, {})]
+ >>> G.edges([0,3])
+ [(0, 1), (3, 2)]
+ >>> G.edges(0)
+ [(0, 1)]
+
+ """
+ return list(self.edges_iter(nbunch, data=data,keys=keys))
+
+ def edges_iter(self, nbunch=None, data=False, keys=False):
+ """Return an iterator over the edges.
+
+ Edges are returned as tuples with optional data and keys
+ in the order (node, neighbor, key, data).
+
+ Parameters
+ ----------
+ nbunch : iterable container, optional (default= all nodes)
+ A container of nodes. The container will be iterated
+ through once.
+ data : bool, optional (default=False)
+ If True, return edge attribute dict with each edge.
+ keys : bool, optional (default=False)
+ If True, return edge keys with each edge.
+
+ Returns
+ -------
+ edge_iter : iterator
+ An iterator of (u,v), (u,v,d) or (u,v,key,d) tuples of edges.
+
+ See Also
+ --------
+ edges : return a list of edges
+
+ Notes
+ -----
+ Nodes in nbunch that are not in the graph will be (quietly) ignored.
+ For directed graphs this returns the out-edges.
+
+ Examples
+ --------
+ >>> G = nx.MultiGraph() # or MultiDiGraph
+ >>> G.add_path([0,1,2,3])
+ >>> [e for e in G.edges_iter()]
+ [(0, 1), (1, 2), (2, 3)]
+ >>> list(G.edges_iter(data=True)) # default data is {} (empty dict)
+ [(0, 1, {}), (1, 2, {}), (2, 3, {})]
+        >>> list(G.edges_iter(keys=True)) # default keys are integers
+        [(0, 1, 0), (1, 2, 0), (2, 3, 0)]
+        >>> list(G.edges_iter(data=True,keys=True)) # default keys are integers
+ [(0, 1, 0, {}), (1, 2, 0, {}), (2, 3, 0, {})]
+ >>> list(G.edges_iter([0,3]))
+ [(0, 1), (3, 2)]
+ >>> list(G.edges_iter(0))
+ [(0, 1)]
+
+ """
+ seen={} # helper dict to keep track of multiply stored edges
+ if nbunch is None:
+ nodes_nbrs = self.adj.items()
+ else:
+ nodes_nbrs=((n,self.adj[n]) for n in self.nbunch_iter(nbunch))
+ if data:
+ for n,nbrs in nodes_nbrs:
+ for nbr,keydict in nbrs.items():
+ if nbr not in seen:
+ for key,data in keydict.items():
+ if keys:
+ yield (n,nbr,key,data)
+ else:
+ yield (n,nbr,data)
+ seen[n]=1
+ else:
+ for n,nbrs in nodes_nbrs:
+ for nbr,keydict in nbrs.items():
+ if nbr not in seen:
+ for key,data in keydict.items():
+ if keys:
+ yield (n,nbr,key)
+ else:
+ yield (n,nbr)
+ seen[n] = 1
+ del seen
+
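+    # A minimal sketch of the 'seen' bookkeeping above: each undirected edge
+    # is reported once per key, in a single direction:
+    #   >>> G = nx.MultiGraph([(0, 1), (0, 1)])
+    #   >>> list(G.edges_iter(keys=True))
+    #   [(0, 1, 0), (0, 1, 1)]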
+
+ def get_edge_data(self, u, v, key=None, default=None):
+ """Return the attribute dictionary associated with edge (u,v).
+
+ Parameters
+ ----------
+ u,v : nodes
+ default: any Python object (default=None)
+ Value to return if the edge (u,v) is not found.
+ key : hashable identifier, optional (default=None)
+ Return data only for the edge with specified key.
+
+ Returns
+ -------
+ edge_dict : dictionary
+ The edge attribute dictionary.
+
+ Notes
+ -----
+ It is faster to use G[u][v][key].
+
+ >>> G = nx.MultiGraph() # or MultiDiGraph
+ >>> G.add_edge(0,1,key='a',weight=7)
+ >>> G[0][1]['a'] # key='a'
+ {'weight': 7}
+
+        Warning: assigning to G[u][v][key] corrupts the graph data
+        structure, but it is safe to assign attributes to that
+        dictionary:
+
+ >>> G[0][1]['a']['weight'] = 10
+ >>> G[0][1]['a']['weight']
+ 10
+ >>> G[1][0]['a']['weight']
+ 10
+
+ Examples
+ --------
+ >>> G = nx.MultiGraph() # or MultiDiGraph
+ >>> G.add_path([0,1,2,3])
+ >>> G.get_edge_data(0,1)
+ {0: {}}
+ >>> e = (0,1)
+ >>> G.get_edge_data(*e) # tuple form
+ {0: {}}
+ >>> G.get_edge_data('a','b',default=0) # edge not in graph, return 0
+ 0
+ """
+ try:
+ if key is None:
+ return self.adj[u][v]
+ else:
+ return self.adj[u][v][key]
+ except KeyError:
+ return default
+
+ def degree_iter(self, nbunch=None, weight=None):
+ """Return an iterator for (node, degree).
+
+ The node degree is the number of edges adjacent to the node.
+
+ Parameters
+ ----------
+ nbunch : iterable container, optional (default=all nodes)
+ A container of nodes. The container will be iterated
+ through once.
+
+ weight : string or None, optional (default=None)
+ The edge attribute that holds the numerical value used
+ as a weight. If None, then each edge has weight 1.
+ The degree is the sum of the edge weights adjacent to the node.
+
+ Returns
+ -------
+ nd_iter : an iterator
+ The iterator returns two-tuples of (node, degree).
+
+ See Also
+ --------
+ degree
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_path([0,1,2,3])
+ >>> list(G.degree_iter(0)) # node 0 with degree 1
+ [(0, 1)]
+ >>> list(G.degree_iter([0,1]))
+ [(0, 1), (1, 2)]
+
+ """
+ if nbunch is None:
+ nodes_nbrs = self.adj.items()
+ else:
+ nodes_nbrs=((n,self.adj[n]) for n in self.nbunch_iter(nbunch))
+
+ if weight is None:
+ for n,nbrs in nodes_nbrs:
+ deg = sum([len(data) for data in nbrs.values()])
+ yield (n, deg+(n in nbrs and len(nbrs[n])))
+ else:
+ # edge weighted graph - degree is sum of nbr edge weights
+ for n,nbrs in nodes_nbrs:
+ deg = sum([d.get(weight,1)
+ for data in nbrs.values()
+ for d in data.values()])
+ if n in nbrs:
+ deg += sum([d.get(weight,1)
+ for key,d in nbrs[n].items()])
+ yield (n, deg)
+
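+    # A minimal sketch of the selfloop term above: an undirected selfloop
+    # contributes 2 to the degree of its node:
+    #   >>> G = nx.MultiGraph()
+    #   >>> G.add_edge(1, 1)
+    #   >>> list(G.degree_iter())
+    #   [(1, 2)]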
+
+ def is_multigraph(self):
+ """Return True if graph is a multigraph, False otherwise."""
+ return True
+
+ def is_directed(self):
+ """Return True if graph is directed, False otherwise."""
+ return False
+
+ def to_directed(self):
+ """Return a directed representation of the graph.
+
+ Returns
+ -------
+ G : MultiDiGraph
+ A directed graph with the same name, same nodes, and with
+ each edge (u,v,data) replaced by two directed edges
+ (u,v,data) and (v,u,data).
+
+ Notes
+ -----
+ This returns a "deepcopy" of the edge, node, and
+ graph attributes which attempts to completely copy
+ all of the data and references.
+
+ This is in contrast to the similar D=DiGraph(G) which returns a
+ shallow copy of the data.
+
+ See the Python copy module for more information on shallow
+ and deep copies, http://docs.python.org/library/copy.html.
+
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or MultiGraph, etc
+ >>> G.add_path([0,1])
+ >>> H = G.to_directed()
+ >>> H.edges()
+ [(0, 1), (1, 0)]
+
+ If already directed, return a (deep) copy
+
+ >>> G = nx.DiGraph() # or MultiDiGraph, etc
+ >>> G.add_path([0,1])
+ >>> H = G.to_directed()
+ >>> H.edges()
+ [(0, 1)]
+ """
+ from networkx.classes.multidigraph import MultiDiGraph
+ G=MultiDiGraph()
+ G.add_nodes_from(self)
+ G.add_edges_from( (u,v,key,deepcopy(datadict))
+ for u,nbrs in self.adjacency_iter()
+ for v,keydict in nbrs.items()
+ for key,datadict in keydict.items() )
+ G.graph=deepcopy(self.graph)
+ G.node=deepcopy(self.node)
+ return G
+
+
+ def selfloop_edges(self, data=False, keys=False):
+ """Return a list of selfloop edges.
+
+ A selfloop edge has the same node at both ends.
+
+        Parameters
+        ----------
+        data : bool, optional (default=False)
+            If False, return selfloop edges as 2-tuples (u,v);
+            if True, as 3-tuples (u,v,data).
+ keys : bool, optional (default=False)
+ If True, return edge keys with each edge.
+
+ Returns
+ -------
+ edgelist : list of edge tuples
+ A list of all selfloop edges.
+
+ See Also
+ --------
+ nodes_with_selfloops, number_of_selfloops
+
+ Examples
+ --------
+ >>> G = nx.MultiGraph() # or MultiDiGraph
+ >>> G.add_edge(1,1)
+ >>> G.add_edge(1,2)
+ >>> G.selfloop_edges()
+ [(1, 1)]
+ >>> G.selfloop_edges(data=True)
+ [(1, 1, {})]
+ >>> G.selfloop_edges(keys=True)
+ [(1, 1, 0)]
+ >>> G.selfloop_edges(keys=True, data=True)
+ [(1, 1, 0, {})]
+ """
+ if data:
+ if keys:
+ return [ (n,n,k,d)
+ for n,nbrs in self.adj.items()
+ if n in nbrs for k,d in nbrs[n].items()]
+ else:
+ return [ (n,n,d)
+ for n,nbrs in self.adj.items()
+ if n in nbrs for d in nbrs[n].values()]
+ else:
+ if keys:
+ return [ (n,n,k)
+ for n,nbrs in self.adj.items()
+ if n in nbrs for k in nbrs[n].keys()]
+ else:
+ return [ (n,n)
+ for n,nbrs in self.adj.items()
+ if n in nbrs for d in nbrs[n].values()]
+
+
+ def number_of_edges(self, u=None, v=None):
+ """Return the number of edges between two nodes.
+
+ Parameters
+ ----------
+ u,v : nodes, optional (default=all edges)
+ If u and v are specified, return the number of edges between
+ u and v. Otherwise return the total number of all edges.
+
+ Returns
+ -------
+ nedges : int
+ The number of edges in the graph. If nodes u and v are specified
+ return the number of edges between those nodes.
+
+ See Also
+ --------
+ size
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_path([0,1,2,3])
+ >>> G.number_of_edges()
+ 3
+ >>> G.number_of_edges(0,1)
+ 1
+ >>> e = (0,1)
+ >>> G.number_of_edges(*e)
+ 1
+ """
+ if u is None: return self.size()
+ try:
+ edgedata=self.adj[u][v]
+ except KeyError:
+ return 0 # no such edge
+ return len(edgedata)
+
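+    # A minimal sketch with parallel edges: the per-pair count is the length
+    # of the keydict for that pair:
+    #   >>> G = nx.MultiGraph([(0, 1), (0, 1)])
+    #   >>> G.number_of_edges(0, 1)
+    #   2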
+
+ def subgraph(self, nbunch):
+ """Return the subgraph induced on nodes in nbunch.
+
+ The induced subgraph of the graph contains the nodes in nbunch
+ and the edges between those nodes.
+
+ Parameters
+ ----------
+ nbunch : list, iterable
+ A container of nodes which will be iterated through once.
+
+ Returns
+ -------
+ G : Graph
+ A subgraph of the graph with the same edge attributes.
+
+ Notes
+ -----
+ The graph, edge or node attributes just point to the original graph.
+ So changes to the node or edge structure will not be reflected in
+ the original graph while changes to the attributes will.
+
+ To create a subgraph with its own copy of the edge/node attributes use:
+ nx.Graph(G.subgraph(nbunch))
+
+ If edge attributes are containers, a deep copy can be obtained using:
+ G.subgraph(nbunch).copy()
+
+ For an inplace reduction of a graph to a subgraph you can remove nodes:
+        G.remove_nodes_from([n for n in G if n not in set(nbunch)])
+
+ Examples
+ --------
+ >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
+ >>> G.add_path([0,1,2,3])
+ >>> H = G.subgraph([0,1,2])
+ >>> H.edges()
+ [(0, 1), (1, 2)]
+ """
+ bunch =self.nbunch_iter(nbunch)
+ # create new graph and copy subgraph into it
+ H = self.__class__()
+ # copy node and attribute dictionaries
+ for n in bunch:
+ H.node[n]=self.node[n]
+ # namespace shortcuts for speed
+ H_adj=H.adj
+ self_adj=self.adj
+ # add nodes and edges (undirected method)
+ for n in H:
+ Hnbrs={}
+ H_adj[n]=Hnbrs
+ for nbr,edgedict in self_adj[n].items():
+ if nbr in H_adj:
+ # add both representations of edge: n-nbr and nbr-n
+ # they share the same edgedict
+ ed=edgedict.copy()
+ Hnbrs[nbr]=ed
+ H_adj[nbr][n]=ed
+ H.graph=self.graph
+ return H
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/classes/tests/historical_tests.py b/lib/python2.7/site-packages/setoolsgui/networkx/classes/tests/historical_tests.py
new file mode 100644
index 0000000..5dd398c
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/classes/tests/historical_tests.py
@@ -0,0 +1,477 @@
+#!/usr/bin/env python
+"""Original NetworkX graph tests"""
+from nose.tools import *
+import networkx
+import networkx as nx
+from networkx import convert_node_labels_to_integers as cnlti
+from networkx.testing import *
+
+class HistoricalTests(object):
+
+ def setUp(self):
+ self.null=nx.null_graph()
+ self.P1=cnlti(nx.path_graph(1),first_label=1)
+ self.P3=cnlti(nx.path_graph(3),first_label=1)
+ self.P10=cnlti(nx.path_graph(10),first_label=1)
+ self.K1=cnlti(nx.complete_graph(1),first_label=1)
+ self.K3=cnlti(nx.complete_graph(3),first_label=1)
+ self.K4=cnlti(nx.complete_graph(4),first_label=1)
+ self.K5=cnlti(nx.complete_graph(5),first_label=1)
+ self.K10=cnlti(nx.complete_graph(10),first_label=1)
+ self.G=nx.Graph
+
+ def test_name(self):
+ G=self.G(name="test")
+ assert_equal(str(G),'test')
+ assert_equal(G.name,'test')
+ H=self.G()
+ assert_equal(H.name,'')
+
+ # Nodes
+
+ def test_add_remove_node(self):
+ G=self.G()
+ G.add_node('A')
+ assert_true(G.has_node('A'))
+ G.remove_node('A')
+ assert_false(G.has_node('A'))
+
+ def test_nonhashable_node(self):
+ # Test if a non-hashable object is in the Graph. A python dict will
+ # raise a TypeError, but for a Graph class a simple False should be
+ # returned (see Graph __contains__). If it cannot be a node then it is
+ # not a node.
+ G=self.G()
+ assert_false(G.has_node(['A']))
+ assert_false(G.has_node({'A':1}))
+
+ def test_add_nodes_from(self):
+ G=self.G()
+ G.add_nodes_from(list("ABCDEFGHIJKL"))
+ assert_true(G.has_node("L"))
+ G.remove_nodes_from(['H','I','J','K','L'])
+ G.add_nodes_from([1,2,3,4])
+ assert_equal(sorted(G.nodes(),key=str),
+ [1, 2, 3, 4, 'A', 'B', 'C', 'D', 'E', 'F', 'G'])
+ # test __iter__
+ assert_equal(sorted(G,key=str),
+ [1, 2, 3, 4, 'A', 'B', 'C', 'D', 'E', 'F', 'G'])
+
+
+ def test_contains(self):
+ G=self.G()
+ G.add_node('A')
+ assert_true('A' in G)
+ assert_false([] in G) # never raise a Key or TypeError in this test
+ assert_false({1:1} in G)
+
+ def test_add_remove(self):
+ # Test add_node and remove_node acting for various nbunch
+ G=self.G()
+ G.add_node('m')
+ assert_true(G.has_node('m'))
+ G.add_node('m') # no complaints
+ assert_raises(nx.NetworkXError,G.remove_node,'j')
+ G.remove_node('m')
+ assert_equal(G.nodes(),[])
+
+ def test_nbunch_is_list(self):
+ G=self.G()
+ G.add_nodes_from(list("ABCD"))
+ G.add_nodes_from(self.P3) # add nbunch of nodes (nbunch=Graph)
+ assert_equal(sorted(G.nodes(),key=str),
+ [1, 2, 3, 'A', 'B', 'C', 'D'])
+ G.remove_nodes_from(self.P3) # remove nbunch of nodes (nbunch=Graph)
+ assert_equal(sorted(G.nodes(),key=str),
+ ['A', 'B', 'C', 'D'])
+
+ def test_nbunch_is_set(self):
+ G=self.G()
+ nbunch=set("ABCDEFGHIJKL")
+ G.add_nodes_from(nbunch)
+ assert_true(G.has_node("L"))
+
+ def test_nbunch_dict(self):
+ # nbunch is a dict with nodes as keys
+ G=self.G()
+ nbunch=set("ABCDEFGHIJKL")
+ G.add_nodes_from(nbunch)
+ nbunch={'I':"foo",'J':2,'K':True,'L':"spam"}
+ G.remove_nodes_from(nbunch)
+        assert_equal(sorted(G.nodes(),key=str),
+                     ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'])
+
+ def test_nbunch_iterator(self):
+ G=self.G()
+ G.add_nodes_from(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'])
+ n_iter=self.P3.nodes_iter()
+ G.add_nodes_from(n_iter)
+ assert_equal(sorted(G.nodes(),key=str),
+ [1, 2, 3, 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'])
+ n_iter=self.P3.nodes_iter() # rebuild same iterator
+ G.remove_nodes_from(n_iter) # remove nbunch of nodes (nbunch=iterator)
+ assert_equal(sorted(G.nodes(),key=str),
+ ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'])
+
+ def test_nbunch_graph(self):
+ G=self.G()
+ G.add_nodes_from(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'])
+ nbunch=self.K3
+ G.add_nodes_from(nbunch)
+        assert_equal(sorted(G.nodes(),key=str),
+                     [1, 2, 3, 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'])
+
+
+
+ # Edges
+
+ def test_add_edge(self):
+ G=self.G()
+ assert_raises(TypeError,G.add_edge,'A')
+
+ G.add_edge('A','B') # testing add_edge()
+ G.add_edge('A','B') # should fail silently
+ assert_true(G.has_edge('A','B'))
+ assert_false(G.has_edge('A','C'))
+ assert_true(G.has_edge( *('A','B') ))
+ if G.is_directed():
+ assert_false(G.has_edge('B','A'))
+ else:
+ # G is undirected, so B->A is an edge
+ assert_true(G.has_edge('B','A'))
+
+
+ G.add_edge('A','C') # test directedness
+ G.add_edge('C','A')
+ G.remove_edge('C','A')
+ if G.is_directed():
+ assert_true(G.has_edge('A','C'))
+ else:
+ assert_false(G.has_edge('A','C'))
+ assert_false(G.has_edge('C','A'))
+
+
+ def test_self_loop(self):
+ G=self.G()
+ G.add_edge('A','A') # test self loops
+ assert_true(G.has_edge('A','A'))
+ G.remove_edge('A','A')
+ G.add_edge('X','X')
+ assert_true(G.has_node('X'))
+ G.remove_node('X')
+ G.add_edge('A','Z') # should add the node silently
+ assert_true(G.has_node('Z'))
+
+ def test_add_edges_from(self):
+ G=self.G()
+ G.add_edges_from([('B','C')]) # test add_edges_from()
+ assert_true(G.has_edge('B','C'))
+ if G.is_directed():
+ assert_false(G.has_edge('C','B'))
+ else:
+ assert_true(G.has_edge('C','B')) # undirected
+
+ G.add_edges_from([('D','F'),('B','D')])
+ assert_true(G.has_edge('D','F'))
+ assert_true(G.has_edge('B','D'))
+
+ if G.is_directed():
+ assert_false(G.has_edge('D','B'))
+ else:
+ assert_true(G.has_edge('D','B')) # undirected
+
+ def test_add_edges_from2(self):
+ G=self.G()
+ # after failing silently, should add 2nd edge
+ G.add_edges_from([tuple('IJ'),list('KK'),tuple('JK')])
+ assert_true(G.has_edge(*('I','J')))
+ assert_true(G.has_edge(*('K','K')))
+ assert_true(G.has_edge(*('J','K')))
+ if G.is_directed():
+ assert_false(G.has_edge(*('K','J')))
+ else:
+ assert_true(G.has_edge(*('K','J')))
+
+ def test_add_edges_from3(self):
+ G=self.G()
+ G.add_edges_from(zip(list('ACD'),list('CDE')))
+ assert_true(G.has_edge('D','E'))
+ assert_false(G.has_edge('E','C'))
+
+ def test_remove_edge(self):
+ G=self.G()
+ G.add_nodes_from([1, 2, 3, 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'])
+
+ G.add_edges_from(zip(list('MNOP'),list('NOPM')))
+ assert_true(G.has_edge('O','P'))
+ assert_true( G.has_edge('P','M'))
+ G.remove_node('P') # tests remove_node()'s handling of edges.
+ assert_false(G.has_edge('P','M'))
+ assert_raises(TypeError,G.remove_edge,'M')
+
+ G.add_edge('N','M')
+ assert_true(G.has_edge('M','N'))
+ G.remove_edge('M','N')
+ assert_false(G.has_edge('M','N'))
+
+ # self loop fails silently
+ G.remove_edges_from([list('HI'),list('DF'),
+ tuple('KK'),tuple('JK')])
+ assert_false(G.has_edge('H','I'))
+ assert_false(G.has_edge('J','K'))
+ G.remove_edges_from([list('IJ'),list('KK'),list('JK')])
+ assert_false(G.has_edge('I','J'))
+ G.remove_nodes_from(set('ZEFHIMNO'))
+ G.add_edge('J','K')
+
+
+ def test_edges_nbunch(self):
+ # Test G.edges(nbunch) with various forms of nbunch
+ G=self.G()
+ G.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'D'),
+ ('C', 'B'), ('C', 'D')])
+        # a single non-iterable node not in G raises NetworkXError
+        assert_raises(nx.NetworkXError,G.edges,6)
+        assert_equals(G.edges('Z'),[]) # iterable non-node: quietly ignored
+ # nbunch can be an empty list
+ assert_equals(G.edges([]),[])
+ if G.is_directed():
+ elist=[('A', 'B'), ('A', 'C'), ('B', 'D')]
+ else:
+ elist=[('A', 'B'), ('A', 'C'), ('B', 'C'), ('B', 'D')]
+ # nbunch can be a list
+ assert_edges_equal(G.edges(['A','B']),elist)
+ # nbunch can be a set
+ assert_edges_equal(G.edges(set(['A','B'])),elist)
+ # nbunch can be a graph
+ G1=self.G()
+ G1.add_nodes_from('AB')
+ assert_edges_equal(G.edges(G1),elist)
+ # nbunch can be a dict with nodes as keys
+ ndict={'A': "thing1", 'B': "thing2"}
+ assert_edges_equal(G.edges(ndict),elist)
+ # nbunch can be a single node
+ assert_edges_equal(G.edges('A'), [('A', 'B'), ('A', 'C')])
+
+        assert_equal(sorted(G.nodes_iter()), ['A', 'B', 'C', 'D'])
+
+ def test_edges_iter_nbunch(self):
+ G=self.G()
+ G.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'D'),
+ ('C', 'B'), ('C', 'D')])
+ # Test G.edges_iter(nbunch) with various forms of nbunch
+ # node not in nbunch should be quietly ignored
+ assert_equals(list(G.edges_iter('Z')),[])
+ # nbunch can be an empty list
+ assert_equals(sorted(G.edges_iter([])),[])
+ if G.is_directed():
+ elist=[('A', 'B'), ('A', 'C'), ('B', 'D')]
+ else:
+ elist=[('A', 'B'), ('A', 'C'), ('B', 'C'), ('B', 'D')]
+ # nbunch can be a list
+ assert_edges_equal(G.edges_iter(['A','B']),elist)
+ # nbunch can be a set
+ assert_edges_equal(G.edges_iter(set(['A','B'])),elist)
+ # nbunch can be a graph
+ G1=self.G()
+ G1.add_nodes_from(['A','B'])
+ assert_edges_equal(G.edges_iter(G1),elist)
+ # nbunch can be a dict with nodes as keys
+ ndict={'A': "thing1", 'B': "thing2"}
+ assert_edges_equal(G.edges_iter(ndict),elist)
+ # nbunch can be a single node
+ assert_edges_equal(G.edges_iter('A'), [('A', 'B'), ('A', 'C')])
+
+ # nbunch can be nothing (whole graph)
+ assert_edges_equal(G.edges_iter(), [('A', 'B'), ('A', 'C'), ('B', 'D'),
+ ('C', 'B'), ('C', 'D')])
+
+
+ def test_degree(self):
+ G=self.G()
+ G.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'D'),
+ ('C', 'B'), ('C', 'D')])
+ assert_equal(G.degree('A'),2)
+
+ # degree of single node in iterable container must return dict
+ assert_equal(list(G.degree(['A']).values()),[2])
+ assert_equal(G.degree(['A']),{'A': 2})
+ assert_equal(sorted(G.degree(['A','B']).values()),[2, 3])
+ assert_equal(G.degree(['A','B']),{'A': 2, 'B': 3})
+ assert_equal(sorted(G.degree().values()),[2, 2, 3, 3])
+ assert_equal(sorted([v for k,v in G.degree_iter()]),
+ [2, 2, 3, 3])
+
+ def test_degree2(self):
+ H=self.G()
+ H.add_edges_from([(1,24),(1,2)])
+ assert_equal(sorted(H.degree([1,24]).values()),[1, 2])
+
+ def test_degree_graph(self):
+ P3=nx.path_graph(3)
+ P5=nx.path_graph(5)
+ # silently ignore nodes not in P3
+ assert_equal(P3.degree(['A','B']),{})
+ # nbunch can be a graph
+ assert_equal(sorted(P5.degree(P3).values()),[1, 2, 2])
+        # nbunch can be a graph that's way too big
+ assert_equal(sorted(P3.degree(P5).values()),[1, 1, 2])
+ assert_equal(P5.degree([]),{})
+ assert_equal(list(P5.degree_iter([])),[])
+ assert_equal(dict(P5.degree_iter([])),{})
+
+ def test_null(self):
+ null=nx.null_graph()
+ assert_equal(null.degree(),{})
+ assert_equal(list(null.degree_iter()),[])
+ assert_equal(dict(null.degree_iter()),{})
+
+ def test_order_size(self):
+ G=self.G()
+ G.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'D'),
+ ('C', 'B'), ('C', 'D')])
+ assert_equal(G.order(),4)
+ assert_equal(G.size(),5)
+ assert_equal(G.number_of_edges(),5)
+ assert_equal(G.number_of_edges('A','B'),1)
+ assert_equal(G.number_of_edges('A','D'),0)
+
+ def test_copy(self):
+ G=self.G()
+ H=G.copy() # copy
+ assert_equal(H.adj,G.adj)
+ assert_equal(H.name,G.name)
+ assert_not_equal(H,G)
+
+ def test_subgraph(self):
+ G=self.G()
+ G.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'D'),
+ ('C', 'B'), ('C', 'D')])
+ SG=G.subgraph(['A','B','D'])
+ assert_nodes_equal(SG.nodes(),['A', 'B', 'D'])
+ assert_edges_equal(SG.edges(),[('A', 'B'), ('B', 'D')])
+
+ def test_to_directed(self):
+ G=self.G()
+ if not G.is_directed():
+ G.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'D'),
+ ('C', 'B'), ('C', 'D')])
+
+ DG=G.to_directed()
+ assert_not_equal(DG,G) # directed copy or copy
+
+ assert_true(DG.is_directed())
+ assert_equal(DG.name,G.name)
+ assert_equal(DG.adj,G.adj)
+ assert_equal(sorted(DG.out_edges(list('AB'))),
+ [('A', 'B'), ('A', 'C'), ('B', 'A'),
+ ('B', 'C'), ('B', 'D')])
+ DG.remove_edge('A','B')
+ assert_true(DG.has_edge('B','A')) # this removes B-A but not A-B
+ assert_false(DG.has_edge('A','B'))
+
+ def test_to_undirected(self):
+ G=self.G()
+ if G.is_directed():
+ G.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'D'),
+ ('C', 'B'), ('C', 'D')])
+ UG=G.to_undirected() # to_undirected
+ assert_not_equal(UG,G)
+ assert_false(UG.is_directed())
+ assert_true(G.is_directed())
+ assert_equal(UG.name,G.name)
+ assert_not_equal(UG.adj,G.adj)
+ assert_equal(sorted(UG.edges(list('AB'))),
+ [('A', 'B'), ('A', 'C'), ('B', 'C'), ('B', 'D')])
+ assert_equal(sorted(UG.edges(['A','B'])),
+ [('A', 'B'), ('A', 'C'), ('B', 'C'), ('B', 'D')])
+ UG.remove_edge('A','B')
+ assert_false(UG.has_edge('B','A'))
+ assert_false( UG.has_edge('A','B'))
+
+
+
+ def test_neighbors(self):
+ G=self.G()
+ G.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'D'),
+ ('C', 'B'), ('C', 'D')])
+ G.add_nodes_from('GJK')
+ assert_equal(sorted(G['A']),['B', 'C'])
+ assert_equal(sorted(G.neighbors('A')),['B', 'C'])
+ assert_equal(sorted(G.neighbors_iter('A')),['B', 'C'])
+ assert_equal(sorted(G.neighbors('G')),[])
+ assert_raises(nx.NetworkXError,G.neighbors,'j')
+
+ def test_iterators(self):
+ G=self.G()
+ G.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'D'),
+ ('C', 'B'), ('C', 'D')])
+ G.add_nodes_from('GJK')
+ assert_equal(sorted(G.nodes_iter()),
+ ['A', 'B', 'C', 'D', 'G', 'J', 'K'])
+ assert_edges_equal(G.edges_iter(),
+ [('A', 'B'), ('A', 'C'), ('B', 'D'), ('C', 'B'), ('C', 'D')])
+
+ assert_equal(sorted([v for k,v in G.degree_iter()]),
+ [0, 0, 0, 2, 2, 3, 3])
+ assert_equal(sorted(G.degree_iter(),key=str),
+ [('A', 2), ('B', 3), ('C', 3), ('D', 2),
+ ('G', 0), ('J', 0), ('K', 0)])
+ assert_equal(sorted(G.neighbors_iter('A')),['B', 'C'])
+ assert_raises(nx.NetworkXError,G.neighbors_iter,'X')
+ G.clear()
+ assert_equal(nx.number_of_nodes(G),0)
+ assert_equal(nx.number_of_edges(G),0)
+
+
+ def test_null_subgraph(self):
+ # Subgraph of a null graph is a null graph
+ nullgraph=nx.null_graph()
+ G=nx.null_graph()
+ H=G.subgraph([])
+ assert_true(nx.is_isomorphic(H,nullgraph))
+
+ def test_empty_subgraph(self):
+ # Subgraph of an empty graph is an empty graph. test 1
+ nullgraph=nx.null_graph()
+ E5=nx.empty_graph(5)
+ E10=nx.empty_graph(10)
+ H=E10.subgraph([])
+ assert_true(nx.is_isomorphic(H,nullgraph))
+ H=E10.subgraph([1,2,3,4,5])
+ assert_true(nx.is_isomorphic(H,E5))
+
+ def test_complete_subgraph(self):
+ # Subgraph of a complete graph is a complete graph
+ K1=nx.complete_graph(1)
+ K3=nx.complete_graph(3)
+ K5=nx.complete_graph(5)
+ H=K5.subgraph([1,2,3])
+ assert_true(nx.is_isomorphic(H,K3))
+
+ def test_subgraph_nbunch(self):
+ nullgraph=nx.null_graph()
+ K1=nx.complete_graph(1)
+ K3=nx.complete_graph(3)
+ K5=nx.complete_graph(5)
+ # Test G.subgraph(nbunch), where nbunch is a single node
+ H=K5.subgraph(1)
+ assert_true(nx.is_isomorphic(H,K1))
+ # Test G.subgraph(nbunch), where nbunch is a set
+ H=K5.subgraph(set([1]))
+ assert_true(nx.is_isomorphic(H,K1))
+ # Test G.subgraph(nbunch), where nbunch is an iterator
+ H=K5.subgraph(iter(K3))
+ assert_true(nx.is_isomorphic(H,K3))
+ # Test G.subgraph(nbunch), where nbunch is another graph
+ H=K5.subgraph(K3)
+ assert_true(nx.is_isomorphic(H,K3))
+ H=K5.subgraph([9])
+ assert_true(nx.is_isomorphic(H,nullgraph))
+
+ def test_node_tuple_error(self):
+ H=self.G()
+ # Test error handling of tuple as a node
+ assert_raises(nx.NetworkXError,H.remove_node,(1,2))
+ H.remove_nodes_from([(1,2)]) # no error
+ assert_raises(nx.NetworkXError,H.neighbors,(1,2))
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/classes/tests/test_digraph.py b/lib/python2.7/site-packages/setoolsgui/networkx/classes/tests/test_digraph.py
new file mode 100644
index 0000000..9ac3d3e
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/classes/tests/test_digraph.py
@@ -0,0 +1,257 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx
+from test_graph import BaseGraphTester, BaseAttrGraphTester, TestGraph
+
+class BaseDiGraphTester(BaseGraphTester):
+ def test_has_successor(self):
+ G=self.K3
+ assert_equal(G.has_successor(0,1),True)
+ assert_equal(G.has_successor(0,-1),False)
+
+ def test_successors(self):
+ G=self.K3
+ assert_equal(sorted(G.successors(0)),[1,2])
+ assert_raises((KeyError,networkx.NetworkXError), G.successors,-1)
+
+ def test_successors_iter(self):
+ G=self.K3
+ assert_equal(sorted(G.successors_iter(0)),[1,2])
+ assert_raises((KeyError,networkx.NetworkXError), G.successors_iter,-1)
+
+ def test_has_predecessor(self):
+ G=self.K3
+ assert_equal(G.has_predecessor(0,1),True)
+ assert_equal(G.has_predecessor(0,-1),False)
+
+ def test_predecessors(self):
+ G=self.K3
+ assert_equal(sorted(G.predecessors(0)),[1,2])
+ assert_raises((KeyError,networkx.NetworkXError), G.predecessors,-1)
+
+ def test_predecessors_iter(self):
+ G=self.K3
+ assert_equal(sorted(G.predecessors_iter(0)),[1,2])
+ assert_raises((KeyError,networkx.NetworkXError), G.predecessors_iter,-1)
+
+ def test_edges(self):
+ G=self.K3
+ assert_equal(sorted(G.edges()),[(0,1),(0,2),(1,0),(1,2),(2,0),(2,1)])
+ assert_equal(sorted(G.edges(0)),[(0,1),(0,2)])
+ assert_raises((KeyError,networkx.NetworkXError), G.edges,-1)
+
+ def test_edges_iter(self):
+ G=self.K3
+ assert_equal(sorted(G.edges_iter()),
+ [(0,1),(0,2),(1,0),(1,2),(2,0),(2,1)])
+ assert_equal(sorted(G.edges_iter(0)),[(0,1),(0,2)])
+
+ def test_edges_data(self):
+ G=self.K3
+ assert_equal(sorted(G.edges(data=True)),
+ [(0,1,{}),(0,2,{}),(1,0,{}),(1,2,{}),(2,0,{}),(2,1,{})])
+ assert_equal(sorted(G.edges(0,data=True)),[(0,1,{}),(0,2,{})])
+ assert_raises((KeyError,networkx.NetworkXError), G.edges,-1)
+
+ def test_out_edges(self):
+ G=self.K3
+ assert_equal(sorted(G.out_edges()),
+ [(0,1),(0,2),(1,0),(1,2),(2,0),(2,1)])
+ assert_equal(sorted(G.out_edges(0)),[(0,1),(0,2)])
+ assert_raises((KeyError,networkx.NetworkXError), G.out_edges,-1)
+
+ def test_out_edges_iter(self):
+ G=self.K3
+ assert_equal(sorted(G.out_edges_iter()),
+ [(0,1),(0,2),(1,0),(1,2),(2,0),(2,1)])
+ assert_equal(sorted(G.edges_iter(0)),[(0,1),(0,2)])
+
+ def test_out_edges_dir(self):
+ G=self.P3
+ assert_equal(sorted(G.out_edges()),[(0, 1), (1, 2)])
+ assert_equal(sorted(G.out_edges(0)),[(0, 1)])
+ assert_equal(sorted(G.out_edges(2)),[])
+
+ def test_out_edges_iter_dir(self):
+ G=self.P3
+ assert_equal(sorted(G.out_edges_iter()),[(0, 1), (1, 2)])
+ assert_equal(sorted(G.out_edges_iter(0)),[(0, 1)])
+ assert_equal(sorted(G.out_edges_iter(2)),[])
+
+ def test_in_edges_dir(self):
+ G=self.P3
+ assert_equal(sorted(G.in_edges()),[(0, 1), (1, 2)])
+ assert_equal(sorted(G.in_edges(0)),[])
+ assert_equal(sorted(G.in_edges(2)),[(1,2)])
+
+ def test_in_edges_iter_dir(self):
+ G=self.P3
+ assert_equal(sorted(G.in_edges_iter()),[(0, 1), (1, 2)])
+ assert_equal(sorted(G.in_edges_iter(0)),[])
+ assert_equal(sorted(G.in_edges_iter(2)),[(1,2)])
+
+ def test_degree(self):
+ G=self.K3
+ assert_equal(list(G.degree().values()),[4,4,4])
+ assert_equal(G.degree(),{0:4,1:4,2:4})
+ assert_equal(G.degree(0),4)
+ assert_equal(G.degree([0]),{0:4})
+ assert_raises((KeyError,networkx.NetworkXError), G.degree,-1)
+
+ def test_degree_iter(self):
+ G=self.K3
+ assert_equal(list(G.degree_iter()),[(0,4),(1,4),(2,4)])
+ assert_equal(dict(G.degree_iter()),{0:4,1:4,2:4})
+ assert_equal(list(G.degree_iter(0)),[(0,4)])
+
+ def test_in_degree(self):
+ G=self.K3
+ assert_equal(list(G.in_degree().values()),[2,2,2])
+ assert_equal(G.in_degree(),{0:2,1:2,2:2})
+ assert_equal(G.in_degree(0),2)
+ assert_equal(G.in_degree([0]),{0:2})
+ assert_raises((KeyError,networkx.NetworkXError), G.in_degree,-1)
+
+ def test_in_degree_iter(self):
+ G=self.K3
+ assert_equal(list(G.in_degree_iter()),[(0,2),(1,2),(2,2)])
+ assert_equal(dict(G.in_degree_iter()),{0:2,1:2,2:2})
+ assert_equal(list(G.in_degree_iter(0)),[(0,2)])
+
+ def test_in_degree_iter_weighted(self):
+ G=self.K3
+ G.add_edge(0,1,weight=0.3,other=1.2)
+ assert_equal(list(G.in_degree_iter(weight='weight')),[(0,2),(1,1.3),(2,2)])
+ assert_equal(dict(G.in_degree_iter(weight='weight')),{0:2,1:1.3,2:2})
+ assert_equal(list(G.in_degree_iter(1,weight='weight')),[(1,1.3)])
+ assert_equal(list(G.in_degree_iter(weight='other')),[(0,2),(1,2.2),(2,2)])
+ assert_equal(dict(G.in_degree_iter(weight='other')),{0:2,1:2.2,2:2})
+ assert_equal(list(G.in_degree_iter(1,weight='other')),[(1,2.2)])
+
+ def test_out_degree(self):
+ G=self.K3
+ assert_equal(list(G.out_degree().values()),[2,2,2])
+ assert_equal(G.out_degree(),{0:2,1:2,2:2})
+ assert_equal(G.out_degree(0),2)
+ assert_equal(G.out_degree([0]),{0:2})
+ assert_raises((KeyError,networkx.NetworkXError), G.out_degree,-1)
+
+ def test_out_degree_iter_weighted(self):
+ G=self.K3
+ G.add_edge(0,1,weight=0.3,other=1.2)
+ assert_equal(list(G.out_degree_iter(weight='weight')),[(0,1.3),(1,2),(2,2)])
+ assert_equal(dict(G.out_degree_iter(weight='weight')),{0:1.3,1:2,2:2})
+ assert_equal(list(G.out_degree_iter(0,weight='weight')),[(0,1.3)])
+ assert_equal(list(G.out_degree_iter(weight='other')),[(0,2.2),(1,2),(2,2)])
+ assert_equal(dict(G.out_degree_iter(weight='other')),{0:2.2,1:2,2:2})
+ assert_equal(list(G.out_degree_iter(0,weight='other')),[(0,2.2)])
+
+ def test_out_degree_iter(self):
+ G=self.K3
+ assert_equal(list(G.out_degree_iter()),[(0,2),(1,2),(2,2)])
+ assert_equal(dict(G.out_degree_iter()),{0:2,1:2,2:2})
+ assert_equal(list(G.out_degree_iter(0)),[(0,2)])
+
+ def test_size(self):
+ G=self.K3
+ assert_equal(G.size(),6)
+ assert_equal(G.number_of_edges(),6)
+
+ def test_to_undirected_reciprocal(self):
+ G=self.Graph()
+ G.add_edge(1,2)
+ assert_true(G.to_undirected().has_edge(1,2))
+ assert_false(G.to_undirected(reciprocal=True).has_edge(1,2))
+ G.add_edge(2,1)
+ assert_true(G.to_undirected(reciprocal=True).has_edge(1,2))
+
+ def test_reverse_copy(self):
+ G=networkx.DiGraph([(0,1),(1,2)])
+ R=G.reverse()
+ assert_equal(sorted(R.edges()),[(1,0),(2,1)])
+ R.remove_edge(1,0)
+ assert_equal(sorted(R.edges()),[(2,1)])
+ assert_equal(sorted(G.edges()),[(0,1),(1,2)])
+
+ def test_reverse_nocopy(self):
+ G=networkx.DiGraph([(0,1),(1,2)])
+ R=G.reverse(copy=False)
+ assert_equal(sorted(R.edges()),[(1,0),(2,1)])
+ R.remove_edge(1,0)
+ assert_equal(sorted(R.edges()),[(2,1)])
+ assert_equal(sorted(G.edges()),[(2,1)])
+
+class BaseAttrDiGraphTester(BaseDiGraphTester,BaseAttrGraphTester):
+ pass
+
+
+class TestDiGraph(BaseAttrDiGraphTester,TestGraph):
+ """Tests specific to dict-of-dict-of-dict digraph data structure"""
+ def setUp(self):
+ self.Graph=networkx.DiGraph
+ # build dict-of-dict-of-dict K3
+ ed1,ed2,ed3,ed4,ed5,ed6 = ({},{},{},{},{},{})
+ self.k3adj={0: {1: ed1, 2: ed2}, 1: {0: ed3, 2: ed4}, 2: {0: ed5, 1:ed6}}
+ self.k3edges=[(0, 1), (0, 2), (1, 2)]
+ self.k3nodes=[0, 1, 2]
+ self.K3=self.Graph()
+ self.K3.adj = self.K3.succ = self.K3.edge = self.k3adj
+ self.K3.pred={0: {1: ed3, 2: ed5}, 1: {0: ed1, 2: ed6}, 2: {0: ed2, 1:ed4}}
+
+ ed1,ed2 = ({},{})
+ self.P3=self.Graph()
+ self.P3.adj={0: {1: ed1}, 1: {2: ed2}, 2: {}}
+ self.P3.succ=self.P3.adj
+ self.P3.pred={0: {}, 1: {0: ed1}, 2: {1: ed2}}
+ self.K3.node={}
+ self.K3.node[0]={}
+ self.K3.node[1]={}
+ self.K3.node[2]={}
+ self.P3.node={}
+ self.P3.node[0]={}
+ self.P3.node[1]={}
+ self.P3.node[2]={}
+
+ def test_data_input(self):
+ G=self.Graph(data={1:[2],2:[1]}, name="test")
+ assert_equal(G.name,"test")
+ assert_equal(sorted(G.adj.items()),[(1, {2: {}}), (2, {1: {}})])
+ assert_equal(sorted(G.succ.items()),[(1, {2: {}}), (2, {1: {}})])
+ assert_equal(sorted(G.pred.items()),[(1, {2: {}}), (2, {1: {}})])
+
+ def test_add_edge(self):
+ G=self.Graph()
+ G.add_edge(0,1)
+ assert_equal(G.adj,{0: {1: {}}, 1: {}})
+ assert_equal(G.succ,{0: {1: {}}, 1: {}})
+ assert_equal(G.pred,{0: {}, 1: {0:{}}})
+ G=self.Graph()
+ G.add_edge(*(0,1))
+ assert_equal(G.adj,{0: {1: {}}, 1: {}})
+ assert_equal(G.succ,{0: {1: {}}, 1: {}})
+ assert_equal(G.pred,{0: {}, 1: {0:{}}})
+
+ def test_add_edges_from(self):
+ G=self.Graph()
+ G.add_edges_from([(0,1),(0,2,{'data':3})],data=2)
+ assert_equal(G.adj,{0: {1: {'data':2}, 2: {'data':3}}, 1: {}, 2: {}})
+ assert_equal(G.succ,{0: {1: {'data':2}, 2: {'data':3}}, 1: {}, 2: {}})
+ assert_equal(G.pred,{0: {}, 1: {0: {'data':2}}, 2: {0: {'data':3}}})
+
+ assert_raises(networkx.NetworkXError, G.add_edges_from,[(0,)]) # too few in tuple
+ assert_raises(networkx.NetworkXError, G.add_edges_from,[(0,1,2,3)]) # too many in tuple
+ assert_raises(TypeError, G.add_edges_from,[0]) # not a tuple
+
+ def test_remove_edge(self):
+ G=self.K3
+ G.remove_edge(0,1)
+ assert_equal(G.succ,{0:{2:{}},1:{0:{},2:{}},2:{0:{},1:{}}})
+ assert_equal(G.pred,{0:{1:{}, 2:{}}, 1:{2:{}}, 2:{0:{},1:{}}})
+ assert_raises((KeyError,networkx.NetworkXError), G.remove_edge,-1,0)
+
+ def test_remove_edges_from(self):
+ G=self.K3
+ G.remove_edges_from([(0,1)])
+ assert_equal(G.succ,{0:{2:{}},1:{0:{},2:{}},2:{0:{},1:{}}})
+ assert_equal(G.pred,{0:{1:{}, 2:{}}, 1:{2:{}}, 2:{0:{},1: {}}})
+ G.remove_edges_from([(0,0)]) # silent fail
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/classes/tests/test_digraph_historical.py b/lib/python2.7/site-packages/setoolsgui/networkx/classes/tests/test_digraph_historical.py
new file mode 100644
index 0000000..6bf295c
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/classes/tests/test_digraph_historical.py
@@ -0,0 +1,119 @@
+#!/usr/bin/env python
+"""Original NetworkX graph tests"""
+from nose.tools import *
+import networkx
+import networkx as nx
+
+from historical_tests import HistoricalTests
+
+class TestDiGraphHistorical(HistoricalTests):
+
+ def setUp(self):
+ HistoricalTests.setUp(self)
+ self.G=nx.DiGraph
+
+
+ def test_in_degree(self):
+ G=self.G()
+ G.add_nodes_from('GJK')
+ G.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'D'),
+ ('B', 'C'), ('C', 'D')])
+
+ assert_equal(sorted(G.in_degree().values()),[0, 0, 0, 0, 1, 2, 2])
+ assert_equal(G.in_degree(),
+ {'A': 0, 'C': 2, 'B': 1, 'D': 2, 'G': 0, 'K': 0, 'J': 0})
+ assert_equal(sorted([v for k,v in G.in_degree_iter()]),
+ [0, 0, 0, 0, 1, 2, 2])
+ assert_equal(dict(G.in_degree_iter()),
+ {'A': 0, 'C': 2, 'B': 1, 'D': 2, 'G': 0, 'K': 0, 'J': 0})
+
+
+ def test_out_degree(self):
+ G=self.G()
+ G.add_nodes_from('GJK')
+ G.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'D'),
+ ('B', 'C'), ('C', 'D')])
+ assert_equal(sorted(G.out_degree().values()),[0, 0, 0, 0, 1, 2, 2])
+ assert_equal(G.out_degree(),
+ {'A': 2, 'C': 1, 'B': 2, 'D': 0, 'G': 0, 'K': 0, 'J': 0})
+        assert_equal(sorted([v for k,v in G.out_degree_iter()]),
+                     [0, 0, 0, 0, 1, 2, 2])
+ assert_equal(dict(G.out_degree_iter()),
+ {'A': 2, 'C': 1, 'B': 2, 'D': 0, 'G': 0, 'K': 0, 'J': 0})
+
+
+ def test_degree_digraph(self):
+ H=nx.DiGraph()
+ H.add_edges_from([(1,24),(1,2)])
+ assert_equal(sorted(H.in_degree([1,24]).values()),[0, 1])
+ assert_equal(sorted(H.out_degree([1,24]).values()),[0, 2])
+ assert_equal(sorted(H.degree([1,24]).values()),[1, 2])
+
+
+ def test_neighbors(self):
+ G=self.G()
+ G.add_nodes_from('GJK')
+ G.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'D'),
+ ('B', 'C'), ('C', 'D')])
+
+ assert_equal(sorted(G.neighbors('C')),['D'])
+ assert_equal(sorted(G['C']),['D'])
+ assert_equal(sorted(G.neighbors('A')),['B', 'C'])
+ assert_equal(sorted(G.neighbors_iter('A')),['B', 'C'])
+ assert_equal(sorted(G.neighbors_iter('C')),['D'])
+ assert_equal(sorted(G.neighbors('A')),['B', 'C'])
+ assert_raises(nx.NetworkXError,G.neighbors,'j')
+ assert_raises(nx.NetworkXError,G.neighbors_iter,'j')
+
+ def test_successors(self):
+ G=self.G()
+ G.add_nodes_from('GJK')
+ G.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'D'),
+ ('B', 'C'), ('C', 'D')])
+ assert_equal(sorted(G.successors('A')),['B', 'C'])
+ assert_equal(sorted(G.successors_iter('A')),['B', 'C'])
+ assert_equal(sorted(G.successors('G')),[])
+ assert_equal(sorted(G.successors('D')),[])
+ assert_equal(sorted(G.successors_iter('G')),[])
+ assert_raises(nx.NetworkXError,G.successors,'j')
+ assert_raises(nx.NetworkXError,G.successors_iter,'j')
+
+
+ def test_predecessors(self):
+ G=self.G()
+ G.add_nodes_from('GJK')
+ G.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'D'),
+ ('B', 'C'), ('C', 'D')])
+ assert_equal(sorted(G.predecessors('C')),['A', 'B'])
+ assert_equal(sorted(G.predecessors_iter('C')),['A', 'B'])
+ assert_equal(sorted(G.predecessors('G')),[])
+ assert_equal(sorted(G.predecessors('A')),[])
+ assert_equal(sorted(G.predecessors_iter('G')),[])
+ assert_equal(sorted(G.predecessors_iter('A')),[])
+ assert_equal(sorted(G.successors_iter('D')),[])
+
+ assert_raises(nx.NetworkXError,G.predecessors,'j')
+        assert_raises(nx.NetworkXError,G.predecessors_iter,'j')
+
+
+
+ def test_reverse(self):
+ G=nx.complete_graph(10)
+ H=G.to_directed()
+ HR=H.reverse()
+ assert_true(nx.is_isomorphic(H,HR))
+ assert_equal(sorted(H.edges()),sorted(HR.edges()))
+
+ def test_reverse2(self):
+ H=nx.DiGraph()
+        for u in range(0,5):
+            H.add_edge(u,u+1)
+ HR=H.reverse()
+ for u in range(0,5):
+ assert_true(HR.has_edge(u+1,u))
+
+ def test_reverse3(self):
+ H=nx.DiGraph()
+ H.add_nodes_from([1,2,3,4])
+ HR=H.reverse()
+ assert_equal(sorted(HR.nodes()),[1, 2, 3, 4])
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/classes/tests/test_function.py b/lib/python2.7/site-packages/setoolsgui/networkx/classes/tests/test_function.py
new file mode 100644
index 0000000..2c12178
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/classes/tests/test_function.py
@@ -0,0 +1,190 @@
+#!/usr/bin/env python
+import random
+from nose.tools import *
+import networkx
+import networkx as nx
+
+class TestFunction(object):
+ def setUp(self):
+ self.G=networkx.Graph({0:[1,2,3], 1:[1,2,0], 4:[]}, name='Test')
+ self.Gdegree={0:3, 1:2, 2:2, 3:1, 4:0}
+ self.Gnodes=list(range(5))
+ self.Gedges=[(0,1),(0,2),(0,3),(1,0),(1,1),(1,2)]
+ self.DG=networkx.DiGraph({0:[1,2,3], 1:[1,2,0], 4:[]})
+ self.DGin_degree={0:1, 1:2, 2:2, 3:1, 4:0}
+ self.DGout_degree={0:3, 1:3, 2:0, 3:0, 4:0}
+ self.DGnodes=list(range(5))
+ self.DGedges=[(0,1),(0,2),(0,3),(1,0),(1,1),(1,2)]
+
+ def test_nodes(self):
+ assert_equal(self.G.nodes(),networkx.nodes(self.G))
+ assert_equal(self.DG.nodes(),networkx.nodes(self.DG))
+ def test_edges(self):
+ assert_equal(self.G.edges(),networkx.edges(self.G))
+ assert_equal(self.DG.edges(),networkx.edges(self.DG))
+ assert_equal(self.G.edges(nbunch=[0,1,3]),networkx.edges(self.G,nbunch=[0,1,3]))
+ assert_equal(self.DG.edges(nbunch=[0,1,3]),networkx.edges(self.DG,nbunch=[0,1,3]))
+ def test_nodes_iter(self):
+ assert_equal(list(self.G.nodes_iter()),list(networkx.nodes_iter(self.G)))
+ assert_equal(list(self.DG.nodes_iter()),list(networkx.nodes_iter(self.DG)))
+ def test_edges_iter(self):
+ assert_equal(list(self.G.edges_iter()),list(networkx.edges_iter(self.G)))
+ assert_equal(list(self.DG.edges_iter()),list(networkx.edges_iter(self.DG)))
+ assert_equal(list(self.G.edges_iter(nbunch=[0,1,3])),list(networkx.edges_iter(self.G,nbunch=[0,1,3])))
+ assert_equal(list(self.DG.edges_iter(nbunch=[0,1,3])),list(networkx.edges_iter(self.DG,nbunch=[0,1,3])))
+ def test_degree(self):
+ assert_equal(self.G.degree(),networkx.degree(self.G))
+ assert_equal(self.DG.degree(),networkx.degree(self.DG))
+ assert_equal(self.G.degree(nbunch=[0,1]),networkx.degree(self.G,nbunch=[0,1]))
+ assert_equal(self.DG.degree(nbunch=[0,1]),networkx.degree(self.DG,nbunch=[0,1]))
+ assert_equal(self.G.degree(weight='weight'),networkx.degree(self.G,weight='weight'))
+ assert_equal(self.DG.degree(weight='weight'),networkx.degree(self.DG,weight='weight'))
+ def test_neighbors(self):
+ assert_equal(self.G.neighbors(1),networkx.neighbors(self.G,1))
+ assert_equal(self.DG.neighbors(1),networkx.neighbors(self.DG,1))
+ def test_number_of_nodes(self):
+ assert_equal(self.G.number_of_nodes(),networkx.number_of_nodes(self.G))
+ assert_equal(self.DG.number_of_nodes(),networkx.number_of_nodes(self.DG))
+ def test_number_of_edges(self):
+ assert_equal(self.G.number_of_edges(),networkx.number_of_edges(self.G))
+ assert_equal(self.DG.number_of_edges(),networkx.number_of_edges(self.DG))
+ def test_is_directed(self):
+ assert_equal(self.G.is_directed(),networkx.is_directed(self.G))
+ assert_equal(self.DG.is_directed(),networkx.is_directed(self.DG))
+ def test_subgraph(self):
+ assert_equal(self.G.subgraph([0,1,2,4]).adj,networkx.subgraph(self.G,[0,1,2,4]).adj)
+ assert_equal(self.DG.subgraph([0,1,2,4]).adj,networkx.subgraph(self.DG,[0,1,2,4]).adj)
+
+ def test_create_empty_copy(self):
+ G=networkx.create_empty_copy(self.G, with_nodes=False)
+ assert_equal(G.nodes(),[])
+ assert_equal(G.graph,{})
+ assert_equal(G.node,{})
+ assert_equal(G.edge,{})
+ G=networkx.create_empty_copy(self.G)
+ assert_equal(G.nodes(),self.G.nodes())
+ assert_equal(G.graph,{})
+ assert_equal(G.node,{}.fromkeys(self.G.nodes(),{}))
+ assert_equal(G.edge,{}.fromkeys(self.G.nodes(),{}))
+
+ def test_degree_histogram(self):
+ assert_equal(networkx.degree_histogram(self.G), [1,1,1,1,1])
+
+ def test_density(self):
+ assert_equal(networkx.density(self.G), 0.5)
+ assert_equal(networkx.density(self.DG), 0.3)
+ G=networkx.Graph()
+ G.add_node(1)
+ assert_equal(networkx.density(G), 0.0)
+
+ def test_density_selfloop(self):
+ G = nx.Graph()
+ G.add_edge(1,1)
+ assert_equal(networkx.density(G), 0.0)
+ G.add_edge(1,2)
+ assert_equal(networkx.density(G), 2.0)
+
+ def test_freeze(self):
+ G=networkx.freeze(self.G)
+ assert_equal(G.frozen,True)
+ assert_raises(networkx.NetworkXError, G.add_node, 1)
+ assert_raises(networkx.NetworkXError, G.add_nodes_from, [1])
+ assert_raises(networkx.NetworkXError, G.remove_node, 1)
+ assert_raises(networkx.NetworkXError, G.remove_nodes_from, [1])
+ assert_raises(networkx.NetworkXError, G.add_edge, 1,2)
+ assert_raises(networkx.NetworkXError, G.add_edges_from, [(1,2)])
+ assert_raises(networkx.NetworkXError, G.remove_edge, 1,2)
+ assert_raises(networkx.NetworkXError, G.remove_edges_from, [(1,2)])
+ assert_raises(networkx.NetworkXError, G.clear)
+
+ def test_is_frozen(self):
+ assert_equal(networkx.is_frozen(self.G), False)
+ G=networkx.freeze(self.G)
+ assert_equal(G.frozen, networkx.is_frozen(self.G))
+ assert_equal(G.frozen,True)
+
+ def test_info(self):
+ G=networkx.path_graph(5)
+ info=networkx.info(G)
+ expected_graph_info='\n'.join(['Name: path_graph(5)',
+ 'Type: Graph',
+ 'Number of nodes: 5',
+ 'Number of edges: 4',
+ 'Average degree: 1.6000'])
+ assert_equal(info,expected_graph_info)
+
+ info=networkx.info(G,n=1)
+ expected_node_info='\n'.join(
+ ['Node 1 has the following properties:',
+ 'Degree: 2',
+ 'Neighbors: 0 2'])
+ assert_equal(info,expected_node_info)
+
+ def test_info_digraph(self):
+ G=networkx.DiGraph(name='path_graph(5)')
+ G.add_path([0,1,2,3,4])
+ info=networkx.info(G)
+ expected_graph_info='\n'.join(['Name: path_graph(5)',
+ 'Type: DiGraph',
+ 'Number of nodes: 5',
+ 'Number of edges: 4',
+ 'Average in degree: 0.8000',
+ 'Average out degree: 0.8000'])
+ assert_equal(info,expected_graph_info)
+
+ info=networkx.info(G,n=1)
+ expected_node_info='\n'.join(
+ ['Node 1 has the following properties:',
+ 'Degree: 2',
+ 'Neighbors: 2'])
+ assert_equal(info,expected_node_info)
+
+ assert_raises(networkx.NetworkXError,networkx.info,G,n=-1)
+
+    def test_neighbors_sampled(self):
+ graph = nx.complete_graph(100)
+ pop = random.sample(graph.nodes(), 1)
+ nbors = list(nx.neighbors(graph, pop[0]))
+ # should be all the other vertices in the graph
+ assert_equal(len(nbors), len(graph) - 1)
+
+ graph = nx.path_graph(100)
+ node = random.sample(graph.nodes(), 1)[0]
+ nbors = list(nx.neighbors(graph, node))
+        # interior nodes of a path have two neighbors; endpoints have one
+ if node != 0 and node != 99:
+ assert_equal(len(nbors), 2)
+ else:
+ assert_equal(len(nbors), 1)
+
+ # create a star graph with 99 outer nodes
+ graph = nx.star_graph(99)
+ nbors = list(nx.neighbors(graph, 0))
+ assert_equal(len(nbors), 99)
+
+ def test_non_neighbors(self):
+ graph = nx.complete_graph(100)
+ pop = random.sample(graph.nodes(), 1)
+ nbors = list(nx.non_neighbors(graph, pop[0]))
+        # complete graph: every other vertex is a neighbor, so none remain
+ assert_equal(len(nbors), 0)
+
+ graph = nx.path_graph(100)
+ node = random.sample(graph.nodes(), 1)[0]
+ nbors = list(nx.non_neighbors(graph, node))
+        # all vertices except the node itself and its path neighbors
+ if node != 0 and node != 99:
+ assert_equal(len(nbors), 97)
+ else:
+ assert_equal(len(nbors), 98)
+
+ # create a star graph with 99 outer nodes
+ graph = nx.star_graph(99)
+ nbors = list(nx.non_neighbors(graph, 0))
+ assert_equal(len(nbors), 0)
+
+ # disconnected graph
+ graph = nx.Graph()
+ graph.add_nodes_from(range(10))
+ nbors = list(nx.non_neighbors(graph, 0))
+ assert_equal(len(nbors), 9)
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/classes/tests/test_graph.py b/lib/python2.7/site-packages/setoolsgui/networkx/classes/tests/test_graph.py
new file mode 100644
index 0000000..077dead
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/classes/tests/test_graph.py
@@ -0,0 +1,602 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx
+
+class BaseGraphTester(object):
+ """ Tests for data-structure independent graph class features."""
+ def test_contains(self):
+ G=self.K3
+ assert(1 in G )
+ assert(4 not in G )
+ assert('b' not in G )
+ assert([] not in G ) # no exception for nonhashable
+ assert({1:1} not in G) # no exception for nonhashable
+
+ def test_order(self):
+ G=self.K3
+ assert_equal(len(G),3)
+ assert_equal(G.order(),3)
+ assert_equal(G.number_of_nodes(),3)
+
+ def test_nodes_iter(self):
+ G=self.K3
+ assert_equal(sorted(G.nodes_iter()),self.k3nodes)
+ assert_equal(sorted(G.nodes_iter(data=True)),[(0,{}),(1,{}),(2,{})])
+
+ def test_nodes(self):
+ G=self.K3
+ assert_equal(sorted(G.nodes()),self.k3nodes)
+ assert_equal(sorted(G.nodes(data=True)),[(0,{}),(1,{}),(2,{})])
+
+ def test_has_node(self):
+ G=self.K3
+ assert(G.has_node(1))
+ assert(not G.has_node(4))
+ assert(not G.has_node([])) # no exception for nonhashable
+ assert(not G.has_node({1:1})) # no exception for nonhashable
+
+ def test_has_edge(self):
+ G=self.K3
+ assert_equal(G.has_edge(0,1),True)
+ assert_equal(G.has_edge(0,-1),False)
+
+ def test_neighbors(self):
+ G=self.K3
+ assert_equal(sorted(G.neighbors(0)),[1,2])
+ assert_raises((KeyError,networkx.NetworkXError), G.neighbors,-1)
+
+ def test_neighbors_iter(self):
+ G=self.K3
+ assert_equal(sorted(G.neighbors_iter(0)),[1,2])
+ assert_raises((KeyError,networkx.NetworkXError), G.neighbors_iter,-1)
+
+ def test_edges(self):
+ G=self.K3
+ assert_equal(sorted(G.edges()),[(0,1),(0,2),(1,2)])
+ assert_equal(sorted(G.edges(0)),[(0,1),(0,2)])
+ assert_raises((KeyError,networkx.NetworkXError), G.edges,-1)
+
+ def test_edges_iter(self):
+ G=self.K3
+ assert_equal(sorted(G.edges_iter()),[(0,1),(0,2),(1,2)])
+ assert_equal(sorted(G.edges_iter(0)),[(0,1),(0,2)])
+ f=lambda x:list(G.edges_iter(x))
+ assert_raises((KeyError,networkx.NetworkXError), f, -1)
+
+ def test_adjacency_list(self):
+ G=self.K3
+ assert_equal(G.adjacency_list(),[[1,2],[0,2],[0,1]])
+
+ def test_degree(self):
+ G=self.K3
+ assert_equal(list(G.degree().values()),[2,2,2])
+ assert_equal(G.degree(),{0:2,1:2,2:2})
+ assert_equal(G.degree(0),2)
+ assert_equal(G.degree([0]),{0:2})
+ assert_raises((KeyError,networkx.NetworkXError), G.degree,-1)
+
+ def test_weighted_degree(self):
+ G=self.Graph()
+ G.add_edge(1,2,weight=2)
+ G.add_edge(2,3,weight=3)
+ assert_equal(list(G.degree(weight='weight').values()),[2,5,3])
+ assert_equal(G.degree(weight='weight'),{1:2,2:5,3:3})
+ assert_equal(G.degree(1,weight='weight'),2)
+ assert_equal(G.degree([1],weight='weight'),{1:2})
+
+ def test_degree_iter(self):
+ G=self.K3
+ assert_equal(list(G.degree_iter()),[(0,2),(1,2),(2,2)])
+ assert_equal(dict(G.degree_iter()),{0:2,1:2,2:2})
+ assert_equal(list(G.degree_iter(0)),[(0,2)])
+
+ def test_size(self):
+ G=self.K3
+ assert_equal(G.size(),3)
+ assert_equal(G.number_of_edges(),3)
+
+ def test_add_star(self):
+ G=self.K3.copy()
+ nlist=[12,13,14,15]
+ G.add_star(nlist)
+ assert_equal(sorted(G.edges(nlist)),[(12,13),(12,14),(12,15)])
+ G=self.K3.copy()
+ G.add_star(nlist,weight=2.0)
+ assert_equal(sorted(G.edges(nlist,data=True)),\
+ [(12,13,{'weight':2.}),
+ (12,14,{'weight':2.}),
+ (12,15,{'weight':2.})])
+
+ def test_add_path(self):
+ G=self.K3.copy()
+ nlist=[12,13,14,15]
+ G.add_path(nlist)
+ assert_equal(sorted(G.edges(nlist)),[(12,13),(13,14),(14,15)])
+ G=self.K3.copy()
+ G.add_path(nlist,weight=2.0)
+ assert_equal(sorted(G.edges(nlist,data=True)),\
+ [(12,13,{'weight':2.}),
+ (13,14,{'weight':2.}),
+ (14,15,{'weight':2.})])
+
+ def test_add_cycle(self):
+ G=self.K3.copy()
+ nlist=[12,13,14,15]
+ oklists=[ [(12,13),(12,15),(13,14),(14,15)], \
+ [(12,13),(13,14),(14,15),(15,12)] ]
+ G.add_cycle(nlist)
+ assert_true(sorted(G.edges(nlist)) in oklists)
+ G=self.K3.copy()
+ oklists=[ [(12,13,{'weight':1.}),\
+ (12,15,{'weight':1.}),\
+ (13,14,{'weight':1.}),\
+ (14,15,{'weight':1.})], \
+ \
+ [(12,13,{'weight':1.}),\
+ (13,14,{'weight':1.}),\
+ (14,15,{'weight':1.}),\
+ (15,12,{'weight':1.})] \
+ ]
+
+ G.add_cycle(nlist,weight=1.0)
+ assert_true(sorted(G.edges(nlist,data=True)) in oklists)
+
+ def test_nbunch_iter(self):
+ G=self.K3
+ assert_equal(list(G.nbunch_iter()),self.k3nodes) # all nodes
+ assert_equal(list(G.nbunch_iter(0)),[0]) # single node
+ assert_equal(list(G.nbunch_iter([0,1])),[0,1]) # sequence
+ # sequence with none in graph
+ assert_equal(list(G.nbunch_iter([-1])),[])
+ # string sequence with none in graph
+ assert_equal(list(G.nbunch_iter("foo")),[])
+ # node not in graph doesn't get caught upon creation of iterator
+ bunch=G.nbunch_iter(-1)
+ # but gets caught when iterator used
+ assert_raises(networkx.NetworkXError,list,bunch)
+ # unhashable doesn't get caught upon creation of iterator
+ bunch=G.nbunch_iter([0,1,2,{}])
+ # but gets caught when iterator hits the unhashable
+ assert_raises(networkx.NetworkXError,list,bunch)
+
+ def test_selfloop_degree(self):
+ G=self.Graph()
+ G.add_edge(1,1)
+ assert_equal(list(G.degree().values()),[2])
+ assert_equal(G.degree(),{1:2})
+ assert_equal(G.degree(1),2)
+ assert_equal(G.degree([1]),{1:2})
+ assert_equal(G.degree([1],weight='weight'),{1:2})
+
+ def test_selfloops(self):
+ G=self.K3.copy()
+ G.add_edge(0,0)
+ assert_equal(G.nodes_with_selfloops(),[0])
+ assert_equal(G.selfloop_edges(),[(0,0)])
+ assert_equal(G.number_of_selfloops(),1)
+ G.remove_edge(0,0)
+ G.add_edge(0,0)
+ G.remove_edges_from([(0,0)])
+ G.add_edge(1,1)
+ G.remove_node(1)
+ G.add_edge(0,0)
+ G.add_edge(1,1)
+ G.remove_nodes_from([0,1])
+
+
+class BaseAttrGraphTester(BaseGraphTester):
+ """ Tests of graph class attribute features."""
+ def test_weighted_degree(self):
+ G=self.Graph()
+ G.add_edge(1,2,weight=2,other=3)
+ G.add_edge(2,3,weight=3,other=4)
+ assert_equal(list(G.degree(weight='weight').values()),[2,5,3])
+ assert_equal(G.degree(weight='weight'),{1:2,2:5,3:3})
+ assert_equal(G.degree(1,weight='weight'),2)
+ assert_equal(G.degree([1],weight='weight'),{1:2})
+
+ assert_equal(list(G.degree(weight='other').values()),[3,7,4])
+ assert_equal(G.degree(weight='other'),{1:3,2:7,3:4})
+ assert_equal(G.degree(1,weight='other'),3)
+ assert_equal(G.degree([1],weight='other'),{1:3})
+
+ def add_attributes(self,G):
+ G.graph['foo']=[]
+ G.node[0]['foo']=[]
+ G.remove_edge(1,2)
+ ll=[]
+ G.add_edge(1,2,foo=ll)
+ G.add_edge(2,1,foo=ll)
+ # attr_dict must be dict
+ assert_raises(networkx.NetworkXError,G.add_edge,0,1,attr_dict=[])
+
+ def test_name(self):
+ G=self.Graph(name='')
+ assert_equal(G.name,"")
+ G=self.Graph(name='test')
+ assert_equal(G.__str__(),"test")
+ assert_equal(G.name,"test")
+
+ def test_copy(self):
+ G=self.K3
+ self.add_attributes(G)
+ H=G.copy()
+ self.is_deepcopy(H,G)
+ H=G.__class__(G)
+ self.is_shallow_copy(H,G)
+
+ def test_copy_attr(self):
+ G=self.Graph(foo=[])
+ G.add_node(0,foo=[])
+ G.add_edge(1,2,foo=[])
+ H=G.copy()
+ self.is_deepcopy(H,G)
+ H=G.__class__(G) # just copy
+ self.is_shallow_copy(H,G)
+
+ def is_deepcopy(self,H,G):
+ self.graphs_equal(H,G)
+ self.different_attrdict(H,G)
+ self.deep_copy_attrdict(H,G)
+
+ def deep_copy_attrdict(self,H,G):
+ self.deepcopy_graph_attr(H,G)
+ self.deepcopy_node_attr(H,G)
+ self.deepcopy_edge_attr(H,G)
+
+ def deepcopy_graph_attr(self,H,G):
+ assert_equal(G.graph['foo'],H.graph['foo'])
+ G.graph['foo'].append(1)
+ assert_not_equal(G.graph['foo'],H.graph['foo'])
+
+ def deepcopy_node_attr(self,H,G):
+ assert_equal(G.node[0]['foo'],H.node[0]['foo'])
+ G.node[0]['foo'].append(1)
+ assert_not_equal(G.node[0]['foo'],H.node[0]['foo'])
+
+ def deepcopy_edge_attr(self,H,G):
+ assert_equal(G[1][2]['foo'],H[1][2]['foo'])
+ G[1][2]['foo'].append(1)
+ assert_not_equal(G[1][2]['foo'],H[1][2]['foo'])
+
+ def is_shallow_copy(self,H,G):
+ self.graphs_equal(H,G)
+ self.different_attrdict(H,G)
+ self.shallow_copy_attrdict(H,G)
+
+ def shallow_copy_attrdict(self,H,G):
+ self.shallow_copy_graph_attr(H,G)
+ self.shallow_copy_node_attr(H,G)
+ self.shallow_copy_edge_attr(H,G)
+
+ def shallow_copy_graph_attr(self,H,G):
+ assert_equal(G.graph['foo'],H.graph['foo'])
+ G.graph['foo'].append(1)
+ assert_equal(G.graph['foo'],H.graph['foo'])
+
+ def shallow_copy_node_attr(self,H,G):
+ assert_equal(G.node[0]['foo'],H.node[0]['foo'])
+ G.node[0]['foo'].append(1)
+ assert_equal(G.node[0]['foo'],H.node[0]['foo'])
+
+ def shallow_copy_edge_attr(self,H,G):
+ assert_equal(G[1][2]['foo'],H[1][2]['foo'])
+ G[1][2]['foo'].append(1)
+ assert_equal(G[1][2]['foo'],H[1][2]['foo'])
+
+ def same_attrdict(self, H, G):
+ old_foo=H[1][2]['foo']
+ H.add_edge(1,2,foo='baz')
+ assert_equal(G.edge,H.edge)
+ H.add_edge(1,2,foo=old_foo)
+ assert_equal(G.edge,H.edge)
+ old_foo=H.node[0]['foo']
+ H.node[0]['foo']='baz'
+ assert_equal(G.node,H.node)
+ H.node[0]['foo']=old_foo
+ assert_equal(G.node,H.node)
+
+ def different_attrdict(self, H, G):
+ old_foo=H[1][2]['foo']
+ H.add_edge(1,2,foo='baz')
+ assert_not_equal(G.edge,H.edge)
+ H.add_edge(1,2,foo=old_foo)
+ assert_equal(G.edge,H.edge)
+ old_foo=H.node[0]['foo']
+ H.node[0]['foo']='baz'
+ assert_not_equal(G.node,H.node)
+ H.node[0]['foo']=old_foo
+ assert_equal(G.node,H.node)
+
+ def graphs_equal(self,H,G):
+ assert_equal(G.adj,H.adj)
+ assert_equal(G.edge,H.edge)
+ assert_equal(G.node,H.node)
+ assert_equal(G.graph,H.graph)
+ assert_equal(G.name,H.name)
+ if not G.is_directed() and not H.is_directed():
+ assert_true(H.adj[1][2] is H.adj[2][1])
+ assert_true(G.adj[1][2] is G.adj[2][1])
+ else: # at least one is directed
+ if not G.is_directed():
+ G.pred=G.adj
+ G.succ=G.adj
+ if not H.is_directed():
+ H.pred=H.adj
+ H.succ=H.adj
+ assert_equal(G.pred,H.pred)
+ assert_equal(G.succ,H.succ)
+ assert_true(H.succ[1][2] is H.pred[2][1])
+ assert_true(G.succ[1][2] is G.pred[2][1])
+
+ def test_graph_attr(self):
+ G=self.K3
+ G.graph['foo']='bar'
+ assert_equal(G.graph['foo'], 'bar')
+ del G.graph['foo']
+ assert_equal(G.graph, {})
+ H=self.Graph(foo='bar')
+ assert_equal(H.graph['foo'], 'bar')
+
+ def test_node_attr(self):
+ G=self.K3
+ G.add_node(1,foo='bar')
+ assert_equal(G.nodes(), [0,1,2])
+ assert_equal(G.nodes(data=True), [(0,{}),(1,{'foo':'bar'}),(2,{})])
+ G.node[1]['foo']='baz'
+ assert_equal(G.nodes(data=True), [(0,{}),(1,{'foo':'baz'}),(2,{})])
+
+ def test_node_attr2(self):
+ G=self.K3
+ a={'foo':'bar'}
+ G.add_node(3,attr_dict=a)
+ assert_equal(G.nodes(), [0,1,2,3])
+ assert_equal(G.nodes(data=True),
+ [(0,{}),(1,{}),(2,{}),(3,{'foo':'bar'})])
+
+ def test_edge_attr(self):
+ G=self.Graph()
+ G.add_edge(1,2,foo='bar')
+ assert_equal(G.edges(data=True), [(1,2,{'foo':'bar'})])
+
+ def test_edge_attr2(self):
+ G=self.Graph()
+ G.add_edges_from([(1,2),(3,4)],foo='foo')
+ assert_equal(sorted(G.edges(data=True)),
+ [(1,2,{'foo':'foo'}),(3,4,{'foo':'foo'})])
+
+ def test_edge_attr3(self):
+ G=self.Graph()
+ G.add_edges_from([(1,2,{'weight':32}),(3,4,{'weight':64})],foo='foo')
+ assert_equal(G.edges(data=True),
+ [(1,2,{'foo':'foo','weight':32}),\
+ (3,4,{'foo':'foo','weight':64})])
+
+ G.remove_edges_from([(1,2),(3,4)])
+ G.add_edge(1,2,data=7,spam='bar',bar='foo')
+ assert_equal(G.edges(data=True),
+ [(1,2,{'data':7,'spam':'bar','bar':'foo'})])
+
+ def test_edge_attr4(self):
+ G=self.Graph()
+ G.add_edge(1,2,data=7,spam='bar',bar='foo')
+ assert_equal(G.edges(data=True),
+ [(1,2,{'data':7,'spam':'bar','bar':'foo'})])
+ G[1][2]['data']=10 # OK to set data like this
+ assert_equal(G.edges(data=True),
+ [(1,2,{'data':10,'spam':'bar','bar':'foo'})])
+
+ G.edge[1][2]['data']=20 # another spelling, "edge"
+ assert_equal(G.edges(data=True),
+ [(1,2,{'data':20,'spam':'bar','bar':'foo'})])
+ G.edge[1][2]['listdata']=[20,200]
+ G.edge[1][2]['weight']=20
+ assert_equal(G.edges(data=True),
+ [(1,2,{'data':20,'spam':'bar',
+ 'bar':'foo','listdata':[20,200],'weight':20})])
+
+ def test_attr_dict_not_dict(self):
+ # attr_dict must be dict
+ G=self.Graph()
+ edges=[(1,2)]
+ assert_raises(networkx.NetworkXError,G.add_edges_from,edges,
+ attr_dict=[])
+
+ def test_to_undirected(self):
+ G=self.K3
+ self.add_attributes(G)
+ H=networkx.Graph(G)
+ self.is_shallow_copy(H,G)
+ H=G.to_undirected()
+ self.is_deepcopy(H,G)
+
+ def test_to_directed(self):
+ G=self.K3
+ self.add_attributes(G)
+ H=networkx.DiGraph(G)
+ self.is_shallow_copy(H,G)
+ H=G.to_directed()
+ self.is_deepcopy(H,G)
+
+ def test_subgraph(self):
+ G=self.K3
+ self.add_attributes(G)
+ H=G.subgraph([0,1,2,5])
+# assert_equal(H.name, 'Subgraph of ('+G.name+')')
+ H.name=G.name
+ self.graphs_equal(H,G)
+ self.same_attrdict(H,G)
+ self.shallow_copy_attrdict(H,G)
+
+ H=G.subgraph(0)
+ assert_equal(H.adj,{0:{}})
+ H=G.subgraph([])
+ assert_equal(H.adj,{})
+ assert_not_equal(G.adj,{})
+
+ def test_selfloops_attr(self):
+ G=self.K3.copy()
+ G.add_edge(0,0)
+ G.add_edge(1,1,weight=2)
+ assert_equal(G.selfloop_edges(data=True),
+ [(0,0,{}),(1,1,{'weight':2})])
+
+
+class TestGraph(BaseAttrGraphTester):
+ """Tests specific to dict-of-dict-of-dict graph data structure"""
+ def setUp(self):
+ self.Graph=networkx.Graph
+ # build dict-of-dict-of-dict K3
+ ed1,ed2,ed3 = ({},{},{})
+ self.k3adj={0: {1: ed1, 2: ed2},
+ 1: {0: ed1, 2: ed3},
+ 2: {0: ed2, 1: ed3}}
+ self.k3edges=[(0, 1), (0, 2), (1, 2)]
+ self.k3nodes=[0, 1, 2]
+ self.K3=self.Graph()
+ self.K3.adj=self.K3.edge=self.k3adj
+ self.K3.node={}
+ self.K3.node[0]={}
+ self.K3.node[1]={}
+ self.K3.node[2]={}
+
+ def test_data_input(self):
+ G=self.Graph(data={1:[2],2:[1]}, name="test")
+ assert_equal(G.name,"test")
+ assert_equal(sorted(G.adj.items()),[(1, {2: {}}), (2, {1: {}})])
+ G=self.Graph({1:[2],2:[1]}, name="test")
+ assert_equal(G.name,"test")
+ assert_equal(sorted(G.adj.items()),[(1, {2: {}}), (2, {1: {}})])
+
+ def test_adjacency_iter(self):
+ G=self.K3
+ assert_equal(dict(G.adjacency_iter()),
+ {0: {1: {}, 2: {}}, 1: {0: {}, 2: {}}, 2: {0: {}, 1: {}}})
+
+ def test_getitem(self):
+ G=self.K3
+ assert_equal(G[0],{1: {}, 2: {}})
+ assert_raises(KeyError, G.__getitem__, 'j')
+ assert_raises((TypeError,networkx.NetworkXError), G.__getitem__, ['A'])
+
+ def test_add_node(self):
+ G=self.Graph()
+ G.add_node(0)
+ assert_equal(G.adj,{0:{}})
+ # test add attributes
+ G.add_node(1,c='red')
+ G.add_node(2,{'c':'blue'})
+ G.add_node(3,{'c':'blue'},c='red')
+ assert_raises(networkx.NetworkXError, G.add_node, 4, [])
+ assert_raises(networkx.NetworkXError, G.add_node, 4, 4)
+ assert_equal(G.node[1]['c'],'red')
+ assert_equal(G.node[2]['c'],'blue')
+ assert_equal(G.node[3]['c'],'red')
+ # test updating attributes
+ G.add_node(1,c='blue')
+ G.add_node(2,{'c':'red'})
+ G.add_node(3,{'c':'red'},c='blue')
+ assert_equal(G.node[1]['c'],'blue')
+ assert_equal(G.node[2]['c'],'red')
+ assert_equal(G.node[3]['c'],'blue')
+
+ def test_add_nodes_from(self):
+ G=self.Graph()
+ G.add_nodes_from([0,1,2])
+ assert_equal(G.adj,{0:{},1:{},2:{}})
+ # test add attributes
+ G.add_nodes_from([0,1,2],c='red')
+ assert_equal(G.node[0]['c'],'red')
+ assert_equal(G.node[2]['c'],'red')
+ # test that attribute dicts are not the same
+ assert(G.node[0] is not G.node[1])
+ # test updating attributes
+ G.add_nodes_from([0,1,2],c='blue')
+ assert_equal(G.node[0]['c'],'blue')
+ assert_equal(G.node[2]['c'],'blue')
+ assert(G.node[0] is not G.node[1])
+ # test tuple input
+ H=self.Graph()
+ H.add_nodes_from(G.nodes(data=True))
+ assert_equal(H.node[0]['c'],'blue')
+ assert_equal(H.node[2]['c'],'blue')
+ assert(H.node[0] is not H.node[1])
+ # specific overrides general
+ H.add_nodes_from([0,(1,{'c':'green'}),(3,{'c':'cyan'})],c='red')
+ assert_equal(H.node[0]['c'],'red')
+ assert_equal(H.node[1]['c'],'green')
+ assert_equal(H.node[2]['c'],'blue')
+ assert_equal(H.node[3]['c'],'cyan')
+
+ def test_remove_node(self):
+ G=self.K3
+ G.remove_node(0)
+ assert_equal(G.adj,{1:{2:{}},2:{1:{}}})
+ assert_raises((KeyError,networkx.NetworkXError), G.remove_node,-1)
+
+    # nbunch may be any container or generator: list, set, string, ...
+ def test_remove_nodes_from(self):
+ G=self.K3
+ G.remove_nodes_from([0,1])
+ assert_equal(G.adj,{2:{}})
+ G.remove_nodes_from([-1]) # silent fail
+
+ def test_add_edge(self):
+ G=self.Graph()
+ G.add_edge(0,1)
+ assert_equal(G.adj,{0: {1: {}}, 1: {0: {}}})
+ G=self.Graph()
+ G.add_edge(*(0,1))
+ assert_equal(G.adj,{0: {1: {}}, 1: {0: {}}})
+
+ def test_add_edges_from(self):
+ G=self.Graph()
+ G.add_edges_from([(0,1),(0,2,{'weight':3})])
+ assert_equal(G.adj,{0: {1:{}, 2:{'weight':3}}, 1: {0:{}}, \
+ 2:{0:{'weight':3}}})
+ G=self.Graph()
+ G.add_edges_from([(0,1),(0,2,{'weight':3}),(1,2,{'data':4})],data=2)
+ assert_equal(G.adj,{\
+ 0: {1:{'data':2}, 2:{'weight':3,'data':2}}, \
+ 1: {0:{'data':2}, 2:{'data':4}}, \
+ 2: {0:{'weight':3,'data':2}, 1:{'data':4}} \
+ })
+
+ assert_raises(networkx.NetworkXError,
+ G.add_edges_from,[(0,)]) # too few in tuple
+ assert_raises(networkx.NetworkXError,
+ G.add_edges_from,[(0,1,2,3)]) # too many in tuple
+ assert_raises(TypeError, G.add_edges_from,[0]) # not a tuple
+
+
+ def test_remove_edge(self):
+ G=self.K3
+ G.remove_edge(0,1)
+ assert_equal(G.adj,{0:{2:{}},1:{2:{}},2:{0:{},1:{}}})
+ assert_raises((KeyError,networkx.NetworkXError), G.remove_edge,-1,0)
+
+ def test_remove_edges_from(self):
+ G=self.K3
+ G.remove_edges_from([(0,1)])
+ assert_equal(G.adj,{0:{2:{}},1:{2:{}},2:{0:{},1:{}}})
+ G.remove_edges_from([(0,0)]) # silent fail
+
+ def test_clear(self):
+ G=self.K3
+ G.clear()
+ assert_equal(G.adj,{})
+
+ def test_edges_data(self):
+ G=self.K3
+ assert_equal(sorted(G.edges(data=True)),[(0,1,{}),(0,2,{}),(1,2,{})])
+ assert_equal(sorted(G.edges(0,data=True)),[(0,1,{}),(0,2,{})])
+ assert_raises((KeyError,networkx.NetworkXError), G.edges,-1)
+
+
+ def test_get_edge_data(self):
+ G=self.K3
+ assert_equal(G.get_edge_data(0,1),{})
+ assert_equal(G[0][1],{})
+ assert_equal(G.get_edge_data(10,20),None)
+ assert_equal(G.get_edge_data(-1,0),None)
+ assert_equal(G.get_edge_data(-1,0,default=1),1)
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/classes/tests/test_graph_historical.py b/lib/python2.7/site-packages/setoolsgui/networkx/classes/tests/test_graph_historical.py
new file mode 100644
index 0000000..0ade19a
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/classes/tests/test_graph_historical.py
@@ -0,0 +1,14 @@
+#!/usr/bin/env python
+"""Original NetworkX graph tests"""
+from nose.tools import *
+import networkx
+import networkx as nx
+
+from historical_tests import HistoricalTests
+
+class TestGraphHistorical(HistoricalTests):
+
+ def setUp(self):
+ HistoricalTests.setUp(self)
+ self.G=nx.Graph
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/classes/tests/test_multidigraph.py b/lib/python2.7/site-packages/setoolsgui/networkx/classes/tests/test_multidigraph.py
new file mode 100644
index 0000000..e8c2543
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/classes/tests/test_multidigraph.py
@@ -0,0 +1,327 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx
+from test_multigraph import BaseMultiGraphTester, TestMultiGraph
+
+class BaseMultiDiGraphTester(BaseMultiGraphTester):
+ def test_edges(self):
+ G=self.K3
+ assert_equal(sorted(G.edges()),[(0,1),(0,2),(1,0),(1,2),(2,0),(2,1)])
+ assert_equal(sorted(G.edges(0)),[(0,1),(0,2)])
+ assert_raises((KeyError,networkx.NetworkXError), G.edges,-1)
+
+ def test_edges_data(self):
+ G=self.K3
+ assert_equal(sorted(G.edges(data=True)),
+ [(0,1,{}),(0,2,{}),(1,0,{}),(1,2,{}),(2,0,{}),(2,1,{})])
+ assert_equal(sorted(G.edges(0,data=True)),[(0,1,{}),(0,2,{})])
+ assert_raises((KeyError,networkx.NetworkXError), G.neighbors,-1)
+
+
+ def test_edges_iter(self):
+ G=self.K3
+ assert_equal(sorted(G.edges_iter()),
+ [(0,1),(0,2),(1,0),(1,2),(2,0),(2,1)])
+ assert_equal(sorted(G.edges_iter(0)),[(0,1),(0,2)])
+ G.add_edge(0,1)
+ assert_equal(sorted(G.edges_iter()),
+ [(0,1),(0,1),(0,2),(1,0),(1,2),(2,0),(2,1)])
+
+ def test_out_edges(self):
+ G=self.K3
+ assert_equal(sorted(G.out_edges()),
+ [(0,1),(0,2),(1,0),(1,2),(2,0),(2,1)])
+ assert_equal(sorted(G.out_edges(0)),[(0,1),(0,2)])
+ assert_raises((KeyError,networkx.NetworkXError), G.out_edges,-1)
+ assert_equal(sorted(G.out_edges(0,keys=True)),[(0,1,0),(0,2,0)])
+
+ def test_out_edges_iter(self):
+ G=self.K3
+ assert_equal(sorted(G.out_edges_iter()),
+ [(0,1),(0,2),(1,0),(1,2),(2,0),(2,1)])
+ assert_equal(sorted(G.out_edges_iter(0)),[(0,1),(0,2)])
+ G.add_edge(0,1,2)
+ assert_equal(sorted(G.out_edges_iter()),
+ [(0,1),(0,1),(0,2),(1,0),(1,2),(2,0),(2,1)])
+
+ def test_in_edges(self):
+ G=self.K3
+ assert_equal(sorted(G.in_edges()),
+ [(0,1),(0,2),(1,0),(1,2),(2,0),(2,1)])
+ assert_equal(sorted(G.in_edges(0)),[(1,0),(2,0)])
+ assert_raises((KeyError,networkx.NetworkXError), G.in_edges,-1)
+ G.add_edge(0,1,2)
+ assert_equal(sorted(G.in_edges()),
+ [(0,1),(0,1),(0,2),(1,0),(1,2),(2,0),(2,1)])
+ assert_equal(sorted(G.in_edges(0,keys=True)),[(1,0,0),(2,0,0)])
+
+ def test_in_edges_iter(self):
+ G=self.K3
+ assert_equal(sorted(G.in_edges_iter()),
+ [(0,1),(0,2),(1,0),(1,2),(2,0),(2,1)])
+ assert_equal(sorted(G.in_edges_iter(0)),[(1,0),(2,0)])
+ G.add_edge(0,1,2)
+ assert_equal(sorted(G.in_edges_iter()),
+ [(0,1),(0,1),(0,2),(1,0),(1,2),(2,0),(2,1)])
+
+ assert_equal(sorted(G.in_edges_iter(data=True,keys=False)),
+ [(0,1,{}),(0,1,{}),(0,2,{}),(1,0,{}),(1,2,{}),
+ (2,0,{}),(2,1,{})])
+
+
+ def is_shallow(self,H,G):
+ # graph
+ assert_equal(G.graph['foo'],H.graph['foo'])
+ G.graph['foo'].append(1)
+ assert_equal(G.graph['foo'],H.graph['foo'])
+ # node
+ assert_equal(G.node[0]['foo'],H.node[0]['foo'])
+ G.node[0]['foo'].append(1)
+ assert_equal(G.node[0]['foo'],H.node[0]['foo'])
+ # edge
+ assert_equal(G[1][2][0]['foo'],H[1][2][0]['foo'])
+ G[1][2][0]['foo'].append(1)
+ assert_equal(G[1][2][0]['foo'],H[1][2][0]['foo'])
+
+ def is_deep(self,H,G):
+ # graph
+ assert_equal(G.graph['foo'],H.graph['foo'])
+ G.graph['foo'].append(1)
+ assert_not_equal(G.graph['foo'],H.graph['foo'])
+ # node
+ assert_equal(G.node[0]['foo'],H.node[0]['foo'])
+ G.node[0]['foo'].append(1)
+ assert_not_equal(G.node[0]['foo'],H.node[0]['foo'])
+ # edge
+ assert_equal(G[1][2][0]['foo'],H[1][2][0]['foo'])
+ G[1][2][0]['foo'].append(1)
+ assert_not_equal(G[1][2][0]['foo'],H[1][2][0]['foo'])
+
+ def test_to_undirected(self):
+ # MultiDiGraph -> MultiGraph changes number of edges so it is
+ # not a copy operation... use is_shallow, not is_shallow_copy
+ G=self.K3
+ self.add_attributes(G)
+ H=networkx.MultiGraph(G)
+ self.is_shallow(H,G)
+ H=G.to_undirected()
+ self.is_deep(H,G)
+
+ def test_has_successor(self):
+ G=self.K3
+ assert_equal(G.has_successor(0,1),True)
+ assert_equal(G.has_successor(0,-1),False)
+
+ def test_successors(self):
+ G=self.K3
+ assert_equal(sorted(G.successors(0)),[1,2])
+ assert_raises((KeyError,networkx.NetworkXError), G.successors,-1)
+
+ def test_successors_iter(self):
+ G=self.K3
+ assert_equal(sorted(G.successors_iter(0)),[1,2])
+ assert_raises((KeyError,networkx.NetworkXError), G.successors_iter,-1)
+
+ def test_has_predecessor(self):
+ G=self.K3
+ assert_equal(G.has_predecessor(0,1),True)
+ assert_equal(G.has_predecessor(0,-1),False)
+
+ def test_predecessors(self):
+ G=self.K3
+ assert_equal(sorted(G.predecessors(0)),[1,2])
+ assert_raises((KeyError,networkx.NetworkXError), G.predecessors,-1)
+
+ def test_predecessors_iter(self):
+ G=self.K3
+ assert_equal(sorted(G.predecessors_iter(0)),[1,2])
+ assert_raises((KeyError,networkx.NetworkXError), G.predecessors_iter,-1)
+
+
+ def test_degree(self):
+ G=self.K3
+ assert_equal(list(G.degree().values()),[4,4,4])
+ assert_equal(G.degree(),{0:4,1:4,2:4})
+ assert_equal(G.degree(0),4)
+ assert_equal(G.degree([0]),{0:4})
+ assert_raises((KeyError,networkx.NetworkXError), G.degree,-1)
+
+ def test_degree_iter(self):
+ G=self.K3
+ assert_equal(list(G.degree_iter()),[(0,4),(1,4),(2,4)])
+ assert_equal(dict(G.degree_iter()),{0:4,1:4,2:4})
+ assert_equal(list(G.degree_iter(0)),[(0,4)])
+ G.add_edge(0,1,weight=0.3,other=1.2)
+ assert_equal(list(G.degree_iter(weight='weight')),[(0,4.3),(1,4.3),(2,4)])
+ assert_equal(list(G.degree_iter(weight='other')),[(0,5.2),(1,5.2),(2,4)])
+
+
+ def test_in_degree(self):
+ G=self.K3
+ assert_equal(list(G.in_degree().values()),[2,2,2])
+ assert_equal(G.in_degree(),{0:2,1:2,2:2})
+ assert_equal(G.in_degree(0),2)
+ assert_equal(G.in_degree([0]),{0:2})
+ assert_raises((KeyError,networkx.NetworkXError), G.in_degree,-1)
+
+ def test_in_degree_iter(self):
+ G=self.K3
+ assert_equal(list(G.in_degree_iter()),[(0,2),(1,2),(2,2)])
+ assert_equal(dict(G.in_degree_iter()),{0:2,1:2,2:2})
+ assert_equal(list(G.in_degree_iter(0)),[(0,2)])
+ assert_equal(list(G.in_degree_iter(0,weight='weight')),[(0,2)])
+
+ def test_out_degree(self):
+ G=self.K3
+ assert_equal(list(G.out_degree().values()),[2,2,2])
+ assert_equal(G.out_degree(),{0:2,1:2,2:2})
+ assert_equal(G.out_degree(0),2)
+ assert_equal(G.out_degree([0]),{0:2})
+ assert_raises((KeyError,networkx.NetworkXError), G.out_degree,-1)
+
+ def test_out_degree_iter(self):
+ G=self.K3
+ assert_equal(list(G.out_degree_iter()),[(0,2),(1,2),(2,2)])
+ assert_equal(dict(G.out_degree_iter()),{0:2,1:2,2:2})
+ assert_equal(list(G.out_degree_iter(0)),[(0,2)])
+ assert_equal(list(G.out_degree_iter(0,weight='weight')),[(0,2)])
+
+
+ def test_size(self):
+ G=self.K3
+ assert_equal(G.size(),6)
+ assert_equal(G.number_of_edges(),6)
+ G.add_edge(0,1,weight=0.3,other=1.2)
+ assert_equal(G.size(weight='weight'),6.3)
+ assert_equal(G.size(weight='other'),7.2)
+
+ def test_to_undirected_reciprocal(self):
+ G=self.Graph()
+ G.add_edge(1,2)
+ assert_true(G.to_undirected().has_edge(1,2))
+ assert_false(G.to_undirected(reciprocal=True).has_edge(1,2))
+ G.add_edge(2,1)
+ assert_true(G.to_undirected(reciprocal=True).has_edge(1,2))
+
+ def test_reverse_copy(self):
+ G=networkx.MultiDiGraph([(0,1),(0,1)])
+ R=G.reverse()
+ assert_equal(sorted(R.edges()),[(1,0),(1,0)])
+ R.remove_edge(1,0)
+ assert_equal(sorted(R.edges()),[(1,0)])
+ assert_equal(sorted(G.edges()),[(0,1),(0,1)])
+
+ def test_reverse_nocopy(self):
+ G=networkx.MultiDiGraph([(0,1),(0,1)])
+ R=G.reverse(copy=False)
+ assert_equal(sorted(R.edges()),[(1,0),(1,0)])
+ R.remove_edge(1,0)
+ assert_equal(sorted(R.edges()),[(1,0)])
+ assert_equal(sorted(G.edges()),[(1,0)])
+
+
+class TestMultiDiGraph(BaseMultiDiGraphTester,TestMultiGraph):
+ def setUp(self):
+ self.Graph=networkx.MultiDiGraph
+ # build K3
+ self.k3edges=[(0, 1), (0, 2), (1, 2)]
+ self.k3nodes=[0, 1, 2]
+ self.K3=self.Graph()
+ self.K3.adj={0:{},1:{},2:{}}
+ self.K3.succ=self.K3.adj
+ self.K3.pred={0:{},1:{},2:{}}
+ for u in self.k3nodes:
+ for v in self.k3nodes:
+ if u==v: continue
+ d={0:{}}
+ self.K3.succ[u][v]=d
+ self.K3.pred[v][u]=d
+ self.K3.adj=self.K3.succ
+ self.K3.edge=self.K3.adj
+ self.K3.node={}
+ self.K3.node[0]={}
+ self.K3.node[1]={}
+ self.K3.node[2]={}
+
+
+ def test_add_edge(self):
+ G=self.Graph()
+ G.add_edge(0,1)
+ assert_equal(G.adj,{0: {1: {0:{}}}, 1: {}})
+ assert_equal(G.succ,{0: {1: {0:{}}}, 1: {}})
+ assert_equal(G.pred,{0: {}, 1: {0:{0:{}}}})
+ G=self.Graph()
+ G.add_edge(*(0,1))
+ assert_equal(G.adj,{0: {1: {0:{}}}, 1: {}})
+ assert_equal(G.succ,{0: {1: {0:{}}}, 1: {}})
+ assert_equal(G.pred,{0: {}, 1: {0:{0:{}}}})
+
+ def test_add_edges_from(self):
+ G=self.Graph()
+ G.add_edges_from([(0,1),(0,1,{'weight':3})])
+ assert_equal(G.adj,{0: {1: {0:{},1:{'weight':3}}}, 1: {}})
+ assert_equal(G.succ,{0: {1: {0:{},1:{'weight':3}}}, 1: {}})
+ assert_equal(G.pred,{0: {}, 1: {0:{0:{},1:{'weight':3}}}})
+
+ G.add_edges_from([(0,1),(0,1,{'weight':3})],weight=2)
+ assert_equal(G.succ,{0: {1: {0:{},
+ 1:{'weight':3},
+ 2:{'weight':2},
+ 3:{'weight':3}}},
+ 1: {}})
+ assert_equal(G.pred,{0: {}, 1: {0:{0:{},1:{'weight':3},
+ 2:{'weight':2},
+ 3:{'weight':3}}}})
+
+ assert_raises(networkx.NetworkXError, G.add_edges_from,[(0,)]) # too few in tuple
+ assert_raises(networkx.NetworkXError, G.add_edges_from,[(0,1,2,3,4)]) # too many in tuple
+ assert_raises(TypeError, G.add_edges_from,[0]) # not a tuple
+
+ def test_remove_edge(self):
+ G=self.K3
+ G.remove_edge(0,1)
+ assert_equal(G.succ,{0:{2:{0:{}}},
+ 1:{0:{0:{}},2:{0:{}}},
+ 2:{0:{0:{}},1:{0:{}}}})
+ assert_equal(G.pred,{0:{1:{0:{}}, 2:{0:{}}},
+ 1:{2:{0:{}}},
+ 2:{0:{0:{}},1:{0:{}}}})
+ assert_raises((KeyError,networkx.NetworkXError), G.remove_edge,-1,0)
+ assert_raises((KeyError,networkx.NetworkXError), G.remove_edge,0,2,
+ key=1)
+
+
+ def test_remove_multiedge(self):
+ G=self.K3
+ G.add_edge(0,1,key='parallel edge')
+ G.remove_edge(0,1,key='parallel edge')
+ assert_equal(G.adj,{0: {1: {0:{}}, 2: {0:{}}},
+ 1: {0: {0:{}}, 2: {0:{}}},
+ 2: {0: {0:{}}, 1: {0:{}}}})
+
+ assert_equal(G.succ,{0: {1: {0:{}}, 2: {0:{}}},
+ 1: {0: {0:{}}, 2: {0:{}}},
+ 2: {0: {0:{}}, 1: {0:{}}}})
+
+ assert_equal(G.pred,{0:{1: {0:{}},2:{0:{}}},
+ 1:{0:{0:{}},2:{0:{}}},
+ 2:{0:{0:{}},1:{0:{}}}})
+ G.remove_edge(0,1)
+ assert_equal(G.succ,{0:{2:{0:{}}},
+ 1:{0:{0:{}},2:{0:{}}},
+ 2:{0:{0:{}},1:{0:{}}}})
+ assert_equal(G.pred,{0:{1:{0:{}}, 2:{0:{}}},
+ 1:{2:{0:{}}},
+ 2:{0:{0:{}},1:{0:{}}}})
+ assert_raises((KeyError,networkx.NetworkXError), G.remove_edge,-1,0)
+
+ def test_remove_edges_from(self):
+ G=self.K3
+ G.remove_edges_from([(0,1)])
+ assert_equal(G.succ,{0:{2:{0:{}}},
+ 1:{0:{0:{}},2:{0:{}}},
+ 2:{0:{0:{}},1:{0:{}}}})
+ assert_equal(G.pred,{0:{1:{0:{}}, 2:{0:{}}},
+ 1:{2:{0:{}}},
+ 2:{0:{0:{}},1:{0:{}}}})
+ G.remove_edges_from([(0,0)]) # silent fail
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/classes/tests/test_multigraph.py b/lib/python2.7/site-packages/setoolsgui/networkx/classes/tests/test_multigraph.py
new file mode 100644
index 0000000..6c9adbc
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/classes/tests/test_multigraph.py
@@ -0,0 +1,244 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx
+from test_graph import BaseAttrGraphTester, TestGraph
+
+class BaseMultiGraphTester(BaseAttrGraphTester):
+ def test_has_edge(self):
+ G=self.K3
+ assert_equal(G.has_edge(0,1),True)
+ assert_equal(G.has_edge(0,-1),False)
+ assert_equal(G.has_edge(0,1,0),True)
+ assert_equal(G.has_edge(0,1,1),False)
+
+ def test_get_edge_data(self):
+ G=self.K3
+ assert_equal(G.get_edge_data(0,1),{0:{}})
+ assert_equal(G[0][1],{0:{}})
+ assert_equal(G[0][1][0],{})
+ assert_equal(G.get_edge_data(10,20),None)
+ assert_equal(G.get_edge_data(0,1,0),{})
+
+
+ def test_adjacency_iter(self):
+ G=self.K3
+ assert_equal(dict(G.adjacency_iter()),
+ {0: {1: {0:{}}, 2: {0:{}}},
+ 1: {0: {0:{}}, 2: {0:{}}},
+ 2: {0: {0:{}}, 1: {0:{}}}})
+
+ def deepcopy_edge_attr(self,H,G):
+ assert_equal(G[1][2][0]['foo'],H[1][2][0]['foo'])
+ G[1][2][0]['foo'].append(1)
+ assert_not_equal(G[1][2][0]['foo'],H[1][2][0]['foo'])
+
+ def shallow_copy_edge_attr(self,H,G):
+ assert_equal(G[1][2][0]['foo'],H[1][2][0]['foo'])
+ G[1][2][0]['foo'].append(1)
+ assert_equal(G[1][2][0]['foo'],H[1][2][0]['foo'])
+
+ def same_attrdict(self, H, G):
+ # same attrdict in the edgedata
+ old_foo=H[1][2][0]['foo']
+ H.add_edge(1,2,0,foo='baz')
+ assert_equal(G.edge,H.edge)
+ H.add_edge(1,2,0,foo=old_foo)
+ assert_equal(G.edge,H.edge)
+ # but not same edgedata dict
+ H.add_edge(1,2,foo='baz')
+ assert_not_equal(G.edge,H.edge)
+
+ old_foo=H.node[0]['foo']
+ H.node[0]['foo']='baz'
+ assert_equal(G.node,H.node)
+ H.node[0]['foo']=old_foo
+ assert_equal(G.node,H.node)
+
+ def different_attrdict(self, H, G):
+ # used by graph_equal_but_different
+ old_foo=H[1][2][0]['foo']
+ H.add_edge(1,2,0,foo='baz')
+ assert_not_equal(G.edge,H.edge)
+ H.add_edge(1,2,0,foo=old_foo)
+ assert_equal(G.edge,H.edge)
+ HH=H.copy()
+ H.add_edge(1,2,foo='baz')
+ assert_not_equal(G.edge,H.edge)
+ H=HH
+ old_foo=H.node[0]['foo']
+ H.node[0]['foo']='baz'
+ assert_not_equal(G.node,H.node)
+ H.node[0]['foo']=old_foo
+ assert_equal(G.node,H.node)
+
+ def test_to_undirected(self):
+ G=self.K3
+ self.add_attributes(G)
+ H=networkx.MultiGraph(G)
+ self.is_shallow_copy(H,G)
+ H=G.to_undirected()
+ self.is_deepcopy(H,G)
+
+ def test_to_directed(self):
+ G=self.K3
+ self.add_attributes(G)
+ H=networkx.MultiDiGraph(G)
+ self.is_shallow_copy(H,G)
+ H=G.to_directed()
+ self.is_deepcopy(H,G)
+
+ def test_selfloops(self):
+ G=self.K3
+ G.add_edge(0,0)
+ assert_equal(G.nodes_with_selfloops(),[0])
+ assert_equal(G.selfloop_edges(),[(0,0)])
+ assert_equal(G.selfloop_edges(data=True),[(0,0,{})])
+ assert_equal(G.number_of_selfloops(),1)
+
+ def test_selfloops2(self):
+ G=self.K3
+ G.add_edge(0,0)
+ G.add_edge(0,0)
+ G.add_edge(0,0,key='parallel edge')
+ G.remove_edge(0,0,key='parallel edge')
+ assert_equal(G.number_of_edges(0,0),2)
+ G.remove_edge(0,0)
+ assert_equal(G.number_of_edges(0,0),1)
+
+ def test_edge_attr4(self):
+ G=self.Graph()
+ G.add_edge(1,2,key=0,data=7,spam='bar',bar='foo')
+ assert_equal(G.edges(data=True),
+ [(1,2,{'data':7,'spam':'bar','bar':'foo'})])
+ G[1][2][0]['data']=10 # OK to set data like this
+ assert_equal(G.edges(data=True),
+ [(1,2,{'data':10,'spam':'bar','bar':'foo'})])
+
+ G.edge[1][2][0]['data']=20 # another spelling, "edge"
+ assert_equal(G.edges(data=True),
+ [(1,2,{'data':20,'spam':'bar','bar':'foo'})])
+ G.edge[1][2][0]['listdata']=[20,200]
+ G.edge[1][2][0]['weight']=20
+ assert_equal(G.edges(data=True),
+ [(1,2,{'data':20,'spam':'bar',
+ 'bar':'foo','listdata':[20,200],'weight':20})])
+
+
+class TestMultiGraph(BaseMultiGraphTester,TestGraph):
+ def setUp(self):
+ self.Graph=networkx.MultiGraph
+ # build K3
+ ed1,ed2,ed3 = ({0:{}},{0:{}},{0:{}})
+ self.k3adj={0: {1: ed1, 2: ed2},
+ 1: {0: ed1, 2: ed3},
+ 2: {0: ed2, 1: ed3}}
+ self.k3edges=[(0, 1), (0, 2), (1, 2)]
+ self.k3nodes=[0, 1, 2]
+ self.K3=self.Graph()
+ self.K3.adj = self.K3.edge = self.k3adj
+ self.K3.node={}
+ self.K3.node[0]={}
+ self.K3.node[1]={}
+ self.K3.node[2]={}
+
+ def test_data_input(self):
+ G=self.Graph(data={1:[2],2:[1]}, name="test")
+ assert_equal(G.name,"test")
+ assert_equal(sorted(G.adj.items()),[(1, {2: {0:{}}}), (2, {1: {0:{}}})])
+
+ def test_getitem(self):
+ G=self.K3
+ assert_equal(G[0],{1: {0:{}}, 2: {0:{}}})
+ assert_raises(KeyError, G.__getitem__, 'j')
+ assert_raises((TypeError,networkx.NetworkXError), G.__getitem__, ['A'])
+
+ def test_remove_node(self):
+ G=self.K3
+ G.remove_node(0)
+ assert_equal(G.adj,{1:{2:{0:{}}},2:{1:{0:{}}}})
+ assert_raises((KeyError,networkx.NetworkXError), G.remove_node,-1)
+
+ def test_add_edge(self):
+ G=self.Graph()
+ G.add_edge(0,1)
+ assert_equal(G.adj,{0: {1: {0:{}}}, 1: {0: {0:{}}}})
+ G=self.Graph()
+ G.add_edge(*(0,1))
+ assert_equal(G.adj,{0: {1: {0:{}}}, 1: {0: {0:{}}}})
+
+ def test_add_edge_conflicting_key(self):
+ G=self.Graph()
+ G.add_edge(0,1,key=1)
+ G.add_edge(0,1)
+ assert_equal(G.number_of_edges(),2)
+ G=self.Graph()
+ G.add_edges_from([(0,1,1,{})])
+ G.add_edges_from([(0,1)])
+ assert_equal(G.number_of_edges(),2)
+
+
+
+ def test_add_edges_from(self):
+ G=self.Graph()
+ G.add_edges_from([(0,1),(0,1,{'weight':3})])
+ assert_equal(G.adj,{0: {1: {0:{},1:{'weight':3}}},
+ 1: {0: {0:{},1:{'weight':3}}}})
+ G.add_edges_from([(0,1),(0,1,{'weight':3})],weight=2)
+ assert_equal(G.adj,{0: {1: {0:{},1:{'weight':3},
+ 2:{'weight':2},3:{'weight':3}}},
+ 1: {0: {0:{},1:{'weight':3},
+ 2:{'weight':2},3:{'weight':3}}}})
+
+ # too few in tuple
+ assert_raises(networkx.NetworkXError, G.add_edges_from,[(0,)])
+ # too many in tuple
+ assert_raises(networkx.NetworkXError, G.add_edges_from,[(0,1,2,3,4)])
+ assert_raises(TypeError, G.add_edges_from,[0]) # not a tuple
+
+
+ def test_remove_edge(self):
+ G=self.K3
+ G.remove_edge(0,1)
+ assert_equal(G.adj,{0: {2: {0: {}}},
+ 1: {2: {0: {}}},
+ 2: {0: {0: {}},
+ 1: {0: {}}}})
+
+ assert_raises((KeyError,networkx.NetworkXError), G.remove_edge,-1,0)
+ assert_raises((KeyError,networkx.NetworkXError), G.remove_edge,0,2,
+ key=1)
+
+
+
+ def test_remove_edges_from(self):
+ G=self.K3.copy()
+ G.remove_edges_from([(0,1)])
+ assert_equal(G.adj,{0:{2:{0:{}}},1:{2:{0:{}}},2:{0:{0:{}},1:{0:{}}}})
+ G.remove_edges_from([(0,0)]) # silent fail
+ self.K3.add_edge(0,1)
+ G=self.K3.copy()
+ G.remove_edges_from(G.edges(data=True,keys=True))
+ assert_equal(G.adj,{0:{},1:{},2:{}})
+ G=self.K3.copy()
+ G.remove_edges_from(G.edges(data=False,keys=True))
+ assert_equal(G.adj,{0:{},1:{},2:{}})
+ G=self.K3.copy()
+ G.remove_edges_from(G.edges(data=False,keys=False))
+ assert_equal(G.adj,{0:{},1:{},2:{}})
+ G=self.K3.copy()
+ G.remove_edges_from([(0,1,0),(0,2,0,{}),(1,2)])
+ assert_equal(G.adj,{0:{1:{1:{}}},1:{0:{1:{}}},2:{}})
+
+
+
+ def test_remove_multiedge(self):
+ G=self.K3
+ G.add_edge(0,1,key='parallel edge')
+ G.remove_edge(0,1,key='parallel edge')
+ assert_equal(G.adj,{0: {1: {0:{}}, 2: {0:{}}},
+ 1: {0: {0:{}}, 2: {0:{}}},
+ 2: {0: {0:{}}, 1: {0:{}}}})
+ G.remove_edge(0,1)
+ assert_equal(G.adj,{0:{2:{0:{}}},1:{2:{0:{}}},2:{0:{0:{}},1:{0:{}}}})
+ assert_raises((KeyError,networkx.NetworkXError), G.remove_edge,-1,0)
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/convert.py b/lib/python2.7/site-packages/setoolsgui/networkx/convert.py
new file mode 100644
index 0000000..6333b9f
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/convert.py
@@ -0,0 +1,847 @@
+"""
+This module provides functions to convert
+NetworkX graphs to and from other formats.
+
+The preferred way of converting data to a NetworkX graph
+is through the graph constructor. The constructor calls
+the to_networkx_graph() function which attempts to guess the
+input type and convert it automatically.
+
+Examples
+--------
+
+Create a 10 node random graph from a numpy matrix
+
+>>> import numpy
+>>> a=numpy.reshape(numpy.random.random_integers(0,1,size=100),(10,10))
+>>> D=nx.DiGraph(a)
+
+or equivalently
+
+>>> D=nx.to_networkx_graph(a,create_using=nx.DiGraph())
+
+Create a graph with a single edge from a dictionary of dictionaries
+
+>>> d={0: {1: 1}} # dict-of-dicts single edge (0,1)
+>>> G=nx.Graph(d)
+
+
+See Also
+--------
+nx_pygraphviz, nx_pydot
+
+"""
+__author__ = """\n""".join(['Aric Hagberg (hagberg@lanl.gov)',
+ 'Pieter Swart (swart@lanl.gov)',
+ 'Dan Schult(dschult@colgate.edu)'])
+# Copyright (C) 2006-2011 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+
+import warnings
+import networkx as nx
+
+__all__ = ['to_networkx_graph',
+ 'from_dict_of_dicts', 'to_dict_of_dicts',
+ 'from_dict_of_lists', 'to_dict_of_lists',
+ 'from_edgelist', 'to_edgelist',
+ 'from_numpy_matrix', 'to_numpy_matrix',
+ 'to_numpy_recarray',
+ 'from_scipy_sparse_matrix', 'to_scipy_sparse_matrix']
+
+def _prep_create_using(create_using):
+ """Return a graph object ready to be populated.
+
+ If create_using is None return the default (just networkx.Graph())
+    If create_using.clear() works, assume it is a graph object.
+ Otherwise raise an exception because create_using is not a networkx graph.
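+
+    As an illustration (a doctest sketch; ``nx`` is the module-level import):
+
+    >>> H = nx.DiGraph()
+    >>> _prep_create_using(H) is H
+    True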
+
+ """
+ if create_using is None:
+ G=nx.Graph()
+ else:
+ G=create_using
+ try:
+ G.clear()
+ except:
+ raise TypeError("Input graph is not a networkx graph type")
+ return G
+
+def to_networkx_graph(data,create_using=None,multigraph_input=False):
+ """Make a NetworkX graph from a known data structure.
+
+ The preferred way to call this is automatically
+ from the class constructor
+
+ >>> d={0: {1: {'weight':1}}} # dict-of-dicts single edge (0,1)
+ >>> G=nx.Graph(d)
+
+ instead of the equivalent
+
+ >>> G=nx.from_dict_of_dicts(d)
+
+ Parameters
+ ----------
+    data : object to be converted
+       Current known types are:
+         any NetworkX graph
+         dict-of-dicts
+         dict-of-lists
+ list of edges
+ numpy matrix
+ numpy ndarray
+ scipy sparse matrix
+ pygraphviz agraph
+
+ create_using : NetworkX graph
+ Use specified graph for result. Otherwise a new graph is created.
+
+ multigraph_input : bool (default False)
+ If True and data is a dict_of_dicts,
+      try to create a multigraph assuming dict_of_dict_of_dicts keyed
+      by parallel-edge keys.
+ If data and create_using are both multigraphs then create
+ a multigraph from a multigraph.
+
+ """
+ # NX graph
+ if hasattr(data,"adj"):
+ try:
+ result= from_dict_of_dicts(data.adj,\
+ create_using=create_using,\
+ multigraph_input=data.is_multigraph())
+ if hasattr(data,'graph') and isinstance(data.graph,dict):
+ result.graph=data.graph.copy()
+ if hasattr(data,'node') and isinstance(data.node,dict):
+ result.node=dict( (n,dd.copy()) for n,dd in data.node.items() )
+ return result
+ except:
+ raise nx.NetworkXError("Input is not a correct NetworkX graph.")
+
+ # pygraphviz agraph
+ if hasattr(data,"is_strict"):
+ try:
+ return nx.from_agraph(data,create_using=create_using)
+ except:
+ raise nx.NetworkXError("Input is not a correct pygraphviz graph.")
+
+ # dict of dicts/lists
+ if isinstance(data,dict):
+ try:
+ return from_dict_of_dicts(data,create_using=create_using,\
+ multigraph_input=multigraph_input)
+ except:
+ try:
+ return from_dict_of_lists(data,create_using=create_using)
+ except:
+ raise TypeError("Input is not known type.")
+
+ # list or generator of edges
+ if (isinstance(data,list)
+ or hasattr(data,'next')
+ or hasattr(data, '__next__')):
+ try:
+ return from_edgelist(data,create_using=create_using)
+ except:
+ raise nx.NetworkXError("Input is not a valid edge list")
+
+ # numpy matrix or ndarray
+ try:
+ import numpy
+ if isinstance(data,numpy.matrix) or \
+ isinstance(data,numpy.ndarray):
+ try:
+ return from_numpy_matrix(data,create_using=create_using)
+ except:
+ raise nx.NetworkXError(\
+ "Input is not a correct numpy matrix or array.")
+ except ImportError:
+ warnings.warn('numpy not found, skipping conversion test.',
+ ImportWarning)
+
+ # scipy sparse matrix - any format
+ try:
+ import scipy
+ if hasattr(data,"format"):
+ try:
+ return from_scipy_sparse_matrix(data,create_using=create_using)
+ except:
+ raise nx.NetworkXError(\
+ "Input is not a correct scipy sparse matrix type.")
+ except ImportError:
+ warnings.warn('scipy not found, skipping conversion test.',
+ ImportWarning)
+
+
+ raise nx.NetworkXError(\
+ "Input is not a known data type for conversion.")
+
+
+
+def convert_to_undirected(G):
+ """Return a new undirected representation of the graph G."""
+ return G.to_undirected()
+
+
+def convert_to_directed(G):
+ """Return a new directed representation of the graph G."""
+ return G.to_directed()
+
+
+def to_dict_of_lists(G,nodelist=None):
+ """Return adjacency representation of graph as a dictionary of lists.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph
+
+ nodelist : list
+ Use only nodes specified in nodelist
+
+ Notes
+ -----
+ Completely ignores edge data for MultiGraph and MultiDiGraph.
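+
+    Examples
+    --------
+    A single undirected edge shows up in both adjacency lists:
+
+    >>> G = nx.Graph([(0, 1)])
+    >>> sorted(nx.to_dict_of_lists(G).items())
+    [(0, [1]), (1, [0])]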
+
+ """
+ if nodelist is None:
+ nodelist=G
+
+ d = {}
+ for n in nodelist:
+ d[n]=[nbr for nbr in G.neighbors(n) if nbr in nodelist]
+ return d
+
+def from_dict_of_lists(d,create_using=None):
+ """Return a graph from a dictionary of lists.
+
+ Parameters
+ ----------
+ d : dictionary of lists
+ A dictionary of lists adjacency representation.
+
+ create_using : NetworkX graph
+ Use specified graph for result. Otherwise a new graph is created.
+
+ Examples
+ --------
+ >>> dol= {0:[1]} # single edge (0,1)
+ >>> G=nx.from_dict_of_lists(dol)
+
+ or
+ >>> G=nx.Graph(dol) # use Graph constructor
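+
+    For an undirected multigraph, an edge listed in both directions is
+    still added only once:
+
+    >>> M = nx.from_dict_of_lists({0: [1], 1: [0]}, create_using=nx.MultiGraph())
+    >>> M.number_of_edges()
+    1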
+
+ """
+ G=_prep_create_using(create_using)
+ G.add_nodes_from(d)
+ if G.is_multigraph() and not G.is_directed():
+ # a dict_of_lists can't show multiedges. BUT for undirected graphs,
+ # each edge shows up twice in the dict_of_lists.
+ # So we need to treat this case separately.
+ seen={}
+ for node,nbrlist in d.items():
+ for nbr in nbrlist:
+ if nbr not in seen:
+ G.add_edge(node,nbr)
+ seen[node]=1 # don't allow reverse edge to show up
+ else:
+ G.add_edges_from( ((node,nbr) for node,nbrlist in d.items()
+ for nbr in nbrlist) )
+ return G
+
+
+def to_dict_of_dicts(G,nodelist=None,edge_data=None):
+ """Return adjacency representation of graph as a dictionary of dictionaries.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph
+
+ nodelist : list
+ Use only nodes specified in nodelist
+
+ edge_data : list, optional
+ If provided, the value of the dictionary will be
+ set to edge_data for all edges. This is useful to make
+ an adjacency matrix type representation with 1 as the edge data.
+        If edge_data is None, the edge data in G is used to fill the values.
+        If G is a multigraph, the edge data is a dict for each pair (u,v).
+ """
+ dod={}
+ if nodelist is None:
+ if edge_data is None:
+ for u,nbrdict in G.adjacency_iter():
+ dod[u]=nbrdict.copy()
+ else: # edge_data is not None
+ for u,nbrdict in G.adjacency_iter():
+ dod[u]=dod.fromkeys(nbrdict, edge_data)
+ else: # nodelist is not None
+ if edge_data is None:
+ for u in nodelist:
+ dod[u]={}
+                for v,data in G[u].items():
+                    if v in nodelist:
+                        dod[u][v]=data
+ else: # nodelist and edge_data are not None
+ for u in nodelist:
+ dod[u]={}
+                for v in G[u]:
+                    if v in nodelist:
+                        dod[u][v]=edge_data
+ return dod
+
+def from_dict_of_dicts(d,create_using=None,multigraph_input=False):
+ """Return a graph from a dictionary of dictionaries.
+
+ Parameters
+ ----------
+ d : dictionary of dictionaries
+ A dictionary of dictionaries adjacency representation.
+
+ create_using : NetworkX graph
+ Use specified graph for result. Otherwise a new graph is created.
+
+ multigraph_input : bool (default False)
+ When True, the values of the inner dict are assumed
+ to be containers of edge data for multiple edges.
+ Otherwise this routine assumes the edge data are singletons.
+
+ Examples
+ --------
+ >>> dod= {0: {1:{'weight':1}}} # single edge (0,1)
+ >>> G=nx.from_dict_of_dicts(dod)
+
+ or
+ >>> G=nx.Graph(dod) # use Graph constructor
+
+ """
+ G=_prep_create_using(create_using)
+ G.add_nodes_from(d)
+ # is dict a MultiGraph or MultiDiGraph?
+ if multigraph_input:
+ # make a copy of the list of edge data (but not the edge data)
+ if G.is_directed():
+ if G.is_multigraph():
+ G.add_edges_from( (u,v,key,data)
+ for u,nbrs in d.items()
+ for v,datadict in nbrs.items()
+ for key,data in datadict.items()
+ )
+ else:
+ G.add_edges_from( (u,v,data)
+ for u,nbrs in d.items()
+ for v,datadict in nbrs.items()
+ for key,data in datadict.items()
+ )
+ else: # Undirected
+ if G.is_multigraph():
+ seen=set() # don't add both directions of undirected graph
+ for u,nbrs in d.items():
+ for v,datadict in nbrs.items():
+ if (u,v) not in seen:
+ G.add_edges_from( (u,v,key,data)
+ for key,data in datadict.items()
+ )
+ seen.add((v,u))
+ else:
+ seen=set() # don't add both directions of undirected graph
+ for u,nbrs in d.items():
+ for v,datadict in nbrs.items():
+ if (u,v) not in seen:
+ G.add_edges_from( (u,v,data)
+ for key,data in datadict.items() )
+ seen.add((v,u))
+
+ else: # not a multigraph to multigraph transfer
+ if G.is_multigraph() and not G.is_directed():
+ # d can have both representations u-v, v-u in dict. Only add one.
+ # We don't need this check for digraphs since we add both directions,
+ # or for Graph() since it is done implicitly (parallel edges not allowed)
+ seen=set()
+ for u,nbrs in d.items():
+ for v,data in nbrs.items():
+ if (u,v) not in seen:
+ G.add_edge(u,v,attr_dict=data)
+ seen.add((v,u))
+ else:
+ G.add_edges_from( ( (u,v,data)
+ for u,nbrs in d.items()
+ for v,data in nbrs.items()) )
+ return G
+
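+# A minimal usage sketch (illustrative, not part of the original module):
+# the same dict of dicts read as keyed parallel edges versus as a single
+# edge whose data happens to be a dict of dicts.
+def _demo_multigraph_input():
+    import networkx as nx
+    dod = {0: {1: {0: {'weight': 1}, 1: {'weight': 2}}},
+           1: {0: {0: {'weight': 1}, 1: {'weight': 2}}}}
+    H = from_dict_of_dicts(dod, create_using=nx.MultiGraph(),
+                           multigraph_input=True)
+    assert H.number_of_edges() == 2   # two parallel edges, keyed 0 and 1
+    G = from_dict_of_dicts(dod, create_using=nx.MultiGraph())
+    assert G.number_of_edges() == 1   # inner dict taken as one edge's data
+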
+def to_edgelist(G,nodelist=None):
+ """Return a list of edges in the graph.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph
+
+ nodelist : list
+ Use only nodes specified in nodelist
+
+ """
+ if nodelist is None:
+ return G.edges(data=True)
+ else:
+ return G.edges(nodelist,data=True)
+
+def from_edgelist(edgelist,create_using=None):
+ """Return a graph from a list of edges.
+
+ Parameters
+ ----------
+ edgelist : list or iterator
+ Edge tuples
+
+ create_using : NetworkX graph
+ Use specified graph for result. Otherwise a new graph is created.
+
+ Examples
+ --------
+ >>> edgelist= [(0,1)] # single edge (0,1)
+ >>> G=nx.from_edgelist(edgelist)
+
+ or
+ >>> G=nx.Graph(edgelist) # use Graph constructor
+
+ """
+ G=_prep_create_using(create_using)
+ G.add_edges_from(edgelist)
+ return G
+
+def to_numpy_matrix(G, nodelist=None, dtype=None, order=None,
+ multigraph_weight=sum, weight='weight'):
+ """Return the graph adjacency matrix as a NumPy matrix.
+
+ Parameters
+ ----------
+ G : graph
+ The NetworkX graph used to construct the NumPy matrix.
+
+ nodelist : list, optional
+ The rows and columns are ordered according to the nodes in `nodelist`.
+ If `nodelist` is None, then the ordering is produced by G.nodes().
+
+ dtype : NumPy data type, optional
+ A valid single NumPy data type used to initialize the array.
+ This must be a simple type such as int or numpy.float64 and
+ not a compound data type (see to_numpy_recarray)
+ If None, then the NumPy default is used.
+
+ order : {'C', 'F'}, optional
+ Whether to store multidimensional data in C- or Fortran-contiguous
+ (row- or column-wise) order in memory. If None, then the NumPy default
+ is used.
+
+ multigraph_weight : {sum, min, max}, optional
+ An operator that determines how weights in multigraphs are handled.
+ The default is to sum the weights of the multiple edges.
+
+ weight : string or None optional (default='weight')
+ The edge attribute that holds the numerical value used for
+ the edge weight. If None then all edge weights are 1.
+
+
+ Returns
+ -------
+ M : NumPy matrix
+ Graph adjacency matrix.
+
+ See Also
+ --------
+ to_numpy_recarray, from_numpy_matrix
+
+ Notes
+ -----
+ The matrix entries are assigned with weight edge attribute. When
+ an edge does not have the weight attribute, the value of the entry is 1.
+ For multiple edges, the values of the entries are the sums of the edge
+ attributes for each edge.
+
+ When `nodelist` does not contain every node in `G`, the matrix is built
+ from the subgraph of `G` that is induced by the nodes in `nodelist`.
+
+ Examples
+ --------
+ >>> G = nx.MultiDiGraph()
+ >>> G.add_edge(0,1,weight=2)
+ >>> G.add_edge(1,0)
+ >>> G.add_edge(2,2,weight=3)
+ >>> G.add_edge(2,2)
+ >>> nx.to_numpy_matrix(G, nodelist=[0,1,2])
+ matrix([[ 0., 2., 0.],
+ [ 1., 0., 0.],
+ [ 0., 0., 4.]])
+ """
+ try:
+ import numpy as np
+ except ImportError:
+ raise ImportError(\
+ "to_numpy_matrix() requires numpy: http://scipy.org/ ")
+
+ if nodelist is None:
+ nodelist = G.nodes()
+
+ nodeset = set(nodelist)
+ if len(nodelist) != len(nodeset):
+ msg = "Ambiguous ordering: `nodelist` contained duplicates."
+ raise nx.NetworkXError(msg)
+
+ nlen=len(nodelist)
+ undirected = not G.is_directed()
+ index=dict(zip(nodelist,range(nlen)))
+
+ if G.is_multigraph():
+ # Handle MultiGraphs and MultiDiGraphs
+        # array of nan's to start with; any leftover nans will be converted to 0
+ # nans are used so we can use sum, min, max for multigraphs
+ M = np.zeros((nlen,nlen), dtype=dtype, order=order)+np.nan
+ # use numpy nan-aware operations
+ operator={sum:np.nansum, min:np.nanmin, max:np.nanmax}
+ try:
+ op=operator[multigraph_weight]
+        except KeyError:
+ raise ValueError('multigraph_weight must be sum, min, or max')
+
+ for u,v,attrs in G.edges_iter(data=True):
+ if (u in nodeset) and (v in nodeset):
+ i,j = index[u],index[v]
+ e_weight = attrs.get(weight, 1)
+ M[i,j] = op([e_weight,M[i,j]])
+ if undirected:
+ M[j,i] = M[i,j]
+ # convert any nans to zeros
+ M = np.asmatrix(np.nan_to_num(M))
+ else:
+ # Graph or DiGraph, this is much faster than above
+ M = np.zeros((nlen,nlen), dtype=dtype, order=order)
+ for u,nbrdict in G.adjacency_iter():
+ for v,d in nbrdict.items():
+ try:
+ M[index[u],index[v]]=d.get(weight,1)
+ except KeyError:
+ pass
+ M = np.asmatrix(M)
+ return M
+
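+# A minimal usage sketch (illustrative, not part of the original module):
+# how multigraph_weight collapses parallel edges into a single matrix entry.
+def _demo_multigraph_weight():
+    import networkx as nx
+    G = nx.MultiGraph()
+    G.add_edge(0, 1, weight=2)
+    G.add_edge(0, 1, weight=7)
+    assert to_numpy_matrix(G)[0, 1] == 9.0                         # sum
+    assert to_numpy_matrix(G, multigraph_weight=min)[0, 1] == 2.0
+    assert to_numpy_matrix(G, multigraph_weight=max)[0, 1] == 7.0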
+
+def from_numpy_matrix(A,create_using=None):
+ """Return a graph from numpy matrix.
+
+ The numpy matrix is interpreted as an adjacency matrix for the graph.
+
+ Parameters
+ ----------
+ A : numpy matrix
+ An adjacency matrix representation of a graph
+
+ create_using : NetworkX graph
+ Use specified graph for result. The default is Graph()
+
+ Notes
+ -----
+ If the numpy matrix has a single data type for each matrix entry it
+ will be converted to an appropriate Python data type.
+
+ If the numpy matrix has a user-specified compound data type the names
+ of the data fields will be used as attribute keys in the resulting
+ NetworkX graph.
+
+ See Also
+ --------
+ to_numpy_matrix, to_numpy_recarray
+
+ Examples
+ --------
+ Simple integer weights on edges:
+
+ >>> import numpy
+ >>> A=numpy.matrix([[1,1],[2,1]])
+ >>> G=nx.from_numpy_matrix(A)
+
+ User defined compound data type on edges:
+
+ >>> import numpy
+ >>> dt=[('weight',float),('cost',int)]
+ >>> A=numpy.matrix([[(1.0,2)]],dtype=dt)
+ >>> G=nx.from_numpy_matrix(A)
+ >>> G.edges(data=True)
+ [(0, 0, {'cost': 2, 'weight': 1.0})]
+ """
+ kind_to_python_type={'f':float,
+ 'i':int,
+ 'u':int,
+ 'b':bool,
+ 'c':complex,
+ 'S':str,
+ 'V':'void'}
+
+ try: # Python 3.x
+ blurb = chr(1245) # just to trigger the exception
+ kind_to_python_type['U']=str
+ except ValueError: # Python 2.6+
+ kind_to_python_type['U']=unicode
+
+ # This should never fail if you have created a numpy matrix with numpy...
+ try:
+ import numpy as np
+ except ImportError:
+ raise ImportError(\
+ "from_numpy_matrix() requires numpy: http://scipy.org/ ")
+
+ G=_prep_create_using(create_using)
+ n,m=A.shape
+ if n!=m:
+        raise nx.NetworkXError("Adjacency matrix is not square. "
+                               "nx,ny=%s"%(A.shape,))
+ dt=A.dtype
+ try:
+ python_type=kind_to_python_type[dt.kind]
+    except KeyError:
+ raise TypeError("Unknown numpy data type: %s"%dt)
+
+ # make sure we get isolated nodes
+ G.add_nodes_from(range(n))
+ # get a list of edges
+ x,y=np.asarray(A).nonzero()
+
+ # handle numpy constructed data type
+    if python_type == 'void':
+ fields=sorted([(offset,dtype,name) for name,(dtype,offset) in
+ A.dtype.fields.items()])
+ for (u,v) in zip(x,y):
+ attr={}
+ for (offset,dtype,name),val in zip(fields,A[u,v]):
+ attr[name]=kind_to_python_type[dtype.kind](val)
+ G.add_edge(u,v,attr)
+ else: # basic data type
+ G.add_edges_from( ((u,v,{'weight':python_type(A[u,v])})
+ for (u,v) in zip(x,y)) )
+ return G
+
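+# A minimal usage sketch (illustrative, not part of the original module):
+# an adjacency-matrix round trip; nonzero entries become 'weight' attributes.
+def _demo_numpy_round_trip():
+    import numpy
+    A = numpy.matrix([[0, 3], [3, 0]])
+    G = from_numpy_matrix(A)
+    assert G[0][1]['weight'] == 3
+    assert (to_numpy_matrix(G) == A).all()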
+
+def to_numpy_recarray(G,nodelist=None,
+ dtype=[('weight',float)],
+ order=None):
+ """Return the graph adjacency matrix as a NumPy recarray.
+
+ Parameters
+ ----------
+ G : graph
+ The NetworkX graph used to construct the NumPy matrix.
+
+ nodelist : list, optional
+ The rows and columns are ordered according to the nodes in `nodelist`.
+ If `nodelist` is None, then the ordering is produced by G.nodes().
+
+ dtype : NumPy data-type, optional
+ A valid NumPy named dtype used to initialize the NumPy recarray.
+ The data type names are assumed to be keys in the graph edge attribute
+ dictionary.
+
+ order : {'C', 'F'}, optional
+ Whether to store multidimensional data in C- or Fortran-contiguous
+ (row- or column-wise) order in memory. If None, then the NumPy default
+ is used.
+
+ Returns
+ -------
+ M : NumPy recarray
+ The graph with specified edge data as a Numpy recarray
+
+ Notes
+ -----
+ When `nodelist` does not contain every node in `G`, the matrix is built
+ from the subgraph of `G` that is induced by the nodes in `nodelist`.
+
+ Examples
+ --------
+ >>> G = nx.Graph()
+ >>> G.add_edge(1,2,weight=7.0,cost=5)
+ >>> A=nx.to_numpy_recarray(G,dtype=[('weight',float),('cost',int)])
+ >>> print(A.weight)
+ [[ 0. 7.]
+ [ 7. 0.]]
+ >>> print(A.cost)
+ [[0 5]
+ [5 0]]
+ """
+ try:
+ import numpy as np
+ except ImportError:
+ raise ImportError(\
+          "to_numpy_recarray() requires numpy: http://scipy.org/ ")
+
+ if G.is_multigraph():
+ raise nx.NetworkXError("Not implemented for multigraphs.")
+
+ if nodelist is None:
+ nodelist = G.nodes()
+
+ nodeset = set(nodelist)
+ if len(nodelist) != len(nodeset):
+ msg = "Ambiguous ordering: `nodelist` contained duplicates."
+ raise nx.NetworkXError(msg)
+
+ nlen=len(nodelist)
+ undirected = not G.is_directed()
+ index=dict(zip(nodelist,range(nlen)))
+ M = np.zeros((nlen,nlen), dtype=dtype, order=order)
+
+ names=M.dtype.names
+ for u,v,attrs in G.edges_iter(data=True):
+ if (u in nodeset) and (v in nodeset):
+ i,j = index[u],index[v]
+ values=tuple([attrs[n] for n in names])
+ M[i,j] = values
+ if undirected:
+ M[j,i] = M[i,j]
+
+ return M.view(np.recarray)
+
+
+def to_scipy_sparse_matrix(G, nodelist=None, dtype=None,
+ weight='weight', format='csr'):
+ """Return the graph adjacency matrix as a SciPy sparse matrix.
+
+ Parameters
+ ----------
+ G : graph
+ The NetworkX graph used to construct the NumPy matrix.
+
+ nodelist : list, optional
+ The rows and columns are ordered according to the nodes in `nodelist`.
+ If `nodelist` is None, then the ordering is produced by G.nodes().
+
+ dtype : NumPy data-type, optional
+ A valid NumPy dtype used to initialize the array. If None, then the
+ NumPy default is used.
+
+ weight : string or None optional (default='weight')
+ The edge attribute that holds the numerical value used for
+ the edge weight. If None then all edge weights are 1.
+
+ format : str in {'bsr', 'csr', 'csc', 'coo', 'lil', 'dia', 'dok'}
+ The type of the matrix to be returned (default 'csr'). For
+ some algorithms different implementations of sparse matrices
+ can perform better. See [1]_ for details.
+
+ Returns
+ -------
+ M : SciPy sparse matrix
+ Graph adjacency matrix.
+
+ Notes
+ -----
+ The matrix entries are populated using the edge attribute held in
+ parameter weight. When an edge does not have that attribute, the
+ value of the entry is 1.
+
+ For multiple edges the matrix values are the sums of the edge weights.
+
+ When `nodelist` does not contain every node in `G`, the matrix is built
+ from the subgraph of `G` that is induced by the nodes in `nodelist`.
+
+    The matrix is built in COO format and then converted to the type
+    requested with the format= keyword (default 'csr').
+
+ Examples
+ --------
+ >>> G = nx.MultiDiGraph()
+ >>> G.add_edge(0,1,weight=2)
+ >>> G.add_edge(1,0)
+ >>> G.add_edge(2,2,weight=3)
+ >>> G.add_edge(2,2)
+ >>> S = nx.to_scipy_sparse_matrix(G, nodelist=[0,1,2])
+ >>> print(S.todense())
+ [[0 2 0]
+ [1 0 0]
+ [0 0 4]]
+
+ References
+ ----------
+ .. [1] Scipy Dev. References, "Sparse Matrices",
+ http://docs.scipy.org/doc/scipy/reference/sparse.html
+ """
+ try:
+ from scipy import sparse
+ except ImportError:
+ raise ImportError(\
+ "to_scipy_sparse_matrix() requires scipy: http://scipy.org/ ")
+
+ if nodelist is None:
+ nodelist = G
+ nlen = len(nodelist)
+ if nlen == 0:
+ raise nx.NetworkXError("Graph has no nodes or edges")
+
+ if len(nodelist) != len(set(nodelist)):
+ msg = "Ambiguous ordering: `nodelist` contained duplicates."
+ raise nx.NetworkXError(msg)
+
+ index = dict(zip(nodelist,range(nlen)))
+ if G.number_of_edges() == 0:
+ row,col,data=[],[],[]
+ else:
+ row,col,data=zip(*((index[u],index[v],d.get(weight,1))
+ for u,v,d in G.edges_iter(nodelist, data=True)
+ if u in index and v in index))
+ if G.is_directed():
+ M = sparse.coo_matrix((data,(row,col)),shape=(nlen,nlen), dtype=dtype)
+ else:
+ # symmetrize matrix
+ M = sparse.coo_matrix((data+data,(row+col,col+row)),shape=(nlen,nlen),
+ dtype=dtype)
+ try:
+ return M.asformat(format)
+ except AttributeError:
+ raise nx.NetworkXError("Unknown sparse matrix format: %s"%format)
+
+def from_scipy_sparse_matrix(A,create_using=None):
+ """Return a graph from scipy sparse matrix adjacency list.
+
+ Parameters
+ ----------
+ A : scipy sparse matrix
+ An adjacency matrix representation of a graph
+
+ create_using : NetworkX graph
+ Use specified graph for result. The default is Graph()
+
+ Examples
+ --------
+ >>> import scipy.sparse
+ >>> A=scipy.sparse.eye(2,2,1)
+ >>> G=nx.from_scipy_sparse_matrix(A)
+
+ """
+ G=_prep_create_using(create_using)
+
+ # convert all formats to lil - not the most efficient way
+ AA=A.tolil()
+ n,m=AA.shape
+
+ if n!=m:
+ raise nx.NetworkXError(\
+ "Adjacency matrix is not square. nx,ny=%s"%(A.shape,))
+ G.add_nodes_from(range(n)) # make sure we get isolated nodes
+
+ for i,row in enumerate(AA.rows):
+ for pos,j in enumerate(row):
+ G.add_edge(i,j,**{'weight':AA.data[i][pos]})
+ return G
+
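+# A minimal usage sketch (illustrative, not part of the original module):
+# a sparse-matrix round trip (requires scipy).
+def _demo_scipy_round_trip():
+    import networkx as nx
+    G = nx.path_graph(4)
+    S = to_scipy_sparse_matrix(G, format='coo')
+    H = from_scipy_sparse_matrix(S)
+    assert H.number_of_edges() == G.number_of_edges() == 3
+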
+# fixture for nose tests
+def setup_module(module):
+ from nose import SkipTest
+ try:
+ import numpy
+    except ImportError:
+ raise SkipTest("NumPy not available")
+ try:
+ import scipy
+    except ImportError:
+ raise SkipTest("SciPy not available")
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/drawing/__init__.py b/lib/python2.7/site-packages/setoolsgui/networkx/drawing/__init__.py
new file mode 100644
index 0000000..211457e
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/drawing/__init__.py
@@ -0,0 +1,20 @@
+# graph drawing and interface to graphviz
+import sys
+from networkx.drawing.layout import *
+from networkx.drawing.nx_pylab import *
+
+# graphviz interface
+# prefer pygraphviz/agraph (it's faster)
+from networkx.drawing.nx_agraph import *
+try:
+ import pydot
+ import networkx.drawing.nx_pydot
+ from networkx.drawing.nx_pydot import *
+except ImportError:
+ pass
+try:
+ import pygraphviz
+ from networkx.drawing.nx_agraph import *
+except ImportError:
+ pass
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/drawing/layout.py b/lib/python2.7/site-packages/setoolsgui/networkx/drawing/layout.py
new file mode 100644
index 0000000..671d3b6
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/drawing/layout.py
@@ -0,0 +1,540 @@
+"""
+******
+Layout
+******
+
+Node positioning algorithms for graph drawing.
+"""
+# Copyright (C) 2004-2012 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+import networkx as nx
+__author__ = """Aric Hagberg (hagberg@lanl.gov)\nDan Schult(dschult@colgate.edu)"""
+__all__ = ['circular_layout',
+ 'random_layout',
+ 'shell_layout',
+ 'spring_layout',
+ 'spectral_layout',
+ 'fruchterman_reingold_layout']
+
+def random_layout(G,dim=2):
+ """Position nodes uniformly at random in the unit square.
+
+ For every node, a position is generated by choosing each of dim
+ coordinates uniformly at random on the interval [0.0, 1.0).
+
+ NumPy (http://scipy.org) is required for this function.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ A position will be assigned to every node in G.
+
+ dim : int
+ Dimension of layout.
+
+ Returns
+ -------
+ dict :
+ A dictionary of positions keyed by node
+
+ Examples
+ --------
+ >>> G = nx.lollipop_graph(4, 3)
+ >>> pos = nx.random_layout(G)
+
+ """
+ try:
+ import numpy as np
+ except ImportError:
+ raise ImportError("random_layout() requires numpy: http://scipy.org/ ")
+ n=len(G)
+ pos=np.asarray(np.random.random((n,dim)),dtype=np.float32)
+ return dict(zip(G,pos))
+
+
+def circular_layout(G, dim=2, scale=1):
+ # dim=2 only
+ """Position nodes on a circle.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ dim : int
+ Dimension of layout, currently only dim=2 is supported
+
+ scale : float
+ Scale factor for positions
+
+ Returns
+ -------
+ dict :
+ A dictionary of positions keyed by node
+
+ Examples
+ --------
+ >>> G=nx.path_graph(4)
+ >>> pos=nx.circular_layout(G)
+
+ Notes
+ ------
+ This algorithm currently only works in two dimensions and does not
+ try to minimize edge crossings.
+
+ """
+ try:
+ import numpy as np
+ except ImportError:
+ raise ImportError("circular_layout() requires numpy: http://scipy.org/ ")
+ if len(G)==0:
+ return {}
+ if len(G)==1:
+ return {G.nodes()[0]:(1,)*dim}
+ t=np.arange(0,2.0*np.pi,2.0*np.pi/len(G),dtype=np.float32)
+ pos=np.transpose(np.array([np.cos(t),np.sin(t)]))
+ pos=_rescale_layout(pos,scale=scale)
+ return dict(zip(G,pos))
+
+def shell_layout(G,nlist=None,dim=2,scale=1):
+ """Position nodes in concentric circles.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ nlist : list of lists
+ List of node lists for each shell.
+
+ dim : int
+ Dimension of layout, currently only dim=2 is supported
+
+ scale : float
+ Scale factor for positions
+
+ Returns
+ -------
+ dict :
+ A dictionary of positions keyed by node
+
+ Examples
+ --------
+ >>> G=nx.path_graph(4)
+ >>> shells=[[0],[1,2,3]]
+ >>> pos=nx.shell_layout(G,shells)
+
+ Notes
+ ------
+ This algorithm currently only works in two dimensions and does not
+ try to minimize edge crossings.
+
+ """
+ try:
+ import numpy as np
+ except ImportError:
+ raise ImportError("shell_layout() requires numpy: http://scipy.org/ ")
+ if len(G)==0:
+ return {}
+ if len(G)==1:
+ return {G.nodes()[0]:(1,)*dim}
+    if nlist is None:
+ nlist=[G.nodes()] # draw the whole graph in one shell
+
+ if len(nlist[0])==1:
+ radius=0.0 # single node at center
+ else:
+ radius=1.0 # else start at r=1
+
+ npos={}
+ for nodes in nlist:
+ t=np.arange(0,2.0*np.pi,2.0*np.pi/len(nodes),dtype=np.float32)
+ pos=np.transpose(np.array([radius*np.cos(t),radius*np.sin(t)]))
+ npos.update(zip(nodes,pos))
+ radius+=1.0
+
+ # FIXME: rescale
+ return npos
+
+
+def fruchterman_reingold_layout(G,dim=2,k=None,
+ pos=None,
+ fixed=None,
+ iterations=50,
+ weight='weight',
+ scale=1.0):
+ """Position nodes using Fruchterman-Reingold force-directed algorithm.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ dim : int
+ Dimension of layout
+
+ k : float (default=None)
+ Optimal distance between nodes. If None the distance is set to
+ 1/sqrt(n) where n is the number of nodes. Increase this value
+ to move nodes farther apart.
+
+    pos : dict or None optional (default=None)
+        Initial positions for nodes as a dictionary with node as keys
+        and values as a list or tuple. If None, then use random initial
+        positions.
+
+ fixed : list or None optional (default=None)
+ Nodes to keep fixed at initial position.
+
+ iterations : int optional (default=50)
+ Number of iterations of spring-force relaxation
+
+ weight : string or None optional (default='weight')
+ The edge attribute that holds the numerical value used for
+ the edge weight. If None, then all edge weights are 1.
+
+ scale : float (default=1.0)
+ Scale factor for positions. The nodes are positioned
+ in a box of size [0,scale] x [0,scale].
+
+
+ Returns
+ -------
+ dict :
+ A dictionary of positions keyed by node
+
+ Examples
+ --------
+ >>> G=nx.path_graph(4)
+ >>> pos=nx.spring_layout(G)
+
+ # The same using longer function name
+ >>> pos=nx.fruchterman_reingold_layout(G)
+ """
+ try:
+ import numpy as np
+ except ImportError:
+ raise ImportError("fruchterman_reingold_layout() requires numpy: http://scipy.org/ ")
+ if fixed is not None:
+ nfixed=dict(zip(G,range(len(G))))
+ fixed=np.asarray([nfixed[v] for v in fixed])
+
+ if pos is not None:
+ pos_arr=np.asarray(np.random.random((len(G),dim)))
+ for i,n in enumerate(G):
+ if n in pos:
+ pos_arr[i]=np.asarray(pos[n])
+ else:
+ pos_arr=None
+
+ if len(G)==0:
+ return {}
+ if len(G)==1:
+ return {G.nodes()[0]:(1,)*dim}
+
+ try:
+ # Sparse matrix
+ if len(G) < 500: # sparse solver for large graphs
+ raise ValueError
+ A=nx.to_scipy_sparse_matrix(G,weight=weight,dtype='f')
+ pos=_sparse_fruchterman_reingold(A,dim,k,pos_arr,fixed,iterations)
+    except (ImportError, ValueError):
+ A=nx.to_numpy_matrix(G,weight=weight)
+ pos=_fruchterman_reingold(A,dim,k,pos_arr,fixed,iterations)
+ if fixed is None:
+ pos=_rescale_layout(pos,scale=scale)
+ return dict(zip(G,pos))
+
+spring_layout=fruchterman_reingold_layout
+
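+# A minimal usage sketch (illustrative, not part of the original module):
+# spring_layout is just the alias above; scale= bounds the layout box so
+# coordinates end up in [0, scale] on every axis.
+def _demo_spring_scale():
+    import numpy as np
+    import networkx as nx
+    layout = spring_layout(nx.path_graph(4), scale=5.0)
+    coords = np.array([layout[n] for n in range(4)])
+    assert coords.min() == 0.0 and abs(coords.max() - 5.0) < 1e-6
+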
+def _fruchterman_reingold(A, dim=2, k=None, pos=None, fixed=None,
+ iterations=50):
+ # Position nodes in adjacency matrix A using Fruchterman-Reingold
+ # Entry point for NetworkX graph is fruchterman_reingold_layout()
+ try:
+ import numpy as np
+ except ImportError:
+ raise ImportError("_fruchterman_reingold() requires numpy: http://scipy.org/ ")
+
+ try:
+ nnodes,_=A.shape
+ except AttributeError:
+ raise nx.NetworkXError(
+ "fruchterman_reingold() takes an adjacency matrix as input")
+
+ A=np.asarray(A) # make sure we have an array instead of a matrix
+
+    if pos is None:
+ # random initial positions
+ pos=np.asarray(np.random.random((nnodes,dim)),dtype=A.dtype)
+ else:
+ # make sure positions are of same type as matrix
+ pos=pos.astype(A.dtype)
+
+ # optimal distance between nodes
+ if k is None:
+ k=np.sqrt(1.0/nnodes)
+ # the initial "temperature" is about .1 of domain area (=1x1)
+ # this is the largest step allowed in the dynamics.
+ t=0.1
+ # simple cooling scheme.
+ # linearly step down by dt on each iteration so last iteration is size dt.
+ dt=t/float(iterations+1)
+ delta = np.zeros((pos.shape[0],pos.shape[0],pos.shape[1]),dtype=A.dtype)
+ # the inscrutable (but fast) version
+ # this is still O(V^2)
+ # could use multilevel methods to speed this up significantly
+ for iteration in range(iterations):
+ # matrix of difference between points
+ for i in range(pos.shape[1]):
+ delta[:,:,i]= pos[:,i,None]-pos[:,i]
+ # distance between points
+ distance=np.sqrt((delta**2).sum(axis=-1))
+ # enforce minimum distance of 0.01
+ distance=np.where(distance<0.01,0.01,distance)
+ # displacement "force"
+ displacement=np.transpose(np.transpose(delta)*\
+ (k*k/distance**2-A*distance/k))\
+ .sum(axis=1)
+ # update positions
+ length=np.sqrt((displacement**2).sum(axis=1))
+ length=np.where(length<0.01,0.1,length)
+ delta_pos=np.transpose(np.transpose(displacement)*t/length)
+ if fixed is not None:
+ # don't change positions of fixed nodes
+ delta_pos[fixed]=0.0
+ pos+=delta_pos
+ # cool temperature
+ t-=dt
+ pos=_rescale_layout(pos)
+ return pos
+
+
+def _sparse_fruchterman_reingold(A, dim=2, k=None, pos=None, fixed=None,
+ iterations=50):
+ # Position nodes in adjacency matrix A using Fruchterman-Reingold
+ # Entry point for NetworkX graph is fruchterman_reingold_layout()
+ # Sparse version
+ try:
+ import numpy as np
+ except ImportError:
+ raise ImportError("_sparse_fruchterman_reingold() requires numpy: http://scipy.org/ ")
+ try:
+ nnodes,_=A.shape
+ except AttributeError:
+ raise nx.NetworkXError(
+ "fruchterman_reingold() takes an adjacency matrix as input")
+ try:
+ from scipy.sparse import spdiags,coo_matrix
+ except ImportError:
+        raise ImportError("_sparse_fruchterman_reingold() requires scipy: http://scipy.org/ ")
+    # make sure we have a List of Lists representation
+    try:
+        A=A.tolil()
+    except AttributeError:
+ A=(coo_matrix(A)).tolil()
+
+    if pos is None:
+ # random initial positions
+ pos=np.asarray(np.random.random((nnodes,dim)),dtype=A.dtype)
+ else:
+ # make sure positions are of same type as matrix
+ pos=pos.astype(A.dtype)
+
+ # no fixed nodes
+    if fixed is None:
+ fixed=[]
+
+ # optimal distance between nodes
+ if k is None:
+ k=np.sqrt(1.0/nnodes)
+ # the initial "temperature" is about .1 of domain area (=1x1)
+ # this is the largest step allowed in the dynamics.
+ t=0.1
+ # simple cooling scheme.
+ # linearly step down by dt on each iteration so last iteration is size dt.
+ dt=t/float(iterations+1)
+
+ displacement=np.zeros((dim,nnodes))
+ for iteration in range(iterations):
+ displacement*=0
+ # loop over rows
+ for i in range(A.shape[0]):
+ if i in fixed:
+ continue
+ # difference between this row's node position and all others
+ delta=(pos[i]-pos).T
+ # distance between points
+ distance=np.sqrt((delta**2).sum(axis=0))
+ # enforce minimum distance of 0.01
+ distance=np.where(distance<0.01,0.01,distance)
+ # the adjacency matrix row
+ Ai=np.asarray(A.getrowview(i).toarray())
+ # displacement "force"
+ displacement[:,i]+=\
+ (delta*(k*k/distance**2-Ai*distance/k)).sum(axis=1)
+ # update positions
+ length=np.sqrt((displacement**2).sum(axis=0))
+ length=np.where(length<0.01,0.1,length)
+ pos+=(displacement*t/length).T
+ # cool temperature
+ t-=dt
+ pos=_rescale_layout(pos)
+ return pos
+
+
+def spectral_layout(G, dim=2, weight='weight', scale=1):
+ """Position nodes using the eigenvectors of the graph Laplacian.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ dim : int
+ Dimension of layout
+
+ weight : string or None optional (default='weight')
+ The edge attribute that holds the numerical value used for
+ the edge weight. If None, then all edge weights are 1.
+
+ scale : float
+ Scale factor for positions
+
+ Returns
+ -------
+ dict :
+ A dictionary of positions keyed by node
+
+ Examples
+ --------
+ >>> G=nx.path_graph(4)
+ >>> pos=nx.spectral_layout(G)
+
+ Notes
+ -----
+    Directed graphs will be considered as undirected graphs when
+ positioning the nodes.
+
+ For larger graphs (>500 nodes) this will use the SciPy sparse
+ eigenvalue solver (ARPACK).
+ """
+ # handle some special cases that break the eigensolvers
+ try:
+ import numpy as np
+ except ImportError:
+ raise ImportError("spectral_layout() requires numpy: http://scipy.org/ ")
+ if len(G)<=2:
+ if len(G)==0:
+ pos=np.array([])
+ elif len(G)==1:
+ pos=np.array([[1,1]])
+ else:
+ pos=np.array([[0,0.5],[1,0.5]])
+ return dict(zip(G,pos))
+ try:
+ # Sparse matrix
+ if len(G)< 500: # dense solver is faster for small graphs
+ raise ValueError
+ A=nx.to_scipy_sparse_matrix(G, weight=weight,dtype='f')
+ # Symmetrize directed graphs
+ if G.is_directed():
+ A=A+np.transpose(A)
+ pos=_sparse_spectral(A,dim)
+ except (ImportError,ValueError):
+ # Dense matrix
+ A=nx.to_numpy_matrix(G, weight=weight)
+ # Symmetrize directed graphs
+ if G.is_directed():
+ A=A+np.transpose(A)
+ pos=_spectral(A,dim)
+
+ pos=_rescale_layout(pos,scale)
+ return dict(zip(G,pos))
+
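+# A minimal usage sketch (illustrative, not part of the original module):
+# on a path graph the Fiedler vector is monotone, so the spectral
+# x-coordinates order the nodes along the path (up to sign).
+def _demo_spectral_path():
+    import networkx as nx
+    pos = spectral_layout(nx.path_graph(5))
+    xs = [pos[n][0] for n in range(5)]
+    assert xs == sorted(xs) or xs == sorted(xs, reverse=True)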
+
+def _spectral(A, dim=2):
+ # Input adjacency matrix A
+ # Uses dense eigenvalue solver from numpy
+ try:
+ import numpy as np
+ except ImportError:
+ raise ImportError("spectral_layout() requires numpy: http://scipy.org/ ")
+ try:
+ nnodes,_=A.shape
+ except AttributeError:
+ raise nx.NetworkXError(\
+ "spectral() takes an adjacency matrix as input")
+
+ # form Laplacian matrix
+ # make sure we have an array instead of a matrix
+ A=np.asarray(A)
+ I=np.identity(nnodes,dtype=A.dtype)
+ D=I*np.sum(A,axis=1) # diagonal of degrees
+ L=D-A
+
+ eigenvalues,eigenvectors=np.linalg.eig(L)
+ # sort and keep smallest nonzero
+ index=np.argsort(eigenvalues)[1:dim+1] # 0 index is zero eigenvalue
+ return np.real(eigenvectors[:,index])
+
+def _sparse_spectral(A,dim=2):
+ # Input adjacency matrix A
+ # Uses sparse eigenvalue solver from scipy
+ # Could use multilevel methods here, see Koren "On spectral graph drawing"
+ try:
+ import numpy as np
+ from scipy.sparse import spdiags
+ except ImportError:
+ raise ImportError("_sparse_spectral() requires scipy & numpy: http://scipy.org/ ")
+ try:
+ from scipy.sparse.linalg.eigen import eigsh
+ except ImportError:
+ # scipy <0.9.0 names eigsh differently
+ from scipy.sparse.linalg import eigen_symmetric as eigsh
+ try:
+ nnodes,_=A.shape
+ except AttributeError:
+ raise nx.NetworkXError(\
+ "sparse_spectral() takes an adjacency matrix as input")
+
+ # form Laplacian matrix
+ data=np.asarray(A.sum(axis=1).T)
+ D=spdiags(data,0,nnodes,nnodes)
+ L=D-A
+
+ k=dim+1
+    # number of Lanczos vectors for ARPACK solver. What is the right scaling?
+ ncv=max(2*k+1,int(np.sqrt(nnodes)))
+ # return smallest k eigenvalues and eigenvectors
+ eigenvalues,eigenvectors=eigsh(L,k,which='SM',ncv=ncv)
+ index=np.argsort(eigenvalues)[1:k] # 0 index is zero eigenvalue
+ return np.real(eigenvectors[:,index])
+
+
+def _rescale_layout(pos,scale=1):
+    # rescale to (0, scale) in all axes
+
+ # shift origin to (0,0)
+ lim=0 # max coordinate for all axes
+ for i in range(pos.shape[1]):
+ pos[:,i]-=pos[:,i].min()
+ lim=max(pos[:,i].max(),lim)
+ # rescale to (0,scale) in all directions, preserves aspect
+ for i in range(pos.shape[1]):
+ pos[:,i]*=scale/lim
+ return pos
+
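+# A minimal usage sketch (illustrative, not part of the original module):
+# _rescale_layout shifts to the origin and scales every axis by one common
+# factor, so the aspect ratio of the layout is preserved.
+def _demo_rescale():
+    import numpy as np
+    out = _rescale_layout(np.array([[1.0, 3.0], [5.0, 7.0]]), scale=2)
+    assert out.min() == 0.0 and out.max() == 2.0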
+
+# fixture for nose tests
+def setup_module(module):
+ from nose import SkipTest
+ try:
+ import numpy
+    except ImportError:
+ raise SkipTest("NumPy not available")
+ try:
+ import scipy
+    except ImportError:
+ raise SkipTest("SciPy not available")
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/drawing/nx_agraph.py b/lib/python2.7/site-packages/setoolsgui/networkx/drawing/nx_agraph.py
new file mode 100644
index 0000000..ff61253
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/drawing/nx_agraph.py
@@ -0,0 +1,447 @@
+"""
+***************
+Graphviz AGraph
+***************
+
+Interface to pygraphviz AGraph class.
+
+Examples
+--------
+>>> G=nx.complete_graph(5)
+>>> A=nx.to_agraph(G)
+>>> H=nx.from_agraph(A)
+
+See Also
+--------
+Pygraphviz: http://networkx.lanl.gov/pygraphviz
+"""
+# Copyright (C) 2004-2012 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+import os
+import sys
+import tempfile
+import networkx as nx
+__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
+__all__ = ['from_agraph', 'to_agraph',
+ 'write_dot', 'read_dot',
+ 'graphviz_layout',
+ 'pygraphviz_layout',
+ 'view_pygraphviz']
+
+def from_agraph(A,create_using=None):
+ """Return a NetworkX Graph or DiGraph from a PyGraphviz graph.
+
+ Parameters
+ ----------
+ A : PyGraphviz AGraph
+ A graph created with PyGraphviz
+
+ create_using : NetworkX graph class instance
+ The output is created using the given graph class instance
+
+ Examples
+ --------
+ >>> K5=nx.complete_graph(5)
+ >>> A=nx.to_agraph(K5)
+    >>> G=nx.from_agraph(A)
+
+
+ Notes
+ -----
+    The returned graph G keeps the default graphviz attributes for
+    graphs, nodes and edges in the dictionaries G.graph['graph'],
+    G.graph['node'] and G.graph['edge'].
+
+    Node attributes found in A are copied onto the corresponding
+    nodes of G, and edge attributes are returned as edge data in G.
+ """
+ if create_using is None:
+ if A.is_directed():
+ if A.is_strict():
+ create_using=nx.DiGraph()
+ else:
+ create_using=nx.MultiDiGraph()
+ else:
+ if A.is_strict():
+ create_using=nx.Graph()
+ else:
+ create_using=nx.MultiGraph()
+
+ # assign defaults
+ N=nx.empty_graph(0,create_using)
+ N.name=''
+ if A.name is not None:
+ N.name=A.name
+
+ # add nodes, attributes to N.node_attr
+ for n in A.nodes():
+ str_attr=dict((str(k),v) for k,v in n.attr.items())
+ N.add_node(str(n),**str_attr)
+
+ # add edges, assign edge data as dictionary of attributes
+ for e in A.edges():
+ u,v=str(e[0]),str(e[1])
+ attr=dict(e.attr)
+ str_attr=dict((str(k),v) for k,v in attr.items())
+ if not N.is_multigraph():
+ if e.name is not None:
+ str_attr['key']=e.name
+ N.add_edge(u,v,**str_attr)
+ else:
+ N.add_edge(u,v,key=e.name,**str_attr)
+
+ # add default attributes for graph, nodes, and edges
+ # hang them on N.graph_attr
+ N.graph['graph']=dict(A.graph_attr)
+ N.graph['node']=dict(A.node_attr)
+ N.graph['edge']=dict(A.edge_attr)
+ return N
+
+def to_agraph(N):
+ """Return a pygraphviz graph from a NetworkX graph N.
+
+ Parameters
+ ----------
+ N : NetworkX graph
+ A graph created with NetworkX
+
+ Examples
+ --------
+ >>> K5=nx.complete_graph(5)
+ >>> A=nx.to_agraph(K5)
+
+ Notes
+ -----
+    If N holds default attribute dictionaries in N.graph['graph'],
+    N.graph['node'] or N.graph['edge'], they are copied to the
+    corresponding AGraph defaults (see from_agraph).
+
+ """
+ try:
+ import pygraphviz
+ except ImportError:
+        raise ImportError('requires pygraphviz '
+                          'http://networkx.lanl.gov/pygraphviz '
+                          '(not available for Python3)')
+ directed=N.is_directed()
+ strict=N.number_of_selfloops()==0 and not N.is_multigraph()
+ A=pygraphviz.AGraph(name=N.name,strict=strict,directed=directed)
+
+ # default graph attributes
+ A.graph_attr.update(N.graph.get('graph',{}))
+ A.node_attr.update(N.graph.get('node',{}))
+ A.edge_attr.update(N.graph.get('edge',{}))
+
+ # add nodes
+ for n,nodedata in N.nodes(data=True):
+ A.add_node(n,**nodedata)
+
+ # loop over edges
+
+ if N.is_multigraph():
+ for u,v,key,edgedata in N.edges_iter(data=True,keys=True):
+ str_edgedata=dict((k,str(v)) for k,v in edgedata.items())
+ A.add_edge(u,v,key=str(key),**str_edgedata)
+ else:
+ for u,v,edgedata in N.edges_iter(data=True):
+ str_edgedata=dict((k,str(v)) for k,v in edgedata.items())
+ A.add_edge(u,v,**str_edgedata)
+
+
+ return A
+
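+# A minimal usage sketch (illustrative, not part of the original module):
+# an AGraph round trip (requires pygraphviz); node names come back as strings.
+def _demo_agraph_round_trip():
+    import networkx as nx
+    A = to_agraph(nx.path_graph(3))
+    H = from_agraph(A)
+    assert sorted(H.nodes()) == ['0', '1', '2']
+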
+def write_dot(G,path):
+ """Write NetworkX graph G to Graphviz dot format on path.
+
+ Parameters
+ ----------
+ G : graph
+ A networkx graph
+ path : filename
+ Filename or file handle to write
+ """
+ try:
+ import pygraphviz
+ except ImportError:
+        raise ImportError('requires pygraphviz '
+                          'http://networkx.lanl.gov/pygraphviz '
+                          '(not available for Python3)')
+ A=to_agraph(G)
+ A.write(path)
+ A.clear()
+ return
+
+def read_dot(path):
+ """Return a NetworkX graph from a dot file on path.
+
+ Parameters
+ ----------
+ path : file or string
+ File name or file handle to read.
+ """
+ try:
+ import pygraphviz
+ except ImportError:
+        raise ImportError('read_dot() requires pygraphviz '
+                          'http://networkx.lanl.gov/pygraphviz '
+                          '(not available for Python3)')
+ A=pygraphviz.AGraph(file=path)
+ return from_agraph(A)
+
+
+def graphviz_layout(G,prog='neato',root=None, args=''):
+ """Create node positions for G using Graphviz.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ A graph created with NetworkX
+ prog : string
+ Name of Graphviz layout program
+ root : string, optional
+ Root node for twopi layout
+ args : string, optional
+ Extra arguments to Graphviz layout program
+
+ Returns : dictionary
+ Dictionary of x,y, positions keyed by node.
+
+ Examples
+ --------
+ >>> G=nx.petersen_graph()
+ >>> pos=nx.graphviz_layout(G)
+ >>> pos=nx.graphviz_layout(G,prog='dot')
+
+ Notes
+ -----
+ This is a wrapper for pygraphviz_layout.
+
+ """
+ return pygraphviz_layout(G,prog=prog,root=root,args=args)
+
+def pygraphviz_layout(G,prog='neato',root=None, args=''):
+ """Create node positions for G using Graphviz.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ A graph created with NetworkX
+ prog : string
+ Name of Graphviz layout program
+ root : string, optional
+ Root node for twopi layout
+ args : string, optional
+ Extra arguments to Graphviz layout program
+
+ Returns : dictionary
+ Dictionary of x,y, positions keyed by node.
+
+ Examples
+ --------
+ >>> G=nx.petersen_graph()
+ >>> pos=nx.graphviz_layout(G)
+ >>> pos=nx.graphviz_layout(G,prog='dot')
+
+ """
+ try:
+ import pygraphviz
+ except ImportError:
+        raise ImportError('requires pygraphviz '
+                          'http://networkx.lanl.gov/pygraphviz '
+                          '(not available for Python3)')
+ if root is not None:
+ args+="-Groot=%s"%root
+ A=to_agraph(G)
+ A.layout(prog=prog,args=args)
+ node_pos={}
+ for n in G:
+ node=pygraphviz.Node(A,n)
+ try:
+ xx,yy=node.attr["pos"].split(',')
+ node_pos[n]=(float(xx),float(yy))
+        except Exception:
+ print("no position for node",n)
+ node_pos[n]=(0.0,0.0)
+ return node_pos
+
+@nx.utils.open_file(5, 'w')
+def view_pygraphviz(G, edgelabel=None, prog='dot', args='',
+ suffix='', path=None):
+ """Views the graph G using the specified layout algorithm.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+        The graph to draw.
+    edgelabel : str, callable, None
+        If a string, then it specifies the edge attribute to be displayed
+ on the edge labels. If a callable, then it is called for each
+ edge and it should return the string to be displayed on the edges.
+ The function signature of `edgelabel` should be edgelabel(data),
+ where `data` is the edge attribute dictionary.
+ prog : string
+ Name of Graphviz layout program.
+ args : str
+ Additional arguments to pass to the Graphviz layout program.
+ suffix : str
+ If `filename` is None, we save to a temporary file. The value of
+ `suffix` will appear at the tail end of the temporary filename.
+ path : str, None
+ The filename used to save the image. If None, save to a temporary
+ file. File formats are the same as those from pygraphviz.agraph.draw.
+
+ Returns
+ -------
+ path : str
+ The filename of the generated image.
+ A : PyGraphviz graph
+ The PyGraphviz graph instance used to generate the image.
+
+ Notes
+ -----
+ If this function is called in succession too quickly, sometimes the
+ image is not displayed. So you might consider time.sleep(.5) between
+ calls if you experience problems.
+
+ """
+ if not len(G):
+ raise nx.NetworkXException("An empty graph cannot be drawn.")
+
+ import pygraphviz
+
+ # If we are providing default values for graphviz, these must be set
+ # before any nodes or edges are added to the PyGraphviz graph object.
+ # The reason for this is that default values only affect incoming objects.
+ # If you change the default values after the objects have been added,
+ # then they inherit no value and are set only if explicitly set.
+
+ # to_agraph() uses these values.
+ attrs = ['edge', 'node', 'graph']
+ for attr in attrs:
+ if attr not in G.graph:
+ G.graph[attr] = {}
+
+ # These are the default values.
+ edge_attrs = {'fontsize': '10'}
+ node_attrs = {'style': 'filled',
+ 'fillcolor': '#0000FF40',
+ 'height': '0.75',
+ 'width': '0.75',
+ 'shape': 'circle'}
+ graph_attrs = {}
+
+    def update_attrs(which, attrs):
+        # Update graph attributes. Return list of those which were added.
+        added = []
+        for k,v in attrs.items():
+            if k not in G.graph[which]:
+                G.graph[which][k] = v
+                added.append(k)
+        return added
+
+ def clean_attrs(which, added):
+ # Remove added attributes
+ for attr in added:
+ del G.graph[which][attr]
+ if not G.graph[which]:
+ del G.graph[which]
+
+    # Update all default values and remember which keys were actually added
+    added_edge = update_attrs('edge', edge_attrs)
+    added_node = update_attrs('node', node_attrs)
+    added_graph = update_attrs('graph', graph_attrs)
+
+ # Convert to agraph, so we inherit default values
+ A = to_agraph(G)
+
+    # Remove only the default values we added to the original graph.
+    clean_attrs('edge', added_edge)
+    clean_attrs('node', added_node)
+    clean_attrs('graph', added_graph)
+
+ # If the user passed in an edgelabel, we update the labels for all edges.
+ if edgelabel is not None:
+ if not hasattr(edgelabel, '__call__'):
+ def func(data):
+ return ''.join([" ", str(data[edgelabel]), " "])
+ else:
+ func = edgelabel
+
+ # update all the edge labels
+ if G.is_multigraph():
+ for u,v,key,data in G.edges_iter(keys=True, data=True):
+ # PyGraphviz doesn't convert the key to a string. See #339
+ edge = A.get_edge(u,v,str(key))
+ edge.attr['label'] = str(func(data))
+ else:
+ for u,v,data in G.edges_iter(data=True):
+ edge = A.get_edge(u,v)
+ edge.attr['label'] = str(func(data))
+
+ if path is None:
+ ext = 'png'
+ if suffix:
+ suffix = '_%s.%s' % (suffix, ext)
+ else:
+ suffix = '.%s' % (ext,)
+ path = tempfile.NamedTemporaryFile(suffix=suffix, delete=False)
+ else:
+ # Assume the decorator worked and it is a file-object.
+ pass
+
+ display_pygraphviz(A, path=path, prog=prog, args=args)
+
+ return path.name, A
+
+def display_pygraphviz(graph, path, format=None, prog=None, args=''):
+ """Internal function to display a graph in OS dependent manner.
+
+ Parameters
+ ----------
+ graph : PyGraphviz graph
+ A PyGraphviz AGraph instance.
+ path : file object
+ An already opened file object that will be closed.
+ format : str, None
+ An attempt is made to guess the output format based on the extension
+ of the filename. If that fails, the value of `format` is used.
+ prog : string
+ Name of Graphviz layout program.
+ args : str
+ Additional arguments to pass to the Graphviz layout program.
+
+ Notes
+ -----
+ If this function is called in succession too quickly, sometimes the
+ image is not displayed. So you might consider time.sleep(.5) between
+ calls if you experience problems.
+
+ """
+ if format is None:
+ filename = path.name
+ format = os.path.splitext(filename)[1].lower()[1:]
+ if not format:
+ # Let the draw() function use its default
+ format = None
+
+ # Save to a file and display in the default viewer.
+ # We must close the file before viewing it.
+ graph.draw(path, format, prog, args)
+ path.close()
+ nx.utils.default_opener(filename)
+
+# fixture for nose tests
+def setup_module(module):
+ from nose import SkipTest
+ try:
+ import pygraphviz
+    except ImportError:
+ raise SkipTest("pygraphviz not available")
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/drawing/nx_pydot.py b/lib/python2.7/site-packages/setoolsgui/networkx/drawing/nx_pydot.py
new file mode 100644
index 0000000..183f6ab
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/drawing/nx_pydot.py
@@ -0,0 +1,287 @@
+"""
+*****
+Pydot
+*****
+
+Import and export NetworkX graphs in Graphviz dot format using pydot.
+
+Either this module or nx_agraph can be used to interface with graphviz.
+
+See Also
+--------
+Pydot: http://code.google.com/p/pydot/
+Graphviz: http://www.research.att.com/sw/tools/graphviz/
+DOT Language: http://www.graphviz.org/doc/info/lang.html
+"""
+# Copyright (C) 2004-2013 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+from networkx.utils import open_file, make_str
+import networkx as nx
+__author__ = """Aric Hagberg (aric.hagberg@gmail.com)"""
+__all__ = ['write_dot', 'read_dot', 'graphviz_layout', 'pydot_layout',
+ 'to_pydot', 'from_pydot']
+
+@open_file(1,mode='w')
+def write_dot(G,path):
+ """Write NetworkX graph G to Graphviz dot format on path.
+
+ Path can be a string or a file handle.
+ """
+ try:
+ import pydot
+ except ImportError:
+        raise ImportError("write_dot() requires pydot "
+                          "http://code.google.com/p/pydot/")
+ P=to_pydot(G)
+ path.write(P.to_string())
+ return
+
+@open_file(0,mode='r')
+def read_dot(path):
+ """Return a NetworkX MultiGraph or MultiDiGraph from a dot file on path.
+
+ Parameters
+ ----------
+ path : filename or file handle
+
+ Returns
+ -------
+ G : NetworkX multigraph
+ A MultiGraph or MultiDiGraph.
+
+ Notes
+ -----
+ Use G=nx.Graph(nx.read_dot(path)) to return a Graph instead of a MultiGraph.
+ """
+ try:
+ import pydot
+ except ImportError:
+        raise ImportError("read_dot() requires pydot "
+                          "http://code.google.com/p/pydot/")
+
+ data=path.read()
+ P=pydot.graph_from_dot_data(data)
+ return from_pydot(P)
+
+def from_pydot(P):
+ """Return a NetworkX graph from a Pydot graph.
+
+ Parameters
+ ----------
+ P : Pydot graph
+ A graph created with Pydot
+
+ Returns
+ -------
+ G : NetworkX multigraph
+ A MultiGraph or MultiDiGraph.
+
+ Examples
+ --------
+ >>> K5=nx.complete_graph(5)
+ >>> A=nx.to_pydot(K5)
+ >>> G=nx.from_pydot(A) # return MultiGraph
+ >>> G=nx.Graph(nx.from_pydot(A)) # make a Graph instead of MultiGraph
+
+ """
+ if P.get_strict(None): # pydot bug: get_strict() shouldn't take argument
+ multiedges=False
+ else:
+ multiedges=True
+
+ if P.get_type()=='graph': # undirected
+ if multiedges:
+ create_using=nx.MultiGraph()
+ else:
+ create_using=nx.Graph()
+ else:
+ if multiedges:
+ create_using=nx.MultiDiGraph()
+ else:
+ create_using=nx.DiGraph()
+
+ # assign defaults
+ N=nx.empty_graph(0,create_using)
+ N.name=P.get_name()
+
+ # add nodes, attributes to N.node_attr
+ for p in P.get_node_list():
+ n=p.get_name().strip('"')
+ if n in ('node','graph','edge'):
+ continue
+ N.add_node(n,**p.get_attributes())
+
+ # add edges
+ for e in P.get_edge_list():
+ u=e.get_source().strip('"')
+ v=e.get_destination().strip('"')
+ attr=e.get_attributes()
+ N.add_edge(u,v,**attr)
+
+ # add default attributes for graph, nodes, edges
+ N.graph['graph']=P.get_attributes()
+    try:
+        N.graph['node']=P.get_node_defaults()[0]
+    except (IndexError, TypeError):
+        N.graph['node']={}
+    try:
+        N.graph['edge']=P.get_edge_defaults()[0]
+    except (IndexError, TypeError):
+        N.graph['edge']={}
+ return N
+
+def to_pydot(N, strict=True):
+ """Return a pydot graph from a NetworkX graph N.
+
+ Parameters
+ ----------
+ N : NetworkX graph
+ A graph created with NetworkX
+
+ Examples
+ --------
+ >>> K5=nx.complete_graph(5)
+ >>> P=nx.to_pydot(K5)
+
+ """
+ try:
+ import pydot
+ except ImportError:
+ raise ImportError('to_pydot() requires pydot: '
+ 'http://code.google.com/p/pydot/')
+
+ # set Graphviz graph type
+ if N.is_directed():
+ graph_type='digraph'
+ else:
+ graph_type='graph'
+    # honor strict=False; otherwise require no self loops and no parallel edges
+    strict = strict and N.number_of_selfloops()==0 and not N.is_multigraph()
+
+ name = N.graph.get('name')
+ graph_defaults=N.graph.get('graph',{})
+ if name is None:
+ P = pydot.Dot(graph_type=graph_type,strict=strict,**graph_defaults)
+ else:
+ P = pydot.Dot('"%s"'%name,graph_type=graph_type,strict=strict,
+ **graph_defaults)
+ try:
+ P.set_node_defaults(**N.graph['node'])
+ except KeyError:
+ pass
+ try:
+ P.set_edge_defaults(**N.graph['edge'])
+ except KeyError:
+ pass
+
+ for n,nodedata in N.nodes_iter(data=True):
+ str_nodedata=dict((k,make_str(v)) for k,v in nodedata.items())
+ p=pydot.Node(make_str(n),**str_nodedata)
+ P.add_node(p)
+
+ if N.is_multigraph():
+ for u,v,key,edgedata in N.edges_iter(data=True,keys=True):
+ str_edgedata=dict((k,make_str(v)) for k,v in edgedata.items())
+ edge=pydot.Edge(make_str(u),make_str(v),key=make_str(key),**str_edgedata)
+ P.add_edge(edge)
+
+ else:
+ for u,v,edgedata in N.edges_iter(data=True):
+ str_edgedata=dict((k,make_str(v)) for k,v in edgedata.items())
+ edge=pydot.Edge(make_str(u),make_str(v),**str_edgedata)
+ P.add_edge(edge)
+ return P
+
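+# A minimal usage sketch (illustrative, not part of the original module):
+# a pydot round trip (requires pydot); node names come back as strings, and
+# a strict dot graph converts back to a plain Graph.
+def _demo_pydot_round_trip():
+    import networkx as nx
+    P = to_pydot(nx.path_graph(3))
+    H = from_pydot(P)
+    assert sorted(H.nodes()) == ['0', '1', '2']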
+
+def pydot_from_networkx(N):
+ """Create a Pydot graph from a NetworkX graph."""
+ from warnings import warn
+ warn('pydot_from_networkx is replaced by to_pydot', DeprecationWarning)
+ return to_pydot(N)
+
+def networkx_from_pydot(D, create_using=None):
+ """Create a NetworkX graph from a Pydot graph."""
+ from warnings import warn
+ warn('networkx_from_pydot is replaced by from_pydot',
+ DeprecationWarning)
+ return from_pydot(D)
+
+def graphviz_layout(G,prog='neato',root=None, **kwds):
+ """Create node positions using Pydot and Graphviz.
+
+ Returns a dictionary of positions keyed by node.
+
+ Examples
+ --------
+ >>> G=nx.complete_graph(4)
+ >>> pos=nx.graphviz_layout(G)
+ >>> pos=nx.graphviz_layout(G,prog='dot')
+
+ Notes
+ -----
+ This is a wrapper for pydot_layout.
+ """
+ return pydot_layout(G=G,prog=prog,root=root,**kwds)
+
+
+def pydot_layout(G,prog='neato',root=None, **kwds):
+ """Create node positions using Pydot and Graphviz.
+
+ Returns a dictionary of positions keyed by node.
+
+ Examples
+ --------
+ >>> G=nx.complete_graph(4)
+ >>> pos=nx.pydot_layout(G)
+ >>> pos=nx.pydot_layout(G,prog='dot')
+ """
+ try:
+ import pydot
+ except ImportError:
+        raise ImportError('pydot_layout() requires pydot '
+                          'http://code.google.com/p/pydot/')
+
+ P=to_pydot(G)
+ if root is not None :
+ P.set("root",make_str(root))
+
+ D=P.create_dot(prog=prog)
+
+ if D=="": # no data returned
+ print("Graphviz layout with %s failed"%(prog))
+ print()
+ print("To debug what happened try:")
+        print("P=nx.to_pydot(G)")
+ print("P.write_dot(\"file.dot\")")
+ print("And then run %s on file.dot"%(prog))
+ return
+
+ Q=pydot.graph_from_dot_data(D)
+
+ node_pos={}
+ for n in G.nodes():
+ pydot_node = pydot.Node(make_str(n)).get_name().encode('utf-8')
+ node=Q.get_node(pydot_node)
+
+ if isinstance(node,list):
+ node=node[0]
+ pos=node.get_pos()[1:-1] # strip leading and trailing double quotes
+        if pos is not None:
+ xx,yy=pos.split(",")
+ node_pos[n]=(float(xx),float(yy))
+ return node_pos
+
+# fixture for nose tests
+def setup_module(module):
+ from nose import SkipTest
+ try:
+ import pydot
+ import dot_parser
+    except ImportError:
+ raise SkipTest("pydot not available")
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/drawing/nx_pylab.py b/lib/python2.7/site-packages/setoolsgui/networkx/drawing/nx_pylab.py
new file mode 100644
index 0000000..c7d0cf6
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/drawing/nx_pylab.py
@@ -0,0 +1,896 @@
+"""
+**********
+Matplotlib
+**********
+
+Draw networks with matplotlib.
+
+See Also
+--------
+
+matplotlib: http://matplotlib.sourceforge.net/
+
+pygraphviz: http://networkx.lanl.gov/pygraphviz/
+
+"""
+# Copyright (C) 2004-2012 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+import networkx as nx
+from networkx.drawing.layout import shell_layout,\
+ circular_layout,spectral_layout,spring_layout,random_layout
+__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
+__all__ = ['draw',
+ 'draw_networkx',
+ 'draw_networkx_nodes',
+ 'draw_networkx_edges',
+ 'draw_networkx_labels',
+ 'draw_networkx_edge_labels',
+ 'draw_circular',
+ 'draw_random',
+ 'draw_spectral',
+ 'draw_spring',
+ 'draw_shell',
+ 'draw_graphviz']
+
+def draw(G, pos=None, ax=None, hold=None, **kwds):
+ """Draw the graph G with Matplotlib.
+
+ Draw the graph as a simple representation with no node
+ labels or edge labels and using the full Matplotlib figure area
+ and no axis labels by default. See draw_networkx() for more
+ full-featured drawing that allows title, axis labels etc.
+
+ Parameters
+ ----------
+ G : graph
+ A networkx graph
+
+ pos : dictionary, optional
+ A dictionary with nodes as keys and positions as values.
+ If not specified a spring layout positioning will be computed.
+ See networkx.layout for functions that compute node positions.
+
+ ax : Matplotlib Axes object, optional
+ Draw the graph in specified Matplotlib axes.
+
+ hold : bool, optional
+ Set the Matplotlib hold state. If True subsequent draw
+ commands will be added to the current axes.
+
+ **kwds : optional keywords
+ See networkx.draw_networkx() for a description of optional keywords.
+
+ Examples
+ --------
+ >>> G=nx.dodecahedral_graph()
+ >>> nx.draw(G)
+ >>> nx.draw(G,pos=nx.spring_layout(G)) # use spring layout
+
+ See Also
+ --------
+ draw_networkx()
+ draw_networkx_nodes()
+ draw_networkx_edges()
+ draw_networkx_labels()
+ draw_networkx_edge_labels()
+
+ Notes
+ -----
+ This function has the same name as pylab.draw and pyplot.draw
+ so beware when using
+
+ >>> from networkx import *
+
+ since you might overwrite the pylab.draw function.
+
+ With pyplot use
+
+ >>> import matplotlib.pyplot as plt
+ >>> import networkx as nx
+ >>> G=nx.dodecahedral_graph()
+ >>> nx.draw(G) # networkx draw()
+ >>> plt.draw() # pyplot draw()
+
+ Also see the NetworkX drawing examples at
+ http://networkx.lanl.gov/gallery.html
+ """
+ try:
+ import matplotlib.pyplot as plt
+ except ImportError:
+ raise ImportError("Matplotlib required for draw()")
+ except RuntimeError:
+ print("Matplotlib unable to open display")
+ raise
+
+ if ax is None:
+ cf = plt.gcf()
+ else:
+ cf = ax.get_figure()
+ cf.set_facecolor('w')
+ if ax is None:
+ if cf._axstack() is None:
+ ax=cf.add_axes((0,0,1,1))
+ else:
+ ax=cf.gca()
+
+ # allow callers to override the hold state by passing hold=True|False
+ b = plt.ishold()
+ h = kwds.pop('hold', None)
+ if h is not None:
+ plt.hold(h)
+    try:
+        draw_networkx(G,pos=pos,ax=ax,**kwds)
+        ax.set_axis_off()
+        plt.draw_if_interactive()
+    finally:
+        plt.hold(b)
+    return
+
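+# A minimal usage sketch (illustrative, not part of the original module):
+# drawing to a file without a display, assuming a matplotlib version with
+# the Agg backend and the hold API used above; 'petersen.png' is just an
+# example filename.
+def _demo_draw_to_file():
+    import matplotlib
+    matplotlib.use('Agg')   # select a headless backend before pyplot is used
+    import matplotlib.pyplot as plt
+    import networkx as nx
+    draw(nx.petersen_graph())
+    plt.savefig('petersen.png')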
+
+def draw_networkx(G, pos=None, with_labels=True, **kwds):
+ """Draw the graph G using Matplotlib.
+
+ Draw the graph with Matplotlib with options for node positions,
+ labeling, titles, and many other drawing features.
+ See draw() for simple drawing without labels or axes.
+
+ Parameters
+ ----------
+ G : graph
+ A networkx graph
+
+ pos : dictionary, optional
+ A dictionary with nodes as keys and positions as values.
+ If not specified a spring layout positioning will be computed.
+ See networkx.layout for functions that compute node positions.
+
+ with_labels : bool, optional (default=True)
+ Set to True to draw labels on the nodes.
+
+ ax : Matplotlib Axes object, optional
+ Draw the graph in the specified Matplotlib axes.
+
+ nodelist : list, optional (default G.nodes())
+ Draw only specified nodes
+
+ edgelist : list, optional (default=G.edges())
+ Draw only specified edges
+
+ node_size : scalar or array, optional (default=300)
+ Size of nodes. If an array is specified it must be the
+ same length as nodelist.
+
+ node_color : color string, or array of floats, (default='r')
+ Node color. Can be a single color format string,
+ or a sequence of colors with the same length as nodelist.
+ If numeric values are specified they will be mapped to
+ colors using the cmap and vmin,vmax parameters. See
+ matplotlib.scatter for more details.
+
+ node_shape : string, optional (default='o')
+ The shape of the node. Specification is as matplotlib.scatter
+ marker, one of 'so^>v<dph8'.
+
+ alpha : float, optional (default=1.0)
+ The node transparency
+
+ cmap : Matplotlib colormap, optional (default=None)
+ Colormap for mapping intensities of nodes
+
+ vmin,vmax : float, optional (default=None)
+ Minimum and maximum for node colormap scaling
+
+ linewidths : [None | scalar | sequence]
+ Line width of symbol border (default =1.0)
+
+ width : float, optional (default=1.0)
+ Line width of edges
+
+ edge_color : color string, or array of floats (default='r')
+ Edge color. Can be a single color format string,
+ or a sequence of colors with the same length as edgelist.
+ If numeric values are specified they will be mapped to
+ colors using the edge_cmap and edge_vmin,edge_vmax parameters.
+
+    edge_cmap : Matplotlib colormap, optional (default=None)
+ Colormap for mapping intensities of edges
+
+ edge_vmin,edge_vmax : floats, optional (default=None)
+ Minimum and maximum for edge colormap scaling
+
+ style : string, optional (default='solid')
+        Edge line style (solid|dashed|dotted|dashdot)
+
+ labels : dictionary, optional (default=None)
+       Node labels in a dictionary of text labels keyed by node
+
+ font_size : int, optional (default=12)
+ Font size for text labels
+
+    font_color : string, optional (default='k', i.e. black)
+ Font color string
+
+ font_weight : string, optional (default='normal')
+ Font weight
+
+ font_family : string, optional (default='sans-serif')
+ Font family
+
+ label : string, optional
+ Label for graph legend
+
+ Examples
+ --------
+ >>> G=nx.dodecahedral_graph()
+ >>> nx.draw(G)
+ >>> nx.draw(G,pos=nx.spring_layout(G)) # use spring layout
+
+ >>> import matplotlib.pyplot as plt
+    >>> limits=plt.axis('off') # turn off axis
+
+ Also see the NetworkX drawing examples at
+ http://networkx.lanl.gov/gallery.html
+
+ See Also
+ --------
+ draw()
+ draw_networkx_nodes()
+ draw_networkx_edges()
+ draw_networkx_labels()
+ draw_networkx_edge_labels()
+ """
+ try:
+ import matplotlib.pyplot as plt
+ except ImportError:
+ raise ImportError("Matplotlib required for draw()")
+ except RuntimeError:
+ print("Matplotlib unable to open display")
+ raise
+
+ if pos is None:
+ pos=nx.drawing.spring_layout(G) # default to spring layout
+
+ node_collection=draw_networkx_nodes(G, pos, **kwds)
+ edge_collection=draw_networkx_edges(G, pos, **kwds)
+ if with_labels:
+ draw_networkx_labels(G, pos, **kwds)
+ plt.draw_if_interactive()
+
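+# A short illustrative sketch for draw_networkx() using parameters documented
+# above (node_color, edge_color, with_labels); assumes matplotlib is present:
+#
+#     import matplotlib.pyplot as plt
+#     import networkx as nx
+#     G = nx.dodecahedral_graph()
+#     pos = nx.spring_layout(G)   # compute positions once and reuse them
+#     nx.draw_networkx(G, pos, node_color='b', edge_color='k',
+#                      with_labels=True)
+#     plt.axis('off')
+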
+def draw_networkx_nodes(G, pos,
+ nodelist=None,
+ node_size=300,
+ node_color='r',
+ node_shape='o',
+ alpha=1.0,
+ cmap=None,
+ vmin=None,
+ vmax=None,
+ ax=None,
+ linewidths=None,
+ label = None,
+ **kwds):
+ """Draw the nodes of the graph G.
+
+ This draws only the nodes of the graph G.
+
+ Parameters
+ ----------
+ G : graph
+ A networkx graph
+
+ pos : dictionary
+ A dictionary with nodes as keys and positions as values.
+ If not specified a spring layout positioning will be computed.
+ See networkx.layout for functions that compute node positions.
+
+ ax : Matplotlib Axes object, optional
+ Draw the graph in the specified Matplotlib axes.
+
+ nodelist : list, optional
+ Draw only specified nodes (default G.nodes())
+
+ node_size : scalar or array
+ Size of nodes (default=300). If an array is specified it must be the
+ same length as nodelist.
+
+ node_color : color string, or array of floats
+ Node color. Can be a single color format string (default='r'),
+ or a sequence of colors with the same length as nodelist.
+ If numeric values are specified they will be mapped to
+ colors using the cmap and vmin,vmax parameters. See
+ matplotlib.scatter for more details.
+
+ node_shape : string
+ The shape of the node. Specification is as matplotlib.scatter
+ marker, one of 'so^>v<dph8' (default='o').
+
+ alpha : float
+ The node transparency (default=1.0)
+
+ cmap : Matplotlib colormap
+ Colormap for mapping intensities of nodes (default=None)
+
+ vmin,vmax : floats
+ Minimum and maximum for node colormap scaling (default=None)
+
+ linewidths : [None | scalar | sequence]
+       Line width of symbol border (default=1.0)
+
+    label : [None | string]
+ Label for legend
+
+ Examples
+ --------
+ >>> G=nx.dodecahedral_graph()
+ >>> nodes=nx.draw_networkx_nodes(G,pos=nx.spring_layout(G))
+
+ Also see the NetworkX drawing examples at
+ http://networkx.lanl.gov/gallery.html
+
+ See Also
+ --------
+ draw()
+ draw_networkx()
+ draw_networkx_edges()
+ draw_networkx_labels()
+ draw_networkx_edge_labels()
+ """
+ try:
+ import matplotlib.pyplot as plt
+ import numpy
+ except ImportError:
+ raise ImportError("Matplotlib required for draw()")
+ except RuntimeError:
+ print("Matplotlib unable to open display")
+ raise
+
+
+ if ax is None:
+ ax=plt.gca()
+
+ if nodelist is None:
+ nodelist=G.nodes()
+
+ if not nodelist or len(nodelist)==0: # empty nodelist, no drawing
+ return None
+
+ try:
+ xy=numpy.asarray([pos[v] for v in nodelist])
+ except KeyError as e:
+ raise nx.NetworkXError('Node %s has no position.'%e)
+ except ValueError:
+ raise nx.NetworkXError('Bad value in node positions.')
+
+ node_collection=ax.scatter(xy[:,0], xy[:,1],
+ s=node_size,
+ c=node_color,
+ marker=node_shape,
+ cmap=cmap,
+ vmin=vmin,
+ vmax=vmax,
+ alpha=alpha,
+ linewidths=linewidths,
+ label=label)
+
+ node_collection.set_zorder(2)
+ return node_collection
+
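+# Illustrative sketch: numeric node_color values are mapped through cmap with
+# vmin/vmax, as documented above (assumes matplotlib's 'Blues' colormap):
+#
+#     import matplotlib.pyplot as plt
+#     import networkx as nx
+#     G = nx.path_graph(5)
+#     pos = nx.spring_layout(G)
+#     nx.draw_networkx_nodes(G, pos, node_color=range(5),
+#                            cmap=plt.cm.Blues, vmin=0, vmax=4)
+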
+
+def draw_networkx_edges(G, pos,
+ edgelist=None,
+ width=1.0,
+ edge_color='k',
+ style='solid',
+ alpha=None,
+ edge_cmap=None,
+ edge_vmin=None,
+ edge_vmax=None,
+ ax=None,
+ arrows=True,
+ label=None,
+ **kwds):
+ """Draw the edges of the graph G.
+
+ This draws only the edges of the graph G.
+
+ Parameters
+ ----------
+ G : graph
+ A networkx graph
+
+ pos : dictionary
+ A dictionary with nodes as keys and positions as values.
+ If not specified a spring layout positioning will be computed.
+ See networkx.layout for functions that compute node positions.
+
+ edgelist : collection of edge tuples
+       Draw only specified edges (default=G.edges())
+
+ width : float
+       Line width of edges (default=1.0)
+
+ edge_color : color string, or array of floats
+       Edge color. Can be a single color format string (default='k'),
+ or a sequence of colors with the same length as edgelist.
+ If numeric values are specified they will be mapped to
+ colors using the edge_cmap and edge_vmin,edge_vmax parameters.
+
+ style : string
+       Edge line style (default='solid') (solid|dashed|dotted|dashdot)
+
+ alpha : float
+ The edge transparency (default=1.0)
+
+    edge_cmap : Matplotlib colormap
+ Colormap for mapping intensities of edges (default=None)
+
+ edge_vmin,edge_vmax : floats
+ Minimum and maximum for edge colormap scaling (default=None)
+
+ ax : Matplotlib Axes object, optional
+ Draw the graph in the specified Matplotlib axes.
+
+ arrows : bool, optional (default=True)
+ For directed graphs, if True draw arrowheads.
+
+    label : [None | string]
+ Label for legend
+
+ Notes
+ -----
+ For directed graphs, "arrows" (actually just thicker stubs) are drawn
+ at the head end. Arrows can be turned off with keyword arrows=False.
+ Yes, it is ugly but drawing proper arrows with Matplotlib this
+ way is tricky.
+
+ Examples
+ --------
+ >>> G=nx.dodecahedral_graph()
+ >>> edges=nx.draw_networkx_edges(G,pos=nx.spring_layout(G))
+
+ Also see the NetworkX drawing examples at
+ http://networkx.lanl.gov/gallery.html
+
+ See Also
+ --------
+ draw()
+ draw_networkx()
+ draw_networkx_nodes()
+ draw_networkx_labels()
+ draw_networkx_edge_labels()
+ """
+ try:
+ import matplotlib
+ import matplotlib.pyplot as plt
+ import matplotlib.cbook as cb
+ from matplotlib.colors import colorConverter,Colormap
+ from matplotlib.collections import LineCollection
+ import numpy
+ except ImportError:
+ raise ImportError("Matplotlib required for draw()")
+ except RuntimeError:
+ print("Matplotlib unable to open display")
+ raise
+
+ if ax is None:
+ ax=plt.gca()
+
+ if edgelist is None:
+ edgelist=G.edges()
+
+ if not edgelist or len(edgelist)==0: # no edges!
+ return None
+
+ # set edge positions
+ edge_pos=numpy.asarray([(pos[e[0]],pos[e[1]]) for e in edgelist])
+
+ if not cb.iterable(width):
+ lw = (width,)
+ else:
+ lw = width
+
+ if not cb.is_string_like(edge_color) \
+ and cb.iterable(edge_color) \
+ and len(edge_color)==len(edge_pos):
+ if numpy.alltrue([cb.is_string_like(c)
+ for c in edge_color]):
+ # (should check ALL elements)
+ # list of color letters such as ['k','r','k',...]
+ edge_colors = tuple([colorConverter.to_rgba(c,alpha)
+ for c in edge_color])
+ elif numpy.alltrue([not cb.is_string_like(c)
+ for c in edge_color]):
+ # If color specs are given as (rgb) or (rgba) tuples, we're OK
+ if numpy.alltrue([cb.iterable(c) and len(c) in (3,4)
+ for c in edge_color]):
+ edge_colors = tuple(edge_color)
+ else:
+ # numbers (which are going to be mapped with a colormap)
+ edge_colors = None
+ else:
+ raise ValueError('edge_color must consist of either color names or numbers')
+ else:
+ if cb.is_string_like(edge_color) or len(edge_color)==1:
+ edge_colors = ( colorConverter.to_rgba(edge_color, alpha), )
+ else:
+            raise ValueError('edge_color must be a single color or list of exactly m colors where m is the number of edges')
+
+ edge_collection = LineCollection(edge_pos,
+ colors = edge_colors,
+ linewidths = lw,
+ antialiaseds = (1,),
+ linestyle = style,
+ transOffset = ax.transData,
+ )
+
+
+ edge_collection.set_zorder(1) # edges go behind nodes
+ edge_collection.set_label(label)
+ ax.add_collection(edge_collection)
+
+    # Note: there was a bug in mpl regarding the handling of alpha values
+    # for each line in a LineCollection. It was fixed in matplotlib r7184
+    # and r7189 (June 6 2009), so we no longer set the alpha value globally;
+    # the user can now provide per-edge alphas instead. Only set it globally
+    # if provided as a scalar.
+ if cb.is_numlike(alpha):
+ edge_collection.set_alpha(alpha)
+
+ if edge_colors is None:
+ if edge_cmap is not None:
+ assert(isinstance(edge_cmap, Colormap))
+ edge_collection.set_array(numpy.asarray(edge_color))
+ edge_collection.set_cmap(edge_cmap)
+ if edge_vmin is not None or edge_vmax is not None:
+ edge_collection.set_clim(edge_vmin, edge_vmax)
+ else:
+ edge_collection.autoscale()
+
+ arrow_collection=None
+
+ if G.is_directed() and arrows:
+
+ # a directed graph hack
+ # draw thick line segments at head end of edge
+ # waiting for someone else to implement arrows that will work
+ arrow_colors = edge_colors
+ a_pos=[]
+ p=1.0-0.25 # make head segment 25 percent of edge length
+ for src,dst in edge_pos:
+ x1,y1=src
+ x2,y2=dst
+ dx=x2-x1 # x offset
+ dy=y2-y1 # y offset
+ d=numpy.sqrt(float(dx**2+dy**2)) # length of edge
+ if d==0: # source and target at same position
+ continue
+            # note: elif below keeps the vertical-edge result from being
+            # recomputed by the general branch
+            if dx==0: # vertical edge
+                xa=x2
+                ya=dy*p+y1
+            elif dy==0: # horizontal edge
+                ya=y2
+                xa=dx*p+x1
+            else:
+                theta=numpy.arctan2(dy,dx)
+                xa=p*d*numpy.cos(theta)+x1
+                ya=p*d*numpy.sin(theta)+y1
+
+ a_pos.append(((xa,ya),(x2,y2)))
+
+ arrow_collection = LineCollection(a_pos,
+ colors = arrow_colors,
+ linewidths = [4*ww for ww in lw],
+ antialiaseds = (1,),
+ transOffset = ax.transData,
+ )
+
+ arrow_collection.set_zorder(1) # edges go behind nodes
+ arrow_collection.set_label(label)
+ ax.add_collection(arrow_collection)
+
+
+ # update view
+ minx = numpy.amin(numpy.ravel(edge_pos[:,:,0]))
+ maxx = numpy.amax(numpy.ravel(edge_pos[:,:,0]))
+ miny = numpy.amin(numpy.ravel(edge_pos[:,:,1]))
+ maxy = numpy.amax(numpy.ravel(edge_pos[:,:,1]))
+
+ w = maxx-minx
+ h = maxy-miny
+ padx, pady = 0.05*w, 0.05*h
+ corners = (minx-padx, miny-pady), (maxx+padx, maxy+pady)
+ ax.update_datalim( corners)
+ ax.autoscale_view()
+
+
+ return edge_collection
+
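+# Illustrative sketch: numeric edge_color values are mapped through edge_cmap
+# with edge_vmin/edge_vmax, per the docstring above (path_graph(5) has
+# exactly four edges, matching len(edge_color)):
+#
+#     import matplotlib.pyplot as plt
+#     import networkx as nx
+#     G = nx.path_graph(5)
+#     pos = nx.spring_layout(G)
+#     nx.draw_networkx_edges(G, pos, edge_color=range(4),
+#                            edge_cmap=plt.cm.Blues,
+#                            edge_vmin=0, edge_vmax=3)
+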
+
+def draw_networkx_labels(G, pos,
+ labels=None,
+ font_size=12,
+ font_color='k',
+ font_family='sans-serif',
+ font_weight='normal',
+ alpha=1.0,
+ ax=None,
+ **kwds):
+ """Draw node labels on the graph G.
+
+ Parameters
+ ----------
+ G : graph
+ A networkx graph
+
+ pos : dictionary, optional
+ A dictionary with nodes as keys and positions as values.
+ If not specified a spring layout positioning will be computed.
+ See networkx.layout for functions that compute node positions.
+
+ labels : dictionary, optional (default=None)
+       Node labels in a dictionary of text labels keyed by node
+
+ font_size : int
+ Font size for text labels (default=12)
+
+ font_color : string
+       Font color string (default='k', i.e. black)
+
+ font_family : string
+ Font family (default='sans-serif')
+
+ font_weight : string
+ Font weight (default='normal')
+
+ alpha : float
+ The text transparency (default=1.0)
+
+ ax : Matplotlib Axes object, optional
+ Draw the graph in the specified Matplotlib axes.
+
+
+ Examples
+ --------
+ >>> G=nx.dodecahedral_graph()
+ >>> labels=nx.draw_networkx_labels(G,pos=nx.spring_layout(G))
+
+ Also see the NetworkX drawing examples at
+ http://networkx.lanl.gov/gallery.html
+
+
+ See Also
+ --------
+ draw()
+ draw_networkx()
+ draw_networkx_nodes()
+ draw_networkx_edges()
+ draw_networkx_edge_labels()
+ """
+ try:
+ import matplotlib.pyplot as plt
+ import matplotlib.cbook as cb
+ except ImportError:
+ raise ImportError("Matplotlib required for draw()")
+ except RuntimeError:
+ print("Matplotlib unable to open display")
+ raise
+
+ if ax is None:
+ ax=plt.gca()
+
+ if labels is None:
+ labels=dict( (n,n) for n in G.nodes())
+
+ # set optional alignment
+ horizontalalignment=kwds.get('horizontalalignment','center')
+ verticalalignment=kwds.get('verticalalignment','center')
+
+ text_items={} # there is no text collection so we'll fake one
+ for n, label in labels.items():
+ (x,y)=pos[n]
+ if not cb.is_string_like(label):
+ label=str(label) # this will cause "1" and 1 to be labeled the same
+ t=ax.text(x, y,
+ label,
+ size=font_size,
+ color=font_color,
+ family=font_family,
+ weight=font_weight,
+ horizontalalignment=horizontalalignment,
+ verticalalignment=verticalalignment,
+ transform = ax.transData,
+ clip_on=True,
+ )
+ text_items[n]=t
+
+ return text_items
+
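+# Illustrative sketch: the optional 'labels' dictionary (keyed by node)
+# controls the label text; nodes not listed in it are left unlabeled:
+#
+#     import networkx as nx
+#     G = nx.path_graph(3)
+#     pos = nx.spring_layout(G)
+#     nx.draw_networkx_labels(G, pos, labels={0: 'a', 1: 'b', 2: 'c'})
+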
+def draw_networkx_edge_labels(G, pos,
+ edge_labels=None,
+ label_pos=0.5,
+ font_size=10,
+ font_color='k',
+ font_family='sans-serif',
+ font_weight='normal',
+ alpha=1.0,
+ bbox=None,
+ ax=None,
+ rotate=True,
+ **kwds):
+ """Draw edge labels.
+
+ Parameters
+ ----------
+ G : graph
+ A networkx graph
+
+ pos : dictionary, optional
+ A dictionary with nodes as keys and positions as values.
+ If not specified a spring layout positioning will be computed.
+ See networkx.layout for functions that compute node positions.
+
+ ax : Matplotlib Axes object, optional
+ Draw the graph in the specified Matplotlib axes.
+
+ alpha : float
+ The text transparency (default=1.0)
+
+ edge_labels : dictionary
+       Edge labels in a dictionary of text labels keyed by edge
+       two-tuple (default=None). Only labels for the keys in the
+       dictionary are drawn.
+
+ label_pos : float
+ Position of edge label along edge (0=head, 0.5=center, 1=tail)
+
+ font_size : int
+       Font size for text labels (default=10)
+
+ font_color : string
+       Font color string (default='k', i.e. black)
+
+ font_weight : string
+ Font weight (default='normal')
+
+ font_family : string
+ Font family (default='sans-serif')
+
+ bbox : Matplotlib bbox
+ Specify text box shape and colors.
+
+ clip_on : bool
+ Turn on clipping at axis boundaries (default=True)
+
+ Examples
+ --------
+ >>> G=nx.dodecahedral_graph()
+ >>> edge_labels=nx.draw_networkx_edge_labels(G,pos=nx.spring_layout(G))
+
+ Also see the NetworkX drawing examples at
+ http://networkx.lanl.gov/gallery.html
+
+ See Also
+ --------
+ draw()
+ draw_networkx()
+ draw_networkx_nodes()
+ draw_networkx_edges()
+ draw_networkx_labels()
+ """
+ try:
+ import matplotlib.pyplot as plt
+ import matplotlib.cbook as cb
+ import numpy
+ except ImportError:
+ raise ImportError("Matplotlib required for draw()")
+ except RuntimeError:
+ print("Matplotlib unable to open display")
+ raise
+
+ if ax is None:
+ ax=plt.gca()
+ if edge_labels is None:
+ labels=dict( ((u,v), d) for u,v,d in G.edges(data=True) )
+ else:
+ labels = edge_labels
+ text_items={}
+ for (n1,n2), label in labels.items():
+ (x1,y1)=pos[n1]
+ (x2,y2)=pos[n2]
+ (x,y) = (x1 * label_pos + x2 * (1.0 - label_pos),
+ y1 * label_pos + y2 * (1.0 - label_pos))
+
+ if rotate:
+ angle=numpy.arctan2(y2-y1,x2-x1)/(2.0*numpy.pi)*360 # degrees
+ # make label orientation "right-side-up"
+ if angle > 90:
+ angle-=180
+            if angle < -90:
+ angle+=180
+ # transform data coordinate angle to screen coordinate angle
+ xy=numpy.array((x,y))
+ trans_angle=ax.transData.transform_angles(numpy.array((angle,)),
+ xy.reshape((1,2)))[0]
+ else:
+ trans_angle=0.0
+ # use default box of white with white border
+ if bbox is None:
+ bbox = dict(boxstyle='round',
+ ec=(1.0, 1.0, 1.0),
+ fc=(1.0, 1.0, 1.0),
+ )
+ if not cb.is_string_like(label):
+ label=str(label) # this will cause "1" and 1 to be labeled the same
+
+ # set optional alignment
+ horizontalalignment=kwds.get('horizontalalignment','center')
+ verticalalignment=kwds.get('verticalalignment','center')
+
+ t=ax.text(x, y,
+ label,
+ size=font_size,
+ color=font_color,
+ family=font_family,
+ weight=font_weight,
+ horizontalalignment=horizontalalignment,
+ verticalalignment=verticalalignment,
+ rotation=trans_angle,
+ transform = ax.transData,
+ bbox = bbox,
+ zorder = 1,
+ clip_on=True,
+ )
+ text_items[(n1,n2)]=t
+
+ return text_items
+
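+# Illustrative sketch: edge_labels is keyed by (u, v) two-tuples, and only
+# edges listed in the dictionary receive labels:
+#
+#     import networkx as nx
+#     G = nx.path_graph(3)
+#     pos = nx.spring_layout(G)
+#     nx.draw_networkx_edge_labels(G, pos,
+#                                  edge_labels={(0, 1): 'x', (1, 2): 'y'})
+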
+def draw_circular(G, **kwargs):
+ """Draw the graph G with a circular layout."""
+ draw(G,circular_layout(G),**kwargs)
+
+def draw_random(G, **kwargs):
+ """Draw the graph G with a random layout."""
+ draw(G,random_layout(G),**kwargs)
+
+def draw_spectral(G, **kwargs):
+ """Draw the graph G with a spectral layout."""
+ draw(G,spectral_layout(G),**kwargs)
+
+def draw_spring(G, **kwargs):
+ """Draw the graph G with a spring layout."""
+ draw(G,spring_layout(G),**kwargs)
+
+def draw_shell(G, **kwargs):
+ """Draw networkx graph with shell layout."""
+ nlist = kwargs.get('nlist', None)
+    if nlist is not None:
+        del kwargs['nlist']
+ draw(G,shell_layout(G,nlist=nlist),**kwargs)
+
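+# Illustrative sketch: draw_shell() forwards the optional 'nlist' keyword (a
+# list of node lists, one per shell) to shell_layout():
+#
+#     import networkx as nx
+#     G = nx.complete_graph(6)
+#     nx.draw_shell(G, nlist=[[0], [1, 2, 3, 4, 5]])
+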
+def draw_graphviz(G, prog="neato", **kwargs):
+ """Draw networkx graph with graphviz layout."""
+ pos=nx.drawing.graphviz_layout(G,prog)
+ draw(G,pos,**kwargs)
+
+def draw_nx(G,pos,**kwds):
+ """For backward compatibility; use draw or draw_networkx."""
+ draw(G,pos,**kwds)
+
+# fixture for nose tests
+def setup_module(module):
+ from nose import SkipTest
+ try:
+ import matplotlib as mpl
+ mpl.use('PS',warn=False)
+ import matplotlib.pyplot as plt
+    except (ImportError, RuntimeError):
+ raise SkipTest("matplotlib not available")
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/drawing/tests/test_agraph.py b/lib/python2.7/site-packages/setoolsgui/networkx/drawing/tests/test_agraph.py
new file mode 100644
index 0000000..b2f28a3
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/drawing/tests/test_agraph.py
@@ -0,0 +1,75 @@
+"""Unit tests for PyGraphviz intefaace.
+"""
+import os
+import tempfile
+
+from nose import SkipTest
+from nose.tools import assert_true,assert_equal
+
+import networkx as nx
+
+class TestAGraph(object):
+ @classmethod
+ def setupClass(cls):
+ global pygraphviz
+ try:
+ import pygraphviz
+ except ImportError:
+ raise SkipTest('PyGraphviz not available.')
+
+ def build_graph(self, G):
+ G.add_edge('A','B')
+ G.add_edge('A','C')
+ G.add_edge('A','C')
+ G.add_edge('B','C')
+ G.add_edge('A','D')
+ G.add_node('E')
+ return G
+
+ def assert_equal(self, G1, G2):
+ assert_true( sorted(G1.nodes())==sorted(G2.nodes()) )
+ assert_true( sorted(G1.edges())==sorted(G2.edges()) )
+
+
+ def agraph_checks(self, G):
+ G = self.build_graph(G)
+ A=nx.to_agraph(G)
+ H=nx.from_agraph(A)
+ self.assert_equal(G, H)
+
+ fname=tempfile.mktemp()
+ nx.drawing.nx_agraph.write_dot(H,fname)
+ Hin=nx.drawing.nx_agraph.read_dot(fname)
+ os.unlink(fname)
+ self.assert_equal(H,Hin)
+
+
+ (fd,fname)=tempfile.mkstemp()
+ fh=open(fname,'w')
+ nx.drawing.nx_agraph.write_dot(H,fh)
+ fh.close()
+
+ fh=open(fname,'r')
+ Hin=nx.drawing.nx_agraph.read_dot(fh)
+ fh.close()
+ os.unlink(fname)
+ self.assert_equal(H,Hin)
+
+ def test_from_agraph_name(self):
+ G=nx.Graph(name='test')
+ A=nx.to_agraph(G)
+ H=nx.from_agraph(A)
+ assert_equal(G.name,'test')
+
+
+ def testUndirected(self):
+ self.agraph_checks(nx.Graph())
+
+ def testDirected(self):
+ self.agraph_checks(nx.DiGraph())
+
+ def testMultiUndirected(self):
+ self.agraph_checks(nx.MultiGraph())
+
+ def testMultiDirected(self):
+ self.agraph_checks(nx.MultiDiGraph())
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/drawing/tests/test_layout.py b/lib/python2.7/site-packages/setoolsgui/networkx/drawing/tests/test_layout.py
new file mode 100644
index 0000000..0327782
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/drawing/tests/test_layout.py
@@ -0,0 +1,61 @@
+"""Unit tests for layout functions."""
+import sys
+from nose import SkipTest
+from nose.tools import assert_equal
+import networkx as nx
+
+class TestLayout(object):
+ numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
+ @classmethod
+ def setupClass(cls):
+ global numpy
+ try:
+ import numpy
+ except ImportError:
+ raise SkipTest('numpy not available.')
+
+
+ def setUp(self):
+ self.Gi=nx.grid_2d_graph(5,5)
+ self.Gs=nx.Graph()
+ self.Gs.add_path('abcdef')
+ self.bigG=nx.grid_2d_graph(25,25) #bigger than 500 nodes for sparse
+
+ def test_smoke_int(self):
+ G=self.Gi
+ vpos=nx.random_layout(G)
+ vpos=nx.circular_layout(G)
+ vpos=nx.spring_layout(G)
+ vpos=nx.fruchterman_reingold_layout(G)
+ vpos=nx.spectral_layout(G)
+ vpos=nx.spectral_layout(self.bigG)
+ vpos=nx.shell_layout(G)
+
+ def test_smoke_string(self):
+ G=self.Gs
+ vpos=nx.random_layout(G)
+ vpos=nx.circular_layout(G)
+ vpos=nx.spring_layout(G)
+ vpos=nx.fruchterman_reingold_layout(G)
+ vpos=nx.spectral_layout(G)
+ vpos=nx.shell_layout(G)
+
+
+ def test_adjacency_interface_numpy(self):
+ A=nx.to_numpy_matrix(self.Gs)
+ pos=nx.drawing.layout._fruchterman_reingold(A)
+ pos=nx.drawing.layout._fruchterman_reingold(A,dim=3)
+ assert_equal(pos.shape,(6,3))
+
+ def test_adjacency_interface_scipy(self):
+ try:
+ import scipy
+ except ImportError:
+ raise SkipTest('scipy not available.')
+
+ A=nx.to_scipy_sparse_matrix(self.Gs,dtype='f')
+ pos=nx.drawing.layout._sparse_fruchterman_reingold(A)
+ pos=nx.drawing.layout._sparse_spectral(A)
+
+ pos=nx.drawing.layout._sparse_fruchterman_reingold(A,dim=3)
+ assert_equal(pos.shape,(6,3))
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/drawing/tests/test_pydot.py b/lib/python2.7/site-packages/setoolsgui/networkx/drawing/tests/test_pydot.py
new file mode 100644
index 0000000..9cdffee
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/drawing/tests/test_pydot.py
@@ -0,0 +1,62 @@
+"""
+ Unit tests for pydot drawing functions.
+"""
+import os
+import tempfile
+
+from nose import SkipTest
+from nose.tools import assert_true
+
+import networkx as nx
+
+class TestPydot(object):
+ @classmethod
+ def setupClass(cls):
+ global pydot
+ try:
+ import pydot
+ import dot_parser
+ except ImportError:
+ raise SkipTest('pydot not available.')
+
+ def build_graph(self, G):
+ G.add_edge('A','B')
+ G.add_edge('A','C')
+ G.add_edge('B','C')
+ G.add_edge('A','D')
+ G.add_node('E')
+ return G, nx.to_pydot(G)
+
+ def assert_equal(self, G1, G2):
+ assert_true( sorted(G1.nodes())==sorted(G2.nodes()) )
+ assert_true( sorted(G1.edges())==sorted(G2.edges()) )
+
+ def pydot_checks(self, G):
+ H, P = self.build_graph(G)
+ G2 = H.__class__(nx.from_pydot(P))
+ self.assert_equal(H, G2)
+
+ fname = tempfile.mktemp()
+ assert_true( P.write_raw(fname) )
+
+ Pin = pydot.graph_from_dot_file(fname)
+
+ n1 = sorted([p.get_name() for p in P.get_node_list()])
+ n2 = sorted([p.get_name() for p in Pin.get_node_list()])
+ assert_true( n1 == n2 )
+
+ e1=[(e.get_source(),e.get_destination()) for e in P.get_edge_list()]
+ e2=[(e.get_source(),e.get_destination()) for e in Pin.get_edge_list()]
+ assert_true( sorted(e1)==sorted(e2) )
+
+ Hin = nx.drawing.nx_pydot.read_dot(fname)
+ Hin = H.__class__(Hin)
+ self.assert_equal(H, Hin)
+# os.unlink(fname)
+
+
+ def testUndirected(self):
+ self.pydot_checks(nx.Graph())
+
+ def testDirected(self):
+ self.pydot_checks(nx.DiGraph())
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/drawing/tests/test_pylab.py b/lib/python2.7/site-packages/setoolsgui/networkx/drawing/tests/test_pylab.py
new file mode 100644
index 0000000..9c55590
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/drawing/tests/test_pylab.py
@@ -0,0 +1,40 @@
+"""
+ Unit tests for matplotlib drawing functions.
+"""
+
+import os
+
+from nose import SkipTest
+
+import networkx as nx
+
+class TestPylab(object):
+ @classmethod
+ def setupClass(cls):
+ global plt
+ try:
+ import matplotlib as mpl
+ mpl.use('PS',warn=False)
+ import matplotlib.pyplot as plt
+ except ImportError:
+ raise SkipTest('matplotlib not available.')
+ except RuntimeError:
+ raise SkipTest('matplotlib not available.')
+
+ def setUp(self):
+ self.G=nx.barbell_graph(5,10)
+
+
+ def test_draw(self):
+ N=self.G
+ nx.draw_spring(N)
+ plt.savefig("test.ps")
+ nx.draw_random(N)
+ plt.savefig("test.ps")
+ nx.draw_circular(N)
+ plt.savefig("test.ps")
+ nx.draw_spectral(N)
+ plt.savefig("test.ps")
+ nx.draw_spring(N.to_directed())
+ plt.savefig("test.ps")
+ os.unlink('test.ps')
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/exception.py b/lib/python2.7/site-packages/setoolsgui/networkx/exception.py
new file mode 100644
index 0000000..0267038
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/exception.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+"""
+**********
+Exceptions
+**********
+
+Base exceptions and errors for NetworkX.
+
+"""
+__author__ = """Aric Hagberg (hagberg@lanl.gov)\nPieter Swart (swart@lanl.gov)\nDan Schult(dschult@colgate.edu)\nLoïc Séguin-C. <loicseguin@gmail.com>"""
+# Copyright (C) 2004-2011 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+#
+
+# Exception handling
+
+# the root of all Exceptions
+class NetworkXException(Exception):
+ """Base class for exceptions in NetworkX."""
+
+class NetworkXError(NetworkXException):
+ """Exception for a serious error in NetworkX"""
+
+class NetworkXPointlessConcept(NetworkXException):
+ """Harary, F. and Read, R. "Is the Null Graph a Pointless Concept?"
+In Graphs and Combinatorics Conference, George Washington University.
+New York: Springer-Verlag, 1973.
+"""
+
+class NetworkXAlgorithmError(NetworkXException):
+ """Exception for unexpected termination of algorithms."""
+
+class NetworkXUnfeasible(NetworkXAlgorithmError):
+ """Exception raised by algorithms trying to solve a problem
+ instance that has no feasible solution."""
+
+class NetworkXNoPath(NetworkXUnfeasible):
+ """Exception for algorithms that should return a path when running
+ on graphs where such a path does not exist."""
+
+class NetworkXUnbounded(NetworkXAlgorithmError):
+ """Exception raised by algorithms trying to solve a maximization
+ or a minimization problem instance that is unbounded."""
+
+class NetworkXNotImplemented(NetworkXException):
+ """Exception raised by algorithms not implemented for a type of graph."""
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/external/__init__.py b/lib/python2.7/site-packages/setoolsgui/networkx/external/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/external/__init__.py
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/external/decorator/__init__.py b/lib/python2.7/site-packages/setoolsgui/networkx/external/decorator/__init__.py
new file mode 100644
index 0000000..154bfd6
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/external/decorator/__init__.py
@@ -0,0 +1,8 @@
+"""
+ Hack for including decorator-3.3.1 in NetworkX.
+"""
+import sys
+if sys.version >= '3':
+ from .decorator3._decorator3 import *
+else:
+ from .decorator2._decorator2 import *
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/external/decorator/decorator2/__init__.py b/lib/python2.7/site-packages/setoolsgui/networkx/external/decorator/decorator2/__init__.py
new file mode 100644
index 0000000..792d600
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/external/decorator/decorator2/__init__.py
@@ -0,0 +1 @@
+#
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/external/decorator/decorator2/_decorator2.py b/lib/python2.7/site-packages/setoolsgui/networkx/external/decorator/decorator2/_decorator2.py
new file mode 100644
index 0000000..2e8c123
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/external/decorator/decorator2/_decorator2.py
@@ -0,0 +1,210 @@
+########################## LICENCE ###############################
+##
+## Copyright (c) 2005-2011, Michele Simionato
+## All rights reserved.
+##
+## Redistributions of source code must retain the above copyright
+## notice, this list of conditions and the following disclaimer.
+## Redistributions in bytecode form must reproduce the above copyright
+## notice, this list of conditions and the following disclaimer in
+## the documentation and/or other materials provided with the
+## distribution.
+
+## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+## HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+## INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+## BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+## OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+## ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+## TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+## USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+## DAMAGE.
+
+"""
+Decorator module, see http://pypi.python.org/pypi/decorator
+for the documentation.
+"""
+
+__version__ = '3.3.2'
+
+__all__ = ["decorator", "FunctionMaker", "partial"]
+
+import sys, re, inspect
+
+try:
+ from functools import partial
+except ImportError: # for Python version < 2.5
+ class partial(object):
+ "A simple replacement of functools.partial"
+ def __init__(self, func, *args, **kw):
+ self.func = func
+ self.args = args
+ self.keywords = kw
+ def __call__(self, *otherargs, **otherkw):
+ kw = self.keywords.copy()
+ kw.update(otherkw)
+ return self.func(*(self.args + otherargs), **kw)
+
+if sys.version >= '3':
+ from inspect import getfullargspec
+else:
+ class getfullargspec(object):
+ "A quick and dirty replacement for getfullargspec for Python 2.X"
+ def __init__(self, f):
+ self.args, self.varargs, self.varkw, self.defaults = \
+ inspect.getargspec(f)
+ self.kwonlyargs = []
+ self.kwonlydefaults = None
+ self.annotations = getattr(f, '__annotations__', {})
+ def __iter__(self):
+ yield self.args
+ yield self.varargs
+ yield self.varkw
+ yield self.defaults
+
+DEF = re.compile(r'\s*def\s*([_\w][_\w\d]*)\s*\(')
+
+# basic functionality
+class FunctionMaker(object):
+ """
+ An object with the ability to create functions with a given signature.
+ It has attributes name, doc, module, signature, defaults, dict and
+ methods update and make.
+ """
+ def __init__(self, func=None, name=None, signature=None,
+ defaults=None, doc=None, module=None, funcdict=None):
+ self.shortsignature = signature
+ if func:
+ # func can be a class or a callable, but not an instance method
+ self.name = func.__name__
+ if self.name == '<lambda>': # small hack for lambda functions
+ self.name = '_lambda_'
+ self.doc = func.__doc__
+ self.module = func.__module__
+ if inspect.isfunction(func):
+ argspec = getfullargspec(func)
+ for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs',
+ 'kwonlydefaults', 'annotations'):
+ setattr(self, a, getattr(argspec, a))
+ for i, arg in enumerate(self.args):
+ setattr(self, 'arg%d' % i, arg)
+ self.signature = inspect.formatargspec(
+ formatvalue=lambda val: "", *argspec)[1:-1]
+ allargs = list(self.args)
+ if self.varargs:
+ allargs.append('*' + self.varargs)
+ if self.varkw:
+ allargs.append('**' + self.varkw)
+ try:
+ self.shortsignature = ', '.join(allargs)
+ except TypeError: # exotic signature, valid only in Python 2.X
+ self.shortsignature = self.signature
+ self.dict = func.__dict__.copy()
+ # func=None happens when decorating a caller
+ if name:
+ self.name = name
+ if signature is not None:
+ self.signature = signature
+ if defaults:
+ self.defaults = defaults
+ if doc:
+ self.doc = doc
+ if module:
+ self.module = module
+ if funcdict:
+ self.dict = funcdict
+        # check existence of required attributes
+ assert hasattr(self, 'name')
+ if not hasattr(self, 'signature'):
+ raise TypeError('You are decorating a non function: %s' % func)
+
+ def update(self, func, **kw):
+ "Update the signature of func with the data in self"
+ func.__name__ = self.name
+ func.__doc__ = getattr(self, 'doc', None)
+ func.__dict__ = getattr(self, 'dict', {})
+ func.func_defaults = getattr(self, 'defaults', ())
+ func.__kwdefaults__ = getattr(self, 'kwonlydefaults', None)
+ callermodule = sys._getframe(3).f_globals.get('__name__', '?')
+ func.__module__ = getattr(self, 'module', callermodule)
+ func.__dict__.update(kw)
+
+ def make(self, src_templ, evaldict=None, addsource=False, **attrs):
+ "Make a new function from a given template and update the signature"
+ src = src_templ % vars(self) # expand name and signature
+ evaldict = evaldict or {}
+ mo = DEF.match(src)
+ if mo is None:
+ raise SyntaxError('not a valid function template\n%s' % src)
+ name = mo.group(1) # extract the function name
+ names = set([name] + [arg.strip(' *') for arg in
+ self.shortsignature.split(',')])
+ for n in names:
+ if n in ('_func_', '_call_'):
+ raise NameError('%s is overridden in\n%s' % (n, src))
+ if not src.endswith('\n'): # add a newline just for safety
+ src += '\n' # this is needed in old versions of Python
+ try:
+ code = compile(src, '<string>', 'single')
+ # print >> sys.stderr, 'Compiling %s' % src
+ exec code in evaldict
+ except:
+ print >> sys.stderr, 'Error in generated code:'
+ print >> sys.stderr, src
+ raise
+ func = evaldict[name]
+ if addsource:
+ attrs['__source__'] = src
+ self.update(func, **attrs)
+ return func
+
+ @classmethod
+ def create(cls, obj, body, evaldict, defaults=None,
+ doc=None, module=None, addsource=True, **attrs):
+ """
+ Create a function from the strings name, signature and body.
+ evaldict is the evaluation dictionary. If addsource is true an attribute
+ __source__ is added to the result. The attributes attrs are added,
+ if any.
+ """
+ if isinstance(obj, str): # "name(signature)"
+ name, rest = obj.strip().split('(', 1)
+        signature = rest[:-1] # strip the trailing right paren
+ func = None
+ else: # a function
+ name = None
+ signature = None
+ func = obj
+ self = cls(func, name, signature, defaults, doc, module)
+ ibody = '\n'.join(' ' + line for line in body.splitlines())
+ return self.make('def %(name)s(%(signature)s):\n' + ibody,
+ evaldict, addsource, **attrs)
+
+def decorator(caller, func=None):
+ """
+ decorator(caller) converts a caller function into a decorator;
+ decorator(caller, func) decorates a function using a caller.
+ """
+ if func is not None: # returns a decorated function
+ evaldict = func.func_globals.copy()
+ evaldict['_call_'] = caller
+ evaldict['_func_'] = func
+ return FunctionMaker.create(
+ func, "return _call_(_func_, %(shortsignature)s)",
+ evaldict, undecorated=func, __wrapped__=func)
+ else: # returns a decorator
+ if isinstance(caller, partial):
+ return partial(decorator, caller)
+ # otherwise assume caller is a function
+ first = inspect.getargspec(caller)[0][0] # first arg
+ evaldict = caller.func_globals.copy()
+ evaldict['_call_'] = caller
+ evaldict['decorator'] = decorator
+ return FunctionMaker.create(
+ '%s(%s)' % (caller.__name__, first),
+ 'return decorator(_call_, %s)' % first,
+ evaldict, undecorated=caller, __wrapped__=caller,
+ doc=caller.__doc__, module=caller.__module__)
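+
+# Illustrative usage sketch (Python 2 syntax, matching this module):
+# applying @decorator to a caller function turns it into a
+# signature-preserving decorator.
+#
+#     @decorator
+#     def trace(f, *args, **kw):
+#         print 'calling %s' % f.__name__
+#         return f(*args, **kw)
+#
+#     @trace
+#     def add(a, b):
+#         return a + b
+#
+#     add(1, 2)   # prints 'calling add' and returns 3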
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/generators/__init__.py b/lib/python2.7/site-packages/setoolsgui/networkx/generators/__init__.py
new file mode 100644
index 0000000..92edd41
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/generators/__init__.py
@@ -0,0 +1,21 @@
+"""
+A package for generating various graphs in networkx.
+
+"""
+from networkx.generators.atlas import *
+from networkx.generators.bipartite import *
+from networkx.generators.classic import *
+from networkx.generators.degree_seq import *
+from networkx.generators.directed import *
+from networkx.generators.ego import *
+from networkx.generators.geometric import *
+from networkx.generators.hybrid import *
+from networkx.generators.line import *
+from networkx.generators.random_graphs import *
+from networkx.generators.small import *
+from networkx.generators.stochastic import *
+from networkx.generators.social import *
+from networkx.generators.threshold import *
+from networkx.generators.intersection import *
+from networkx.generators.random_clustered import *
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/generators/atlas.py b/lib/python2.7/site-packages/setoolsgui/networkx/generators/atlas.py
new file mode 100644
index 0000000..f3d9c57
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/generators/atlas.py
@@ -0,0 +1,12336 @@
+"""
+Generators for the small graph atlas.
+
+
+See
+"An Atlas of Graphs" by Ronald C. Read and Robin J. Wilson,
+Oxford University Press, 1998.
+
+Because of its size, this module is not imported by default.
+
+"""
+# Copyright (C) 2004-2008 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+__author__ = """Pieter Swart (swart@lanl.gov)"""
+
+__all__ = ['graph_atlas_g']
+
+from networkx.generators.small import make_small_graph
+
+def graph_atlas_g():
+ """
+ Return the list [G0,G1,...,G1252] of graphs as named in the Graph Atlas.
+ G0,G1,...,G1252 are all graphs with up to 7 nodes.
+
+ The graphs are listed:
+ 1. in increasing order of number of nodes;
+ 2. for a fixed number of nodes,
+ in increasing order of the number of edges;
+ 3. for fixed numbers of nodes and edges,
+ in increasing order of the degree sequence,
+ for example 111223 < 112222;
+ 4. for fixed degree sequence, in increasing number of automorphisms.
+
+    Note that indexing is set up so that if
+    GAG=graph_atlas_g(), then
+    G123=GAG[123] and GAG[0]=empty_graph(0).
+
+ """
+
+ descr_list=[
+ ['edgelist', 'G0', 0, []],
+ ['edgelist', 'G1', 1, []],
+ ['edgelist', 'G2', 2, []],
+ ['edgelist', 'G3', 2, [[1, 2]]],
+ ['edgelist', 'G4', 3, []],
+ ['edgelist', 'G5', 3, [[2, 3]]],
+ ['edgelist', 'G6', 3, [[1, 2], [1, 3]]],
+ ['edgelist', 'G7', 3, [[1, 2], [1, 3], [2, 3]]],
+ ['edgelist', 'G8', 4, []],
+ ['edgelist', 'G9', 4, [[4, 3]]],
+ ['edgelist', 'G10', 4, [[4, 3], [4, 2]]],
+ ['edgelist', 'G11', 4, [[1, 2], [4, 3]]],
+ ['edgelist', 'G12', 4, [[4, 3], [2, 3], [4, 2]]],
+ ['edgelist', 'G13', 4, [[4, 1], [4, 2], [4, 3]]],
+ ['edgelist', 'G14', 4, [[1, 2], [2, 3], [1, 4]]],
+ ['edgelist', 'G15', 4, [[4, 3], [2, 3], [4, 2], [4, 1]]],
+ ['edgelist', 'G16', 4, [[1, 2], [2, 3], [3, 4], [1, 4]]],
+ ['edgelist', 'G17', 4, [[1, 2], [1, 3], [1, 4], [2, 3], [3, 4]]],
+ ['edgelist', 'G18', 4, [[1, 2], [2, 3], [1, 3], [4, 1], [4, 2], [4, 3]]],
+ ['edgelist', 'G19', 5, []],
+ ['edgelist', 'G20', 5, [[5, 4]]],
+ ['edgelist', 'G21', 5, [[2, 3], [1, 2]]],
+ ['edgelist', 'G22', 5, [[1, 3], [5, 4]]],
+ ['edgelist', 'G23', 5, [[2, 3], [1, 2], [3, 1]]],
+ ['edgelist', 'G24', 5, [[5, 4], [4, 3], [4, 2]]],
+ ['edgelist', 'G25', 5, [[4, 3], [5, 4], [1, 5]]],
+ ['edgelist', 'G26', 5, [[2, 3], [1, 2], [5, 4]]],
+ ['edgelist', 'G27', 5, [[5, 4], [2, 3], [4, 2], [4, 3]]],
+ ['edgelist', 'G28', 5, [[1, 4], [2, 1], [3, 2], [4, 3]]],
+ ['edgelist', 'G29', 5, [[5, 4], [5, 1], [5, 2], [5, 3]]],
+ ['edgelist', 'G30', 5, [[5, 1], [4, 2], [5, 4], [4, 3]]],
+ ['edgelist', 'G31', 5, [[3, 4], [2, 3], [1, 2], [5, 1]]],
+ ['edgelist', 'G32', 5, [[2, 3], [1, 2], [3, 1], [5, 4]]],
+ ['edgelist', 'G33', 5, [[1, 4], [3, 1], [4, 3], [2, 1], [3, 2]]],
+ ['edgelist', 'G34', 5, [[5, 3], [5, 4], [3, 4], [5, 2], [5, 1]]],
+ ['edgelist', 'G35', 5, [[1, 2], [2, 3], [3, 4], [1, 5], [1, 3]]],
+ ['edgelist', 'G36', 5, [[5, 1], [2, 3], [5, 4], [4, 3], [4, 2]]],
+ ['edgelist', 'G37', 5, [[2, 1], [5, 2], [3, 5], [4, 3], [2, 4]]],
+ ['edgelist', 'G38', 5, [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5]]],
+ ['edgelist', 'G39', 5, [[2, 1], [5, 2], [5, 1], [1, 4], [2, 4], [4, 5]]],
+ ['edgelist', 'G40', 5, [[2, 1], [5, 2], [3, 5], [4, 3], [2, 4], [3, 2]]],
+ ['edgelist', 'G41', 5, [[2, 1], [5, 2], [3, 5], [4, 3], [2, 4], [4, 5]]],
+ ['edgelist', 'G42', 5, [[1, 2], [5, 4], [3, 4], [5, 3], [5, 1], [5, 2]]],
+ ['edgelist', 'G43', 5, [[1, 5], [4, 1], [5, 4], [3, 4], [2, 3], [1, 2]]],
+ ['edgelist', 'G44', 5, [[3, 2], [1, 3], [4, 1], [2, 4], [5, 2], [1, 5]]],
+ ['edgelist',
+ 'G45',
+ 5,
+ [[5, 1], [2, 3], [5, 4], [4, 3], [4, 2], [5, 2], [3, 5]]],
+ ['edgelist',
+ 'G46',
+ 5,
+ [[5, 2], [3, 5], [4, 3], [2, 4], [4, 5], [1, 4], [5, 1]]],
+ ['edgelist',
+ 'G47',
+ 5,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [2, 4], [5, 2]]],
+ ['edgelist',
+ 'G48',
+ 5,
+ [[3, 2], [1, 3], [4, 1], [2, 4], [5, 2], [1, 5], [3, 5]]],
+ ['edgelist',
+ 'G49',
+ 5,
+ [[2, 1], [5, 2], [3, 5], [4, 3], [2, 4], [5, 1], [4, 5], [1, 4]]],
+ ['edgelist',
+ 'G50',
+ 5,
+ [[1, 2], [2, 3], [3, 4], [1, 4], [5, 1], [5, 2], [5, 3], [5, 4]]],
+ ['edgelist',
+ 'G51',
+ 5,
+ [[1, 2], [4, 5], [1, 4], [1, 5], [2, 3], [2, 4], [2, 5], [3, 4], [3, 5]]],
+ ['edgelist',
+ 'G52',
+ 5,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [2, 3],
+ [2, 4],
+ [2, 5],
+ [3, 4],
+ [3, 5],
+ [4, 5]]],
+ ['edgelist', 'G53', 6, []],
+ ['edgelist', 'G54', 6, [[6, 5]]],
+ ['edgelist', 'G55', 6, [[1, 4], [6, 5]]],
+ ['edgelist', 'G56', 6, [[2, 4], [2, 3]]],
+ ['edgelist', 'G57', 6, [[2, 4], [3, 2], [4, 3]]],
+ ['edgelist', 'G58', 6, [[1, 4], [6, 1], [5, 1]]],
+ ['edgelist', 'G59', 6, [[5, 4], [6, 5], [1, 6]]],
+ ['edgelist', 'G60', 6, [[5, 4], [6, 2], [6, 3]]],
+ ['edgelist', 'G61', 6, [[2, 3], [4, 1], [6, 5]]],
+ ['edgelist', 'G62', 6, [[1, 4], [5, 1], [6, 5], [1, 6]]],
+ ['edgelist', 'G63', 6, [[4, 1], [6, 4], [5, 6], [1, 5]]],
+ ['edgelist', 'G64', 6, [[6, 2], [6, 4], [6, 3], [1, 6]]],
+ ['edgelist', 'G65', 6, [[5, 4], [4, 2], [5, 1], [4, 3]]],
+ ['edgelist', 'G66', 6, [[1, 3], [2, 4], [3, 2], [6, 4]]],
+ ['edgelist', 'G67', 6, [[2, 4], [3, 2], [4, 3], [1, 6]]],
+ ['edgelist', 'G68', 6, [[2, 3], [1, 4], [6, 1], [5, 1]]],
+ ['edgelist', 'G69', 6, [[5, 6], [2, 3], [1, 6], [4, 5]]],
+ ['edgelist', 'G70', 6, [[1, 3], [5, 1], [4, 2], [6, 4]]],
+ ['edgelist', 'G71', 6, [[4, 1], [6, 4], [5, 6], [1, 5], [6, 1]]],
+ ['edgelist', 'G72', 6, [[6, 4], [4, 2], [4, 3], [5, 4], [5, 6]]],
+ ['edgelist', 'G73', 6, [[6, 4], [6, 5], [3, 4], [4, 5], [1, 5]]],
+ ['edgelist', 'G74', 6, [[5, 4], [2, 3], [5, 1], [4, 3], [4, 2]]],
+ ['edgelist', 'G75', 6, [[2, 5], [4, 5], [5, 1], [3, 2], [4, 3]]],
+ ['edgelist', 'G76', 6, [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5]]],
+ ['edgelist', 'G77', 6, [[6, 4], [6, 5], [6, 1], [6, 2], [6, 3]]],
+ ['edgelist', 'G78', 6, [[2, 5], [6, 2], [2, 1], [3, 2], [3, 4]]],
+ ['edgelist', 'G79', 6, [[1, 2], [4, 5], [1, 3], [4, 1], [6, 4]]],
+ ['edgelist', 'G80', 6, [[2, 1], [3, 2], [3, 5], [2, 4], [6, 4]]],
+ ['edgelist', 'G81', 6, [[5, 4], [1, 6], [5, 1], [4, 3], [4, 2]]],
+ ['edgelist', 'G82', 6, [[2, 3], [1, 2], [5, 6], [2, 4], [3, 4]]],
+ ['edgelist', 'G83', 6, [[1, 2], [1, 6], [3, 4], [4, 5], [5, 6]]],
+ ['edgelist', 'G84', 6, [[5, 4], [6, 2], [6, 3], [1, 4], [5, 1]]],
+ ['edgelist', 'G85', 6, [[2, 3], [4, 1], [6, 4], [5, 6], [1, 5]]],
+ ['edgelist', 'G86', 6, [[1, 4], [6, 1], [5, 6], [4, 5], [6, 4], [5, 1]]],
+ ['edgelist', 'G87', 6, [[2, 5], [3, 5], [5, 1], [3, 4], [4, 2], [4, 5]]],
+ ['edgelist', 'G88', 6, [[2, 5], [3, 5], [5, 1], [3, 2], [4, 2], [3, 4]]],
+ ['edgelist', 'G89', 6, [[3, 1], [6, 5], [5, 4], [6, 4], [5, 1], [3, 5]]],
+ ['edgelist', 'G90', 6, [[4, 3], [5, 4], [1, 5], [2, 1], [3, 2], [1, 4]]],
+ ['edgelist', 'G91', 6, [[5, 2], [4, 2], [5, 3], [4, 3], [3, 1], [2, 1]]],
+ ['edgelist', 'G92', 6, [[6, 3], [6, 4], [6, 5], [4, 5], [6, 2], [6, 1]]],
+ ['edgelist', 'G93', 6, [[5, 4], [5, 3], [5, 1], [2, 5], [4, 1], [6, 4]]],
+ ['edgelist', 'G94', 6, [[5, 4], [4, 6], [6, 5], [6, 2], [4, 3], [5, 1]]],
+ ['edgelist', 'G95', 6, [[5, 3], [2, 3], [5, 4], [5, 2], [5, 1], [1, 6]]],
+ ['edgelist', 'G96', 6, [[2, 3], [4, 2], [1, 4], [3, 1], [5, 1], [6, 1]]],
+ ['edgelist', 'G97', 6, [[3, 1], [5, 3], [2, 5], [3, 2], [4, 2], [6, 4]]],
+ ['edgelist', 'G98', 6, [[2, 3], [4, 2], [1, 4], [3, 1], [5, 1], [6, 4]]],
+ ['edgelist', 'G99', 6, [[6, 4], [3, 6], [3, 1], [5, 3], [5, 4], [4, 2]]],
+ ['edgelist', 'G100', 6, [[1, 3], [4, 5], [2, 1], [6, 4], [5, 6], [4, 1]]],
+ ['edgelist', 'G101', 6, [[2, 3], [4, 1], [6, 4], [5, 6], [1, 5], [6, 1]]],
+ ['edgelist', 'G102', 6, [[5, 4], [2, 3], [5, 1], [4, 3], [4, 2], [6, 1]]],
+ ['edgelist', 'G103', 6, [[2, 5], [3, 5], [5, 1], [1, 6], [4, 2], [3, 4]]],
+ ['edgelist', 'G104', 6, [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [2, 6]]],
+ ['edgelist', 'G105', 6, [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [1, 6]]],
+ ['edgelist', 'G106', 6, [[2, 4], [3, 2], [4, 3], [1, 5], [6, 1], [5, 6]]],
+ ['edgelist',
+ 'G107',
+ 6,
+ [[1, 2], [2, 3], [1, 3], [4, 1], [4, 2], [4, 3], [1, 6]]],
+ ['edgelist',
+ 'G108',
+ 6,
+ [[2, 5], [3, 5], [3, 2], [4, 2], [3, 4], [3, 1], [1, 2]]],
+ ['edgelist',
+ 'G109',
+ 6,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [2, 4], [5, 2]]],
+ ['edgelist',
+ 'G110',
+ 6,
+ [[1, 2], [4, 3], [1, 3], [4, 1], [4, 2], [6, 2], [6, 3]]],
+ ['edgelist',
+ 'G111',
+ 6,
+ [[2, 5], [3, 5], [3, 4], [1, 5], [4, 2], [5, 6], [4, 5]]],
+ ['edgelist',
+ 'G112',
+ 6,
+ [[2, 1], [5, 2], [3, 5], [4, 3], [6, 2], [3, 6], [2, 3]]],
+ ['edgelist',
+ 'G113',
+ 6,
+ [[1, 5], [3, 1], [2, 3], [4, 2], [6, 4], [4, 1], [3, 4]]],
+ ['edgelist',
+ 'G114',
+ 6,
+ [[2, 5], [3, 5], [3, 4], [3, 2], [4, 2], [5, 6], [1, 5]]],
+ ['edgelist',
+ 'G115',
+ 6,
+ [[2, 1], [5, 2], [3, 5], [4, 3], [6, 2], [3, 6], [5, 6]]],
+ ['edgelist',
+ 'G116',
+ 6,
+ [[1, 2], [2, 3], [1, 3], [4, 1], [4, 2], [4, 3], [6, 5]]],
+ ['edgelist',
+ 'G117',
+ 6,
+ [[1, 6], [5, 1], [6, 5], [1, 3], [4, 1], [4, 3], [1, 2]]],
+ ['edgelist',
+ 'G118',
+ 6,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [2, 6], [5, 2]]],
+ ['edgelist',
+ 'G119',
+ 6,
+ [[1, 2], [5, 1], [2, 5], [1, 3], [4, 1], [4, 3], [4, 6]]],
+ ['edgelist',
+ 'G120',
+ 6,
+ [[2, 5], [3, 5], [5, 1], [1, 6], [4, 2], [3, 4], [4, 5]]],
+ ['edgelist',
+ 'G121',
+ 6,
+ [[3, 1], [4, 3], [5, 4], [6, 5], [3, 6], [2, 3], [5, 2]]],
+ ['edgelist',
+ 'G122',
+ 6,
+ [[2, 6], [1, 2], [5, 1], [4, 5], [3, 4], [2, 3], [1, 4]]],
+ ['edgelist',
+ 'G123',
+ 6,
+ [[2, 5], [3, 5], [5, 1], [1, 6], [4, 2], [3, 4], [3, 2]]],
+ ['edgelist',
+ 'G124',
+ 6,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [1, 3], [6, 2]]],
+ ['edgelist',
+ 'G125',
+ 6,
+ [[3, 1], [5, 2], [2, 3], [6, 5], [3, 6], [4, 2], [6, 4]]],
+ ['edgelist',
+ 'G126',
+ 6,
+ [[6, 1], [4, 6], [3, 4], [1, 3], [2, 4], [5, 2], [4, 5]]],
+ ['edgelist',
+ 'G127',
+ 6,
+ [[2, 4], [3, 2], [1, 3], [6, 1], [5, 6], [4, 5], [3, 4]]],
+ ['edgelist',
+ 'G128',
+ 6,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [1, 6], [1, 4]]],
+ ['edgelist',
+ 'G129',
+ 6,
+ [[5, 4], [1, 5], [2, 1], [3, 2], [4, 3], [1, 6], [6, 4]]],
+ ['edgelist',
+ 'G130',
+ 6,
+ [[2, 3], [1, 2], [3, 1], [4, 1], [5, 4], [6, 5], [4, 6]]],
+ ['edgelist',
+ 'G131',
+ 6,
+ [[1, 2], [3, 6], [1, 3], [4, 1], [4, 2], [4, 3], [3, 2], [6, 2]]],
+ ['edgelist',
+ 'G132',
+ 6,
+ [[1, 2], [2, 3], [3, 4], [1, 4], [5, 1], [5, 2], [5, 3], [5, 4]]],
+ ['edgelist',
+ 'G133',
+ 6,
+ [[1, 2], [2, 3], [1, 3], [4, 1], [4, 2], [4, 3], [6, 1], [1, 5]]],
+ ['edgelist',
+ 'G134',
+ 6,
+ [[2, 3], [4, 2], [1, 4], [2, 1], [3, 1], [4, 3], [6, 4], [5, 1]]],
+ ['edgelist',
+ 'G135',
+ 6,
+ [[1, 2], [3, 5], [1, 3], [6, 3], [4, 2], [4, 3], [3, 2], [5, 2]]],
+ ['edgelist',
+ 'G136',
+ 6,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [2, 4], [5, 2], [2, 6]]],
+ ['edgelist',
+ 'G137',
+ 6,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [2, 4], [5, 2], [6, 5]]],
+ ['edgelist',
+ 'G138',
+ 6,
+ [[1, 2], [3, 6], [1, 3], [5, 1], [4, 2], [4, 3], [3, 2], [6, 2]]],
+ ['edgelist',
+ 'G139',
+ 6,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [2, 4], [5, 2], [6, 1]]],
+ ['edgelist',
+ 'G140',
+ 6,
+ [[1, 2], [3, 6], [1, 3], [5, 1], [4, 2], [4, 3], [4, 1], [6, 2]]],
+ ['edgelist',
+ 'G141',
+ 6,
+ [[3, 1], [4, 3], [5, 4], [6, 5], [3, 6], [2, 3], [5, 2], [6, 4]]],
+ ['edgelist',
+ 'G142',
+ 6,
+ [[1, 2], [2, 3], [1, 3], [4, 1], [4, 2], [4, 3], [1, 6], [6, 5]]],
+ ['edgelist',
+ 'G143',
+ 6,
+ [[1, 2], [3, 6], [1, 3], [5, 1], [4, 2], [4, 3], [6, 2], [6, 4]]],
+ ['edgelist',
+ 'G144',
+ 6,
+ [[2, 5], [3, 5], [3, 4], [1, 5], [4, 2], [5, 6], [1, 6], [4, 5]]],
+ ['edgelist',
+ 'G145',
+ 6,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [6, 1], [6, 3], [1, 3]]],
+ ['edgelist',
+ 'G146',
+ 6,
+ [[2, 6], [5, 2], [1, 5], [6, 1], [3, 6], [5, 3], [4, 5], [6, 4]]],
+ ['edgelist',
+ 'G147',
+ 6,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [1, 6], [3, 5], [6, 3]]],
+ ['edgelist',
+ 'G148',
+ 6,
+ [[2, 4], [3, 2], [1, 3], [6, 1], [5, 6], [4, 5], [2, 5], [1, 2]]],
+ ['edgelist',
+ 'G149',
+ 6,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [4, 1], [6, 3], [6, 1]]],
+ ['edgelist',
+ 'G150',
+ 6,
+ [[2, 5], [3, 5], [3, 4], [1, 5], [4, 2], [5, 6], [1, 6], [3, 2]]],
+ ['edgelist',
+ 'G151',
+ 6,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [5, 6], [6, 4], [2, 6]]],
+ ['edgelist',
+ 'G152',
+ 6,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [1, 6], [3, 5], [6, 2]]],
+ ['edgelist',
+ 'G153',
+ 6,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [2, 6], [6, 3], [6, 1]]],
+ ['edgelist',
+ 'G154',
+ 6,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [1, 6], [5, 2], [6, 3]]],
+ ['edgelist',
+ 'G155',
+ 6,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [2, 4], [5, 2], [3, 5], [1, 4]]],
+ ['edgelist',
+ 'G156',
+ 6,
+ [[1, 2], [3, 6], [1, 3], [4, 1], [4, 2], [4, 3], [3, 2], [6, 2], [5, 3]]],
+ ['edgelist',
+ 'G157',
+ 6,
+ [[1, 2], [3, 6], [1, 3], [4, 1], [4, 2], [4, 3], [3, 2], [6, 2], [1, 5]]],
+ ['edgelist',
+ 'G158',
+ 6,
+ [[1, 2], [2, 3], [3, 4], [1, 4], [5, 1], [5, 2], [5, 3], [5, 4], [5, 6]]],
+ ['edgelist',
+ 'G159',
+ 6,
+ [[3, 1], [5, 2], [2, 3], [6, 5], [3, 6], [4, 2], [6, 4], [4, 3], [5, 4]]],
+ ['edgelist',
+ 'G160',
+ 6,
+ [[1, 2], [3, 6], [1, 3], [4, 1], [4, 2], [4, 3], [3, 2], [6, 2], [5, 6]]],
+ ['edgelist',
+ 'G161',
+ 6,
+ [[2, 6], [5, 2], [1, 5], [6, 1], [3, 6], [5, 3], [4, 5], [6, 4], [5, 6]]],
+ ['edgelist',
+ 'G162',
+ 6,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [3, 6], [1, 6], [3, 1], [4, 1]]],
+ ['edgelist',
+ 'G163',
+ 6,
+ [[2, 4], [3, 2], [1, 3], [6, 1], [5, 6], [4, 5], [1, 5], [2, 1], [5, 2]]],
+ ['edgelist',
+ 'G164',
+ 6,
+ [[2, 4], [3, 2], [1, 3], [6, 1], [5, 6], [4, 5], [5, 2], [2, 1], [6, 2]]],
+ ['edgelist',
+ 'G165',
+ 6,
+ [[1, 2], [2, 3], [1, 3], [4, 1], [4, 2], [4, 3], [6, 5], [5, 1], [6, 1]]],
+ ['edgelist',
+ 'G166',
+ 6,
+ [[5, 4], [1, 5], [2, 1], [3, 2], [4, 3], [1, 6], [6, 4], [1, 4], [2, 6]]],
+ ['edgelist',
+ 'G167',
+ 6,
+ [[2, 4], [3, 2], [1, 3], [6, 1], [5, 6], [4, 5], [4, 3], [1, 4], [5, 1]]],
+ ['edgelist',
+ 'G168',
+ 6,
+ [[2, 4], [3, 2], [1, 3], [6, 1], [5, 6], [4, 5], [4, 3], [1, 4], [3, 5]]],
+ ['edgelist',
+ 'G169',
+ 6,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [3, 6], [1, 6], [3, 1], [6, 2]]],
+ ['edgelist',
+ 'G170',
+ 6,
+ [[2, 6], [5, 2], [1, 5], [6, 1], [3, 6], [5, 3], [4, 5], [6, 4], [3, 1]]],
+ ['edgelist',
+ 'G171',
+ 6,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [6, 1], [6, 5], [6, 3], [6, 4]]],
+ ['edgelist',
+ 'G172',
+ 6,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [4, 1], [6, 3], [6, 1], [6, 2]]],
+ ['edgelist',
+ 'G173',
+ 6,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [1, 6], [1, 4], [5, 3], [6, 3]]],
+ ['edgelist',
+ 'G174',
+ 6,
+ [[3, 4], [1, 3], [4, 1], [5, 4], [2, 5], [6, 2], [5, 6], [2, 1], [6, 3]]],
+ ['edgelist',
+ 'G175',
+ 6,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [1, 6], [1, 4], [6, 3], [5, 2]]],
+ ['edgelist',
+ 'G176',
+ 6,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [2, 4],
+ [5, 2],
+ [3, 5],
+ [1, 4],
+ [1, 3]]],
+ ['edgelist',
+ 'G177',
+ 6,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [2, 4],
+ [5, 2],
+ [3, 5],
+ [1, 4],
+ [5, 6]]],
+ ['edgelist',
+ 'G178',
+ 6,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [2, 4],
+ [5, 2],
+ [3, 5],
+ [1, 4],
+ [1, 6]]],
+ ['edgelist',
+ 'G179',
+ 6,
+ [[2, 6],
+ [5, 2],
+ [1, 5],
+ [6, 1],
+ [3, 6],
+ [5, 3],
+ [4, 5],
+ [6, 4],
+ [5, 6],
+ [2, 1]]],
+ ['edgelist',
+ 'G180',
+ 6,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [2, 4],
+ [5, 2],
+ [6, 5],
+ [4, 6],
+ [2, 6]]],
+ ['edgelist',
+ 'G181',
+ 6,
+ [[2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [4, 3],
+ [1, 4],
+ [5, 1],
+ [3, 5]]],
+ ['edgelist',
+ 'G182',
+ 6,
+ [[2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [4, 3],
+ [1, 4],
+ [3, 5],
+ [6, 3]]],
+ ['edgelist',
+ 'G183',
+ 6,
+ [[2, 1],
+ [5, 2],
+ [1, 5],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [2, 4],
+ [6, 2],
+ [3, 4],
+ [2, 3]]],
+ ['edgelist',
+ 'G184',
+ 6,
+ [[5, 4],
+ [1, 5],
+ [2, 1],
+ [3, 2],
+ [4, 3],
+ [1, 6],
+ [6, 4],
+ [1, 4],
+ [2, 6],
+ [6, 3]]],
+ ['edgelist',
+ 'G185',
+ 6,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [3, 6],
+ [1, 6],
+ [3, 1],
+ [6, 2],
+ [5, 2]]],
+ ['edgelist',
+ 'G186',
+ 6,
+ [[1, 2],
+ [3, 5],
+ [1, 3],
+ [5, 6],
+ [4, 2],
+ [4, 3],
+ [5, 2],
+ [6, 2],
+ [6, 3],
+ [6, 4]]],
+ ['edgelist',
+ 'G187',
+ 6,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [6, 1],
+ [6, 2],
+ [6, 3],
+ [6, 4],
+ [6, 5]]],
+ ['edgelist',
+ 'G188',
+ 6,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [3, 5],
+ [1, 3],
+ [2, 4],
+ [6, 2]]],
+ ['edgelist',
+ 'G189',
+ 6,
+ [[4, 5],
+ [2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [3, 5],
+ [6, 2],
+ [4, 3],
+ [1, 4]]],
+ ['edgelist',
+ 'G190',
+ 6,
+ [[2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [5, 2],
+ [6, 4],
+ [3, 6],
+ [2, 1]]],
+ ['edgelist',
+ 'G191',
+ 6,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [2, 4],
+ [5, 2],
+ [3, 5],
+ [1, 4],
+ [1, 3],
+ [2, 6]]],
+ ['edgelist',
+ 'G192',
+ 6,
+ [[1, 2],
+ [3, 5],
+ [1, 3],
+ [3, 2],
+ [4, 2],
+ [4, 3],
+ [5, 2],
+ [6, 2],
+ [6, 3],
+ [6, 4],
+ [1, 4]]],
+ ['edgelist',
+ 'G193',
+ 6,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [2, 4],
+ [5, 2],
+ [3, 5],
+ [1, 4],
+ [6, 1],
+ [5, 6]]],
+ ['edgelist',
+ 'G194',
+ 6,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [5, 6],
+ [1, 5],
+ [2, 4],
+ [5, 2],
+ [3, 5],
+ [1, 4],
+ [6, 4],
+ [1, 3]]],
+ ['edgelist',
+ 'G195',
+ 6,
+ [[2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [5, 2],
+ [6, 4],
+ [3, 6],
+ [2, 1],
+ [6, 2]]],
+ ['edgelist',
+ 'G196',
+ 6,
+ [[2, 4],
+ [5, 2],
+ [4, 5],
+ [3, 4],
+ [1, 3],
+ [5, 1],
+ [6, 5],
+ [3, 6],
+ [5, 3],
+ [1, 6],
+ [2, 6]]],
+ ['edgelist',
+ 'G197',
+ 6,
+ [[4, 5],
+ [2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [3, 5],
+ [6, 2],
+ [1, 4],
+ [2, 5],
+ [1, 2]]],
+ ['edgelist',
+ 'G198',
+ 6,
+ [[2, 6],
+ [5, 2],
+ [1, 5],
+ [6, 1],
+ [3, 6],
+ [5, 3],
+ [4, 5],
+ [6, 4],
+ [1, 2],
+ [3, 1],
+ [4, 3]]],
+ ['edgelist',
+ 'G199',
+ 6,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [3, 5],
+ [6, 3],
+ [2, 6],
+ [2, 5],
+ [1, 4]]],
+ ['edgelist',
+ 'G200',
+ 6,
+ [[1, 2],
+ [2, 3],
+ [1, 3],
+ [3, 4],
+ [5, 6],
+ [1, 5],
+ [2, 4],
+ [5, 2],
+ [3, 5],
+ [1, 4],
+ [6, 4],
+ [5, 4]]],
+ ['edgelist',
+ 'G201',
+ 6,
+ [[4, 3],
+ [2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [3, 6],
+ [3, 5],
+ [6, 2],
+ [1, 4],
+ [2, 5],
+ [1, 2],
+ [1, 5]]],
+ ['edgelist',
+ 'G202',
+ 6,
+ [[2, 6],
+ [5, 2],
+ [1, 5],
+ [6, 1],
+ [3, 6],
+ [5, 3],
+ [4, 5],
+ [6, 4],
+ [1, 2],
+ [3, 1],
+ [4, 3],
+ [5, 6]]],
+ ['edgelist',
+ 'G203',
+ 6,
+ [[4, 5],
+ [2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [3, 5],
+ [6, 2],
+ [1, 4],
+ [2, 5],
+ [1, 2],
+ [3, 4]]],
+ ['edgelist',
+ 'G204',
+ 6,
+ [[1, 2],
+ [2, 3],
+ [1, 3],
+ [4, 3],
+ [4, 2],
+ [5, 1],
+ [3, 5],
+ [6, 2],
+ [1, 6],
+ [5, 6],
+ [4, 5],
+ [6, 4]]],
+ ['edgelist',
+ 'G205',
+ 6,
+ [[4, 5],
+ [2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [3, 5],
+ [6, 2],
+ [1, 4],
+ [2, 5],
+ [1, 2],
+ [3, 4],
+ [1, 5]]],
+ ['edgelist',
+ 'G206',
+ 6,
+ [[1, 2],
+ [2, 3],
+ [1, 3],
+ [4, 3],
+ [4, 2],
+ [5, 1],
+ [3, 5],
+ [6, 2],
+ [1, 6],
+ [5, 6],
+ [4, 5],
+ [6, 4],
+ [4, 1]]],
+ ['edgelist',
+ 'G207',
+ 6,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [3, 5],
+ [6, 3],
+ [2, 6],
+ [2, 5],
+ [2, 4],
+ [3, 1],
+ [5, 1],
+ [6, 4]]],
+ ['edgelist',
+ 'G208',
+ 6,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [2, 3],
+ [2, 4],
+ [2, 5],
+ [2, 6],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [4, 5],
+ [4, 6],
+ [5, 6]]],
+ ['edgelist', 'G209', 7, []],
+ ['edgelist', 'G210', 7, [[7, 6]]],
+ ['edgelist', 'G211', 7, [[3, 4], [2, 3]]],
+ ['edgelist', 'G212', 7, [[6, 5], [7, 1]]],
+ ['edgelist', 'G213', 7, [[1, 5], [5, 3], [3, 1]]],
+ ['edgelist', 'G214', 7, [[1, 2], [1, 7], [1, 6]]],
+ ['edgelist', 'G215', 7, [[6, 5], [7, 1], [6, 7]]],
+ ['edgelist', 'G216', 7, [[4, 3], [2, 3], [6, 7]]],
+ ['edgelist', 'G217', 7, [[4, 2], [6, 7], [1, 5]]],
+ ['edgelist', 'G218', 7, [[3, 6], [7, 3], [6, 7], [2, 3]]],
+ ['edgelist', 'G219', 7, [[2, 3], [5, 2], [6, 5], [3, 6]]],
+ ['edgelist', 'G220', 7, [[2, 1], [6, 2], [2, 3], [5, 2]]],
+ ['edgelist', 'G221', 7, [[2, 1], [3, 2], [6, 3], [7, 3]]],
+ ['edgelist', 'G222', 7, [[4, 5], [3, 4], [2, 3], [1, 2]]],
+ ['edgelist', 'G223', 7, [[5, 3], [1, 5], [3, 1], [6, 7]]],
+ ['edgelist', 'G224', 7, [[1, 2], [7, 1], [1, 6], [5, 3]]],
+ ['edgelist', 'G225', 7, [[4, 2], [6, 5], [7, 6], [1, 7]]],
+ ['edgelist', 'G226', 7, [[1, 5], [4, 1], [3, 6], [7, 3]]],
+ ['edgelist', 'G227', 7, [[3, 4], [2, 3], [7, 1], [6, 5]]],
+ ['edgelist', 'G228', 7, [[1, 5], [4, 1], [2, 4], [5, 2], [2, 1]]],
+ ['edgelist', 'G229', 7, [[3, 6], [7, 3], [6, 7], [5, 3], [4, 3]]],
+ ['edgelist', 'G230', 7, [[5, 3], [5, 1], [3, 1], [6, 5], [7, 1]]],
+ ['edgelist', 'G231', 7, [[3, 6], [7, 3], [6, 7], [2, 3], [1, 2]]],
+ ['edgelist', 'G232', 7, [[5, 2], [1, 5], [4, 1], [2, 4], [3, 2]]],
+ ['edgelist', 'G233', 7, [[2, 3], [1, 2], [5, 1], [4, 5], [3, 4]]],
+ ['edgelist', 'G234', 7, [[6, 2], [6, 1], [3, 6], [4, 6], [5, 6]]],
+ ['edgelist', 'G235', 7, [[2, 6], [7, 2], [2, 1], [3, 2], [4, 3]]],
+ ['edgelist', 'G236', 7, [[2, 6], [5, 2], [3, 4], [7, 3], [3, 2]]],
+ ['edgelist', 'G237', 7, [[2, 6], [7, 2], [2, 3], [3, 4], [5, 4]]],
+ ['edgelist', 'G238', 7, [[3, 2], [4, 3], [5, 4], [6, 5], [4, 7]]],
+ ['edgelist', 'G239', 7, [[7, 6], [3, 7], [2, 3], [6, 3], [4, 5]]],
+ ['edgelist', 'G240', 7, [[5, 4], [6, 5], [7, 6], [1, 7], [2, 1]]],
+ ['edgelist', 'G241', 7, [[1, 5], [4, 1], [3, 6], [7, 3], [6, 7]]],
+ ['edgelist', 'G242', 7, [[5, 2], [6, 3], [7, 6], [4, 7], [3, 4]]],
+ ['edgelist', 'G243', 7, [[2, 5], [4, 2], [2, 1], [3, 2], [7, 6]]],
+ ['edgelist', 'G244', 7, [[1, 5], [4, 1], [2, 1], [3, 2], [7, 6]]],
+ ['edgelist', 'G245', 7, [[1, 5], [4, 1], [3, 2], [6, 3], [7, 3]]],
+ ['edgelist', 'G246', 7, [[7, 6], [4, 5], [3, 4], [2, 3], [1, 2]]],
+ ['edgelist', 'G247', 7, [[3, 4], [2, 3], [7, 1], [6, 7], [6, 5]]],
+ ['edgelist', 'G248', 7, [[1, 2], [5, 7], [6, 5], [4, 3], [7, 6]]],
+ ['edgelist', 'G249', 7, [[2, 6], [7, 2], [6, 7], [3, 6], [2, 3], [7, 3]]],
+ ['edgelist', 'G250', 7, [[2, 5], [4, 2], [3, 4], [5, 3], [2, 1], [3, 2]]],
+ ['edgelist', 'G251', 7, [[1, 5], [4, 1], [2, 4], [3, 2], [2, 5], [4, 5]]],
+ ['edgelist', 'G252', 7, [[6, 3], [5, 6], [3, 5], [4, 3], [7, 4], [3, 7]]],
+ ['edgelist', 'G253', 7, [[2, 3], [5, 2], [6, 5], [3, 6], [1, 2], [5, 1]]],
+ ['edgelist', 'G254', 7, [[2, 3], [6, 2], [5, 6], [3, 5], [1, 3], [6, 1]]],
+ ['edgelist', 'G255', 7, [[3, 6], [7, 3], [6, 7], [3, 5], [2, 3], [4, 3]]],
+ ['edgelist', 'G256', 7, [[2, 5], [4, 2], [3, 4], [2, 3], [3, 6], [7, 3]]],
+ ['edgelist', 'G257', 7, [[6, 5], [7, 6], [2, 7], [6, 2], [4, 7], [1, 2]]],
+ ['edgelist', 'G258', 7, [[7, 6], [2, 7], [6, 2], [4, 2], [1, 4], [2, 5]]],
+ ['edgelist', 'G259', 7, [[1, 5], [4, 1], [3, 4], [5, 3], [3, 6], [7, 3]]],
+ ['edgelist', 'G260', 7, [[2, 5], [4, 2], [3, 4], [2, 3], [3, 6], [7, 6]]],
+ ['edgelist', 'G261', 7, [[3, 4], [2, 3], [4, 7], [6, 5], [7, 6], [6, 3]]],
+ ['edgelist', 'G262', 7, [[3, 6], [7, 3], [6, 7], [2, 5], [4, 2], [3, 2]]],
+ ['edgelist', 'G263', 7, [[5, 6], [1, 5], [4, 1], [3, 4], [5, 3], [7, 4]]],
+ ['edgelist', 'G264', 7, [[1, 5], [4, 1], [2, 4], [7, 6], [2, 5], [2, 1]]],
+ ['edgelist', 'G265', 7, [[2, 5], [4, 2], [3, 4], [6, 3], [7, 6], [3, 7]]],
+ ['edgelist', 'G266', 7, [[7, 4], [6, 7], [5, 6], [2, 5], [3, 2], [6, 3]]],
+ ['edgelist', 'G267', 7, [[2, 1], [4, 2], [7, 4], [6, 7], [5, 6], [2, 5]]],
+ ['edgelist', 'G268', 7, [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [1, 6]]],
+ ['edgelist', 'G269', 7, [[1, 5], [4, 1], [5, 4], [3, 6], [7, 3], [6, 7]]],
+ ['edgelist', 'G270', 7, [[7, 4], [1, 7], [7, 3], [6, 7], [7, 2], [5, 7]]],
+ ['edgelist', 'G271', 7, [[3, 5], [6, 3], [3, 4], [7, 3], [2, 3], [2, 1]]],
+ ['edgelist', 'G272', 7, [[2, 1], [3, 2], [6, 3], [2, 5], [4, 2], [7, 3]]],
+ ['edgelist', 'G273', 7, [[2, 1], [3, 2], [4, 7], [2, 4], [5, 2], [6, 5]]],
+ ['edgelist', 'G274', 7, [[2, 1], [3, 2], [6, 3], [7, 6], [2, 5], [4, 2]]],
+ ['edgelist', 'G275', 7, [[2, 1], [3, 5], [6, 3], [7, 6], [3, 7], [4, 3]]],
+ ['edgelist', 'G276', 7, [[5, 1], [2, 5], [4, 2], [3, 2], [6, 3], [7, 3]]],
+ ['edgelist', 'G277', 7, [[7, 6], [2, 3], [1, 2], [3, 1], [4, 3], [1, 5]]],
+ ['edgelist', 'G278', 7, [[1, 5], [4, 1], [2, 1], [3, 2], [6, 3], [7, 3]]],
+ ['edgelist', 'G279', 7, [[2, 1], [4, 2], [7, 4], [3, 7], [5, 2], [6, 5]]],
+ ['edgelist', 'G280', 7, [[3, 6], [7, 3], [5, 3], [2, 5], [4, 2], [1, 4]]],
+ ['edgelist', 'G281', 7, [[1, 5], [4, 1], [3, 4], [5, 3], [2, 3], [7, 6]]],
+ ['edgelist', 'G282', 7, [[1, 5], [4, 1], [3, 2], [6, 3], [7, 6], [3, 7]]],
+ ['edgelist', 'G283', 7, [[4, 5], [2, 1], [3, 2], [6, 3], [7, 6], [3, 7]]],
+ ['edgelist', 'G284', 7, [[5, 6], [1, 5], [4, 1], [7, 4], [2, 1], [3, 2]]],
+ ['edgelist', 'G285', 7, [[3, 6], [7, 3], [6, 7], [2, 5], [4, 2], [2, 1]]],
+ ['edgelist', 'G286', 7, [[5, 6], [4, 5], [3, 4], [2, 3], [1, 2], [7, 1]]],
+ ['edgelist', 'G287', 7, [[7, 5], [6, 7], [5, 6], [3, 4], [2, 3], [1, 2]]],
+ ['edgelist', 'G288', 7, [[1, 2], [5, 1], [3, 4], [6, 3], [7, 6], [4, 7]]],
+ ['edgelist', 'G289', 7, [[2, 3], [1, 2], [5, 1], [4, 5], [3, 4], [7, 6]]],
+ ['edgelist',
+ 'G290',
+ 7,
+ [[2, 5], [4, 2], [3, 4], [5, 3], [2, 1], [3, 2], [4, 5]]],
+ ['edgelist',
+ 'G291',
+ 7,
+ [[2, 3], [6, 2], [5, 6], [3, 5], [1, 3], [6, 1], [6, 3]]],
+ ['edgelist',
+ 'G292',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [2, 4], [5, 2]]],
+ ['edgelist',
+ 'G293',
+ 7,
+ [[2, 3], [6, 2], [5, 6], [3, 5], [1, 3], [6, 1], [2, 1]]],
+ ['edgelist',
+ 'G294',
+ 7,
+ [[1, 5], [4, 1], [3, 4], [5, 3], [3, 6], [7, 3], [3, 1]]],
+ ['edgelist',
+ 'G295',
+ 7,
+ [[2, 5], [4, 2], [3, 4], [5, 3], [2, 1], [3, 2], [3, 7]]],
+ ['edgelist',
+ 'G296',
+ 7,
+ [[2, 5], [4, 2], [3, 4], [5, 3], [2, 1], [4, 5], [7, 4]]],
+ ['edgelist',
+ 'G297',
+ 7,
+ [[1, 5], [4, 1], [3, 4], [5, 3], [3, 6], [7, 3], [4, 5]]],
+ ['edgelist',
+ 'G298',
+ 7,
+ [[1, 5], [4, 1], [2, 4], [4, 7], [2, 5], [2, 1], [6, 5]]],
+ ['edgelist',
+ 'G299',
+ 7,
+ [[1, 5], [4, 1], [2, 4], [7, 6], [2, 5], [2, 1], [4, 5]]],
+ ['edgelist',
+ 'G300',
+ 7,
+ [[6, 3], [5, 6], [3, 5], [4, 3], [7, 4], [3, 7], [3, 2]]],
+ ['edgelist',
+ 'G301',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [1, 3], [3, 6]]],
+ ['edgelist',
+ 'G302',
+ 7,
+ [[6, 3], [5, 6], [3, 5], [4, 3], [7, 4], [3, 7], [4, 2]]],
+ ['edgelist',
+ 'G303',
+ 7,
+ [[2, 5], [4, 2], [3, 4], [5, 3], [3, 1], [3, 2], [7, 1]]],
+ ['edgelist',
+ 'G304',
+ 7,
+ [[2, 3], [6, 2], [5, 6], [3, 5], [1, 3], [6, 1], [4, 6]]],
+ ['edgelist',
+ 'G305',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [1, 3], [4, 6]]],
+ ['edgelist',
+ 'G306',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [1, 3], [2, 6]]],
+ ['edgelist',
+ 'G307',
+ 7,
+ [[4, 3], [5, 4], [4, 6], [3, 5], [6, 3], [7, 2], [7, 5]]],
+ ['edgelist',
+ 'G308',
+ 7,
+ [[2, 3], [6, 2], [5, 6], [3, 5], [1, 3], [6, 1], [1, 4]]],
+ ['edgelist',
+ 'G309',
+ 7,
+ [[4, 5], [2, 4], [3, 2], [7, 3], [6, 7], [2, 6], [5, 2]]],
+ ['edgelist',
+ 'G310',
+ 7,
+ [[1, 2], [5, 1], [2, 5], [3, 2], [4, 3], [6, 4], [5, 6]]],
+ ['edgelist',
+ 'G311',
+ 7,
+ [[7, 4], [6, 7], [2, 6], [3, 2], [4, 3], [5, 3], [6, 5]]],
+ ['edgelist',
+ 'G312',
+ 7,
+ [[2, 3], [5, 2], [6, 5], [7, 6], [4, 7], [3, 4], [6, 3]]],
+ ['edgelist',
+ 'G313',
+ 7,
+ [[5, 2], [4, 5], [2, 4], [3, 2], [7, 3], [6, 7], [3, 6]]],
+ ['edgelist',
+ 'G314',
+ 7,
+ [[4, 1], [7, 4], [1, 7], [2, 1], [1, 3], [6, 1], [1, 5]]],
+ ['edgelist',
+ 'G315',
+ 7,
+ [[2, 6], [7, 2], [2, 3], [4, 2], [5, 4], [2, 5], [5, 1]]],
+ ['edgelist',
+ 'G316',
+ 7,
+ [[6, 1], [7, 6], [1, 7], [6, 3], [2, 6], [7, 4], [5, 7]]],
+ ['edgelist',
+ 'G317',
+ 7,
+ [[5, 2], [1, 5], [2, 1], [3, 2], [1, 4], [7, 1], [5, 6]]],
+ ['edgelist',
+ 'G318',
+ 7,
+ [[6, 3], [7, 6], [3, 7], [3, 5], [4, 3], [2, 1], [3, 2]]],
+ ['edgelist',
+ 'G319',
+ 7,
+ [[5, 2], [1, 5], [4, 1], [2, 4], [3, 2], [2, 6], [7, 2]]],
+ ['edgelist',
+ 'G320',
+ 7,
+ [[2, 1], [5, 2], [1, 5], [6, 5], [3, 2], [4, 3], [7, 2]]],
+ ['edgelist',
+ 'G321',
+ 7,
+ [[1, 2], [5, 1], [2, 5], [3, 2], [4, 3], [6, 5], [7, 5]]],
+ ['edgelist',
+ 'G322',
+ 7,
+ [[3, 4], [6, 3], [7, 6], [4, 7], [2, 3], [5, 6], [1, 6]]],
+ ['edgelist',
+ 'G323',
+ 7,
+ [[1, 5], [4, 1], [2, 4], [5, 2], [2, 1], [3, 2], [7, 6]]],
+ ['edgelist',
+ 'G324',
+ 7,
+ [[3, 6], [7, 3], [6, 7], [5, 3], [2, 3], [1, 2], [4, 2]]],
+ ['edgelist',
+ 'G325',
+ 7,
+ [[3, 6], [7, 3], [5, 3], [2, 5], [4, 2], [3, 4], [1, 2]]],
+ ['edgelist',
+ 'G326',
+ 7,
+ [[7, 3], [6, 7], [3, 6], [2, 3], [1, 2], [5, 2], [4, 2]]],
+ ['edgelist',
+ 'G327',
+ 7,
+ [[1, 5], [4, 1], [2, 4], [5, 2], [6, 5], [3, 2], [7, 4]]],
+ ['edgelist',
+ 'G328',
+ 7,
+ [[3, 6], [7, 3], [6, 7], [5, 6], [4, 7], [2, 3], [1, 2]]],
+ ['edgelist',
+ 'G329',
+ 7,
+ [[3, 6], [7, 3], [2, 5], [2, 3], [1, 2], [5, 1], [1, 4]]],
+ ['edgelist',
+ 'G330',
+ 7,
+ [[7, 6], [2, 3], [5, 2], [1, 5], [4, 1], [2, 4], [4, 5]]],
+ ['edgelist',
+ 'G331',
+ 7,
+ [[5, 2], [1, 5], [2, 1], [4, 7], [3, 4], [1, 3], [6, 1]]],
+ ['edgelist',
+ 'G332',
+ 7,
+ [[5, 2], [1, 5], [4, 1], [2, 4], [3, 2], [6, 3], [7, 2]]],
+ ['edgelist',
+ 'G333',
+ 7,
+ [[5, 2], [1, 5], [2, 1], [3, 4], [1, 3], [6, 1], [7, 6]]],
+ ['edgelist',
+ 'G334',
+ 7,
+ [[1, 2], [6, 1], [7, 6], [4, 7], [3, 4], [1, 3], [5, 1]]],
+ ['edgelist',
+ 'G335',
+ 7,
+ [[2, 1], [5, 2], [3, 5], [4, 3], [5, 4], [1, 5], [7, 6]]],
+ ['edgelist',
+ 'G336',
+ 7,
+ [[4, 7], [3, 4], [2, 3], [1, 2], [5, 1], [2, 5], [6, 5]]],
+ ['edgelist',
+ 'G337',
+ 7,
+ [[2, 1], [6, 2], [7, 6], [3, 7], [2, 3], [4, 3], [5, 4]]],
+ ['edgelist',
+ 'G338',
+ 7,
+ [[3, 4], [2, 3], [1, 2], [5, 1], [6, 5], [7, 6], [5, 2]]],
+ ['edgelist',
+ 'G339',
+ 7,
+ [[6, 3], [7, 6], [3, 7], [2, 3], [5, 2], [1, 5], [4, 2]]],
+ ['edgelist',
+ 'G340',
+ 7,
+ [[3, 4], [2, 3], [1, 2], [5, 1], [6, 5], [7, 6], [6, 3]]],
+ ['edgelist',
+ 'G341',
+ 7,
+ [[2, 5], [1, 2], [3, 1], [4, 3], [6, 4], [1, 6], [7, 4]]],
+ ['edgelist',
+ 'G342',
+ 7,
+ [[3, 2], [4, 3], [7, 4], [6, 7], [1, 6], [3, 1], [6, 5]]],
+ ['edgelist',
+ 'G343',
+ 7,
+ [[6, 3], [7, 6], [3, 7], [2, 3], [1, 2], [5, 1], [4, 1]]],
+ ['edgelist',
+ 'G344',
+ 7,
+ [[5, 2], [1, 5], [4, 1], [2, 4], [3, 2], [6, 3], [7, 3]]],
+ ['edgelist',
+ 'G345',
+ 7,
+ [[2, 1], [3, 2], [6, 3], [5, 6], [1, 5], [5, 2], [7, 4]]],
+ ['edgelist',
+ 'G346',
+ 7,
+ [[3, 6], [7, 3], [1, 5], [4, 1], [2, 4], [5, 2], [2, 1]]],
+ ['edgelist',
+ 'G347',
+ 7,
+ [[7, 6], [1, 5], [4, 1], [2, 4], [5, 2], [3, 5], [4, 3]]],
+ ['edgelist',
+ 'G348',
+ 7,
+ [[3, 2], [6, 3], [5, 6], [1, 5], [4, 1], [7, 4], [3, 7]]],
+ ['edgelist',
+ 'G349',
+ 7,
+ [[5, 1], [4, 5], [2, 4], [3, 2], [6, 3], [7, 6], [3, 7]]],
+ ['edgelist',
+ 'G350',
+ 7,
+ [[7, 6], [3, 7], [2, 3], [5, 2], [1, 5], [4, 1], [2, 4]]],
+ ['edgelist',
+ 'G351',
+ 7,
+ [[5, 2], [1, 5], [3, 1], [4, 3], [7, 4], [6, 7], [1, 6]]],
+ ['edgelist',
+ 'G352',
+ 7,
+ [[1, 5], [4, 1], [5, 4], [3, 2], [6, 3], [7, 6], [3, 7]]],
+ ['edgelist',
+ 'G353',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [6, 7], [1, 7]]],
+ ['edgelist',
+ 'G354',
+ 7,
+ [[2, 1], [5, 2], [1, 5], [6, 3], [7, 6], [4, 7], [3, 4]]],
+ ['edgelist',
+ 'G355',
+ 7,
+ [[1, 2], [5, 1], [6, 5], [3, 6], [2, 3], [6, 2], [5, 2], [3, 5]]],
+ ['edgelist',
+ 'G356',
+ 7,
+ [[5, 2], [6, 5], [3, 6], [2, 3], [1, 2], [6, 1], [1, 5], [3, 1]]],
+ ['edgelist',
+ 'G357',
+ 7,
+ [[1, 5], [4, 1], [2, 4], [5, 2], [2, 1], [4, 5], [6, 2], [7, 2]]],
+ ['edgelist',
+ 'G358',
+ 7,
+ [[5, 2], [6, 5], [3, 6], [2, 3], [6, 2], [7, 6], [3, 5], [4, 3]]],
+ ['edgelist',
+ 'G359',
+ 7,
+ [[2, 4], [1, 2], [5, 1], [3, 5], [2, 3], [5, 2], [6, 5], [2, 6]]],
+ ['edgelist',
+ 'G360',
+ 7,
+ [[3, 1], [4, 3], [7, 4], [6, 7], [1, 6], [4, 1], [1, 7], [5, 1]]],
+ ['edgelist',
+ 'G361',
+ 7,
+ [[2, 1], [3, 2], [6, 3], [5, 6], [1, 5], [3, 1], [6, 1], [7, 6]]],
+ ['edgelist',
+ 'G362',
+ 7,
+ [[2, 1], [3, 2], [4, 3], [2, 4], [5, 4], [3, 5], [6, 3], [4, 6]]],
+ ['edgelist',
+ 'G363',
+ 7,
+ [[3, 1], [4, 3], [7, 4], [6, 7], [1, 6], [4, 1], [7, 1], [5, 6]]],
+ ['edgelist',
+ 'G364',
+ 7,
+ [[2, 1], [3, 2], [5, 4], [2, 6], [5, 2], [3, 5], [6, 3], [4, 6]]],
+ ['edgelist',
+ 'G365',
+ 7,
+ [[4, 6], [3, 2], [5, 4], [2, 6], [5, 2], [3, 5], [6, 3], [5, 7]]],
+ ['edgelist',
+ 'G366',
+ 7,
+ [[1, 5], [4, 1], [2, 4], [5, 2], [2, 1], [4, 5], [3, 2], [6, 3]]],
+ ['edgelist',
+ 'G367',
+ 7,
+ [[4, 6], [3, 2], [5, 4], [2, 6], [5, 2], [3, 5], [6, 3], [1, 4]]],
+ ['edgelist',
+ 'G368',
+ 7,
+ [[5, 1], [3, 5], [1, 3], [4, 1], [3, 4], [6, 3], [7, 6], [3, 7]]],
+ ['edgelist',
+ 'G369',
+ 7,
+ [[4, 3], [7, 4], [6, 7], [3, 6], [1, 3], [6, 1], [5, 6], [3, 5]]],
+ ['edgelist',
+ 'G370',
+ 7,
+ [[1, 6], [5, 1], [3, 5], [6, 3], [2, 6], [5, 2], [4, 5], [6, 4]]],
+ ['edgelist',
+ 'G371',
+ 7,
+ [[3, 4], [2, 3], [5, 2], [6, 5], [2, 6], [6, 3], [7, 6], [4, 7]]],
+ ['edgelist',
+ 'G372',
+ 7,
+ [[6, 3], [5, 6], [1, 5], [4, 1], [7, 4], [3, 7], [5, 3], [4, 3]]],
+ ['edgelist',
+ 'G373',
+ 7,
+ [[1, 5], [4, 1], [2, 4], [5, 2], [3, 5], [4, 3], [6, 5], [3, 6]]],
+ ['edgelist',
+ 'G374',
+ 7,
+ [[6, 7], [3, 6], [7, 3], [4, 3], [5, 4], [1, 5], [4, 1], [3, 5]]],
+ ['edgelist',
+ 'G375',
+ 7,
+ [[2, 1], [6, 1], [4, 3], [2, 4], [6, 3], [7, 2], [7, 3], [7, 6]]],
+ ['edgelist',
+ 'G376',
+ 7,
+ [[6, 5], [7, 6], [4, 7], [1, 4], [5, 1], [3, 5], [4, 3], [1, 3]]],
+ ['edgelist',
+ 'G377',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [1, 6], [5, 3], [2, 6]]],
+ ['edgelist',
+ 'G378',
+ 7,
+ [[6, 1], [7, 3], [1, 7], [2, 1], [3, 2], [6, 3], [5, 6], [5, 7]]],
+ ['edgelist',
+ 'G379',
+ 7,
+ [[1, 5], [4, 1], [2, 4], [5, 2], [2, 1], [3, 2], [2, 6], [7, 2]]],
+ ['edgelist',
+ 'G380',
+ 7,
+ [[1, 3], [5, 1], [2, 5], [1, 2], [4, 1], [2, 4], [6, 2], [7, 2]]],
+ ['edgelist',
+ 'G381',
+ 7,
+ [[5, 3], [1, 5], [4, 1], [2, 4], [5, 2], [2, 1], [2, 6], [7, 2]]],
+ ['edgelist',
+ 'G382',
+ 7,
+ [[1, 5], [4, 1], [5, 4], [2, 5], [4, 2], [2, 6], [3, 2], [7, 2]]],
+ ['edgelist',
+ 'G383',
+ 7,
+ [[3, 2], [1, 3], [4, 1], [6, 4], [3, 6], [4, 3], [5, 4], [7, 6]]],
+ ['edgelist',
+ 'G384',
+ 7,
+ [[5, 3], [1, 5], [4, 1], [2, 4], [5, 2], [4, 5], [2, 6], [7, 2]]],
+ ['edgelist',
+ 'G385',
+ 7,
+ [[3, 2], [1, 3], [4, 1], [6, 4], [3, 6], [7, 6], [5, 4], [6, 1]]],
+ ['edgelist',
+ 'G386',
+ 7,
+ [[2, 1], [3, 2], [4, 3], [2, 4], [5, 3], [4, 5], [5, 6], [7, 5]]],
+ ['edgelist',
+ 'G387',
+ 7,
+ [[7, 6], [2, 3], [5, 2], [1, 5], [4, 1], [2, 4], [1, 2], [4, 5]]],
+ ['edgelist',
+ 'G388',
+ 7,
+ [[1, 2], [7, 6], [3, 4], [7, 5], [7, 4], [7, 3], [7, 1], [7, 2]]],
+ ['edgelist',
+ 'G389',
+ 7,
+ [[7, 5], [2, 3], [3, 4], [7, 6], [5, 6], [7, 3], [7, 1], [7, 2]]],
+ ['edgelist',
+ 'G390',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [7, 6], [7, 5], [7, 4], [7, 1], [7, 3]]],
+ ['edgelist',
+ 'G391',
+ 7,
+ [[1, 5], [4, 1], [2, 4], [5, 2], [2, 1], [7, 2], [6, 2], [3, 6]]],
+ ['edgelist',
+ 'G392',
+ 7,
+ [[4, 1], [3, 4], [5, 3], [1, 5], [2, 1], [3, 2], [6, 3], [7, 3]]],
+ ['edgelist',
+ 'G393',
+ 7,
+ [[3, 2], [4, 3], [7, 4], [6, 7], [1, 6], [3, 1], [6, 3], [5, 6]]],
+ ['edgelist',
+ 'G394',
+ 7,
+ [[2, 1], [3, 2], [4, 3], [5, 4], [6, 3], [2, 6], [7, 2], [3, 7]]],
+ ['edgelist',
+ 'G395',
+ 7,
+ [[3, 6], [5, 3], [2, 5], [4, 2], [1, 4], [2, 1], [3, 2], [7, 3]]],
+ ['edgelist',
+ 'G396',
+ 7,
+ [[5, 6], [1, 5], [4, 1], [3, 4], [5, 3], [2, 5], [4, 2], [7, 4]]],
+ ['edgelist',
+ 'G397',
+ 7,
+ [[1, 2], [5, 1], [2, 5], [3, 2], [5, 3], [6, 5], [2, 6], [7, 4]]],
+ ['edgelist',
+ 'G398',
+ 7,
+ [[1, 2], [3, 1], [4, 3], [5, 4], [2, 5], [3, 2], [2, 7], [6, 1]]],
+ ['edgelist',
+ 'G399',
+ 7,
+ [[5, 6], [1, 5], [2, 1], [5, 2], [4, 1], [2, 4], [7, 2], [3, 7]]],
+ ['edgelist',
+ 'G400',
+ 7,
+ [[3, 6], [5, 3], [1, 5], [2, 1], [5, 2], [4, 1], [2, 4], [7, 2]]],
+ ['edgelist',
+ 'G401',
+ 7,
+ [[2, 7], [3, 2], [1, 3], [2, 1], [5, 2], [4, 5], [3, 4], [5, 6]]],
+ ['edgelist',
+ 'G402',
+ 7,
+ [[1, 2], [3, 1], [4, 3], [5, 4], [2, 5], [3, 2], [2, 7], [6, 4]]],
+ ['edgelist',
+ 'G403',
+ 7,
+ [[1, 5], [4, 1], [5, 4], [2, 5], [4, 2], [6, 2], [7, 3], [2, 7]]],
+ ['edgelist',
+ 'G404',
+ 7,
+ [[3, 4], [2, 3], [1, 2], [6, 1], [5, 6], [1, 5], [3, 1], [7, 6]]],
+ ['edgelist',
+ 'G405',
+ 7,
+ [[5, 6], [1, 5], [4, 1], [2, 4], [5, 2], [3, 5], [4, 3], [7, 3]]],
+ ['edgelist',
+ 'G406',
+ 7,
+ [[3, 4], [2, 3], [1, 2], [5, 1], [6, 5], [5, 2], [3, 7], [6, 3]]],
+ ['edgelist',
+ 'G407',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [7, 4], [5, 6], [7, 3], [7, 1], [7, 2]]],
+ ['edgelist',
+ 'G408',
+ 7,
+ [[5, 2], [1, 5], [4, 1], [2, 4], [1, 2], [3, 2], [6, 3], [7, 3]]],
+ ['edgelist',
+ 'G409',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [7, 6], [5, 6], [7, 3], [7, 5], [7, 2]]],
+ ['edgelist',
+ 'G410',
+ 7,
+ [[1, 2], [5, 1], [1, 3], [6, 1], [7, 6], [4, 7], [3, 4], [6, 3]]],
+ ['edgelist',
+ 'G411',
+ 7,
+ [[1, 5], [4, 1], [3, 4], [5, 3], [2, 5], [4, 2], [3, 6], [7, 3]]],
+ ['edgelist',
+ 'G412',
+ 7,
+ [[5, 6], [4, 5], [2, 4], [3, 2], [7, 3], [5, 7], [4, 3], [1, 2]]],
+ ['edgelist',
+ 'G413',
+ 7,
+ [[2, 1], [3, 7], [4, 3], [5, 4], [6, 3], [2, 6], [7, 2], [7, 6]]],
+ ['edgelist',
+ 'G414',
+ 7,
+ [[3, 4], [2, 3], [1, 2], [5, 1], [6, 5], [7, 6], [6, 3], [5, 2]]],
+ ['edgelist',
+ 'G415',
+ 7,
+ [[5, 2], [1, 5], [4, 1], [2, 4], [4, 5], [3, 2], [3, 6], [7, 3]]],
+ ['edgelist',
+ 'G416',
+ 7,
+ [[1, 7], [5, 1], [2, 5], [4, 2], [1, 4], [3, 5], [4, 3], [6, 3]]],
+ ['edgelist',
+ 'G417',
+ 7,
+ [[1, 5], [4, 1], [2, 4], [5, 2], [3, 5], [4, 3], [2, 1], [7, 6]]],
+ ['edgelist',
+ 'G418',
+ 7,
+ [[1, 2], [5, 1], [4, 3], [7, 4], [6, 7], [3, 6], [7, 3], [4, 6]]],
+ ['edgelist',
+ 'G419',
+ 7,
+ [[6, 3], [7, 6], [3, 7], [5, 3], [1, 5], [4, 1], [3, 4], [2, 3]]],
+ ['edgelist',
+ 'G420',
+ 7,
+ [[3, 1], [2, 3], [1, 2], [6, 1], [5, 6], [1, 5], [7, 1], [4, 7]]],
+ ['edgelist',
+ 'G421',
+ 7,
+ [[1, 2], [3, 1], [4, 3], [3, 2], [2, 5], [6, 5], [6, 4], [2, 7]]],
+ ['edgelist',
+ 'G422',
+ 7,
+ [[2, 7], [3, 2], [1, 3], [2, 1], [5, 2], [4, 5], [3, 4], [6, 7]]],
+ ['edgelist',
+ 'G423',
+ 7,
+ [[7, 2], [1, 7], [2, 1], [6, 2], [1, 6], [3, 2], [4, 3], [5, 4]]],
+ ['edgelist',
+ 'G424',
+ 7,
+ [[7, 6], [3, 7], [2, 3], [5, 2], [4, 5], [1, 4], [5, 1], [3, 5]]],
+ ['edgelist',
+ 'G425',
+ 7,
+ [[2, 7], [1, 2], [6, 1], [2, 6], [4, 1], [5, 4], [3, 5], [1, 3]]],
+ ['edgelist',
+ 'G426',
+ 7,
+ [[3, 7], [5, 3], [1, 5], [2, 1], [5, 2], [4, 5], [6, 4], [3, 6]]],
+ ['edgelist',
+ 'G427',
+ 7,
+ [[2, 1], [3, 2], [7, 3], [6, 7], [2, 6], [5, 2], [4, 5], [3, 4]]],
+ ['edgelist',
+ 'G428',
+ 7,
+ [[7, 2], [5, 4], [2, 1], [6, 2], [4, 3], [3, 2], [5, 7], [6, 5]]],
+ ['edgelist',
+ 'G429',
+ 7,
+ [[5, 3], [1, 5], [2, 1], [5, 2], [4, 5], [7, 4], [6, 7], [4, 6]]],
+ ['edgelist',
+ 'G430',
+ 7,
+ [[5, 2], [3, 5], [1, 3], [7, 1], [4, 7], [1, 4], [6, 1], [5, 6]]],
+ ['edgelist',
+ 'G431',
+ 7,
+ [[6, 7], [5, 6], [1, 5], [4, 1], [3, 4], [5, 3], [2, 5], [4, 2]]],
+ ['edgelist',
+ 'G432',
+ 7,
+ [[7, 4], [6, 7], [5, 6], [1, 5], [2, 1], [3, 2], [6, 3], [5, 2]]],
+ ['edgelist',
+ 'G433',
+ 7,
+ [[1, 2], [3, 1], [4, 3], [3, 2], [2, 5], [6, 5], [6, 4], [5, 7]]],
+ ['edgelist',
+ 'G434',
+ 7,
+ [[5, 1], [4, 5], [3, 4], [7, 3], [6, 7], [2, 6], [5, 2], [3, 2]]],
+ ['edgelist',
+ 'G435',
+ 7,
+ [[7, 2], [1, 7], [5, 4], [6, 2], [1, 6], [3, 2], [4, 3], [6, 7]]],
+ ['edgelist',
+ 'G436',
+ 7,
+ [[7, 3], [6, 7], [4, 6], [7, 4], [5, 4], [1, 5], [2, 1], [5, 2]]],
+ ['edgelist',
+ 'G437',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [1, 6], [1, 7], [6, 2]]],
+ ['edgelist',
+ 'G438',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [1, 6], [1, 7], [5, 3]]],
+ ['edgelist',
+ 'G439',
+ 7,
+ [[1, 2], [3, 1], [4, 3], [5, 4], [2, 5], [3, 2], [6, 7], [1, 6]]],
+ ['edgelist',
+ 'G440',
+ 7,
+ [[5, 1], [3, 5], [4, 3], [7, 4], [6, 7], [5, 6], [2, 3], [6, 2]]],
+ ['edgelist',
+ 'G441',
+ 7,
+ [[6, 2], [3, 5], [4, 3], [1, 4], [6, 1], [5, 6], [2, 3], [1, 7]]],
+ ['edgelist',
+ 'G442',
+ 7,
+ [[6, 7], [3, 6], [5, 3], [1, 5], [4, 1], [3, 4], [2, 5], [4, 2]]],
+ ['edgelist',
+ 'G443',
+ 7,
+ [[1, 5], [2, 1], [5, 2], [4, 5], [6, 4], [7, 6], [3, 7], [5, 3]]],
+ ['edgelist',
+ 'G444',
+ 7,
+ [[1, 2], [7, 6], [3, 4], [4, 5], [7, 5], [1, 6], [7, 3], [7, 2]]],
+ ['edgelist',
+ 'G445',
+ 7,
+ [[2, 3], [1, 2], [5, 1], [6, 5], [3, 6], [4, 3], [7, 4], [6, 7]]],
+ ['edgelist',
+ 'G446',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [1, 6], [3, 7], [2, 7]]],
+ ['edgelist',
+ 'G447',
+ 7,
+ [[7, 3], [6, 7], [3, 6], [2, 3], [5, 2], [1, 5], [4, 1], [2, 4]]],
+ ['edgelist',
+ 'G448',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [1, 6], [7, 6], [7, 2]]],
+ ['edgelist',
+ 'G449',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [1, 6], [7, 1], [7, 4]]],
+ ['edgelist',
+ 'G450',
+ 7,
+ [[1, 5], [2, 1], [4, 3], [2, 5], [3, 6], [6, 4], [7, 5], [7, 4]]],
+ ['edgelist',
+ 'G451',
+ 7,
+ [[1, 5], [4, 1], [2, 4], [5, 2], [2, 1], [7, 3], [6, 7], [3, 6]]],
+ ['edgelist',
+ 'G452',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [2, 4], [5, 2], [3, 5], [1, 4]]],
+ ['edgelist',
+ 'G453',
+ 7,
+ [[1, 2], [3, 6], [1, 3], [4, 1], [4, 2], [4, 3], [3, 2], [6, 2], [5, 3]]],
+ ['edgelist',
+ 'G454',
+ 7,
+ [[1, 2], [3, 6], [1, 3], [4, 1], [4, 2], [4, 3], [3, 2], [6, 2], [1, 5]]],
+ ['edgelist',
+ 'G455',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [1, 4], [5, 1], [5, 2], [5, 3], [5, 4], [5, 6]]],
+ ['edgelist',
+ 'G456',
+ 7,
+ [[3, 1], [5, 2], [2, 3], [6, 5], [3, 6], [4, 2], [6, 4], [4, 3], [5, 4]]],
+ ['edgelist',
+ 'G457',
+ 7,
+ [[1, 2], [3, 6], [1, 3], [4, 1], [4, 2], [4, 3], [3, 2], [6, 2], [5, 6]]],
+ ['edgelist',
+ 'G458',
+ 7,
+ [[2, 6], [5, 2], [1, 5], [6, 1], [3, 6], [5, 3], [4, 5], [6, 4], [5, 6]]],
+ ['edgelist',
+ 'G459',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [3, 6], [1, 6], [3, 1], [4, 1]]],
+ ['edgelist',
+ 'G460',
+ 7,
+ [[2, 4], [3, 2], [1, 3], [6, 1], [5, 6], [4, 5], [1, 5], [2, 1], [5, 2]]],
+ ['edgelist',
+ 'G461',
+ 7,
+ [[2, 4], [3, 2], [1, 3], [6, 1], [5, 6], [4, 5], [5, 2], [2, 1], [6, 2]]],
+ ['edgelist',
+ 'G462',
+ 7,
+ [[1, 2], [2, 3], [1, 3], [4, 1], [4, 2], [4, 3], [6, 5], [5, 1], [6, 1]]],
+ ['edgelist',
+ 'G463',
+ 7,
+ [[5, 4], [1, 5], [2, 1], [3, 2], [4, 3], [1, 6], [6, 4], [1, 4], [2, 6]]],
+ ['edgelist',
+ 'G464',
+ 7,
+ [[2, 4], [3, 2], [1, 3], [6, 1], [5, 6], [4, 5], [4, 3], [1, 4], [5, 1]]],
+ ['edgelist',
+ 'G465',
+ 7,
+ [[2, 4], [3, 2], [1, 3], [6, 1], [5, 6], [4, 5], [4, 3], [1, 4], [3, 5]]],
+ ['edgelist',
+ 'G466',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [3, 6], [1, 6], [3, 1], [6, 2]]],
+ ['edgelist',
+ 'G467',
+ 7,
+ [[2, 6], [5, 2], [1, 5], [6, 1], [3, 6], [5, 3], [4, 5], [6, 4], [3, 1]]],
+ ['edgelist',
+ 'G468',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [6, 1], [6, 5], [6, 3], [6, 4]]],
+ ['edgelist',
+ 'G469',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [4, 1], [6, 3], [6, 1], [6, 2]]],
+ ['edgelist',
+ 'G470',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [1, 6], [1, 4], [5, 3], [6, 3]]],
+ ['edgelist',
+ 'G471',
+ 7,
+ [[3, 4], [1, 3], [4, 1], [5, 4], [2, 5], [6, 2], [5, 6], [2, 1], [6, 3]]],
+ ['edgelist',
+ 'G472',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [1, 6], [1, 4], [6, 3], [5, 2]]],
+ ['edgelist',
+ 'G473',
+ 7,
+ [[1, 2], [2, 3], [1, 3], [4, 1], [4, 2], [4, 3], [6, 1], [1, 5], [1, 7]]],
+ ['edgelist',
+ 'G474',
+ 7,
+ [[1, 2], [2, 3], [1, 3], [4, 1], [4, 2], [4, 3], [6, 1], [1, 5], [3, 7]]],
+ ['edgelist',
+ 'G475',
+ 7,
+ [[2, 3], [4, 2], [1, 4], [2, 1], [3, 1], [4, 3], [6, 4], [5, 1], [2, 7]]],
+ ['edgelist',
+ 'G476',
+ 7,
+ [[1, 2], [3, 5], [1, 3], [4, 2], [4, 3], [3, 2], [5, 2], [6, 3], [3, 7]]],
+ ['edgelist',
+ 'G477',
+ 7,
+ [[1, 2], [3, 5], [1, 3], [6, 3], [4, 2], [4, 3], [3, 2], [5, 2], [2, 7]]],
+ ['edgelist',
+ 'G478',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [2, 4], [5, 2], [2, 6], [2, 7]]],
+ ['edgelist',
+ 'G479',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [2, 4], [5, 2], [2, 6], [5, 7]]],
+ ['edgelist',
+ 'G480',
+ 7,
+ [[1, 2], [3, 6], [1, 3], [5, 1], [4, 2], [4, 3], [3, 2], [6, 2], [2, 7]]],
+ ['edgelist',
+ 'G481',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [2, 4], [5, 2], [6, 5], [5, 7]]],
+ ['edgelist',
+ 'G482',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [2, 4], [5, 2], [6, 5], [4, 7]]],
+ ['edgelist',
+ 'G483',
+ 7,
+ [[1, 2], [3, 6], [1, 3], [5, 1], [4, 2], [4, 3], [3, 2], [6, 2], [1, 7]]],
+ ['edgelist',
+ 'G484',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [2, 4], [5, 2], [6, 1], [2, 7]]],
+ ['edgelist',
+ 'G485',
+ 7,
+ [[3, 1], [4, 3], [5, 4], [6, 5], [3, 6], [2, 3], [5, 2], [6, 4], [3, 7]]],
+ ['edgelist',
+ 'G486',
+ 7,
+ [[1, 2], [3, 6], [1, 3], [5, 1], [4, 2], [4, 3], [4, 1], [6, 2], [1, 7]]],
+ ['edgelist',
+ 'G487',
+ 7,
+ [[1, 2], [2, 3], [1, 3], [4, 1], [4, 2], [4, 3], [6, 1], [1, 5], [6, 7]]],
+ ['edgelist',
+ 'G488',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [2, 4], [5, 2], [6, 1], [5, 7]]],
+ ['edgelist',
+ 'G489',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [2, 4], [5, 2], [6, 5], [3, 7]]],
+ ['edgelist',
+ 'G490',
+ 7,
+ [[3, 1], [4, 3], [5, 4], [6, 5], [3, 6], [2, 3], [5, 2], [6, 4], [6, 7]]],
+ ['edgelist',
+ 'G491',
+ 7,
+ [[2, 3], [4, 2], [1, 4], [2, 1], [3, 1], [4, 3], [5, 1], [7, 6], [7, 4]]],
+ ['edgelist',
+ 'G492',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [2, 4], [5, 2], [6, 1], [1, 7]]],
+ ['edgelist',
+ 'G493',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [4, 7], [6, 5], [1, 4], [3, 5]]],
+ ['edgelist',
+ 'G494',
+ 7,
+ [[1, 2], [3, 6], [1, 3], [5, 1], [4, 2], [4, 3], [3, 2], [6, 2], [6, 7]]],
+ ['edgelist',
+ 'G495',
+ 7,
+ [[3, 1], [4, 3], [5, 4], [6, 5], [3, 6], [2, 3], [5, 2], [6, 4], [5, 7]]],
+ ['edgelist',
+ 'G496',
+ 7,
+ [[1, 2], [3, 6], [1, 3], [4, 1], [4, 2], [4, 3], [3, 2], [6, 2], [5, 7]]],
+ ['edgelist',
+ 'G497',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [2, 4], [5, 2], [6, 1], [3, 7]]],
+ ['edgelist',
+ 'G498',
+ 7,
+ [[1, 2], [3, 6], [1, 3], [5, 1], [4, 2], [4, 3], [4, 1], [6, 2], [6, 7]]],
+ ['edgelist',
+ 'G499',
+ 7,
+ [[1, 2], [3, 6], [1, 3], [6, 5], [4, 2], [4, 3], [4, 1], [6, 2], [3, 7]]],
+ ['edgelist',
+ 'G500',
+ 7,
+ [[1, 2], [3, 6], [1, 3], [5, 1], [4, 2], [4, 3], [6, 2], [6, 4], [1, 7]]],
+ ['edgelist',
+ 'G501',
+ 7,
+ [[1, 2], [2, 3], [1, 3], [4, 1], [4, 2], [4, 3], [1, 6], [6, 5], [6, 7]]],
+ ['edgelist',
+ 'G502',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [1, 4], [5, 1], [5, 2], [5, 3], [5, 4], [6, 7]]],
+ ['edgelist',
+ 'G503',
+ 7,
+ [[2, 5], [3, 5], [3, 4], [1, 5], [4, 2], [5, 6], [1, 6], [4, 5], [5, 7]]],
+ ['edgelist',
+ 'G504',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [6, 1], [6, 3], [1, 3], [1, 7]]],
+ ['edgelist',
+ 'G505',
+ 7,
+ [[2, 5], [3, 5], [3, 4], [1, 5], [4, 2], [5, 6], [1, 6], [4, 5], [4, 7]]],
+ ['edgelist',
+ 'G506',
+ 7,
+ [[1, 2], [3, 5], [1, 3], [6, 3], [4, 2], [4, 3], [3, 2], [5, 2], [6, 7]]],
+ ['edgelist',
+ 'G507',
+ 7,
+ [[2, 6], [5, 2], [1, 5], [6, 1], [3, 6], [5, 3], [4, 5], [6, 4], [5, 7]]],
+ ['edgelist',
+ 'G508',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [1, 6], [3, 5], [6, 3], [3, 7]]],
+ ['edgelist',
+ 'G509',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [2, 4], [5, 2], [7, 6], [7, 2]]],
+ ['edgelist',
+ 'G510',
+ 7,
+ [[2, 5], [3, 5], [3, 4], [1, 5], [4, 2], [5, 6], [1, 6], [4, 5], [3, 7]]],
+ ['edgelist',
+ 'G511',
+ 7,
+ [[2, 5], [3, 5], [3, 4], [1, 5], [4, 2], [5, 6], [1, 6], [4, 5], [1, 7]]],
+ ['edgelist',
+ 'G512',
+ 7,
+ [[2, 4], [3, 2], [1, 3], [6, 1], [7, 6], [4, 7], [2, 7], [1, 2], [2, 5]]],
+ ['edgelist',
+ 'G513',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [4, 1], [6, 3], [6, 1], [1, 7]]],
+ ['edgelist',
+ 'G514',
+ 7,
+ [[2, 5], [3, 5], [3, 4], [1, 5], [4, 2], [5, 6], [1, 6], [3, 2], [5, 7]]],
+ ['edgelist',
+ 'G515',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [2, 4], [5, 2], [6, 5], [6, 7]]],
+ ['edgelist',
+ 'G516',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [1, 6], [3, 5], [6, 3], [5, 7]]],
+ ['edgelist',
+ 'G517',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [1, 6], [3, 5], [6, 3], [6, 7]]],
+ ['edgelist',
+ 'G518',
+ 7,
+ [[2, 4], [3, 2], [1, 3], [6, 1], [7, 6], [4, 7], [2, 7], [1, 2], [1, 5]]],
+ ['edgelist',
+ 'G519',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [6, 1], [6, 3], [1, 3], [2, 7]]],
+ ['edgelist',
+ 'G520',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [6, 1], [6, 3], [1, 3], [5, 7]]],
+ ['edgelist',
+ 'G521',
+ 7,
+ [[2, 5], [3, 5], [3, 4], [1, 5], [4, 2], [5, 6], [1, 6], [3, 2], [3, 7]]],
+ ['edgelist',
+ 'G522',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [4, 1], [6, 3], [6, 1], [4, 7]]],
+ ['edgelist',
+ 'G523',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [4, 1], [6, 3], [6, 1], [3, 7]]],
+ ['edgelist',
+ 'G524',
+ 7,
+ [[1, 2], [3, 6], [1, 3], [6, 2], [4, 2], [4, 3], [3, 2], [7, 1], [7, 5]]],
+ ['edgelist',
+ 'G525',
+ 7,
+ [[2, 6], [5, 2], [1, 5], [6, 1], [3, 6], [5, 3], [4, 5], [6, 4], [2, 7]]],
+ ['edgelist',
+ 'G526',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [2, 4], [5, 2], [6, 1], [6, 7]]],
+ ['edgelist',
+ 'G527',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [1, 6], [3, 5], [6, 3], [2, 7]]],
+ ['edgelist',
+ 'G528',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [1, 6], [3, 5], [6, 3], [1, 7]]],
+ ['edgelist',
+ 'G529',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [1, 6], [3, 5], [6, 3], [4, 7]]],
+ ['edgelist',
+ 'G530',
+ 7,
+ [[2, 4], [3, 2], [1, 3], [6, 1], [7, 6], [4, 7], [2, 7], [1, 2], [3, 5]]],
+ ['edgelist',
+ 'G531',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [5, 6], [6, 4], [2, 6], [4, 7]]],
+ ['edgelist',
+ 'G532',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [4, 1], [6, 3], [6, 1], [2, 7]]],
+ ['edgelist',
+ 'G533',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [1, 6], [3, 5], [6, 2], [5, 7]]],
+ ['edgelist',
+ 'G534',
+ 7,
+ [[1, 2], [3, 6], [1, 3], [6, 2], [4, 2], [4, 3], [4, 1], [7, 5], [7, 1]]],
+ ['edgelist',
+ 'G535',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [2, 6], [6, 3], [6, 1], [2, 7]]],
+ ['edgelist',
+ 'G536',
+ 7,
+ [[2, 5], [3, 5], [3, 4], [1, 5], [4, 2], [5, 6], [1, 6], [3, 2], [7, 1]]],
+ ['edgelist',
+ 'G537',
+ 7,
+ [[6, 4], [4, 3], [5, 4], [6, 5], [3, 6], [2, 3], [5, 2], [7, 1], [7, 3]]],
+ ['edgelist',
+ 'G538',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [2, 6], [6, 3], [6, 1], [1, 7]]],
+ ['edgelist',
+ 'G539',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [5, 6], [6, 4], [2, 6], [6, 7]]],
+ ['edgelist',
+ 'G540',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [4, 1], [6, 3], [6, 1], [5, 7]]],
+ ['edgelist',
+ 'G541',
+ 7,
+ [[2, 4], [3, 2], [1, 3], [6, 1], [7, 6], [4, 7], [2, 7], [1, 2], [6, 5]]],
+ ['edgelist',
+ 'G542',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [1, 6], [5, 2], [6, 3], [6, 7]]],
+ ['edgelist',
+ 'G543',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [5, 6], [6, 4], [2, 6], [2, 7]]],
+ ['edgelist',
+ 'G544',
+ 7,
+ [[2, 5], [3, 5], [3, 4], [1, 5], [4, 2], [5, 6], [1, 6], [3, 2], [4, 7]]],
+ ['edgelist',
+ 'G545',
+ 7,
+ [[1, 2], [2, 3], [1, 3], [4, 1], [4, 2], [4, 3], [1, 6], [6, 5], [5, 7]]],
+ ['edgelist',
+ 'G546',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [5, 6], [6, 4], [2, 6], [1, 7]]],
+ ['edgelist',
+ 'G547',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [2, 6], [6, 3], [6, 1], [5, 7]]],
+ ['edgelist',
+ 'G548',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [1, 6], [3, 5], [6, 2], [1, 7]]],
+ ['edgelist',
+ 'G549',
+ 7,
+ [[1, 2], [3, 6], [1, 3], [6, 4], [4, 2], [4, 3], [6, 2], [7, 5], [7, 1]]],
+ ['edgelist',
+ 'G550',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [1, 6], [5, 2], [6, 3], [1, 7]]],
+ ['edgelist',
+ 'G551',
+ 7,
+ [[7, 4], [2, 3], [7, 6], [4, 5], [7, 5], [1, 6], [7, 1], [7, 2], [7, 3]]],
+ ['edgelist',
+ 'G552',
+ 7,
+ [[1, 2], [3, 1], [4, 3], [5, 4], [2, 5], [3, 2], [7, 3], [6, 7], [3, 6]]],
+ ['edgelist',
+ 'G553',
+ 7,
+ [[2, 5], [3, 5], [3, 4], [1, 5], [4, 2], [5, 6], [4, 5], [7, 6], [7, 1]]],
+ ['edgelist',
+ 'G554',
+ 7,
+ [[2, 5], [3, 5], [3, 4], [1, 5], [4, 2], [5, 6], [1, 6], [7, 5], [7, 4]]],
+ ['edgelist',
+ 'G555',
+ 7,
+ [[5, 2], [6, 5], [7, 6], [4, 7], [3, 4], [2, 3], [6, 3], [1, 6], [3, 1]]],
+ ['edgelist',
+ 'G556',
+ 7,
+ [[5, 2], [4, 2], [3, 4], [5, 1], [6, 1], [6, 3], [6, 5], [7, 5], [6, 7]]],
+ ['edgelist',
+ 'G557',
+ 7,
+ [[2, 1], [3, 2], [7, 3], [4, 7], [6, 4], [5, 6], [4, 5], [3, 4], [1, 3]]],
+ ['edgelist',
+ 'G558',
+ 7,
+ [[1, 3], [6, 1], [2, 6], [3, 2], [5, 3], [6, 5], [7, 6], [4, 7], [3, 4]]],
+ ['edgelist',
+ 'G559',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [6, 7], [1, 7], [2, 4], [5, 2]]],
+ ['edgelist',
+ 'G560',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [6, 5], [6, 2], [7, 2], [1, 7]]],
+ ['edgelist',
+ 'G561',
+ 7,
+ [[1, 5], [2, 1], [5, 2], [4, 5], [3, 4], [7, 3], [6, 7], [2, 6], [3, 2]]],
+ ['edgelist',
+ 'G562',
+ 7,
+ [[1, 2], [3, 1], [4, 3], [5, 4], [2, 5], [3, 2], [6, 4], [7, 6], [4, 7]]],
+ ['edgelist',
+ 'G563',
+ 7,
+ [[7, 6], [4, 7], [3, 4], [1, 5], [1, 6], [2, 1], [3, 1], [2, 3], [6, 5]]],
+ ['edgelist',
+ 'G564',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [6, 4], [6, 2], [7, 2], [1, 7]]],
+ ['edgelist',
+ 'G565',
+ 7,
+ [[6, 3], [7, 6], [4, 7], [3, 4], [1, 3], [5, 1], [6, 5], [2, 6], [1, 2]]],
+ ['edgelist',
+ 'G566',
+ 7,
+ [[3, 5], [2, 3], [5, 2], [6, 5], [1, 6], [2, 1], [7, 5], [4, 7], [3, 4]]],
+ ['edgelist',
+ 'G567',
+ 7,
+ [[7, 3], [6, 7], [3, 6], [2, 3], [1, 2], [5, 1], [2, 5], [4, 2], [1, 4]]],
+ ['edgelist',
+ 'G568',
+ 7,
+ [[1, 6], [7, 1], [2, 7], [5, 2], [3, 5], [4, 3], [2, 4], [6, 2], [7, 6]]],
+ ['edgelist',
+ 'G569',
+ 7,
+ [[7, 6], [4, 7], [3, 4], [6, 3], [1, 6], [2, 1], [5, 2], [1, 5], [3, 1]]],
+ ['edgelist',
+ 'G570',
+ 7,
+ [[1, 5], [4, 1], [2, 4], [5, 2], [3, 5], [7, 3], [6, 7], [3, 6], [4, 3]]],
+ ['edgelist',
+ 'G571',
+ 7,
+ [[2, 1], [5, 2], [6, 5], [1, 6], [7, 1], [4, 7], [3, 4], [1, 3], [4, 5]]],
+ ['edgelist',
+ 'G572',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [1, 6], [7, 1], [7, 2], [7, 4]]],
+ ['edgelist',
+ 'G573',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [1, 4], [5, 2], [6, 5], [6, 4], [7, 1], [7, 5]]],
+ ['edgelist',
+ 'G574',
+ 7,
+ [[1, 2], [5, 1], [2, 5], [3, 2], [6, 3], [5, 6], [7, 6], [4, 7], [3, 4]]],
+ ['edgelist',
+ 'G575',
+ 7,
+ [[2, 1], [7, 4], [1, 5], [6, 1], [4, 6], [6, 7], [2, 3], [2, 5], [7, 3]]],
+ ['edgelist',
+ 'G576',
+ 7,
+ [[7, 3], [6, 7], [3, 6], [2, 3], [1, 4], [5, 1], [2, 5], [4, 2], [4, 5]]],
+ ['edgelist',
+ 'G577',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [1, 6], [7, 2], [6, 7], [7, 1]]],
+ ['edgelist',
+ 'G578',
+ 7,
+ [[1, 5], [2, 1], [3, 2], [4, 3], [1, 4], [3, 5], [6, 5], [7, 6], [4, 7]]],
+ ['edgelist',
+ 'G579',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [1, 6], [5, 3], [7, 2], [6, 7]]],
+ ['edgelist',
+ 'G580',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5], [6, 4], [6, 5], [7, 2], [7, 6]]],
+ ['edgelist',
+ 'G581',
+ 7,
+ [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [1, 6], [7, 1], [7, 5], [7, 3]]],
+ ['edgelist',
+ 'G582',
+ 7,
+ [[1, 5], [4, 1], [5, 4], [7, 2], [6, 7], [2, 6], [3, 2], [6, 3], [7, 3]]],
+ ['edgelist',
+ 'G583',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [2, 4],
+ [5, 2],
+ [3, 5],
+ [1, 4],
+ [1, 3]]],
+ ['edgelist',
+ 'G584',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [2, 4],
+ [5, 2],
+ [3, 5],
+ [1, 4],
+ [6, 5]]],
+ ['edgelist',
+ 'G585',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [2, 4],
+ [5, 2],
+ [3, 5],
+ [1, 4],
+ [6, 1]]],
+ ['edgelist',
+ 'G586',
+ 7,
+ [[2, 6],
+ [5, 2],
+ [1, 5],
+ [6, 1],
+ [3, 6],
+ [5, 3],
+ [4, 5],
+ [6, 4],
+ [5, 6],
+ [2, 1]]],
+ ['edgelist',
+ 'G587',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [2, 4],
+ [5, 2],
+ [6, 5],
+ [4, 6],
+ [2, 6]]],
+ ['edgelist',
+ 'G588',
+ 7,
+ [[2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [4, 3],
+ [1, 4],
+ [5, 1],
+ [3, 5]]],
+ ['edgelist',
+ 'G589',
+ 7,
+ [[2, 1],
+ [5, 2],
+ [1, 5],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [2, 4],
+ [6, 2],
+ [3, 4],
+ [2, 3]]],
+ ['edgelist',
+ 'G590',
+ 7,
+ [[2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [4, 3],
+ [1, 4],
+ [3, 5],
+ [6, 3]]],
+ ['edgelist',
+ 'G591',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [3, 6],
+ [1, 6],
+ [3, 1],
+ [6, 2],
+ [5, 2]]],
+ ['edgelist',
+ 'G592',
+ 7,
+ [[5, 4],
+ [1, 5],
+ [2, 1],
+ [3, 2],
+ [4, 3],
+ [1, 6],
+ [6, 4],
+ [1, 4],
+ [2, 6],
+ [6, 3]]],
+ ['edgelist',
+ 'G593',
+ 7,
+ [[1, 2],
+ [3, 5],
+ [1, 3],
+ [5, 6],
+ [4, 2],
+ [4, 3],
+ [5, 2],
+ [6, 2],
+ [6, 3],
+ [6, 4]]],
+ ['edgelist',
+ 'G594',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [6, 1],
+ [6, 2],
+ [6, 3],
+ [6, 4],
+ [6, 5]]],
+ ['edgelist',
+ 'G595',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [3, 5],
+ [1, 3],
+ [2, 4],
+ [6, 2]]],
+ ['edgelist',
+ 'G596',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [4, 5],
+ [1, 3],
+ [4, 1],
+ [3, 5],
+ [6, 3],
+ [2, 6],
+ [5, 2],
+ [4, 6]]],
+ ['edgelist',
+ 'G597',
+ 7,
+ [[2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [5, 2],
+ [6, 4],
+ [3, 6],
+ [2, 1]]],
+ ['edgelist',
+ 'G598',
+ 7,
+ [[1, 2],
+ [3, 6],
+ [1, 3],
+ [4, 1],
+ [4, 2],
+ [4, 3],
+ [3, 2],
+ [6, 2],
+ [5, 3],
+ [3, 7]]],
+ ['edgelist',
+ 'G599',
+ 7,
+ [[1, 2],
+ [3, 6],
+ [1, 3],
+ [4, 1],
+ [4, 2],
+ [4, 3],
+ [3, 2],
+ [6, 2],
+ [5, 3],
+ [2, 7]]],
+ ['edgelist',
+ 'G600',
+ 7,
+ [[1, 2],
+ [3, 6],
+ [1, 3],
+ [4, 1],
+ [4, 2],
+ [4, 3],
+ [3, 2],
+ [6, 2],
+ [1, 5],
+ [2, 7]]],
+ ['edgelist',
+ 'G601',
+ 7,
+ [[1, 2],
+ [3, 6],
+ [1, 3],
+ [4, 1],
+ [4, 2],
+ [4, 3],
+ [3, 2],
+ [6, 2],
+ [1, 5],
+ [1, 7]]],
+ ['edgelist',
+ 'G602',
+ 7,
+ [[1, 2],
+ [3, 6],
+ [1, 3],
+ [4, 1],
+ [4, 2],
+ [4, 3],
+ [3, 2],
+ [6, 2],
+ [1, 5],
+ [4, 7]]],
+ ['edgelist',
+ 'G603',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [1, 4],
+ [5, 1],
+ [5, 2],
+ [5, 3],
+ [5, 4],
+ [5, 6],
+ [5, 7]]],
+ ['edgelist',
+ 'G604',
+ 7,
+ [[3, 1],
+ [5, 2],
+ [2, 3],
+ [6, 5],
+ [3, 6],
+ [4, 2],
+ [6, 4],
+ [4, 3],
+ [5, 4],
+ [4, 7]]],
+ ['edgelist',
+ 'G605',
+ 7,
+ [[1, 2],
+ [3, 6],
+ [1, 3],
+ [4, 1],
+ [4, 2],
+ [4, 3],
+ [3, 2],
+ [6, 2],
+ [5, 6],
+ [3, 7]]],
+ ['edgelist',
+ 'G606',
+ 7,
+ [[3, 1],
+ [5, 2],
+ [2, 3],
+ [6, 5],
+ [3, 6],
+ [4, 2],
+ [6, 4],
+ [4, 3],
+ [5, 4],
+ [3, 7]]],
+ ['edgelist',
+ 'G607',
+ 7,
+ [[3, 4],
+ [2, 3],
+ [5, 2],
+ [6, 5],
+ [3, 6],
+ [1, 3],
+ [5, 1],
+ [1, 2],
+ [6, 1],
+ [7, 6]]],
+ ['edgelist',
+ 'G608',
+ 7,
+ [[1, 2],
+ [3, 6],
+ [1, 3],
+ [4, 1],
+ [4, 2],
+ [4, 3],
+ [3, 2],
+ [6, 2],
+ [1, 5],
+ [6, 7]]],
+ ['edgelist',
+ 'G609',
+ 7,
+ [[3, 1],
+ [5, 2],
+ [2, 3],
+ [6, 5],
+ [3, 6],
+ [4, 2],
+ [6, 4],
+ [4, 3],
+ [5, 4],
+ [5, 7]]],
+ ['edgelist',
+ 'G610',
+ 7,
+ [[1, 2],
+ [3, 6],
+ [1, 3],
+ [4, 1],
+ [4, 2],
+ [4, 3],
+ [3, 2],
+ [6, 2],
+ [5, 6],
+ [7, 6]]],
+ ['edgelist',
+ 'G611',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [2, 4],
+ [5, 2],
+ [3, 5],
+ [1, 4],
+ [6, 7]]],
+ ['edgelist',
+ 'G612',
+ 7,
+ [[2, 6],
+ [5, 2],
+ [1, 5],
+ [6, 1],
+ [3, 6],
+ [5, 3],
+ [4, 5],
+ [6, 4],
+ [5, 6],
+ [7, 6]]],
+ ['edgelist',
+ 'G613',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [3, 6],
+ [1, 6],
+ [3, 1],
+ [4, 1],
+ [1, 7]]],
+ ['edgelist',
+ 'G614',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [3, 6],
+ [1, 6],
+ [3, 1],
+ [4, 1],
+ [3, 7]]],
+ ['edgelist',
+ 'G615',
+ 7,
+ [[2, 6],
+ [5, 2],
+ [1, 5],
+ [6, 1],
+ [3, 6],
+ [5, 3],
+ [4, 5],
+ [6, 4],
+ [5, 6],
+ [2, 7]]],
+ ['edgelist',
+ 'G616',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [3, 6],
+ [1, 6],
+ [3, 1],
+ [4, 1],
+ [4, 7]]],
+ ['edgelist',
+ 'G617',
+ 7,
+ [[2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [1, 5],
+ [2, 1],
+ [5, 2],
+ [1, 7]]],
+ ['edgelist',
+ 'G618',
+ 7,
+ [[2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [5, 2],
+ [2, 1],
+ [6, 2],
+ [2, 7]]],
+ ['edgelist',
+ 'G619',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [1, 3],
+ [4, 1],
+ [4, 2],
+ [4, 3],
+ [6, 5],
+ [5, 1],
+ [6, 1],
+ [1, 7]]],
+ ['edgelist',
+ 'G620',
+ 7,
+ [[5, 4],
+ [1, 5],
+ [2, 1],
+ [3, 2],
+ [4, 3],
+ [1, 6],
+ [6, 4],
+ [1, 4],
+ [2, 6],
+ [4, 7]]],
+ ['edgelist',
+ 'G621',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [3, 6],
+ [1, 6],
+ [3, 1],
+ [4, 1],
+ [6, 7]]],
+ ['edgelist',
+ 'G622',
+ 7,
+ [[2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [4, 3],
+ [1, 4],
+ [5, 1],
+ [4, 7]]],
+ ['edgelist',
+ 'G623',
+ 7,
+ [[2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [5, 2],
+ [2, 1],
+ [6, 2],
+ [1, 7]]],
+ ['edgelist',
+ 'G624',
+ 7,
+ [[2, 6],
+ [5, 2],
+ [1, 5],
+ [6, 1],
+ [3, 6],
+ [5, 3],
+ [3, 1],
+ [6, 4],
+ [3, 4],
+ [7, 3]]],
+ ['edgelist',
+ 'G625',
+ 7,
+ [[1, 2],
+ [3, 6],
+ [1, 3],
+ [4, 1],
+ [4, 2],
+ [4, 3],
+ [3, 2],
+ [6, 2],
+ [7, 5],
+ [7, 3]]],
+ ['edgelist',
+ 'G626',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [3, 6],
+ [1, 6],
+ [3, 1],
+ [6, 2],
+ [3, 7]]],
+ ['edgelist',
+ 'G627',
+ 7,
+ [[2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [5, 2],
+ [2, 1],
+ [6, 2],
+ [6, 7]]],
+ ['edgelist',
+ 'G628',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [3, 6],
+ [1, 6],
+ [3, 1],
+ [4, 1],
+ [5, 7]]],
+ ['edgelist',
+ 'G629',
+ 7,
+ [[2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [4, 3],
+ [1, 4],
+ [3, 5],
+ [4, 7]]],
+ ['edgelist',
+ 'G630',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [1, 3],
+ [4, 1],
+ [4, 2],
+ [4, 3],
+ [6, 5],
+ [5, 1],
+ [6, 1],
+ [3, 7]]],
+ ['edgelist',
+ 'G631',
+ 7,
+ [[2, 6],
+ [5, 2],
+ [1, 5],
+ [6, 1],
+ [3, 6],
+ [5, 3],
+ [4, 5],
+ [6, 4],
+ [3, 1],
+ [6, 7]]],
+ ['edgelist',
+ 'G632',
+ 7,
+ [[2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [4, 3],
+ [1, 4],
+ [5, 1],
+ [3, 7]]],
+ ['edgelist',
+ 'G633',
+ 7,
+ [[2, 6],
+ [5, 2],
+ [1, 5],
+ [6, 1],
+ [3, 6],
+ [5, 3],
+ [3, 1],
+ [6, 4],
+ [3, 4],
+ [1, 7]]],
+ ['edgelist',
+ 'G634',
+ 7,
+ [[5, 4],
+ [1, 5],
+ [2, 1],
+ [3, 2],
+ [4, 3],
+ [1, 6],
+ [6, 4],
+ [1, 4],
+ [2, 6],
+ [2, 7]]],
+ ['edgelist',
+ 'G635',
+ 7,
+ [[2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [4, 3],
+ [1, 4],
+ [3, 5],
+ [5, 7]]],
+ ['edgelist',
+ 'G636',
+ 7,
+ [[1, 2],
+ [3, 6],
+ [1, 3],
+ [4, 1],
+ [4, 2],
+ [4, 3],
+ [3, 2],
+ [6, 2],
+ [7, 5],
+ [7, 1]]],
+ ['edgelist',
+ 'G637',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [3, 6],
+ [1, 6],
+ [3, 1],
+ [6, 2],
+ [6, 7]]],
+ ['edgelist',
+ 'G638',
+ 7,
+ [[2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [1, 5],
+ [2, 1],
+ [5, 2],
+ [6, 7]]],
+ ['edgelist',
+ 'G639',
+ 7,
+ [[2, 6],
+ [5, 2],
+ [1, 5],
+ [6, 1],
+ [3, 6],
+ [5, 3],
+ [4, 5],
+ [6, 4],
+ [3, 1],
+ [1, 7]]],
+ ['edgelist',
+ 'G640',
+ 7,
+ [[2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [5, 2],
+ [2, 1],
+ [6, 2],
+ [3, 7]]],
+ ['edgelist',
+ 'G641',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [1, 4],
+ [5, 3],
+ [6, 3],
+ [3, 7]]],
+ ['edgelist',
+ 'G642',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [6, 1],
+ [6, 5],
+ [6, 3],
+ [6, 4],
+ [6, 7]]],
+ ['edgelist',
+ 'G643',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [4, 1],
+ [6, 3],
+ [6, 1],
+ [6, 2],
+ [1, 7]]],
+ ['edgelist',
+ 'G644',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [1, 3],
+ [4, 1],
+ [4, 2],
+ [4, 3],
+ [6, 5],
+ [5, 1],
+ [6, 1],
+ [6, 7]]],
+ ['edgelist',
+ 'G645',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [1, 4],
+ [5, 1],
+ [5, 2],
+ [5, 3],
+ [5, 4],
+ [7, 6],
+ [7, 5]]],
+ ['edgelist',
+ 'G646',
+ 7,
+ [[2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [4, 3],
+ [1, 4],
+ [5, 1],
+ [2, 7]]],
+ ['edgelist',
+ 'G647',
+ 7,
+ [[5, 4],
+ [1, 5],
+ [2, 1],
+ [3, 2],
+ [4, 3],
+ [1, 6],
+ [6, 4],
+ [1, 4],
+ [2, 6],
+ [5, 7]]],
+ ['edgelist',
+ 'G648',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [6, 1],
+ [6, 5],
+ [6, 3],
+ [6, 4],
+ [5, 7]]],
+ ['edgelist',
+ 'G649',
+ 7,
+ [[5, 4],
+ [1, 5],
+ [2, 1],
+ [3, 2],
+ [4, 3],
+ [1, 6],
+ [6, 4],
+ [1, 4],
+ [2, 6],
+ [3, 7]]],
+ ['edgelist',
+ 'G650',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [6, 1],
+ [6, 5],
+ [6, 3],
+ [6, 4],
+ [1, 7]]],
+ ['edgelist',
+ 'G651',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [1, 4],
+ [5, 3],
+ [6, 3],
+ [6, 7]]],
+ ['edgelist',
+ 'G652',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [4, 1],
+ [6, 3],
+ [6, 1],
+ [6, 2],
+ [2, 7]]],
+ ['edgelist',
+ 'G653',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [3, 6],
+ [1, 6],
+ [3, 1],
+ [6, 2],
+ [4, 7]]],
+ ['edgelist',
+ 'G654',
+ 7,
+ [[5, 4],
+ [5, 2],
+ [2, 3],
+ [6, 5],
+ [3, 6],
+ [4, 2],
+ [6, 4],
+ [4, 3],
+ [7, 1],
+ [7, 3]]],
+ ['edgelist',
+ 'G655',
+ 7,
+ [[2, 1],
+ [3, 2],
+ [4, 3],
+ [5, 4],
+ [6, 5],
+ [2, 6],
+ [7, 2],
+ [5, 7],
+ [3, 7],
+ [6, 3]]],
+ ['edgelist',
+ 'G656',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [1, 4],
+ [5, 3],
+ [6, 3],
+ [1, 7]]],
+ ['edgelist',
+ 'G657',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [4, 1],
+ [6, 3],
+ [6, 1],
+ [6, 2],
+ [4, 7]]],
+ ['edgelist',
+ 'G658',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [4, 1],
+ [6, 3],
+ [6, 1],
+ [6, 2],
+ [3, 7]]],
+ ['edgelist',
+ 'G659',
+ 7,
+ [[1, 2],
+ [3, 6],
+ [1, 3],
+ [4, 1],
+ [4, 2],
+ [4, 3],
+ [3, 2],
+ [6, 2],
+ [7, 6],
+ [7, 5]]],
+ ['edgelist',
+ 'G660',
+ 7,
+ [[2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [4, 3],
+ [1, 4],
+ [3, 5],
+ [2, 7]]],
+ ['edgelist',
+ 'G661',
+ 7,
+ [[2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [4, 3],
+ [1, 4],
+ [3, 5],
+ [6, 7]]],
+ ['edgelist',
+ 'G662',
+ 7,
+ [[2, 6],
+ [5, 2],
+ [1, 5],
+ [6, 1],
+ [3, 6],
+ [5, 3],
+ [4, 5],
+ [6, 4],
+ [3, 1],
+ [2, 7]]],
+ ['edgelist',
+ 'G663',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [6, 1],
+ [6, 5],
+ [6, 3],
+ [6, 4],
+ [2, 7]]],
+ ['edgelist',
+ 'G664',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [1, 4],
+ [5, 3],
+ [6, 3],
+ [2, 7]]],
+ ['edgelist',
+ 'G665',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [4, 1],
+ [6, 3],
+ [6, 1],
+ [6, 2],
+ [5, 7]]],
+ ['edgelist',
+ 'G666',
+ 7,
+ [[3, 4],
+ [1, 3],
+ [4, 1],
+ [5, 4],
+ [2, 5],
+ [6, 2],
+ [5, 6],
+ [2, 1],
+ [6, 3],
+ [5, 7]]],
+ ['edgelist',
+ 'G667',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [1, 4],
+ [6, 3],
+ [5, 2],
+ [1, 7]]],
+ ['edgelist',
+ 'G668',
+ 7,
+ [[5, 1],
+ [2, 5],
+ [4, 2],
+ [3, 4],
+ [2, 3],
+ [7, 2],
+ [1, 7],
+ [6, 1],
+ [2, 6],
+ [1, 2]]],
+ ['edgelist',
+ 'G669',
+ 7,
+ [[4, 3],
+ [7, 4],
+ [6, 7],
+ [1, 6],
+ [3, 1],
+ [6, 3],
+ [2, 6],
+ [3, 2],
+ [5, 3],
+ [6, 5]]],
+ ['edgelist',
+ 'G670',
+ 7,
+ [[3, 1],
+ [2, 3],
+ [4, 2],
+ [1, 4],
+ [7, 1],
+ [2, 7],
+ [6, 2],
+ [1, 6],
+ [5, 1],
+ [2, 5]]],
+ ['edgelist',
+ 'G671',
+ 7,
+ [[7, 5],
+ [2, 3],
+ [7, 6],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [7, 1],
+ [7, 2],
+ [7, 3],
+ [7, 4]]],
+ ['edgelist',
+ 'G672',
+ 7,
+ [[1, 2],
+ [7, 6],
+ [3, 4],
+ [4, 5],
+ [7, 5],
+ [1, 6],
+ [7, 1],
+ [7, 2],
+ [7, 3],
+ [7, 4]]],
+ ['edgelist',
+ 'G673',
+ 7,
+ [[1, 2],
+ [3, 1],
+ [4, 3],
+ [5, 4],
+ [2, 5],
+ [3, 2],
+ [1, 6],
+ [6, 3],
+ [7, 2],
+ [3, 7]]],
+ ['edgelist',
+ 'G674',
+ 7,
+ [[1, 2],
+ [3, 1],
+ [4, 3],
+ [5, 4],
+ [2, 5],
+ [3, 2],
+ [6, 1],
+ [6, 3],
+ [7, 3],
+ [1, 7]]],
+ ['edgelist',
+ 'G675',
+ 7,
+ [[7, 5],
+ [2, 3],
+ [7, 6],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [7, 4],
+ [7, 2],
+ [7, 3],
+ [1, 5]]],
+ ['edgelist',
+ 'G676',
+ 7,
+ [[2, 1],
+ [3, 2],
+ [1, 3],
+ [4, 3],
+ [5, 4],
+ [3, 5],
+ [6, 3],
+ [5, 6],
+ [7, 5],
+ [2, 7]]],
+ ['edgelist',
+ 'G677',
+ 7,
+ [[1, 2],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 4],
+ [2, 5],
+ [3, 6],
+ [3, 7]]],
+ ['edgelist',
+ 'G678',
+ 7,
+ [[1, 3],
+ [6, 1],
+ [5, 6],
+ [3, 5],
+ [2, 3],
+ [6, 2],
+ [7, 6],
+ [4, 7],
+ [3, 4],
+ [3, 7]]],
+ ['edgelist',
+ 'G679',
+ 7,
+ [[1, 2],
+ [3, 1],
+ [4, 3],
+ [5, 4],
+ [2, 5],
+ [3, 2],
+ [6, 2],
+ [1, 6],
+ [7, 1],
+ [3, 7]]],
+ ['edgelist',
+ 'G680',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [7, 3],
+ [7, 5],
+ [1, 3],
+ [5, 1]]],
+ ['edgelist',
+ 'G681',
+ 7,
+ [[1, 5],
+ [4, 1],
+ [3, 4],
+ [6, 3],
+ [7, 6],
+ [3, 7],
+ [5, 3],
+ [2, 5],
+ [4, 2],
+ [5, 4]]],
+ ['edgelist',
+ 'G682',
+ 7,
+ [[2, 7],
+ [3, 2],
+ [1, 3],
+ [2, 1],
+ [5, 2],
+ [4, 5],
+ [3, 4],
+ [6, 7],
+ [5, 6],
+ [4, 2]]],
+ ['edgelist',
+ 'G683',
+ 7,
+ [[7, 6],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [7, 1],
+ [7, 2],
+ [7, 3],
+ [7, 5]]],
+ ['edgelist',
+ 'G684',
+ 7,
+ [[1, 2],
+ [7, 6],
+ [3, 4],
+ [4, 5],
+ [7, 5],
+ [1, 6],
+ [7, 1],
+ [7, 2],
+ [7, 3],
+ [6, 4]]],
+ ['edgelist',
+ 'G685',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [6, 5],
+ [1, 5],
+ [6, 1],
+ [6, 4],
+ [6, 3],
+ [7, 6],
+ [7, 2]]],
+ ['edgelist',
+ 'G686',
+ 7,
+ [[1, 4],
+ [3, 1],
+ [2, 3],
+ [4, 2],
+ [5, 4],
+ [3, 5],
+ [1, 5],
+ [7, 1],
+ [6, 7],
+ [1, 6]]],
+ ['edgelist',
+ 'G687',
+ 7,
+ [[1, 4],
+ [3, 1],
+ [2, 3],
+ [4, 2],
+ [5, 4],
+ [1, 6],
+ [1, 5],
+ [7, 1],
+ [6, 7],
+ [2, 5]]],
+ ['edgelist',
+ 'G688',
+ 7,
+ [[1, 2],
+ [7, 6],
+ [3, 4],
+ [4, 5],
+ [7, 5],
+ [1, 6],
+ [7, 1],
+ [7, 2],
+ [7, 3],
+ [5, 3]]],
+ ['edgelist',
+ 'G689',
+ 7,
+ [[2, 3],
+ [6, 2],
+ [7, 6],
+ [3, 7],
+ [2, 7],
+ [6, 3],
+ [5, 2],
+ [1, 5],
+ [4, 1],
+ [2, 4]]],
+ ['edgelist',
+ 'G690',
+ 7,
+ [[5, 3],
+ [7, 3],
+ [6, 4],
+ [5, 2],
+ [3, 1],
+ [7, 4],
+ [6, 3],
+ [1, 2],
+ [1, 5],
+ [7, 1]]],
+ ['edgelist',
+ 'G691',
+ 7,
+ [[5, 3],
+ [4, 7],
+ [6, 4],
+ [6, 2],
+ [3, 1],
+ [7, 1],
+ [6, 3],
+ [2, 5],
+ [1, 5],
+ [6, 5]]],
+ ['edgelist',
+ 'G692',
+ 7,
+ [[5, 1],
+ [6, 5],
+ [5, 2],
+ [3, 2],
+ [4, 3],
+ [1, 4],
+ [4, 5],
+ [6, 4],
+ [7, 2],
+ [7, 6]]],
+ ['edgelist',
+ 'G693',
+ 7,
+ [[1, 5],
+ [2, 1],
+ [3, 2],
+ [5, 3],
+ [4, 5],
+ [6, 4],
+ [5, 6],
+ [6, 3],
+ [7, 4],
+ [3, 7]]],
+ ['edgelist',
+ 'G694',
+ 7,
+ [[2, 7],
+ [3, 2],
+ [1, 3],
+ [2, 1],
+ [5, 2],
+ [4, 5],
+ [3, 4],
+ [6, 7],
+ [5, 6],
+ [5, 7]]],
+ ['edgelist',
+ 'G695',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [7, 4],
+ [7, 2],
+ [7, 6],
+ [6, 2]]],
+ ['edgelist',
+ 'G696',
+ 7,
+ [[2, 1],
+ [5, 2],
+ [1, 5],
+ [3, 1],
+ [4, 3],
+ [7, 4],
+ [6, 7],
+ [1, 6],
+ [6, 3],
+ [7, 3]]],
+ ['edgelist',
+ 'G697',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [6, 4],
+ [6, 2],
+ [6, 5],
+ [7, 2],
+ [6, 7]]],
+ ['edgelist',
+ 'G698',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [3, 5],
+ [5, 2],
+ [7, 2],
+ [7, 6]]],
+ ['edgelist',
+ 'G699',
+ 7,
+ [[1, 2],
+ [3, 1],
+ [4, 3],
+ [5, 4],
+ [2, 5],
+ [3, 2],
+ [6, 4],
+ [3, 6],
+ [7, 2],
+ [5, 7]]],
+ ['edgelist',
+ 'G700',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [6, 3],
+ [6, 5],
+ [7, 6],
+ [7, 1],
+ [1, 3]]],
+ ['edgelist',
+ 'G701',
+ 7,
+ [[3, 1],
+ [6, 3],
+ [2, 6],
+ [1, 2],
+ [4, 1],
+ [6, 4],
+ [7, 6],
+ [5, 7],
+ [1, 5],
+ [5, 4]]],
+ ['edgelist',
+ 'G702',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [5, 3],
+ [2, 6],
+ [7, 3],
+ [7, 6]]],
+ ['edgelist',
+ 'G703',
+ 7,
+ [[6, 1],
+ [7, 6],
+ [3, 7],
+ [4, 3],
+ [1, 4],
+ [5, 1],
+ [3, 5],
+ [5, 4],
+ [2, 5],
+ [4, 2]]],
+ ['edgelist',
+ 'G704',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [7, 4],
+ [6, 7],
+ [4, 6],
+ [5, 6],
+ [5, 7]]],
+ ['edgelist',
+ 'G705',
+ 7,
+ [[6, 3],
+ [3, 2],
+ [4, 3],
+ [5, 4],
+ [2, 5],
+ [6, 1],
+ [7, 2],
+ [7, 1],
+ [2, 6],
+ [3, 7]]],
+ ['edgelist',
+ 'G706',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [7, 3],
+ [7, 5],
+ [5, 3],
+ [6, 2]]],
+ ['edgelist',
+ 'G707',
+ 7,
+ [[5, 3],
+ [3, 4],
+ [5, 2],
+ [1, 2],
+ [4, 1],
+ [7, 5],
+ [1, 7],
+ [6, 1],
+ [5, 6],
+ [2, 6]]],
+ ['edgelist',
+ 'G708',
+ 7,
+ [[3, 2],
+ [6, 3],
+ [4, 6],
+ [1, 4],
+ [5, 1],
+ [7, 5],
+ [4, 7],
+ [2, 4],
+ [5, 2],
+ [6, 5]]],
+ ['edgelist',
+ 'G709',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [6, 1],
+ [6, 2],
+ [6, 3],
+ [7, 6],
+ [7, 4]]],
+ ['edgelist',
+ 'G710',
+ 7,
+ [[1, 2],
+ [5, 1],
+ [2, 5],
+ [3, 2],
+ [6, 3],
+ [5, 6],
+ [7, 6],
+ [4, 7],
+ [3, 4],
+ [6, 4]]],
+ ['edgelist',
+ 'G711',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [7, 6],
+ [7, 2],
+ [7, 3],
+ [5, 3]]],
+ ['edgelist',
+ 'G712',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [6, 1],
+ [6, 4],
+ [6, 3],
+ [7, 6],
+ [7, 5]]],
+ ['edgelist',
+ 'G713',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [7, 1],
+ [7, 4],
+ [7, 3],
+ [5, 1]]],
+ ['edgelist',
+ 'G714',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [7, 1],
+ [7, 5],
+ [7, 6],
+ [7, 4]]],
+ ['edgelist',
+ 'G715',
+ 7,
+ [[1, 6],
+ [7, 1],
+ [2, 7],
+ [1, 2],
+ [2, 6],
+ [3, 2],
+ [4, 3],
+ [5, 4],
+ [7, 5],
+ [5, 6]]],
+ ['edgelist',
+ 'G716',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [7, 1],
+ [7, 6],
+ [7, 5],
+ [3, 1]]],
+ ['edgelist',
+ 'G717',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [7, 1],
+ [7, 2],
+ [7, 6],
+ [7, 4]]],
+ ['edgelist',
+ 'G718',
+ 7,
+ [[3, 2],
+ [3, 1],
+ [4, 3],
+ [5, 4],
+ [2, 5],
+ [6, 2],
+ [6, 1],
+ [7, 1],
+ [2, 7],
+ [7, 6]]],
+ ['edgelist',
+ 'G719',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [6, 4],
+ [6, 2],
+ [7, 2],
+ [7, 5],
+ [7, 6]]],
+ ['edgelist',
+ 'G720',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [6, 3],
+ [5, 2],
+ [7, 1],
+ [6, 7]]],
+ ['edgelist',
+ 'G721',
+ 7,
+ [[4, 2],
+ [1, 4],
+ [6, 1],
+ [2, 6],
+ [3, 2],
+ [7, 3],
+ [1, 7],
+ [1, 5],
+ [5, 3],
+ [5, 7]]],
+ ['edgelist',
+ 'G722',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [7, 5],
+ [7, 2],
+ [7, 3],
+ [7, 6]]],
+ ['edgelist',
+ 'G723',
+ 7,
+ [[1, 4],
+ [3, 1],
+ [2, 3],
+ [4, 2],
+ [5, 4],
+ [3, 5],
+ [6, 5],
+ [6, 1],
+ [7, 5],
+ [7, 2]]],
+ ['edgelist',
+ 'G724',
+ 7,
+ [[1, 2],
+ [7, 6],
+ [3, 4],
+ [4, 5],
+ [7, 5],
+ [1, 6],
+ [7, 3],
+ [7, 2],
+ [5, 3],
+ [6, 2]]],
+ ['edgelist',
+ 'G725',
+ 7,
+ [[6, 3],
+ [7, 6],
+ [3, 7],
+ [5, 3],
+ [1, 5],
+ [4, 1],
+ [3, 4],
+ [2, 1],
+ [2, 4],
+ [5, 2]]],
+ ['edgelist',
+ 'G726',
+ 7,
+ [[4, 5],
+ [2, 4],
+ [5, 2],
+ [1, 5],
+ [4, 1],
+ [2, 1],
+ [3, 2],
+ [6, 3],
+ [7, 6],
+ [3, 7]]],
+ ['edgelist',
+ 'G727',
+ 7,
+ [[6, 7],
+ [3, 6],
+ [7, 3],
+ [4, 7],
+ [1, 4],
+ [5, 1],
+ [6, 5],
+ [2, 5],
+ [4, 2],
+ [3, 2]]],
+ ['edgelist',
+ 'G728',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [7, 1],
+ [7, 2],
+ [7, 6],
+ [5, 3]]],
+ ['edgelist',
+ 'G729',
+ 7,
+ [[2, 1],
+ [3, 2],
+ [4, 3],
+ [1, 4],
+ [6, 1],
+ [2, 6],
+ [5, 6],
+ [7, 5],
+ [4, 7],
+ [3, 7]]],
+ ['edgelist',
+ 'G730',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [5, 2],
+ [3, 6],
+ [7, 1],
+ [4, 7]]],
+ ['edgelist',
+ 'G731',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [2, 4],
+ [5, 2],
+ [3, 5],
+ [1, 4],
+ [1, 3],
+ [2, 6]]],
+ ['edgelist',
+ 'G732',
+ 7,
+ [[1, 2],
+ [3, 5],
+ [1, 3],
+ [3, 2],
+ [4, 2],
+ [4, 3],
+ [5, 2],
+ [6, 2],
+ [6, 3],
+ [6, 4],
+ [1, 4]]],
+ ['edgelist',
+ 'G733',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [2, 4],
+ [5, 2],
+ [3, 5],
+ [1, 4],
+ [6, 1],
+ [5, 6]]],
+ ['edgelist',
+ 'G734',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [5, 6],
+ [1, 5],
+ [2, 4],
+ [5, 2],
+ [3, 5],
+ [1, 4],
+ [6, 4],
+ [1, 3]]],
+ ['edgelist',
+ 'G735',
+ 7,
+ [[2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [6, 4],
+ [3, 6],
+ [2, 1],
+ [5, 2],
+ [6, 2]]],
+ ['edgelist',
+ 'G736',
+ 7,
+ [[2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G737',
+ 7,
+ [[4, 7],
+ [2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [7, 6],
+ [3, 7],
+ [6, 2],
+ [1, 4],
+ [2, 7],
+ [1, 2]]],
+ ['edgelist',
+ 'G738',
+ 7,
+ [[2, 6],
+ [5, 2],
+ [1, 5],
+ [6, 1],
+ [3, 6],
+ [5, 3],
+ [4, 5],
+ [6, 4],
+ [1, 2],
+ [3, 1],
+ [4, 3]]],
+ ['edgelist',
+ 'G739',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [3, 5],
+ [6, 3],
+ [2, 6],
+ [2, 5],
+ [1, 4]]],
+ ['edgelist',
+ 'G740',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [2, 4],
+ [5, 2],
+ [3, 5],
+ [1, 4],
+ [6, 5],
+ [7, 5]]],
+ ['edgelist',
+ 'G741',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [2, 4],
+ [5, 2],
+ [3, 5],
+ [1, 4],
+ [6, 4],
+ [7, 5]]],
+ ['edgelist',
+ 'G742',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [2, 4],
+ [5, 2],
+ [3, 5],
+ [1, 4],
+ [6, 3],
+ [5, 7]]],
+ ['edgelist',
+ 'G743',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [2, 4],
+ [5, 2],
+ [3, 5],
+ [1, 4],
+ [3, 6],
+ [7, 3]]],
+ ['edgelist',
+ 'G744',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [2, 4],
+ [5, 2],
+ [3, 5],
+ [1, 4],
+ [6, 3],
+ [1, 7]]],
+ ['edgelist',
+ 'G745',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [2, 3],
+ [2, 4],
+ [2, 5],
+ [3, 4],
+ [3, 5],
+ [4, 5],
+ [7, 6]]],
+ ['edgelist',
+ 'G746',
+ 7,
+ [[2, 6],
+ [5, 2],
+ [1, 5],
+ [6, 1],
+ [3, 6],
+ [5, 3],
+ [4, 5],
+ [6, 4],
+ [5, 6],
+ [2, 1],
+ [5, 7]]],
+ ['edgelist',
+ 'G747',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [2, 4],
+ [5, 2],
+ [6, 5],
+ [4, 6],
+ [2, 6],
+ [2, 7]]],
+ ['edgelist',
+ 'G748',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [2, 4],
+ [5, 2],
+ [6, 5],
+ [4, 6],
+ [2, 6],
+ [7, 5]]],
+ ['edgelist',
+ 'G749',
+ 7,
+ [[2, 6],
+ [5, 2],
+ [1, 5],
+ [6, 1],
+ [3, 6],
+ [5, 3],
+ [4, 5],
+ [6, 4],
+ [5, 6],
+ [2, 1],
+ [2, 7]]],
+ ['edgelist',
+ 'G750',
+ 7,
+ [[2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [4, 3],
+ [1, 4],
+ [5, 1],
+ [3, 5],
+ [3, 7]]],
+ ['edgelist',
+ 'G751',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [2, 4],
+ [5, 2],
+ [6, 5],
+ [4, 6],
+ [2, 6],
+ [6, 7]]],
+ ['edgelist',
+ 'G752',
+ 7,
+ [[2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [4, 3],
+ [1, 4],
+ [3, 5],
+ [6, 3],
+ [3, 7]]],
+ ['edgelist',
+ 'G753',
+ 7,
+ [[2, 1],
+ [5, 2],
+ [1, 5],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [2, 4],
+ [6, 2],
+ [3, 4],
+ [2, 3],
+ [7, 2]]],
+ ['edgelist',
+ 'G754',
+ 7,
+ [[2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [4, 3],
+ [1, 4],
+ [3, 5],
+ [6, 3],
+ [4, 7]]],
+ ['edgelist',
+ 'G755',
+ 7,
+ [[2, 1],
+ [5, 2],
+ [1, 5],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [2, 4],
+ [6, 2],
+ [3, 4],
+ [2, 3],
+ [7, 5]]],
+ ['edgelist',
+ 'G756',
+ 7,
+ [[1, 5],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 6],
+ [3, 7],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G757',
+ 7,
+ [[5, 4],
+ [1, 5],
+ [2, 1],
+ [3, 2],
+ [4, 3],
+ [1, 6],
+ [6, 4],
+ [1, 4],
+ [2, 6],
+ [6, 3],
+ [1, 7]]],
+ ['edgelist',
+ 'G758',
+ 7,
+ [[2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [4, 3],
+ [1, 4],
+ [3, 5],
+ [6, 3],
+ [1, 7]]],
+ ['edgelist',
+ 'G759',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [3, 6],
+ [1, 6],
+ [3, 1],
+ [6, 2],
+ [5, 2],
+ [2, 7]]],
+ ['edgelist',
+ 'G760',
+ 7,
+ [[2, 1],
+ [5, 2],
+ [1, 5],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [2, 4],
+ [6, 2],
+ [3, 4],
+ [2, 3],
+ [6, 7]]],
+ ['edgelist',
+ 'G761',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [2, 4],
+ [5, 2],
+ [6, 5],
+ [4, 6],
+ [2, 6],
+ [1, 7]]],
+ ['edgelist',
+ 'G762',
+ 7,
+ [[1, 2],
+ [3, 5],
+ [1, 3],
+ [5, 6],
+ [4, 2],
+ [4, 3],
+ [5, 2],
+ [6, 2],
+ [6, 3],
+ [6, 4],
+ [3, 7]]],
+ ['edgelist',
+ 'G763',
+ 7,
+ [[2, 1],
+ [5, 2],
+ [1, 5],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [2, 4],
+ [6, 2],
+ [3, 4],
+ [2, 3],
+ [4, 7]]],
+ ['edgelist',
+ 'G764',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [3, 6],
+ [1, 6],
+ [3, 1],
+ [6, 2],
+ [5, 2],
+ [3, 7]]],
+ ['edgelist',
+ 'G765',
+ 7,
+ [[2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [4, 3],
+ [1, 4],
+ [3, 5],
+ [6, 3],
+ [6, 7]]],
+ ['edgelist',
+ 'G766',
+ 7,
+ [[5, 4],
+ [1, 5],
+ [2, 1],
+ [3, 2],
+ [4, 3],
+ [1, 6],
+ [6, 4],
+ [1, 4],
+ [2, 6],
+ [6, 3],
+ [6, 7]]],
+ ['edgelist',
+ 'G767',
+ 7,
+ [[1, 2],
+ [3, 5],
+ [1, 3],
+ [5, 6],
+ [4, 2],
+ [4, 3],
+ [5, 2],
+ [6, 2],
+ [6, 3],
+ [6, 4],
+ [6, 7]]],
+ ['edgelist',
+ 'G768',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [2, 4],
+ [5, 2],
+ [3, 5],
+ [1, 4],
+ [6, 5],
+ [6, 7]]],
+ ['edgelist',
+ 'G769',
+ 7,
+ [[5, 4],
+ [1, 5],
+ [2, 1],
+ [3, 2],
+ [4, 3],
+ [1, 6],
+ [6, 4],
+ [1, 4],
+ [2, 6],
+ [6, 3],
+ [2, 7]]],
+ ['edgelist',
+ 'G770',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [3, 6],
+ [1, 6],
+ [3, 1],
+ [6, 2],
+ [5, 2],
+ [5, 7]]],
+ ['edgelist',
+ 'G771',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [3, 6],
+ [1, 6],
+ [3, 1],
+ [6, 2],
+ [5, 2],
+ [6, 7]]],
+ ['edgelist',
+ 'G772',
+ 7,
+ [[1, 2],
+ [3, 5],
+ [1, 3],
+ [5, 6],
+ [4, 2],
+ [4, 3],
+ [5, 2],
+ [6, 2],
+ [6, 3],
+ [6, 4],
+ [5, 7]]],
+ ['edgelist',
+ 'G773',
+ 7,
+ [[2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [4, 3],
+ [1, 4],
+ [5, 1],
+ [3, 5],
+ [2, 7]]],
+ ['edgelist',
+ 'G774',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [2, 4],
+ [5, 2],
+ [3, 5],
+ [1, 4],
+ [7, 6],
+ [7, 3]]],
+ ['edgelist',
+ 'G775',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [6, 1],
+ [6, 2],
+ [6, 3],
+ [6, 4],
+ [6, 5],
+ [6, 7]]],
+ ['edgelist',
+ 'G776',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [3, 5],
+ [1, 3],
+ [2, 4],
+ [6, 2],
+ [2, 7]]],
+ ['edgelist',
+ 'G777',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [6, 1],
+ [6, 2],
+ [6, 3],
+ [6, 4],
+ [6, 5],
+ [2, 7]]],
+ ['edgelist',
+ 'G778',
+ 7,
+ [[2, 1],
+ [5, 2],
+ [1, 5],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [2, 4],
+ [6, 2],
+ [3, 4],
+ [2, 3],
+ [3, 7]]],
+ ['edgelist',
+ 'G779',
+ 7,
+ [[2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [4, 3],
+ [1, 4],
+ [3, 5],
+ [6, 3],
+ [2, 7]]],
+ ['edgelist',
+ 'G780',
+ 7,
+ [[1, 7],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G781',
+ 7,
+ [[2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [5, 2],
+ [6, 4],
+ [3, 6],
+ [2, 1],
+ [2, 7]]],
+ ['edgelist',
+ 'G782',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [3, 5],
+ [1, 3],
+ [2, 4],
+ [6, 2],
+ [6, 7]]],
+ ['edgelist',
+ 'G783',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [3, 5],
+ [1, 3],
+ [2, 4],
+ [6, 2],
+ [7, 4]]],
+ ['edgelist',
+ 'G784',
+ 7,
+ [[5, 4],
+ [1, 5],
+ [2, 1],
+ [3, 2],
+ [4, 3],
+ [1, 6],
+ [6, 4],
+ [1, 4],
+ [2, 6],
+ [6, 3],
+ [5, 7]]],
+ ['edgelist',
+ 'G785',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [3, 6],
+ [1, 6],
+ [3, 1],
+ [6, 2],
+ [5, 2],
+ [7, 4]]],
+ ['edgelist',
+ 'G786',
+ 7,
+ [[4, 5],
+ [2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [3, 5],
+ [6, 2],
+ [4, 3],
+ [1, 4],
+ [2, 7]]],
+ ['edgelist',
+ 'G787',
+ 7,
+ [[2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [5, 2],
+ [6, 4],
+ [3, 6],
+ [2, 1],
+ [7, 3]]],
+ ['edgelist',
+ 'G788',
+ 7,
+ [[1, 2],
+ [3, 5],
+ [1, 3],
+ [5, 6],
+ [4, 2],
+ [4, 3],
+ [5, 2],
+ [6, 2],
+ [6, 3],
+ [6, 4],
+ [1, 7]]],
+ ['edgelist',
+ 'G789',
+ 7,
+ [[1, 7],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6]]],
+ ['edgelist',
+ 'G790',
+ 7,
+ [[7, 6],
+ [1, 7],
+ [6, 1],
+ [2, 6],
+ [7, 2],
+ [3, 7],
+ [6, 3],
+ [4, 6],
+ [7, 4],
+ [5, 7],
+ [6, 5]]],
+ ['edgelist',
+ 'G791',
+ 7,
+ [[1, 2],
+ [3, 1],
+ [4, 3],
+ [5, 4],
+ [2, 5],
+ [3, 2],
+ [6, 2],
+ [3, 6],
+ [7, 3],
+ [2, 7],
+ [4, 2]]],
+ ['edgelist',
+ 'G792',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [6, 2],
+ [4, 6],
+ [7, 2],
+ [5, 7],
+ [2, 5],
+ [4, 2]]],
+ ['edgelist',
+ 'G793',
+ 7,
+ [[2, 5],
+ [3, 4],
+ [5, 3],
+ [1, 7],
+ [5, 6],
+ [7, 6],
+ [4, 2],
+ [7, 5],
+ [4, 1],
+ [4, 7],
+ [5, 4]]],
+ ['edgelist',
+ 'G794',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [5, 3],
+ [7, 5],
+ [3, 7],
+ [6, 3],
+ [1, 3]]],
+ ['edgelist',
+ 'G795',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [5, 1],
+ [4, 1],
+ [3, 1],
+ [7, 1],
+ [4, 7]]],
+ ['edgelist',
+ 'G796',
+ 7,
+ [[1, 2],
+ [3, 1],
+ [6, 3],
+ [7, 6],
+ [3, 7],
+ [2, 3],
+ [5, 2],
+ [3, 5],
+ [4, 3],
+ [5, 4],
+ [4, 2]]],
+ ['edgelist',
+ 'G797',
+ 7,
+ [[5, 6],
+ [2, 5],
+ [3, 2],
+ [4, 3],
+ [7, 4],
+ [6, 7],
+ [3, 6],
+ [5, 3],
+ [4, 6],
+ [1, 6],
+ [3, 1]]],
+ ['edgelist',
+ 'G798',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [5, 3],
+ [6, 3],
+ [6, 1],
+ [7, 3],
+ [5, 7],
+ [6, 5]]],
+ ['edgelist',
+ 'G799',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [7, 2],
+ [3, 7],
+ [1, 3],
+ [7, 1],
+ [6, 3],
+ [1, 6]]],
+ ['edgelist',
+ 'G800',
+ 7,
+ [[1, 6],
+ [7, 1],
+ [2, 7],
+ [6, 2],
+ [3, 6],
+ [7, 3],
+ [5, 4],
+ [4, 3],
+ [5, 6],
+ [7, 5],
+ [7, 6]]],
+ ['edgelist',
+ 'G801',
+ 7,
+ [[1, 6],
+ [7, 1],
+ [2, 7],
+ [6, 2],
+ [3, 6],
+ [7, 3],
+ [4, 7],
+ [6, 4],
+ [5, 6],
+ [7, 5],
+ [5, 4]]],
+ ['edgelist',
+ 'G802',
+ 7,
+ [[1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 7],
+ [3, 5],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G803',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [1, 3],
+ [3, 5],
+ [6, 3],
+ [5, 6],
+ [7, 6],
+ [7, 1]]],
+ ['edgelist',
+ 'G804',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [6, 7],
+ [1, 7],
+ [5, 3],
+ [1, 5],
+ [3, 1],
+ [7, 5]]],
+ ['edgelist',
+ 'G805',
+ 7,
+ [[1, 2],
+ [3, 1],
+ [4, 3],
+ [5, 4],
+ [2, 5],
+ [6, 2],
+ [6, 3],
+ [7, 2],
+ [3, 7],
+ [5, 3],
+ [6, 5]]],
+ ['edgelist',
+ 'G806',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [6, 2],
+ [3, 6],
+ [5, 3],
+ [7, 3],
+ [5, 7]]],
+ ['edgelist',
+ 'G807',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [7, 1],
+ [7, 5],
+ [7, 3],
+ [1, 3],
+ [5, 1]]],
+ ['edgelist',
+ 'G808',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [4, 2],
+ [6, 4],
+ [5, 6],
+ [2, 5],
+ [7, 6],
+ [7, 2]]],
+ ['edgelist',
+ 'G809',
+ 7,
+ [[1, 5],
+ [4, 1],
+ [5, 4],
+ [3, 5],
+ [4, 3],
+ [2, 4],
+ [3, 2],
+ [5, 2],
+ [6, 3],
+ [7, 6],
+ [3, 7]]],
+ ['edgelist',
+ 'G810',
+ 7,
+ [[1, 6],
+ [1, 7],
+ [2, 5],
+ [2, 7],
+ [3, 4],
+ [3, 6],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7]]],
+ ['edgelist',
+ 'G811',
+ 7,
+ [[1, 2],
+ [5, 1],
+ [6, 5],
+ [7, 6],
+ [4, 7],
+ [3, 4],
+ [2, 3],
+ [5, 2],
+ [3, 5],
+ [6, 3],
+ [2, 6]]],
+ ['edgelist',
+ 'G812',
+ 7,
+ [[1, 5],
+ [4, 1],
+ [5, 4],
+ [3, 5],
+ [7, 3],
+ [2, 7],
+ [6, 2],
+ [3, 6],
+ [4, 3],
+ [2, 4],
+ [5, 2]]],
+ ['edgelist',
+ 'G813',
+ 7,
+ [[1, 2],
+ [7, 6],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [7, 1],
+ [7, 2],
+ [7, 3],
+ [7, 4],
+ [7, 5]]],
+ ['edgelist',
+ 'G814',
+ 7,
+ [[5, 2],
+ [1, 5],
+ [2, 1],
+ [4, 2],
+ [1, 4],
+ [6, 2],
+ [7, 6],
+ [2, 7],
+ [3, 2],
+ [6, 3],
+ [7, 3]]],
+ ['edgelist',
+ 'G815',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [1, 4],
+ [5, 1],
+ [5, 2],
+ [5, 3],
+ [5, 4],
+ [6, 5],
+ [7, 6],
+ [5, 7]]],
+ ['edgelist',
+ 'G816',
+ 7,
+ [[2, 1],
+ [3, 2],
+ [4, 3],
+ [5, 4],
+ [1, 5],
+ [3, 1],
+ [6, 3],
+ [7, 6],
+ [4, 7],
+ [7, 1],
+ [1, 6]]],
+ ['edgelist',
+ 'G817',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [1, 3],
+ [5, 1],
+ [7, 5],
+ [1, 7],
+ [4, 7]]],
+ ['edgelist',
+ 'G818',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [3, 1],
+ [6, 3],
+ [7, 6],
+ [5, 7],
+ [1, 6],
+ [7, 1]]],
+ ['edgelist',
+ 'G819',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [7, 1],
+ [3, 7],
+ [4, 7],
+ [1, 4],
+ [5, 1]]],
+ ['edgelist',
+ 'G820',
+ 7,
+ [[5, 7],
+ [6, 5],
+ [7, 6],
+ [4, 7],
+ [6, 4],
+ [3, 6],
+ [4, 3],
+ [6, 1],
+ [7, 1],
+ [2, 1],
+ [3, 2]]],
+ ['edgelist',
+ 'G821',
+ 7,
+ [[3, 1],
+ [5, 3],
+ [6, 5],
+ [4, 6],
+ [2, 4],
+ [1, 2],
+ [3, 2],
+ [4, 3],
+ [7, 4],
+ [6, 7],
+ [5, 4]]],
+ ['edgelist',
+ 'G822',
+ 7,
+ [[5, 4],
+ [5, 3],
+ [2, 5],
+ [4, 2],
+ [1, 4],
+ [2, 1],
+ [4, 3],
+ [4, 6],
+ [3, 6],
+ [7, 1],
+ [5, 7]]],
+ ['edgelist',
+ 'G823',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [3, 4],
+ [6, 2],
+ [2, 4],
+ [6, 3],
+ [7, 4],
+ [7, 1],
+ [6, 4],
+ [5, 6],
+ [4, 5]]],
+ ['edgelist',
+ 'G824',
+ 7,
+ [[5, 1],
+ [2, 5],
+ [7, 2],
+ [1, 7],
+ [4, 1],
+ [2, 4],
+ [6, 2],
+ [1, 6],
+ [7, 6],
+ [3, 4],
+ [1, 3]]],
+ ['edgelist',
+ 'G825',
+ 7,
+ [[1, 2],
+ [6, 1],
+ [5, 6],
+ [2, 5],
+ [3, 2],
+ [4, 3],
+ [5, 4],
+ [5, 3],
+ [7, 5],
+ [3, 7],
+ [4, 7]]],
+ ['edgelist',
+ 'G826',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [6, 2],
+ [4, 6],
+ [7, 4],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G827',
+ 7,
+ [[7, 4],
+ [6, 7],
+ [3, 6],
+ [4, 3],
+ [6, 4],
+ [5, 6],
+ [3, 5],
+ [2, 3],
+ [6, 2],
+ [1, 2],
+ [5, 1]]],
+ ['edgelist',
+ 'G828',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [1, 4],
+ [5, 1],
+ [5, 2],
+ [5, 3],
+ [5, 4],
+ [6, 2],
+ [7, 6],
+ [5, 7]]],
+ ['edgelist',
+ 'G829',
+ 7,
+ [[1, 5],
+ [4, 1],
+ [3, 4],
+ [6, 3],
+ [7, 6],
+ [3, 7],
+ [5, 3],
+ [2, 5],
+ [4, 2],
+ [2, 1],
+ [3, 2]]],
+ ['edgelist',
+ 'G830',
+ 7,
+ [[6, 1],
+ [1, 2],
+ [4, 1],
+ [6, 4],
+ [3, 6],
+ [7, 3],
+ [5, 7],
+ [6, 5],
+ [2, 6],
+ [7, 2],
+ [4, 7]]],
+ ['edgelist',
+ 'G831',
+ 7,
+ [[1, 2],
+ [3, 1],
+ [4, 3],
+ [5, 4],
+ [2, 5],
+ [3, 2],
+ [6, 2],
+ [3, 6],
+ [7, 5],
+ [7, 3],
+ [4, 7]]],
+ ['edgelist',
+ 'G832',
+ 7,
+ [[4, 3],
+ [7, 4],
+ [6, 7],
+ [1, 6],
+ [3, 1],
+ [2, 3],
+ [1, 2],
+ [6, 2],
+ [3, 6],
+ [5, 3],
+ [7, 5]]],
+ ['edgelist',
+ 'G833',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [5, 2],
+ [6, 4],
+ [6, 2],
+ [7, 5],
+ [7, 6],
+ [4, 7]]],
+ ['edgelist',
+ 'G834',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [6, 1],
+ [6, 4],
+ [7, 1],
+ [7, 3],
+ [7, 4],
+ [6, 7]]],
+ ['edgelist',
+ 'G835',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [6, 2],
+ [7, 6],
+ [5, 7],
+ [4, 7],
+ [2, 7]]],
+ ['edgelist',
+ 'G836',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [6, 2],
+ [7, 6],
+ [2, 7],
+ [3, 7],
+ [5, 3]]],
+ ['edgelist',
+ 'G837',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [1, 4],
+ [5, 1],
+ [5, 4],
+ [5, 3],
+ [7, 2],
+ [7, 5],
+ [6, 3],
+ [4, 6]]],
+ ['edgelist',
+ 'G838',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [6, 2],
+ [7, 2],
+ [5, 7],
+ [7, 6],
+ [3, 7]]],
+ ['edgelist',
+ 'G839',
+ 7,
+ [[1, 4],
+ [1, 7],
+ [2, 3],
+ [2, 6],
+ [3, 5],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G840',
+ 7,
+ [[6, 2],
+ [7, 6],
+ [5, 7],
+ [4, 5],
+ [3, 4],
+ [1, 3],
+ [2, 1],
+ [6, 1],
+ [7, 1],
+ [3, 7],
+ [5, 3]]],
+ ['edgelist',
+ 'G841',
+ 7,
+ [[2, 1],
+ [3, 2],
+ [4, 3],
+ [5, 4],
+ [1, 5],
+ [6, 3],
+ [4, 6],
+ [7, 1],
+ [7, 6],
+ [7, 3],
+ [4, 7]]],
+ ['edgelist',
+ 'G842',
+ 7,
+ [[1, 4],
+ [5, 1],
+ [3, 5],
+ [4, 3],
+ [2, 4],
+ [5, 2],
+ [6, 2],
+ [7, 1],
+ [7, 2],
+ [6, 4],
+ [5, 6]]],
+ ['edgelist',
+ 'G843',
+ 7,
+ [[1, 6],
+ [1, 7],
+ [2, 4],
+ [2, 5],
+ [3, 4],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G844',
+ 7,
+ [[1, 3],
+ [2, 1],
+ [3, 2],
+ [1, 4],
+ [4, 2],
+ [6, 5],
+ [6, 4],
+ [7, 5],
+ [7, 3],
+ [7, 1],
+ [2, 7]]],
+ ['edgelist',
+ 'G845',
+ 7,
+ [[5, 2],
+ [6, 5],
+ [3, 6],
+ [2, 3],
+ [1, 2],
+ [6, 1],
+ [7, 6],
+ [4, 7],
+ [3, 4],
+ [1, 3],
+ [5, 1]]],
+ ['edgelist',
+ 'G846',
+ 7,
+ [[1, 2],
+ [3, 1],
+ [4, 3],
+ [5, 4],
+ [2, 5],
+ [7, 2],
+ [7, 3],
+ [6, 2],
+ [4, 6],
+ [6, 3],
+ [5, 6]]],
+ ['edgelist',
+ 'G847',
+ 7,
+ [[1, 6],
+ [1, 7],
+ [2, 5],
+ [2, 7],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6]]],
+ ['edgelist',
+ 'G848',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [7, 4],
+ [7, 5],
+ [7, 3],
+ [3, 1],
+ [5, 1]]],
+ ['edgelist',
+ 'G849',
+ 7,
+ [[1, 3],
+ [2, 1],
+ [3, 2],
+ [1, 4],
+ [4, 2],
+ [6, 5],
+ [6, 4],
+ [7, 5],
+ [7, 3],
+ [5, 1],
+ [2, 5]]],
+ ['edgelist',
+ 'G850',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [6, 1],
+ [6, 2],
+ [6, 3],
+ [7, 3],
+ [1, 7],
+ [2, 7]]],
+ ['edgelist',
+ 'G851',
+ 7,
+ [[1, 4],
+ [5, 1],
+ [2, 5],
+ [4, 2],
+ [5, 4],
+ [1, 2],
+ [3, 5],
+ [7, 3],
+ [6, 7],
+ [3, 6],
+ [2, 3]]],
+ ['edgelist',
+ 'G852',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [6, 1],
+ [6, 5],
+ [6, 3],
+ [6, 4],
+ [7, 2],
+ [7, 6]]],
+ ['edgelist',
+ 'G853',
+ 7,
+ [[5, 2],
+ [6, 5],
+ [3, 6],
+ [2, 3],
+ [1, 2],
+ [5, 1],
+ [6, 1],
+ [7, 6],
+ [4, 7],
+ [3, 4],
+ [6, 4]]],
+ ['edgelist',
+ 'G854',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [1, 4],
+ [5, 1],
+ [5, 4],
+ [5, 3],
+ [6, 2],
+ [6, 5],
+ [7, 6],
+ [5, 7]]],
+ ['edgelist',
+ 'G855',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [6, 5],
+ [1, 5],
+ [6, 1],
+ [6, 2],
+ [6, 3],
+ [6, 4],
+ [7, 4],
+ [7, 5]]],
+ ['edgelist',
+ 'G856',
+ 7,
+ [[1, 5],
+ [4, 1],
+ [2, 4],
+ [5, 2],
+ [4, 5],
+ [6, 2],
+ [7, 6],
+ [2, 7],
+ [3, 2],
+ [6, 3],
+ [7, 3]]],
+ ['edgelist',
+ 'G857',
+ 7,
+ [[5, 2],
+ [1, 5],
+ [4, 1],
+ [3, 6],
+ [6, 5],
+ [7, 6],
+ [3, 7],
+ [2, 3],
+ [4, 2],
+ [7, 4],
+ [3, 4]]],
+ ['edgelist',
+ 'G858',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [1, 4],
+ [5, 1],
+ [5, 3],
+ [6, 5],
+ [6, 4],
+ [7, 1],
+ [7, 6],
+ [4, 7]]],
+ ['edgelist',
+ 'G859',
+ 7,
+ [[6, 3],
+ [3, 5],
+ [6, 4],
+ [5, 2],
+ [6, 5],
+ [1, 2],
+ [4, 1],
+ [1, 3],
+ [7, 3],
+ [7, 4],
+ [1, 7]]],
+ ['edgelist',
+ 'G860',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [6, 1],
+ [6, 5],
+ [6, 3],
+ [6, 4],
+ [7, 2],
+ [1, 7]]],
+ ['edgelist',
+ 'G861',
+ 7,
+ [[1, 4],
+ [1, 5],
+ [2, 3],
+ [2, 6],
+ [2, 7],
+ [3, 5],
+ [3, 7],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [6, 7]]],
+ ['edgelist',
+ 'G862',
+ 7,
+ [[5, 1],
+ [4, 5],
+ [6, 4],
+ [1, 6],
+ [2, 1],
+ [3, 2],
+ [4, 3],
+ [5, 2],
+ [6, 3],
+ [7, 5],
+ [6, 7]]],
+ ['edgelist',
+ 'G863',
+ 7,
+ [[3, 4],
+ [5, 3],
+ [1, 5],
+ [6, 1],
+ [2, 6],
+ [5, 2],
+ [4, 5],
+ [6, 4],
+ [2, 1],
+ [7, 6],
+ [7, 3]]],
+ ['edgelist',
+ 'G864',
+ 7,
+ [[5, 2],
+ [1, 5],
+ [4, 1],
+ [5, 4],
+ [6, 5],
+ [7, 6],
+ [3, 7],
+ [2, 3],
+ [4, 2],
+ [7, 4],
+ [3, 6]]],
+ ['edgelist',
+ 'G865',
+ 7,
+ [[1, 4],
+ [5, 1],
+ [3, 5],
+ [4, 3],
+ [2, 4],
+ [1, 2],
+ [7, 1],
+ [6, 7],
+ [3, 6],
+ [2, 6],
+ [5, 2]]],
+ ['edgelist',
+ 'G866',
+ 7,
+ [[1, 4],
+ [1, 5],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 6],
+ [3, 7],
+ [4, 7],
+ [5, 6],
+ [6, 7]]],
+ ['edgelist',
+ 'G867',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [1, 4],
+ [5, 1],
+ [5, 4],
+ [5, 3],
+ [6, 2],
+ [6, 5],
+ [7, 2],
+ [6, 7]]],
+ ['edgelist',
+ 'G868',
+ 7,
+ [[5, 2],
+ [6, 5],
+ [7, 6],
+ [4, 7],
+ [3, 4],
+ [2, 3],
+ [1, 2],
+ [6, 1],
+ [5, 1],
+ [6, 3],
+ [7, 3]]],
+ ['edgelist',
+ 'G869',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [5, 3],
+ [4, 6],
+ [5, 6],
+ [4, 1],
+ [7, 6],
+ [7, 2]]],
+ ['edgelist',
+ 'G870',
+ 7,
+ [[1, 5],
+ [2, 1],
+ [5, 2],
+ [4, 5],
+ [3, 4],
+ [2, 3],
+ [7, 2],
+ [6, 7],
+ [4, 6],
+ [6, 5],
+ [3, 7]]],
+ ['edgelist',
+ 'G871',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [6, 2],
+ [5, 3],
+ [7, 3],
+ [4, 7],
+ [5, 7]]],
+ ['edgelist',
+ 'G872',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [7, 3],
+ [2, 7],
+ [6, 3],
+ [5, 2],
+ [1, 4]]],
+ ['edgelist',
+ 'G873',
+ 7,
+ [[1, 4],
+ [1, 5],
+ [2, 3],
+ [2, 6],
+ [2, 7],
+ [3, 6],
+ [3, 7],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7]]],
+ ['edgelist',
+ 'G874',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [6, 5],
+ [1, 5],
+ [6, 4],
+ [6, 2],
+ [7, 4],
+ [7, 5],
+ [5, 3],
+ [1, 4]]],
+ ['edgelist',
+ 'G875',
+ 7,
+ [[1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 4],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [3, 7],
+ [4, 7],
+ [5, 6]]],
+ ['edgelist',
+ 'G876',
+ 7,
+ [[5, 4],
+ [3, 5],
+ [4, 3],
+ [1, 4],
+ [3, 2],
+ [6, 5],
+ [6, 1],
+ [7, 5],
+ [7, 2],
+ [2, 6],
+ [1, 7]]],
+ ['edgelist',
+ 'G877',
+ 7,
+ [[7, 5],
+ [4, 7],
+ [2, 4],
+ [5, 2],
+ [1, 5],
+ [3, 1],
+ [4, 3],
+ [1, 2],
+ [6, 1],
+ [7, 6],
+ [6, 3]]],
+ ['edgelist',
+ 'G878',
+ 7,
+ [[7, 2],
+ [3, 7],
+ [2, 3],
+ [1, 2],
+ [4, 1],
+ [5, 4],
+ [6, 5],
+ [4, 6],
+ [3, 1],
+ [5, 1],
+ [6, 7]]],
+ ['edgelist',
+ 'G879',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [5, 6],
+ [1, 5],
+ [2, 4],
+ [5, 2],
+ [3, 5],
+ [1, 4],
+ [6, 4],
+ [5, 4],
+ [1, 3]]],
+ ['edgelist',
+ 'G880',
+ 7,
+ [[4, 7],
+ [2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [7, 6],
+ [3, 7],
+ [6, 2],
+ [1, 4],
+ [2, 7],
+ [1, 2],
+ [1, 7]]],
+ ['edgelist',
+ 'G881',
+ 7,
+ [[2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [6, 4],
+ [3, 6],
+ [2, 1],
+ [5, 2],
+ [6, 2],
+ [3, 5]]],
+ ['edgelist',
+ 'G882',
+ 7,
+ [[4, 5],
+ [2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [3, 5],
+ [6, 2],
+ [1, 4],
+ [2, 5],
+ [1, 2],
+ [3, 4]]],
+ ['edgelist',
+ 'G883',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [1, 3],
+ [4, 3],
+ [4, 2],
+ [5, 1],
+ [3, 5],
+ [6, 2],
+ [1, 6],
+ [5, 6],
+ [4, 5],
+ [6, 4]]],
+ ['edgelist',
+ 'G884',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [2, 4],
+ [5, 2],
+ [3, 5],
+ [1, 4],
+ [1, 3],
+ [2, 6],
+ [7, 2]]],
+ ['edgelist',
+ 'G885',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [2, 4],
+ [5, 2],
+ [3, 5],
+ [1, 4],
+ [1, 3],
+ [5, 7],
+ [6, 4]]],
+ ['edgelist',
+ 'G886',
+ 7,
+ [[1, 2],
+ [3, 5],
+ [1, 3],
+ [3, 2],
+ [4, 2],
+ [4, 3],
+ [5, 2],
+ [6, 2],
+ [6, 3],
+ [6, 4],
+ [1, 4],
+ [2, 7]]],
+ ['edgelist',
+ 'G887',
+ 7,
+ [[1, 2],
+ [3, 5],
+ [1, 3],
+ [3, 2],
+ [4, 2],
+ [4, 3],
+ [5, 2],
+ [6, 2],
+ [6, 3],
+ [6, 4],
+ [1, 4],
+ [4, 7]]],
+ ['edgelist',
+ 'G888',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [2, 4],
+ [5, 2],
+ [3, 5],
+ [1, 4],
+ [6, 1],
+ [5, 6],
+ [5, 7]]],
+ ['edgelist',
+ 'G889',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [2, 4],
+ [5, 2],
+ [3, 5],
+ [1, 4],
+ [6, 1],
+ [5, 6],
+ [7, 2]]],
+ ['edgelist',
+ 'G890',
+ 7,
+ [[1, 2],
+ [3, 5],
+ [1, 3],
+ [3, 2],
+ [4, 2],
+ [4, 3],
+ [5, 2],
+ [6, 2],
+ [6, 3],
+ [6, 4],
+ [1, 4],
+ [1, 7]]],
+ ['edgelist',
+ 'G891',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [2, 4],
+ [5, 2],
+ [3, 5],
+ [1, 4],
+ [6, 1],
+ [5, 6],
+ [1, 7]]],
+ ['edgelist',
+ 'G892',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [2, 4],
+ [5, 2],
+ [3, 5],
+ [1, 4],
+ [6, 1],
+ [5, 6],
+ [3, 7]]],
+ ['edgelist',
+ 'G893',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [5, 6],
+ [1, 5],
+ [2, 4],
+ [5, 2],
+ [3, 5],
+ [1, 4],
+ [6, 4],
+ [1, 3],
+ [2, 7]]],
+ ['edgelist',
+ 'G894',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [5, 6],
+ [1, 5],
+ [2, 4],
+ [5, 2],
+ [3, 5],
+ [1, 4],
+ [6, 4],
+ [1, 3],
+ [5, 7]]],
+ ['edgelist',
+ 'G895',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [2, 4],
+ [5, 2],
+ [3, 5],
+ [1, 4],
+ [1, 3],
+ [7, 2],
+ [7, 6]]],
+ ['edgelist',
+ 'G896',
+ 7,
+ [[2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [6, 2],
+ [6, 4],
+ [3, 6],
+ [2, 1],
+ [5, 2],
+ [2, 7]]],
+ ['edgelist',
+ 'G897',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [3, 5],
+ [6, 3],
+ [2, 6],
+ [4, 6],
+ [1, 4],
+ [6, 7]]],
+ ['edgelist',
+ 'G898',
+ 7,
+ [[4, 7],
+ [2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [7, 6],
+ [3, 7],
+ [6, 2],
+ [1, 4],
+ [2, 7],
+ [1, 2],
+ [2, 5]]],
+ ['edgelist',
+ 'G899',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [3, 5],
+ [6, 3],
+ [2, 6],
+ [4, 6],
+ [1, 4],
+ [4, 7]]],
+ ['edgelist',
+ 'G900',
+ 7,
+ [[1, 2],
+ [3, 5],
+ [1, 3],
+ [3, 2],
+ [4, 2],
+ [4, 3],
+ [5, 2],
+ [6, 2],
+ [6, 3],
+ [6, 4],
+ [1, 4],
+ [5, 7]]],
+ ['edgelist',
+ 'G901',
+ 7,
+ [[2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [6, 2],
+ [6, 4],
+ [3, 6],
+ [2, 1],
+ [5, 2],
+ [3, 7]]],
+ ['edgelist',
+ 'G902',
+ 7,
+ [[4, 7],
+ [2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [7, 6],
+ [3, 7],
+ [6, 2],
+ [1, 4],
+ [2, 7],
+ [1, 2],
+ [1, 5]]],
+ ['edgelist',
+ 'G903',
+ 7,
+ [[2, 4],
+ [5, 2],
+ [4, 5],
+ [3, 4],
+ [1, 3],
+ [5, 1],
+ [6, 5],
+ [3, 6],
+ [5, 3],
+ [1, 6],
+ [2, 6],
+ [4, 7]]],
+ ['edgelist',
+ 'G904',
+ 7,
+ [[2, 4],
+ [5, 2],
+ [4, 5],
+ [3, 4],
+ [1, 3],
+ [5, 1],
+ [6, 5],
+ [3, 6],
+ [5, 3],
+ [1, 6],
+ [2, 6],
+ [1, 7]]],
+ ['edgelist',
+ 'G905',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [2, 4],
+ [5, 2],
+ [3, 5],
+ [1, 4],
+ [6, 1],
+ [5, 6],
+ [6, 7]]],
+ ['edgelist',
+ 'G906',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [3, 5],
+ [6, 3],
+ [2, 6],
+ [2, 5],
+ [1, 4],
+ [6, 7]]],
+ ['edgelist',
+ 'G907',
+ 7,
+ [[2, 6],
+ [5, 2],
+ [1, 5],
+ [6, 1],
+ [3, 6],
+ [5, 3],
+ [4, 5],
+ [6, 4],
+ [1, 2],
+ [3, 1],
+ [4, 3],
+ [5, 7]]],
+ ['edgelist',
+ 'G908',
+ 7,
+ [[2, 6],
+ [5, 2],
+ [1, 5],
+ [6, 1],
+ [3, 6],
+ [5, 3],
+ [4, 5],
+ [6, 4],
+ [1, 2],
+ [3, 1],
+ [4, 3],
+ [1, 7]]],
+ ['edgelist',
+ 'G909',
+ 7,
+ [[4, 7],
+ [2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [7, 6],
+ [3, 7],
+ [6, 2],
+ [1, 4],
+ [2, 7],
+ [1, 2],
+ [5, 6]]],
+ ['edgelist',
+ 'G910',
+ 7,
+ [[2, 6],
+ [5, 2],
+ [1, 5],
+ [6, 1],
+ [3, 6],
+ [5, 3],
+ [4, 5],
+ [6, 4],
+ [1, 2],
+ [3, 1],
+ [4, 3],
+ [4, 7]]],
+ ['edgelist',
+ 'G911',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [3, 5],
+ [6, 3],
+ [2, 6],
+ [2, 5],
+ [1, 4],
+ [1, 7]]],
+ ['edgelist',
+ 'G912',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [5, 6],
+ [1, 5],
+ [2, 4],
+ [5, 2],
+ [3, 5],
+ [1, 4],
+ [6, 4],
+ [1, 3],
+ [6, 7]]],
+ ['edgelist',
+ 'G913',
+ 7,
+ [[1, 4],
+ [7, 1],
+ [6, 7],
+ [4, 6],
+ [2, 4],
+ [7, 2],
+ [5, 7],
+ [4, 5],
+ [3, 4],
+ [7, 3],
+ [4, 7],
+ [6, 5]]],
+ ['edgelist',
+ 'G914',
+ 7,
+ [[1, 2],
+ [5, 1],
+ [6, 5],
+ [2, 6],
+ [5, 2],
+ [3, 5],
+ [2, 3],
+ [7, 2],
+ [5, 7],
+ [3, 7],
+ [4, 3],
+ [5, 4]]],
+ ['edgelist',
+ 'G915',
+ 7,
+ [[5, 2],
+ [4, 3],
+ [4, 1],
+ [5, 3],
+ [6, 2],
+ [6, 1],
+ [4, 6],
+ [5, 4],
+ [6, 5],
+ [7, 6],
+ [4, 7],
+ [5, 7]]],
+ ['edgelist',
+ 'G916',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [1, 3],
+ [4, 1],
+ [4, 2],
+ [4, 3],
+ [5, 3],
+ [4, 5],
+ [6, 4],
+ [1, 6],
+ [7, 4],
+ [2, 7]]],
+ ['edgelist',
+ 'G917',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [1, 3],
+ [4, 1],
+ [4, 2],
+ [4, 3],
+ [7, 4],
+ [1, 7],
+ [6, 1],
+ [2, 6],
+ [5, 2],
+ [3, 5]]],
+ ['edgelist',
+ 'G918',
+ 7,
+ [[7, 3],
+ [6, 7],
+ [4, 6],
+ [3, 4],
+ [2, 3],
+ [5, 2],
+ [6, 5],
+ [3, 6],
+ [5, 3],
+ [1, 5],
+ [2, 1],
+ [6, 2]]],
+ ['edgelist',
+ 'G919',
+ 7,
+ [[6, 5],
+ [7, 6],
+ [4, 7],
+ [5, 4],
+ [1, 5],
+ [4, 1],
+ [2, 4],
+ [1, 2],
+ [5, 2],
+ [4, 6],
+ [3, 4],
+ [5, 3]]],
+ ['edgelist',
+ 'G920',
+ 7,
+ [[1, 5],
+ [4, 1],
+ [3, 4],
+ [5, 3],
+ [2, 5],
+ [4, 2],
+ [2, 1],
+ [3, 2],
+ [6, 1],
+ [2, 6],
+ [7, 2],
+ [1, 7]]],
+ ['edgelist',
+ 'G921',
+ 7,
+ [[2, 3],
+ [1, 2],
+ [3, 1],
+ [4, 3],
+ [1, 4],
+ [2, 4],
+ [5, 3],
+ [1, 5],
+ [6, 5],
+ [3, 6],
+ [7, 3],
+ [2, 7]]],
+ ['edgelist',
+ 'G922',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [1, 4],
+ [5, 1],
+ [5, 2],
+ [5, 3],
+ [5, 4],
+ [6, 3],
+ [5, 6],
+ [7, 5],
+ [4, 7]]],
+ ['edgelist',
+ 'G923',
+ 7,
+ [[1, 5],
+ [4, 1],
+ [3, 4],
+ [5, 3],
+ [2, 5],
+ [4, 2],
+ [2, 1],
+ [3, 2],
+ [6, 1],
+ [2, 6],
+ [7, 2],
+ [3, 7]]],
+ ['edgelist',
+ 'G924',
+ 7,
+ [[2, 3],
+ [1, 2],
+ [3, 1],
+ [4, 3],
+ [1, 4],
+ [2, 4],
+ [5, 3],
+ [1, 5],
+ [7, 5],
+ [3, 7],
+ [6, 3],
+ [5, 6]]],
+ ['edgelist',
+ 'G925',
+ 7,
+ [[2, 1],
+ [3, 2],
+ [1, 3],
+ [4, 1],
+ [5, 4],
+ [1, 5],
+ [6, 1],
+ [4, 6],
+ [5, 6],
+ [7, 5],
+ [4, 7],
+ [7, 1]]],
+ ['edgelist',
+ 'G926',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [1, 3],
+ [4, 1],
+ [4, 2],
+ [4, 3],
+ [5, 2],
+ [1, 5],
+ [6, 1],
+ [3, 6],
+ [7, 6],
+ [3, 7]]],
+ ['edgelist',
+ 'G927',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [1, 4],
+ [5, 1],
+ [5, 2],
+ [5, 3],
+ [5, 4],
+ [7, 5],
+ [1, 7],
+ [6, 1],
+ [4, 6]]],
+ ['edgelist',
+ 'G928',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [1, 4],
+ [5, 1],
+ [5, 2],
+ [5, 3],
+ [2, 6],
+ [7, 2],
+ [1, 7],
+ [6, 1],
+ [5, 6]]],
+ ['edgelist',
+ 'G929',
+ 7,
+ [[1, 5],
+ [4, 1],
+ [3, 4],
+ [5, 3],
+ [2, 5],
+ [4, 2],
+ [2, 1],
+ [3, 2],
+ [7, 2],
+ [1, 7],
+ [6, 1],
+ [3, 6]]],
+ ['edgelist',
+ 'G930',
+ 7,
+ [[6, 5],
+ [4, 6],
+ [5, 4],
+ [7, 5],
+ [4, 7],
+ [3, 4],
+ [5, 3],
+ [1, 5],
+ [4, 1],
+ [2, 1],
+ [3, 2],
+ [7, 3]]],
+ ['edgelist',
+ 'G931',
+ 7,
+ [[5, 2],
+ [4, 3],
+ [4, 1],
+ [5, 3],
+ [6, 2],
+ [6, 1],
+ [4, 6],
+ [5, 4],
+ [6, 5],
+ [7, 6],
+ [1, 7],
+ [4, 7]]],
+ ['edgelist',
+ 'G932',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [1, 4],
+ [5, 1],
+ [5, 2],
+ [5, 3],
+ [5, 4],
+ [7, 2],
+ [1, 7],
+ [6, 1],
+ [2, 6]]],
+ ['edgelist',
+ 'G933',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [1, 4],
+ [5, 1],
+ [5, 2],
+ [5, 3],
+ [5, 4],
+ [4, 2],
+ [6, 2],
+ [7, 6],
+ [5, 7]]],
+ ['edgelist',
+ 'G934',
+ 7,
+ [[1, 5],
+ [4, 1],
+ [3, 4],
+ [5, 3],
+ [2, 5],
+ [4, 2],
+ [2, 1],
+ [3, 2],
+ [6, 5],
+ [4, 6],
+ [7, 4],
+ [5, 7]]],
+ ['edgelist',
+ 'G935',
+ 7,
+ [[2, 1],
+ [3, 2],
+ [4, 3],
+ [1, 4],
+ [5, 4],
+ [2, 5],
+ [5, 1],
+ [6, 5],
+ [1, 6],
+ [4, 6],
+ [7, 1],
+ [2, 7]]],
+ ['edgelist',
+ 'G936',
+ 7,
+ [[1, 5],
+ [4, 1],
+ [3, 4],
+ [5, 3],
+ [2, 5],
+ [4, 2],
+ [2, 1],
+ [3, 2],
+ [7, 3],
+ [5, 7],
+ [6, 1],
+ [2, 6]]],
+ ['edgelist',
+ 'G937',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [1, 4],
+ [5, 1],
+ [5, 2],
+ [5, 3],
+ [6, 5],
+ [3, 6],
+ [2, 6],
+ [7, 2],
+ [6, 7]]],
+ ['edgelist',
+ 'G938',
+ 7,
+ [[1, 3],
+ [2, 1],
+ [3, 2],
+ [1, 4],
+ [4, 2],
+ [5, 3],
+ [6, 4],
+ [7, 2],
+ [7, 5],
+ [5, 1],
+ [4, 5],
+ [2, 6]]],
+ ['edgelist',
+ 'G939',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [1, 3],
+ [4, 1],
+ [4, 3],
+ [5, 4],
+ [5, 2],
+ [1, 5],
+ [6, 3],
+ [6, 5],
+ [7, 1],
+ [4, 7]]],
+ ['edgelist',
+ 'G940',
+ 7,
+ [[6, 1],
+ [3, 6],
+ [7, 3],
+ [4, 7],
+ [3, 4],
+ [2, 3],
+ [1, 2],
+ [5, 1],
+ [2, 5],
+ [6, 2],
+ [7, 6],
+ [1, 3]]],
+ ['edgelist',
+ 'G941',
+ 7,
+ [[1, 5],
+ [4, 1],
+ [3, 4],
+ [5, 3],
+ [2, 5],
+ [4, 2],
+ [2, 1],
+ [3, 2],
+ [7, 5],
+ [3, 7],
+ [6, 3],
+ [4, 6]]],
+ ['edgelist',
+ 'G942',
+ 7,
+ [[1, 3],
+ [2, 1],
+ [6, 2],
+ [4, 6],
+ [7, 4],
+ [3, 7],
+ [5, 3],
+ [4, 5],
+ [6, 5],
+ [3, 6],
+ [2, 3],
+ [5, 2]]],
+ ['edgelist',
+ 'G943',
+ 7,
+ [[1, 3],
+ [2, 1],
+ [3, 2],
+ [1, 4],
+ [4, 2],
+ [5, 1],
+ [2, 5],
+ [5, 3],
+ [4, 5],
+ [6, 5],
+ [7, 6],
+ [4, 7]]],
+ ['edgelist',
+ 'G944',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [1, 3],
+ [4, 1],
+ [4, 2],
+ [4, 3],
+ [7, 2],
+ [3, 7],
+ [5, 7],
+ [4, 5],
+ [6, 4],
+ [7, 6]]],
+ ['edgelist',
+ 'G945',
+ 7,
+ [[1, 5],
+ [4, 1],
+ [3, 4],
+ [5, 3],
+ [2, 5],
+ [4, 2],
+ [2, 1],
+ [3, 2],
+ [6, 1],
+ [7, 6],
+ [1, 7],
+ [4, 5]]],
+ ['edgelist',
+ 'G946',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [1, 4],
+ [5, 1],
+ [5, 2],
+ [5, 3],
+ [5, 4],
+ [6, 2],
+ [3, 6],
+ [7, 1],
+ [4, 7]]],
+ ['edgelist',
+ 'G947',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [6, 4],
+ [6, 2],
+ [6, 5],
+ [7, 4],
+ [5, 7],
+ [2, 7],
+ [7, 6]]],
+ ['edgelist',
+ 'G948',
+ 7,
+ [[1, 6],
+ [1, 7],
+ [2, 4],
+ [2, 5],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7]]],
+ ['edgelist',
+ 'G949',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [6, 3],
+ [7, 6],
+ [1, 7],
+ [7, 3],
+ [1, 6],
+ [2, 6],
+ [7, 2]]],
+ ['edgelist',
+ 'G950',
+ 7,
+ [[1, 2],
+ [7, 6],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [7, 1],
+ [7, 2],
+ [7, 3],
+ [7, 4],
+ [7, 5],
+ [6, 2]]],
+ ['edgelist',
+ 'G951',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [6, 1],
+ [6, 2],
+ [6, 3],
+ [6, 4],
+ [6, 5],
+ [7, 2],
+ [6, 7]]],
+ ['edgelist',
+ 'G952',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [1, 4],
+ [5, 1],
+ [5, 2],
+ [5, 3],
+ [5, 4],
+ [6, 2],
+ [5, 6],
+ [7, 5],
+ [6, 7]]],
+ ['edgelist',
+ 'G953',
+ 7,
+ [[3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [6, 2],
+ [6, 4],
+ [3, 6],
+ [2, 1],
+ [5, 2],
+ [7, 4],
+ [7, 2]]],
+ ['edgelist',
+ 'G954',
+ 7,
+ [[1, 5],
+ [1, 7],
+ [2, 4],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 6],
+ [3, 7],
+ [4, 6],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G955',
+ 7,
+ [[1, 6],
+ [1, 7],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 7],
+ [5, 6],
+ [6, 7]]],
+ ['edgelist',
+ 'G956',
+ 7,
+ [[1, 2],
+ [3, 5],
+ [1, 3],
+ [3, 2],
+ [5, 7],
+ [5, 2],
+ [6, 2],
+ [6, 3],
+ [6, 4],
+ [1, 4],
+ [7, 2],
+ [3, 7]]],
+ ['edgelist',
+ 'G957',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [6, 5],
+ [1, 5],
+ [6, 4],
+ [6, 2],
+ [7, 4],
+ [7, 5],
+ [5, 3],
+ [1, 4],
+ [5, 4]]],
+ ['edgelist',
+ 'G958',
+ 7,
+ [[2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [6, 4],
+ [3, 6],
+ [2, 1],
+ [5, 2],
+ [7, 2],
+ [7, 6]]],
+ ['edgelist',
+ 'G959',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [1, 4],
+ [5, 1],
+ [5, 2],
+ [5, 3],
+ [5, 4],
+ [6, 2],
+ [7, 6],
+ [1, 7],
+ [2, 7]]],
+ ['edgelist',
+ 'G960',
+ 7,
+ [[1, 4],
+ [5, 1],
+ [3, 5],
+ [4, 3],
+ [2, 4],
+ [5, 2],
+ [2, 1],
+ [6, 2],
+ [6, 3],
+ [7, 2],
+ [3, 7],
+ [5, 7]]],
+ ['edgelist',
+ 'G961',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [1, 4],
+ [5, 3],
+ [6, 1],
+ [6, 5],
+ [5, 2],
+ [2, 7],
+ [6, 4],
+ [2, 6],
+ [7, 3]]],
+ ['edgelist',
+ 'G962',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [1, 4],
+ [5, 3],
+ [6, 1],
+ [6, 5],
+ [5, 2],
+ [2, 6],
+ [6, 4],
+ [7, 2],
+ [5, 7]]],
+ ['edgelist',
+ 'G963',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [1, 4],
+ [5, 3],
+ [6, 1],
+ [6, 5],
+ [5, 2],
+ [2, 6],
+ [6, 4],
+ [7, 2],
+ [1, 7]]],
+ ['edgelist',
+ 'G964',
+ 7,
+ [[5, 4],
+ [2, 3],
+ [1, 2],
+ [1, 4],
+ [5, 1],
+ [7, 5],
+ [5, 3],
+ [6, 5],
+ [7, 3],
+ [7, 4],
+ [4, 3],
+ [6, 2]]],
+ ['edgelist',
+ 'G965',
+ 7,
+ [[3, 4],
+ [5, 3],
+ [1, 5],
+ [7, 1],
+ [7, 6],
+ [5, 6],
+ [2, 4],
+ [6, 2],
+ [1, 6],
+ [7, 2],
+ [4, 7],
+ [6, 4]]],
+ ['edgelist',
+ 'G966',
+ 7,
+ [[1, 4],
+ [1, 6],
+ [2, 3],
+ [2, 6],
+ [2, 7],
+ [3, 5],
+ [3, 7],
+ [4, 5],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G967',
+ 7,
+ [[1, 4],
+ [1, 7],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G968',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [5, 6],
+ [1, 5],
+ [5, 4],
+ [6, 4],
+ [7, 2],
+ [4, 7],
+ [7, 3],
+ [1, 7],
+ [5, 7]]],
+ ['edgelist',
+ 'G969',
+ 7,
+ [[1, 2],
+ [3, 5],
+ [1, 3],
+ [7, 2],
+ [4, 2],
+ [4, 3],
+ [5, 2],
+ [6, 2],
+ [6, 3],
+ [6, 4],
+ [1, 4],
+ [5, 7]]],
+ ['edgelist',
+ 'G970',
+ 7,
+ [[2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [6, 4],
+ [3, 6],
+ [2, 1],
+ [5, 2],
+ [7, 4],
+ [2, 7]]],
+ ['edgelist',
+ 'G971',
+ 7,
+ [[5, 4],
+ [2, 3],
+ [6, 1],
+ [1, 4],
+ [5, 1],
+ [5, 2],
+ [5, 3],
+ [6, 2],
+ [7, 3],
+ [7, 4],
+ [4, 3],
+ [7, 5]]],
+ ['edgelist',
+ 'G972',
+ 7,
+ [[3, 4],
+ [5, 3],
+ [6, 5],
+ [1, 6],
+ [7, 1],
+ [2, 7],
+ [4, 2],
+ [7, 4],
+ [6, 4],
+ [2, 6],
+ [5, 1],
+ [4, 1]]],
+ ['edgelist',
+ 'G973',
+ 7,
+ [[1, 4],
+ [1, 6],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 7],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G974',
+ 7,
+ [[4, 3],
+ [2, 3],
+ [6, 1],
+ [1, 4],
+ [5, 1],
+ [5, 2],
+ [7, 5],
+ [6, 2],
+ [7, 3],
+ [7, 4],
+ [7, 2],
+ [1, 7]]],
+ ['edgelist',
+ 'G975',
+ 7,
+ [[1, 6],
+ [1, 7],
+ [2, 4],
+ [2, 5],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [3, 7],
+ [4, 6],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G976',
+ 7,
+ [[1, 4],
+ [1, 6],
+ [2, 3],
+ [2, 5],
+ [2, 7],
+ [3, 5],
+ [3, 7],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G977',
+ 7,
+ [[1, 4],
+ [1, 7],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G978',
+ 7,
+ [[1, 6],
+ [1, 7],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 7]]],
+ ['edgelist',
+ 'G979',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [1, 4],
+ [5, 3],
+ [6, 1],
+ [6, 5],
+ [5, 2],
+ [4, 5],
+ [6, 4],
+ [3, 7],
+ [7, 2]]],
+ ['edgelist',
+ 'G980',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [1, 4],
+ [5, 3],
+ [6, 1],
+ [6, 5],
+ [5, 2],
+ [4, 5],
+ [6, 4],
+ [7, 2],
+ [7, 6]]],
+ ['edgelist',
+ 'G981',
+ 7,
+ [[1, 3],
+ [1, 7],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 6],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7]]],
+ ['edgelist',
+ 'G982',
+ 7,
+ [[1, 6],
+ [1, 7],
+ [2, 4],
+ [2, 5],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [4, 5],
+ [4, 7],
+ [5, 6],
+ [6, 7]]],
+ ['edgelist',
+ 'G983',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [1, 4],
+ [5, 2],
+ [3, 5],
+ [6, 3],
+ [1, 6],
+ [5, 4],
+ [7, 6],
+ [7, 5],
+ [4, 6]]],
+ ['edgelist',
+ 'G984',
+ 7,
+ [[1, 3],
+ [1, 7],
+ [2, 3],
+ [2, 5],
+ [2, 6],
+ [3, 4],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G985',
+ 7,
+ [[1, 3],
+ [1, 7],
+ [2, 4],
+ [2, 5],
+ [2, 6],
+ [3, 5],
+ [3, 6],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G986',
+ 7,
+ [[1, 3],
+ [1, 7],
+ [2, 4],
+ [2, 5],
+ [2, 6],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7]]],
+ ['edgelist',
+ 'G987',
+ 7,
+ [[1, 6],
+ [1, 7],
+ [2, 4],
+ [2, 5],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [5, 6],
+ [6, 7]]],
+ ['edgelist',
+ 'G988',
+ 7,
+ [[1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7]]],
+ ['edgelist',
+ 'G989',
+ 7,
+ [[4, 1],
+ [3, 4],
+ [5, 3],
+ [1, 5],
+ [6, 2],
+ [6, 3],
+ [7, 2],
+ [7, 1],
+ [4, 7],
+ [6, 4],
+ [5, 6],
+ [7, 5]]],
+ ['edgelist',
+ 'G990',
+ 7,
+ [[1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 4],
+ [2, 5],
+ [3, 4],
+ [3, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G991',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G992',
+ 7,
+ [[4, 1],
+ [3, 4],
+ [5, 3],
+ [1, 5],
+ [6, 2],
+ [6, 3],
+ [7, 2],
+ [7, 1],
+ [4, 7],
+ [6, 4],
+ [7, 5],
+ [2, 4]]],
+ ['edgelist',
+ 'G993',
+ 7,
+ [[1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 4],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [4, 7],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G994',
+ 7,
+ [[3, 4],
+ [5, 3],
+ [6, 3],
+ [5, 2],
+ [7, 1],
+ [4, 1],
+ [4, 2],
+ [7, 4],
+ [6, 7],
+ [2, 6],
+ [5, 1],
+ [4, 5]]],
+ ['edgelist',
+ 'G995',
+ 7,
+ [[3, 4],
+ [5, 3],
+ [5, 2],
+ [3, 6],
+ [7, 1],
+ [7, 5],
+ [4, 2],
+ [7, 4],
+ [1, 4],
+ [2, 6],
+ [5, 1],
+ [6, 4]]],
+ ['edgelist',
+ 'G996',
+ 7,
+ [[1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 4],
+ [2, 7],
+ [3, 4],
+ [3, 7],
+ [4, 6],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G997',
+ 7,
+ [[4, 1],
+ [3, 4],
+ [5, 3],
+ [1, 5],
+ [6, 2],
+ [6, 3],
+ [7, 2],
+ [7, 1],
+ [4, 7],
+ [2, 4],
+ [7, 5],
+ [6, 5]]],
+ ['edgelist',
+ 'G998',
+ 7,
+ [[7, 4],
+ [2, 3],
+ [3, 4],
+ [1, 4],
+ [5, 3],
+ [6, 1],
+ [1, 7],
+ [5, 2],
+ [4, 5],
+ [7, 6],
+ [6, 2],
+ [1, 5]]],
+ ['edgelist',
+ 'G999',
+ 7,
+ [[1, 4],
+ [1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 6],
+ [2, 7],
+ [3, 5],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [5, 6],
+ [5, 7]]],
+ ['edgelist',
+ 'G1000',
+ 7,
+ [[1, 4],
+ [1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 5],
+ [2, 7],
+ [3, 4],
+ [3, 6],
+ [4, 5],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1001',
+ 7,
+ [[1, 4],
+ [1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [4, 5],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1002',
+ 7,
+ [[1, 5],
+ [4, 1],
+ [2, 4],
+ [5, 2],
+ [2, 1],
+ [5, 6],
+ [3, 5],
+ [7, 3],
+ [6, 7],
+ [3, 6],
+ [4, 3],
+ [7, 4]]],
+ ['edgelist',
+ 'G1003',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [1, 4],
+ [5, 1],
+ [5, 2],
+ [6, 3],
+ [6, 5],
+ [7, 5],
+ [7, 4],
+ [7, 3],
+ [6, 4]]],
+ ['edgelist',
+ 'G1004',
+ 7,
+ [[1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 4],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [5, 6],
+ [5, 7]]],
+ ['edgelist',
+ 'G1005',
+ 7,
+ [[4, 1],
+ [5, 3],
+ [4, 2],
+ [5, 1],
+ [6, 3],
+ [6, 2],
+ [5, 4],
+ [6, 5],
+ [4, 6],
+ [7, 2],
+ [1, 7],
+ [3, 7]]],
+ ['edgelist',
+ 'G1006',
+ 7,
+ [[2, 1],
+ [5, 2],
+ [1, 5],
+ [6, 1],
+ [7, 6],
+ [2, 7],
+ [4, 5],
+ [6, 4],
+ [3, 4],
+ [6, 3],
+ [7, 4],
+ [3, 7]]],
+ ['edgelist',
+ 'G1007',
+ 7,
+ [[1, 2],
+ [3, 1],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [1, 6],
+ [7, 2],
+ [5, 7],
+ [7, 6],
+ [3, 7],
+ [4, 2],
+ [6, 4]]],
+ ['edgelist',
+ 'G1008',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [7, 1],
+ [7, 2],
+ [7, 3],
+ [7, 4],
+ [7, 5],
+ [7, 6]]],
+ ['edgelist',
+ 'G1009',
+ 7,
+ [[4, 7],
+ [2, 3],
+ [1, 7],
+ [1, 4],
+ [5, 1],
+ [5, 2],
+ [5, 3],
+ [5, 4],
+ [6, 2],
+ [3, 6],
+ [5, 6],
+ [7, 5]]],
+ ['edgelist',
+ 'G1010',
+ 7,
+ [[2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1011',
+ 7,
+ [[2, 4],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1012',
+ 7,
+ [[1, 7],
+ [2, 5],
+ [2, 6],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1013',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [5, 6],
+ [1, 5],
+ [2, 4],
+ [5, 2],
+ [3, 5],
+ [1, 4],
+ [6, 4],
+ [5, 4],
+ [1, 3],
+ [7, 5]]],
+ ['edgelist',
+ 'G1014',
+ 7,
+ [[4, 5],
+ [2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [3, 5],
+ [6, 2],
+ [1, 4],
+ [2, 5],
+ [1, 2],
+ [1, 5],
+ [2, 7]]],
+ ['edgelist',
+ 'G1015',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [1, 7],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 7],
+ [5, 6],
+ [6, 7]]],
+ ['edgelist',
+ 'G1016',
+ 7,
+ [[1, 7],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1017',
+ 7,
+ [[1, 4],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1018',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [1, 7],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7]]],
+ ['edgelist',
+ 'G1019',
+ 7,
+ [[1, 7],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [6, 7]]],
+ ['edgelist',
+ 'G1020',
+ 7,
+ [[1, 7],
+ [2, 4],
+ [2, 5],
+ [2, 6],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1021',
+ 7,
+ [[2, 6],
+ [5, 2],
+ [1, 5],
+ [6, 1],
+ [3, 6],
+ [5, 3],
+ [4, 5],
+ [6, 4],
+ [1, 2],
+ [3, 1],
+ [4, 3],
+ [5, 6],
+ [2, 7]]],
+ ['edgelist',
+ 'G1022',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [5, 6],
+ [1, 5],
+ [2, 4],
+ [5, 2],
+ [3, 5],
+ [1, 4],
+ [6, 4],
+ [5, 4],
+ [1, 3],
+ [6, 7]]],
+ ['edgelist',
+ 'G1023',
+ 7,
+ [[1, 6],
+ [2, 4],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 7],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1024',
+ 7,
+ [[1, 7],
+ [2, 4],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7]]],
+ ['edgelist',
+ 'G1025',
+ 7,
+ [[6, 7],
+ [1, 6],
+ [7, 1],
+ [5, 7],
+ [6, 5],
+ [2, 6],
+ [7, 2],
+ [4, 7],
+ [6, 4],
+ [3, 6],
+ [7, 3],
+ [2, 1],
+ [3, 2]]],
+ ['edgelist',
+ 'G1026',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 4],
+ [2, 5],
+ [2, 6],
+ [3, 4],
+ [3, 5],
+ [3, 7]]],
+ ['edgelist',
+ 'G1027',
+ 7,
+ [[4, 5],
+ [1, 4],
+ [5, 1],
+ [2, 5],
+ [4, 2],
+ [3, 4],
+ [5, 3],
+ [2, 1],
+ [3, 2],
+ [7, 1],
+ [4, 7],
+ [6, 4],
+ [5, 6]]],
+ ['edgelist',
+ 'G1028',
+ 7,
+ [[4, 5],
+ [1, 4],
+ [5, 1],
+ [2, 5],
+ [4, 2],
+ [3, 4],
+ [5, 3],
+ [2, 1],
+ [3, 2],
+ [7, 1],
+ [4, 7],
+ [6, 4],
+ [1, 6]]],
+ ['edgelist',
+ 'G1029',
+ 7,
+ [[4, 5],
+ [1, 4],
+ [5, 1],
+ [2, 5],
+ [4, 2],
+ [3, 4],
+ [5, 3],
+ [2, 1],
+ [3, 2],
+ [7, 5],
+ [1, 7],
+ [6, 1],
+ [4, 6]]],
+ ['edgelist',
+ 'G1030',
+ 7,
+ [[1, 6],
+ [1, 7],
+ [2, 4],
+ [2, 5],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1031',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 4],
+ [2, 5],
+ [3, 4],
+ [3, 5],
+ [4, 6],
+ [5, 7]]],
+ ['edgelist',
+ 'G1032',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [2, 3],
+ [2, 4],
+ [2, 5],
+ [3, 4],
+ [3, 5],
+ [4, 5],
+ [6, 2],
+ [7, 6],
+ [2, 7]]],
+ ['edgelist',
+ 'G1033',
+ 7,
+ [[1, 5],
+ [1, 7],
+ [2, 4],
+ [2, 6],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1034',
+ 7,
+ [[1, 6],
+ [1, 7],
+ [2, 5],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [6, 7]]],
+ ['edgelist',
+ 'G1035',
+ 7,
+ [[1, 6],
+ [1, 7],
+ [2, 4],
+ [2, 5],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1036',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [2, 3],
+ [2, 4],
+ [2, 5],
+ [3, 4],
+ [3, 5],
+ [4, 5],
+ [6, 4],
+ [7, 6],
+ [5, 7]]],
+ ['edgelist',
+ 'G1037',
+ 7,
+ [[1, 6],
+ [1, 7],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7]]],
+ ['edgelist',
+ 'G1038',
+ 7,
+ [[2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [6, 4],
+ [3, 6],
+ [2, 1],
+ [5, 2],
+ [7, 2],
+ [7, 6],
+ [6, 2]]],
+ ['edgelist',
+ 'G1039',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [1, 4],
+ [5, 1],
+ [5, 2],
+ [5, 3],
+ [5, 4],
+ [6, 5],
+ [1, 6],
+ [7, 1],
+ [4, 7],
+ [7, 5]]],
+ ['edgelist',
+ 'G1040',
+ 7,
+ [[2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [6, 4],
+ [3, 6],
+ [2, 1],
+ [5, 2],
+ [7, 2],
+ [6, 2],
+ [3, 7]]],
+ ['edgelist',
+ 'G1041',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 4],
+ [2, 5],
+ [2, 6],
+ [3, 4],
+ [3, 5],
+ [6, 7]]],
+ ['edgelist',
+ 'G1042',
+ 7,
+ [[2, 1],
+ [3, 2],
+ [5, 3],
+ [2, 5],
+ [4, 2],
+ [1, 4],
+ [3, 4],
+ [6, 3],
+ [2, 6],
+ [1, 6],
+ [7, 1],
+ [2, 7],
+ [3, 7]]],
+ ['edgelist',
+ 'G1043',
+ 7,
+ [[3, 6],
+ [7, 3],
+ [6, 7],
+ [5, 6],
+ [4, 5],
+ [1, 4],
+ [5, 1],
+ [2, 5],
+ [4, 2],
+ [7, 4],
+ [3, 2],
+ [5, 3],
+ [4, 3]]],
+ ['edgelist',
+ 'G1044',
+ 7,
+ [[1, 4],
+ [1, 7],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1045',
+ 7,
+ [[3, 5],
+ [4, 3],
+ [2, 4],
+ [5, 2],
+ [1, 5],
+ [4, 1],
+ [7, 4],
+ [2, 7],
+ [6, 2],
+ [5, 6],
+ [7, 5],
+ [4, 6],
+ [2, 3]]],
+ ['edgelist',
+ 'G1046',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 4],
+ [2, 5],
+ [3, 5],
+ [3, 6],
+ [4, 6],
+ [4, 7]]],
+ ['edgelist',
+ 'G1047',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 4],
+ [2, 5],
+ [3, 4],
+ [3, 5],
+ [4, 6],
+ [6, 7]]],
+ ['edgelist',
+ 'G1048',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 4],
+ [2, 5],
+ [3, 4],
+ [3, 6],
+ [4, 7],
+ [5, 6]]],
+ ['edgelist',
+ 'G1049',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 4],
+ [2, 7],
+ [3, 5],
+ [3, 6],
+ [4, 5],
+ [4, 6]]],
+ ['edgelist',
+ 'G1050',
+ 7,
+ [[1, 3],
+ [1, 7],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 6],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1051',
+ 7,
+ [[3, 6],
+ [2, 3],
+ [6, 2],
+ [5, 6],
+ [4, 5],
+ [1, 4],
+ [5, 1],
+ [4, 3],
+ [5, 3],
+ [2, 4],
+ [7, 4],
+ [3, 7],
+ [2, 7]]],
+ ['edgelist',
+ 'G1052',
+ 7,
+ [[1, 5],
+ [1, 7],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [6, 7]]],
+ ['edgelist',
+ 'G1053',
+ 7,
+ [[2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [6, 4],
+ [3, 6],
+ [2, 1],
+ [5, 2],
+ [1, 5],
+ [7, 2],
+ [5, 7]]],
+ ['edgelist',
+ 'G1054',
+ 7,
+ [[3, 4],
+ [1, 3],
+ [4, 1],
+ [5, 4],
+ [2, 5],
+ [6, 2],
+ [5, 6],
+ [2, 1],
+ [6, 3],
+ [6, 1],
+ [7, 6],
+ [2, 7],
+ [5, 1]]],
+ ['edgelist',
+ 'G1055',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [1, 3],
+ [4, 1],
+ [2, 4],
+ [4, 3],
+ [5, 1],
+ [3, 6],
+ [4, 5],
+ [6, 4],
+ [1, 6],
+ [7, 5],
+ [7, 2]]],
+ ['edgelist',
+ 'G1056',
+ 7,
+ [[3, 4],
+ [1, 3],
+ [4, 1],
+ [5, 4],
+ [2, 5],
+ [6, 2],
+ [5, 6],
+ [2, 1],
+ [6, 3],
+ [7, 3],
+ [6, 7],
+ [1, 6],
+ [2, 3]]],
+ ['edgelist',
+ 'G1057',
+ 7,
+ [[6, 5],
+ [7, 3],
+ [7, 5],
+ [5, 4],
+ [6, 1],
+ [4, 2],
+ [4, 3],
+ [7, 4],
+ [6, 7],
+ [5, 1],
+ [2, 5],
+ [6, 2],
+ [1, 4]]],
+ ['edgelist',
+ 'G1058',
+ 7,
+ [[1, 3],
+ [1, 7],
+ [2, 4],
+ [2, 5],
+ [2, 6],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1059',
+ 7,
+ [[2, 6],
+ [5, 2],
+ [1, 5],
+ [6, 1],
+ [3, 6],
+ [5, 3],
+ [4, 5],
+ [6, 4],
+ [1, 2],
+ [3, 1],
+ [4, 3],
+ [7, 6],
+ [7, 5]]],
+ ['edgelist',
+ 'G1060',
+ 7,
+ [[2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [6, 4],
+ [3, 6],
+ [2, 1],
+ [5, 2],
+ [1, 5],
+ [1, 7],
+ [5, 7]]],
+ ['edgelist',
+ 'G1061',
+ 7,
+ [[1, 5],
+ [4, 1],
+ [3, 4],
+ [5, 3],
+ [2, 5],
+ [4, 2],
+ [2, 1],
+ [6, 1],
+ [5, 6],
+ [2, 6],
+ [7, 2],
+ [1, 7],
+ [4, 7]]],
+ ['edgelist',
+ 'G1062',
+ 7,
+ [[1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1063',
+ 7,
+ [[2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [5, 2],
+ [6, 4],
+ [3, 6],
+ [2, 1],
+ [7, 4],
+ [5, 7],
+ [6, 2]]],
+ ['edgelist',
+ 'G1064',
+ 7,
+ [[6, 3],
+ [1, 3],
+ [4, 1],
+ [5, 4],
+ [2, 5],
+ [6, 2],
+ [5, 6],
+ [2, 1],
+ [7, 3],
+ [7, 4],
+ [2, 3],
+ [4, 2],
+ [5, 1]]],
+ ['edgelist',
+ 'G1065',
+ 7,
+ [[2, 1],
+ [3, 2],
+ [1, 3],
+ [1, 4],
+ [4, 3],
+ [7, 3],
+ [2, 7],
+ [6, 2],
+ [7, 6],
+ [5, 7],
+ [6, 5],
+ [1, 6],
+ [5, 1]]],
+ ['edgelist',
+ 'G1066',
+ 7,
+ [[1, 6],
+ [1, 7],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 7]]],
+ ['edgelist',
+ 'G1067',
+ 7,
+ [[1, 6],
+ [1, 7],
+ [2, 4],
+ [2, 5],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7]]],
+ ['edgelist',
+ 'G1068',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [5, 2],
+ [4, 2],
+ [1, 5],
+ [3, 4],
+ [1, 4],
+ [3, 1],
+ [6, 1],
+ [7, 6],
+ [5, 7],
+ [4, 6],
+ [5, 3]]],
+ ['edgelist',
+ 'G1069',
+ 7,
+ [[1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 4],
+ [2, 5],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1070',
+ 7,
+ [[4, 5],
+ [2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [3, 5],
+ [6, 2],
+ [1, 4],
+ [1, 5],
+ [3, 4],
+ [7, 6],
+ [1, 7]]],
+ ['edgelist',
+ 'G1071',
+ 7,
+ [[6, 3],
+ [1, 3],
+ [4, 1],
+ [5, 4],
+ [2, 5],
+ [6, 2],
+ [5, 6],
+ [2, 1],
+ [7, 3],
+ [7, 4],
+ [3, 4],
+ [6, 1],
+ [5, 1]]],
+ ['edgelist',
+ 'G1072',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [6, 5],
+ [1, 5],
+ [6, 1],
+ [6, 2],
+ [6, 3],
+ [6, 4],
+ [7, 4],
+ [7, 5],
+ [5, 3],
+ [1, 4]]],
+ ['edgelist',
+ 'G1073',
+ 7,
+ [[1, 2],
+ [1, 7],
+ [2, 5],
+ [2, 6],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1074',
+ 7,
+ [[1, 2],
+ [1, 7],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7]]],
+ ['edgelist',
+ 'G1075',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [7, 3],
+ [1, 7],
+ [6, 1],
+ [3, 6],
+ [6, 4],
+ [5, 6],
+ [7, 5],
+ [4, 7]]],
+ ['edgelist',
+ 'G1076',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [7, 6],
+ [1, 7],
+ [1, 3],
+ [3, 6],
+ [6, 4],
+ [5, 6],
+ [7, 5],
+ [4, 7]]],
+ ['edgelist',
+ 'G1077',
+ 7,
+ [[4, 5],
+ [1, 4],
+ [5, 1],
+ [4, 7],
+ [4, 2],
+ [3, 4],
+ [5, 3],
+ [2, 1],
+ [3, 2],
+ [6, 3],
+ [4, 6],
+ [7, 3],
+ [6, 7]]],
+ ['edgelist',
+ 'G1078',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [6, 1],
+ [6, 2],
+ [6, 3],
+ [6, 4],
+ [6, 5],
+ [7, 6],
+ [4, 7],
+ [5, 7]]],
+ ['edgelist',
+ 'G1079',
+ 7,
+ [[2, 1],
+ [3, 2],
+ [7, 1],
+ [2, 5],
+ [4, 2],
+ [1, 4],
+ [3, 4],
+ [2, 7],
+ [2, 6],
+ [3, 7],
+ [5, 4],
+ [6, 5],
+ [7, 6]]],
+ ['edgelist',
+ 'G1080',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 4],
+ [2, 5],
+ [3, 6],
+ [3, 7],
+ [4, 6],
+ [5, 7]]],
+ ['edgelist',
+ 'G1081',
+ 7,
+ [[1, 7],
+ [2, 3],
+ [3, 4],
+ [1, 4],
+ [5, 3],
+ [6, 1],
+ [7, 4],
+ [5, 2],
+ [4, 5],
+ [7, 6],
+ [2, 6],
+ [4, 6],
+ [2, 4]]],
+ ['edgelist',
+ 'G1082',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [1, 4],
+ [5, 1],
+ [5, 2],
+ [7, 5],
+ [5, 4],
+ [6, 5],
+ [6, 3],
+ [7, 1],
+ [4, 7],
+ [4, 6]]],
+ ['edgelist',
+ 'G1083',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 7],
+ [1, 5],
+ [7, 6],
+ [1, 7],
+ [7, 5],
+ [3, 6],
+ [6, 4],
+ [5, 6],
+ [2, 6],
+ [7, 2]]],
+ ['edgelist',
+ 'G1084',
+ 7,
+ [[1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 7],
+ [5, 6],
+ [6, 7]]],
+ ['edgelist',
+ 'G1085',
+ 7,
+ [[1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 4],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1086',
+ 7,
+ [[3, 4],
+ [6, 3],
+ [7, 6],
+ [4, 7],
+ [5, 4],
+ [6, 5],
+ [1, 6],
+ [3, 1],
+ [2, 3],
+ [1, 2],
+ [6, 2],
+ [5, 3],
+ [7, 5]]],
+ ['edgelist',
+ 'G1087',
+ 7,
+ [[3, 2],
+ [1, 6],
+ [7, 1],
+ [5, 7],
+ [6, 5],
+ [2, 6],
+ [7, 2],
+ [4, 7],
+ [6, 4],
+ [3, 6],
+ [7, 3],
+ [2, 1],
+ [4, 5]]],
+ ['edgelist',
+ 'G1088',
+ 7,
+ [[1, 2],
+ [3, 1],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [1, 6],
+ [7, 2],
+ [5, 7],
+ [7, 6],
+ [3, 7],
+ [4, 2],
+ [6, 4],
+ [7, 1]]],
+ ['edgelist',
+ 'G1089',
+ 7,
+ [[1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7]]],
+ ['edgelist',
+ 'G1090',
+ 7,
+ [[3, 4],
+ [1, 3],
+ [4, 1],
+ [5, 4],
+ [5, 7],
+ [6, 2],
+ [5, 6],
+ [4, 2],
+ [6, 3],
+ [7, 1],
+ [7, 2],
+ [3, 2],
+ [5, 2]]],
+ ['edgelist',
+ 'G1091',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [1, 4],
+ [5, 1],
+ [5, 2],
+ [5, 4],
+ [6, 5],
+ [6, 3],
+ [6, 2],
+ [7, 6],
+ [2, 7],
+ [3, 7]]],
+ ['edgelist',
+ 'G1092',
+ 7,
+ [[1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 7]]],
+ ['edgelist',
+ 'G1093',
+ 7,
+ [[4, 1],
+ [3, 4],
+ [5, 3],
+ [1, 5],
+ [6, 2],
+ [6, 3],
+ [7, 2],
+ [7, 1],
+ [4, 7],
+ [2, 4],
+ [7, 5],
+ [6, 5],
+ [6, 4]]],
+ ['edgelist',
+ 'G1094',
+ 7,
+ [[1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 5],
+ [2, 6],
+ [3, 4],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1095',
+ 7,
+ [[1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 4],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [4, 5],
+ [4, 7],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1096',
+ 7,
+ [[1, 3],
+ [1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 5],
+ [2, 7],
+ [3, 4],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1097',
+ 7,
+ [[4, 5],
+ [6, 1],
+ [4, 6],
+ [1, 7],
+ [7, 5],
+ [3, 4],
+ [5, 3],
+ [2, 1],
+ [3, 2],
+ [2, 7],
+ [6, 2],
+ [3, 6],
+ [7, 3]]],
+ ['edgelist',
+ 'G1098',
+ 7,
+ [[1, 3],
+ [1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 4],
+ [2, 5],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1099',
+ 7,
+ [[4, 1],
+ [3, 4],
+ [5, 3],
+ [1, 5],
+ [6, 4],
+ [6, 3],
+ [6, 5],
+ [2, 4],
+ [2, 1],
+ [5, 2],
+ [7, 1],
+ [4, 7],
+ [2, 7]]],
+ ['edgelist',
+ 'G1100',
+ 7,
+ [[3, 4],
+ [1, 3],
+ [4, 1],
+ [5, 4],
+ [2, 5],
+ [6, 2],
+ [5, 6],
+ [7, 1],
+ [2, 7],
+ [7, 4],
+ [5, 7],
+ [2, 3],
+ [6, 1]]],
+ ['edgelist',
+ 'G1101',
+ 7,
+ [[1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 4],
+ [2, 7],
+ [3, 4],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [5, 6],
+ [5, 7]]],
+ ['edgelist',
+ 'G1102',
+ 7,
+ [[1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 4],
+ [2, 7],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6]]],
+ ['edgelist',
+ 'G1103',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [1, 4],
+ [5, 1],
+ [5, 2],
+ [6, 4],
+ [6, 5],
+ [7, 5],
+ [7, 3],
+ [7, 6],
+ [6, 3],
+ [4, 7]]],
+ ['edgelist',
+ 'G1104',
+ 7,
+ [[1, 2],
+ [1, 6],
+ [1, 7],
+ [2, 4],
+ [2, 5],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 7],
+ [5, 6],
+ [6, 7]]],
+ ['edgelist',
+ 'G1105',
+ 7,
+ [[1, 2],
+ [1, 6],
+ [1, 7],
+ [2, 4],
+ [2, 5],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7]]],
+ ['edgelist',
+ 'G1106',
+ 7,
+ [[1, 2],
+ [3, 1],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [1, 6],
+ [7, 2],
+ [5, 7],
+ [7, 6],
+ [3, 7],
+ [4, 2],
+ [6, 4],
+ [3, 2]]],
+ ['edgelist',
+ 'G1107',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [3, 5],
+ [6, 3],
+ [2, 6],
+ [2, 5],
+ [2, 4],
+ [3, 1],
+ [5, 1],
+ [6, 4]]],
+ ['edgelist',
+ 'G1108',
+ 7,
+ [[4, 5],
+ [2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [3, 5],
+ [6, 2],
+ [1, 4],
+ [2, 5],
+ [1, 2],
+ [3, 4],
+ [1, 5],
+ [2, 7]]],
+ ['edgelist',
+ 'G1109',
+ 7,
+ [[1, 7],
+ [2, 4],
+ [2, 5],
+ [2, 6],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1110',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [1, 7],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1111',
+ 7,
+ [[1, 7],
+ [2, 4],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1112',
+ 7,
+ [[1, 4],
+ [2, 3],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1113',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [2, 3],
+ [2, 4],
+ [2, 5],
+ [3, 4],
+ [3, 5],
+ [4, 5],
+ [4, 7],
+ [6, 4],
+ [5, 6],
+ [7, 5]]],
+ ['edgelist',
+ 'G1114',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [2, 3],
+ [2, 4],
+ [2, 5],
+ [3, 4],
+ [3, 5],
+ [4, 5],
+ [7, 3],
+ [2, 7],
+ [6, 2],
+ [1, 6]]],
+ ['edgelist',
+ 'G1115',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [2, 3],
+ [2, 4],
+ [2, 5],
+ [3, 4],
+ [3, 5],
+ [4, 5],
+ [6, 3],
+ [4, 6],
+ [7, 5],
+ [1, 7]]],
+ ['edgelist',
+ 'G1116',
+ 7,
+ [[4, 5],
+ [2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [3, 5],
+ [6, 2],
+ [1, 4],
+ [2, 5],
+ [1, 2],
+ [1, 5],
+ [7, 5],
+ [1, 7]]],
+ ['edgelist',
+ 'G1117',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 4],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [4, 5],
+ [4, 6],
+ [5, 7]]],
+ ['edgelist',
+ 'G1118',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 7],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1119',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 5],
+ [3, 6],
+ [3, 7],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1120',
+ 7,
+ [[4, 5],
+ [2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [1, 2],
+ [6, 2],
+ [1, 5],
+ [2, 5],
+ [6, 4],
+ [3, 6],
+ [7, 5],
+ [1, 7]]],
+ ['edgelist',
+ 'G1121',
+ 7,
+ [[2, 4],
+ [3, 2],
+ [1, 3],
+ [6, 1],
+ [5, 6],
+ [4, 5],
+ [6, 4],
+ [3, 6],
+ [2, 1],
+ [5, 2],
+ [7, 2],
+ [6, 2],
+ [3, 7],
+ [1, 5]]],
+ ['edgelist',
+ 'G1122',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 7],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7]]],
+ ['edgelist',
+ 'G1123',
+ 7,
+ [[3, 4],
+ [5, 3],
+ [7, 4],
+ [5, 1],
+ [7, 1],
+ [4, 5],
+ [4, 2],
+ [6, 5],
+ [6, 1],
+ [1, 4],
+ [2, 6],
+ [6, 4],
+ [7, 5],
+ [7, 2]]],
+ ['edgelist',
+ 'G1124',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [2, 3],
+ [2, 4],
+ [2, 5],
+ [3, 4],
+ [3, 5],
+ [4, 5],
+ [6, 4],
+ [7, 6],
+ [5, 7],
+ [6, 5]]],
+ ['edgelist',
+ 'G1125',
+ 7,
+ [[4, 2],
+ [2, 5],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [2, 6],
+ [1, 2],
+ [1, 3],
+ [3, 6],
+ [6, 4],
+ [5, 6],
+ [2, 3],
+ [7, 3],
+ [6, 7]]],
+ ['edgelist',
+ 'G1126',
+ 7,
+ [[1, 4],
+ [1, 7],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1127',
+ 7,
+ [[1, 4],
+ [1, 7],
+ [2, 3],
+ [2, 5],
+ [2, 6],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1128',
+ 7,
+ [[1, 6],
+ [1, 7],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7]]],
+ ['edgelist',
+ 'G1129',
+ 7,
+ [[1, 2],
+ [1, 7],
+ [2, 5],
+ [2, 6],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1130',
+ 7,
+ [[1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 4],
+ [2, 5],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1131',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [2, 3],
+ [2, 4],
+ [2, 5],
+ [3, 4],
+ [3, 5],
+ [5, 7],
+ [6, 3],
+ [2, 6],
+ [1, 6],
+ [7, 4]]],
+ ['edgelist',
+ 'G1132',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [6, 1],
+ [6, 2],
+ [6, 3],
+ [6, 4],
+ [6, 5],
+ [5, 3],
+ [4, 1],
+ [7, 2],
+ [6, 7]]],
+ ['edgelist',
+ 'G1133',
+ 7,
+ [[1, 5],
+ [1, 7],
+ [2, 3],
+ [2, 4],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1134',
+ 7,
+ [[1, 5],
+ [1, 7],
+ [2, 4],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [6, 7]]],
+ ['edgelist',
+ 'G1135',
+ 7,
+ [[1, 6],
+ [1, 7],
+ [2, 4],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 7],
+ [5, 6],
+ [6, 7]]],
+ ['edgelist',
+ 'G1136',
+ 7,
+ [[3, 4],
+ [1, 3],
+ [4, 1],
+ [5, 4],
+ [2, 5],
+ [6, 2],
+ [2, 3],
+ [6, 3],
+ [5, 1],
+ [4, 2],
+ [6, 1],
+ [7, 6],
+ [7, 5],
+ [1, 2]]],
+ ['edgelist',
+ 'G1137',
+ 7,
+ [[3, 4],
+ [1, 3],
+ [4, 1],
+ [5, 4],
+ [2, 5],
+ [6, 2],
+ [5, 6],
+ [6, 3],
+ [7, 1],
+ [7, 2],
+ [6, 1],
+ [2, 3],
+ [4, 2],
+ [5, 1]]],
+ ['edgelist',
+ 'G1138',
+ 7,
+ [[6, 7],
+ [1, 6],
+ [7, 1],
+ [5, 7],
+ [6, 5],
+ [2, 6],
+ [7, 2],
+ [4, 7],
+ [6, 4],
+ [3, 6],
+ [7, 3],
+ [2, 1],
+ [3, 2],
+ [4, 5]]],
+ ['edgelist',
+ 'G1139',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 6],
+ [2, 7],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 7],
+ [5, 6],
+ [6, 7]]],
+ ['edgelist',
+ 'G1140',
+ 7,
+ [[1, 2],
+ [3, 1],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [1, 6],
+ [7, 2],
+ [5, 7],
+ [7, 6],
+ [3, 7],
+ [4, 2],
+ [6, 4],
+ [7, 1],
+ [4, 7]]],
+ ['edgelist',
+ 'G1141',
+ 7,
+ [[4, 2],
+ [5, 3],
+ [5, 6],
+ [5, 1],
+ [2, 5],
+ [1, 4],
+ [6, 1],
+ [6, 3],
+ [7, 2],
+ [4, 7],
+ [7, 1],
+ [6, 7],
+ [7, 3],
+ [5, 7]]],
+ ['edgelist',
+ 'G1142',
+ 7,
+ [[1, 5],
+ [4, 1],
+ [3, 4],
+ [5, 3],
+ [2, 5],
+ [4, 2],
+ [2, 1],
+ [3, 2],
+ [6, 5],
+ [2, 6],
+ [7, 2],
+ [4, 7],
+ [1, 6],
+ [7, 1]]],
+ ['edgelist',
+ 'G1143',
+ 7,
+ [[4, 5],
+ [5, 3],
+ [2, 6],
+ [5, 1],
+ [2, 5],
+ [6, 4],
+ [4, 1],
+ [6, 3],
+ [7, 5],
+ [1, 7],
+ [4, 7],
+ [3, 7],
+ [6, 7],
+ [2, 7]]],
+ ['edgelist',
+ 'G1144',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 5],
+ [2, 6],
+ [3, 4],
+ [3, 7],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1145',
+ 7,
+ [[3, 4],
+ [5, 3],
+ [7, 4],
+ [5, 1],
+ [5, 6],
+ [4, 5],
+ [4, 2],
+ [6, 3],
+ [2, 7],
+ [6, 7],
+ [7, 1],
+ [6, 4],
+ [7, 5],
+ [1, 2]]],
+ ['edgelist',
+ 'G1146',
+ 7,
+ [[1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 4],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1147',
+ 7,
+ [[1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1148',
+ 7,
+ [[3, 4],
+ [5, 3],
+ [7, 4],
+ [5, 1],
+ [2, 5],
+ [7, 1],
+ [4, 2],
+ [6, 3],
+ [5, 6],
+ [6, 7],
+ [2, 6],
+ [6, 4],
+ [7, 5],
+ [4, 1]]],
+ ['edgelist',
+ 'G1149',
+ 7,
+ [[4, 2],
+ [5, 3],
+ [1, 4],
+ [5, 1],
+ [2, 5],
+ [6, 4],
+ [6, 1],
+ [6, 3],
+ [7, 5],
+ [2, 7],
+ [7, 4],
+ [1, 7],
+ [7, 6],
+ [3, 7]]],
+ ['edgelist',
+ 'G1150',
+ 7,
+ [[1, 2],
+ [5, 3],
+ [4, 1],
+ [5, 1],
+ [5, 6],
+ [6, 4],
+ [2, 4],
+ [6, 3],
+ [7, 5],
+ [3, 7],
+ [7, 6],
+ [4, 7],
+ [7, 2],
+ [1, 7]]],
+ ['edgelist',
+ 'G1151',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 7],
+ [3, 6],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7]]],
+ ['edgelist',
+ 'G1152',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7]]],
+ ['edgelist',
+ 'G1153',
+ 7,
+ [[3, 4],
+ [5, 3],
+ [7, 4],
+ [5, 1],
+ [2, 5],
+ [7, 2],
+ [4, 2],
+ [6, 3],
+ [6, 1],
+ [6, 7],
+ [5, 6],
+ [6, 4],
+ [7, 1],
+ [4, 1]]],
+ ['edgelist',
+ 'G1154',
+ 7,
+ [[3, 4],
+ [5, 3],
+ [4, 1],
+ [5, 1],
+ [5, 6],
+ [4, 5],
+ [4, 2],
+ [6, 3],
+ [1, 2],
+ [6, 7],
+ [7, 1],
+ [6, 4],
+ [7, 5],
+ [2, 7]]],
+ ['edgelist',
+ 'G1155',
+ 7,
+ [[1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 4],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 7],
+ [5, 6],
+ [6, 7]]],
+ ['edgelist',
+ 'G1156',
+ 7,
+ [[1, 4],
+ [1, 5],
+ [1, 7],
+ [2, 3],
+ [2, 5],
+ [2, 6],
+ [3, 4],
+ [3, 6],
+ [3, 7],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1157',
+ 7,
+ [[1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 4],
+ [2, 7],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [6, 7]]],
+ ['edgelist',
+ 'G1158',
+ 7,
+ [[1, 2],
+ [1, 6],
+ [1, 7],
+ [2, 5],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [6, 7]]],
+ ['edgelist',
+ 'G1159',
+ 7,
+ [[1, 2],
+ [1, 5],
+ [1, 7],
+ [2, 4],
+ [2, 6],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1160',
+ 7,
+ [[3, 4],
+ [5, 3],
+ [7, 4],
+ [5, 1],
+ [2, 5],
+ [5, 6],
+ [4, 2],
+ [6, 3],
+ [6, 1],
+ [7, 2],
+ [1, 7],
+ [6, 4],
+ [7, 5],
+ [4, 1]]],
+ ['edgelist',
+ 'G1161',
+ 7,
+ [[1, 2],
+ [1, 6],
+ [1, 7],
+ [2, 4],
+ [2, 5],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1162',
+ 7,
+ [[1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 4],
+ [2, 5],
+ [3, 4],
+ [3, 6],
+ [3, 7],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1163',
+ 7,
+ [[1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1164',
+ 7,
+ [[3, 4],
+ [5, 3],
+ [7, 4],
+ [5, 1],
+ [5, 6],
+ [4, 6],
+ [4, 2],
+ [6, 3],
+ [4, 1],
+ [2, 5],
+ [7, 1],
+ [2, 7],
+ [7, 5],
+ [1, 2]]],
+ ['edgelist',
+ 'G1165',
+ 7,
+ [[1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 4],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 6],
+ [4, 7],
+ [5, 7]]],
+ ['edgelist',
+ 'G1166',
+ 7,
+ [[1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 4],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 7],
+ [5, 6],
+ [5, 7]]],
+ ['edgelist',
+ 'G1167',
+ 7,
+ [[1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 4],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1168',
+ 7,
+ [[1, 4],
+ [1, 5],
+ [1, 6],
+ [2, 3],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 7],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1169',
+ 7,
+ [[1, 4],
+ [1, 5],
+ [1, 6],
+ [2, 4],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 7],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1170',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [6, 7],
+ [1, 7],
+ [1, 3],
+ [6, 1],
+ [4, 6],
+ [2, 4],
+ [7, 2],
+ [5, 7],
+ [3, 5]]],
+ ['edgelist',
+ 'G1171',
+ 7,
+ [[1, 4],
+ [1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 4],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 7],
+ [5, 6]]],
+ ['edgelist',
+ 'G1172',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [2, 3],
+ [2, 4],
+ [2, 5],
+ [2, 6],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [4, 5],
+ [4, 6],
+ [5, 6]]],
+ ['edgelist',
+ 'G1173',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [3, 5],
+ [6, 3],
+ [2, 6],
+ [2, 5],
+ [2, 4],
+ [3, 1],
+ [5, 1],
+ [6, 4],
+ [2, 7]]],
+ ['edgelist',
+ 'G1174',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [3, 5],
+ [6, 3],
+ [2, 6],
+ [2, 5],
+ [2, 4],
+ [3, 1],
+ [5, 1],
+ [6, 4],
+ [1, 7]]],
+ ['edgelist',
+ 'G1175',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 4],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1176',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 6],
+ [2, 7],
+ [3, 5],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1177',
+ 7,
+ [[4, 5],
+ [5, 6],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [1, 7],
+ [4, 7],
+ [5, 7],
+ [6, 7],
+ [2, 6],
+ [4, 6],
+ [3, 4],
+ [3, 5],
+ [2, 7],
+ [1, 2]]],
+ ['edgelist',
+ 'G1178',
+ 7,
+ [[4, 5],
+ [5, 6],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [1, 7],
+ [4, 7],
+ [2, 4],
+ [2, 5],
+ [2, 6],
+ [4, 6],
+ [3, 4],
+ [3, 5],
+ [7, 2],
+ [5, 7]]],
+ ['edgelist',
+ 'G1179',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [1, 4],
+ [5, 1],
+ [5, 2],
+ [5, 3],
+ [5, 4],
+ [6, 2],
+ [1, 6],
+ [6, 3],
+ [4, 6],
+ [5, 6],
+ [7, 2],
+ [6, 7]]],
+ ['edgelist',
+ 'G1180',
+ 7,
+ [[5, 4],
+ [5, 6],
+ [6, 4],
+ [1, 2],
+ [1, 6],
+ [1, 4],
+ [3, 5],
+ [2, 6],
+ [2, 4],
+ [7, 6],
+ [4, 7],
+ [7, 1],
+ [2, 7],
+ [7, 3],
+ [5, 7]]],
+ ['edgelist',
+ 'G1181',
+ 7,
+ [[4, 5],
+ [5, 6],
+ [6, 7],
+ [1, 5],
+ [1, 6],
+ [1, 7],
+ [4, 7],
+ [2, 4],
+ [5, 7],
+ [2, 6],
+ [4, 6],
+ [3, 4],
+ [3, 5],
+ [2, 7],
+ [1, 2]]],
+ ['edgelist',
+ 'G1182',
+ 7,
+ [[1, 3],
+ [1, 7],
+ [2, 4],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1183',
+ 7,
+ [[7, 2],
+ [5, 6],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [1, 7],
+ [4, 7],
+ [2, 4],
+ [2, 5],
+ [2, 6],
+ [4, 6],
+ [3, 4],
+ [3, 5],
+ [6, 7],
+ [5, 7]]],
+ ['edgelist',
+ 'G1184',
+ 7,
+ [[4, 5],
+ [5, 6],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [5, 7],
+ [4, 7],
+ [2, 4],
+ [2, 5],
+ [2, 6],
+ [4, 6],
+ [3, 4],
+ [3, 5],
+ [6, 7],
+ [6, 3]]],
+ ['edgelist',
+ 'G1185',
+ 7,
+ [[4, 5],
+ [5, 6],
+ [1, 4],
+ [1, 5],
+ [7, 1],
+ [5, 7],
+ [4, 7],
+ [2, 4],
+ [2, 5],
+ [2, 6],
+ [4, 6],
+ [3, 4],
+ [3, 5],
+ [6, 7],
+ [6, 3]]],
+ ['edgelist',
+ 'G1186',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [1, 3],
+ [4, 1],
+ [2, 4],
+ [3, 4],
+ [6, 2],
+ [4, 6],
+ [5, 4],
+ [3, 5],
+ [7, 3],
+ [4, 7],
+ [7, 2],
+ [1, 6],
+ [5, 1]]],
+ ['edgelist',
+ 'G1187',
+ 7,
+ [[1, 2],
+ [3, 1],
+ [4, 3],
+ [5, 4],
+ [2, 5],
+ [4, 2],
+ [5, 3],
+ [1, 5],
+ [4, 1],
+ [7, 4],
+ [5, 7],
+ [6, 5],
+ [4, 6],
+ [7, 3],
+ [6, 2]]],
+ ['edgelist',
+ 'G1188',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [2, 3],
+ [2, 4],
+ [2, 5],
+ [3, 4],
+ [3, 5],
+ [4, 5],
+ [6, 4],
+ [5, 6],
+ [7, 5],
+ [6, 7],
+ [7, 4]]],
+ ['edgelist',
+ 'G1189',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [1, 4],
+ [5, 1],
+ [5, 4],
+ [5, 3],
+ [7, 2],
+ [3, 7],
+ [6, 3],
+ [5, 6],
+ [7, 5],
+ [1, 7],
+ [7, 6],
+ [4, 7]]],
+ ['edgelist',
+ 'G1190',
+ 7,
+ [[1, 2],
+ [6, 4],
+ [2, 4],
+ [1, 5],
+ [4, 1],
+ [5, 4],
+ [3, 5],
+ [6, 3],
+ [5, 6],
+ [7, 5],
+ [3, 7],
+ [7, 6],
+ [4, 7],
+ [7, 2],
+ [1, 7]]],
+ ['edgelist',
+ 'G1191',
+ 7,
+ [[6, 3],
+ [5, 6],
+ [4, 2],
+ [1, 5],
+ [1, 6],
+ [1, 4],
+ [3, 5],
+ [2, 6],
+ [2, 5],
+ [7, 4],
+ [2, 7],
+ [7, 1],
+ [6, 7],
+ [7, 3],
+ [5, 7]]],
+ ['edgelist',
+ 'G1192',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6],
+ [1, 6],
+ [1, 4],
+ [3, 1],
+ [4, 2],
+ [7, 5],
+ [6, 7],
+ [7, 4],
+ [1, 7],
+ [7, 3],
+ [2, 7]]],
+ ['edgelist',
+ 'G1193',
+ 7,
+ [[6, 3],
+ [4, 1],
+ [6, 4],
+ [1, 5],
+ [1, 6],
+ [5, 4],
+ [3, 5],
+ [2, 6],
+ [2, 5],
+ [7, 5],
+ [3, 7],
+ [7, 1],
+ [6, 7],
+ [7, 4],
+ [2, 7]]],
+ ['edgelist',
+ 'G1194',
+ 7,
+ [[1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 4],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1195',
+ 7,
+ [[7, 2],
+ [5, 6],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [5, 7],
+ [4, 7],
+ [2, 4],
+ [2, 5],
+ [1, 7],
+ [4, 6],
+ [3, 4],
+ [3, 5],
+ [6, 7],
+ [6, 3]]],
+ ['edgelist',
+ 'G1196',
+ 7,
+ [[4, 5],
+ [1, 2],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [5, 7],
+ [4, 7],
+ [2, 4],
+ [2, 5],
+ [7, 1],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [6, 7],
+ [6, 3]]],
+ ['edgelist',
+ 'G1197',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 6],
+ [2, 7],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 7]]],
+ ['edgelist',
+ 'G1198',
+ 7,
+ [[6, 3],
+ [5, 6],
+ [6, 4],
+ [1, 5],
+ [1, 2],
+ [2, 4],
+ [3, 5],
+ [4, 1],
+ [2, 5],
+ [7, 5],
+ [2, 7],
+ [7, 1],
+ [4, 7],
+ [7, 3],
+ [6, 7]]],
+ ['edgelist',
+ 'G1199',
+ 7,
+ [[6, 1],
+ [5, 4],
+ [6, 4],
+ [6, 3],
+ [1, 2],
+ [2, 4],
+ [3, 5],
+ [4, 1],
+ [2, 5],
+ [7, 3],
+ [6, 7],
+ [7, 5],
+ [4, 7],
+ [7, 1],
+ [2, 7]]],
+ ['edgelist',
+ 'G1200',
+ 7,
+ [[4, 5],
+ [5, 6],
+ [1, 4],
+ [5, 7],
+ [1, 2],
+ [2, 7],
+ [4, 7],
+ [2, 4],
+ [2, 5],
+ [7, 1],
+ [1, 6],
+ [3, 4],
+ [3, 5],
+ [6, 7],
+ [6, 3]]],
+ ['edgelist',
+ 'G1201',
+ 7,
+ [[1, 3],
+ [1, 4],
+ [1, 7],
+ [2, 4],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1202',
+ 7,
+ [[1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 4],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7]]],
+ ['edgelist',
+ 'G1203',
+ 7,
+ [[4, 5],
+ [6, 1],
+ [1, 4],
+ [1, 5],
+ [5, 7],
+ [2, 7],
+ [4, 7],
+ [2, 4],
+ [2, 5],
+ [7, 1],
+ [2, 6],
+ [3, 4],
+ [3, 5],
+ [6, 7],
+ [6, 3]]],
+ ['edgelist',
+ 'G1204',
+ 7,
+ [[7, 5],
+ [6, 3],
+ [1, 4],
+ [1, 5],
+ [3, 5],
+ [2, 7],
+ [4, 7],
+ [2, 4],
+ [2, 5],
+ [7, 1],
+ [4, 6],
+ [3, 4],
+ [1, 2],
+ [6, 7],
+ [5, 6]]],
+ ['edgelist',
+ 'G1205',
+ 7,
+ [[1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 4],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1206',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1207',
+ 7,
+ [[3, 4],
+ [1, 3],
+ [4, 1],
+ [5, 4],
+ [2, 5],
+ [6, 2],
+ [5, 6],
+ [2, 1],
+ [6, 3],
+ [7, 3],
+ [6, 7],
+ [7, 2],
+ [1, 7],
+ [7, 5],
+ [4, 7]]],
+ ['edgelist',
+ 'G1208',
+ 7,
+ [[4, 1],
+ [4, 6],
+ [4, 5],
+ [3, 1],
+ [3, 6],
+ [3, 5],
+ [2, 5],
+ [2, 6],
+ [2, 1],
+ [7, 1],
+ [2, 7],
+ [7, 6],
+ [4, 7],
+ [7, 5],
+ [3, 7]]],
+ ['edgelist',
+ 'G1209',
+ 7,
+ [[1, 4],
+ [1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 7],
+ [5, 6],
+ [6, 7]]],
+ ['edgelist',
+ 'G1210',
+ 7,
+ [[4, 5],
+ [7, 3],
+ [1, 4],
+ [1, 5],
+ [6, 1],
+ [2, 7],
+ [4, 7],
+ [2, 4],
+ [2, 5],
+ [7, 1],
+ [5, 6],
+ [3, 4],
+ [3, 5],
+ [6, 2],
+ [6, 3]]],
+ ['edgelist',
+ 'G1211',
+ 7,
+ [[4, 5],
+ [7, 3],
+ [1, 4],
+ [1, 5],
+ [6, 1],
+ [6, 7],
+ [4, 7],
+ [2, 4],
+ [2, 5],
+ [1, 2],
+ [5, 7],
+ [3, 4],
+ [3, 5],
+ [6, 2],
+ [6, 3]]],
+ ['edgelist',
+ 'G1212',
+ 7,
+ [[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [1, 5],
+ [7, 3],
+ [2, 7],
+ [7, 1],
+ [5, 7],
+ [6, 5],
+ [1, 6],
+ [4, 6],
+ [7, 4],
+ [6, 3],
+ [2, 6]]],
+ ['edgelist',
+ 'G1213',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [2, 3],
+ [2, 4],
+ [2, 5],
+ [2, 6],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [4, 5],
+ [4, 6],
+ [5, 6],
+ [3, 7]]],
+ ['edgelist',
+ 'G1214',
+ 7,
+ [[4, 1],
+ [5, 2],
+ [5, 4],
+ [2, 4],
+ [5, 1],
+ [3, 6],
+ [7, 3],
+ [6, 7],
+ [2, 6],
+ [5, 6],
+ [4, 6],
+ [1, 6],
+ [1, 7],
+ [4, 7],
+ [5, 7],
+ [7, 2]]],
+ ['edgelist',
+ 'G1215',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1216',
+ 7,
+ [[1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 4],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7]]],
+ ['edgelist',
+ 'G1217',
+ 7,
+ [[4, 5],
+ [6, 2],
+ [1, 4],
+ [1, 5],
+ [6, 1],
+ [5, 7],
+ [4, 7],
+ [2, 4],
+ [2, 5],
+ [7, 1],
+ [4, 6],
+ [3, 4],
+ [3, 5],
+ [6, 7],
+ [5, 6],
+ [3, 6]]],
+ ['edgelist',
+ 'G1218',
+ 7,
+ [[3, 5],
+ [4, 2],
+ [4, 1],
+ [5, 4],
+ [5, 1],
+ [6, 3],
+ [5, 6],
+ [6, 1],
+ [4, 6],
+ [6, 2],
+ [7, 6],
+ [2, 7],
+ [4, 7],
+ [7, 1],
+ [5, 7],
+ [7, 3]]],
+ ['edgelist',
+ 'G1219',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1220',
+ 7,
+ [[3, 5],
+ [5, 2],
+ [4, 1],
+ [4, 2],
+ [5, 1],
+ [6, 3],
+ [5, 6],
+ [6, 1],
+ [4, 6],
+ [7, 6],
+ [2, 6],
+ [7, 2],
+ [4, 7],
+ [5, 7],
+ [7, 3],
+ [7, 1]]],
+ ['edgelist',
+ 'G1221',
+ 7,
+ [[1, 2],
+ [1, 4],
+ [1, 6],
+ [1, 7],
+ [2, 4],
+ [2, 6],
+ [2, 7],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1222',
+ 7,
+ [[3, 6],
+ [1, 2],
+ [5, 6],
+ [2, 4],
+ [6, 1],
+ [5, 4],
+ [6, 4],
+ [3, 5],
+ [2, 5],
+ [4, 1],
+ [7, 4],
+ [3, 7],
+ [7, 5],
+ [6, 7],
+ [7, 2],
+ [1, 7]]],
+ ['edgelist',
+ 'G1223',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 4],
+ [2, 7],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1224',
+ 7,
+ [[3, 6],
+ [6, 2],
+ [4, 2],
+ [1, 5],
+ [6, 1],
+ [5, 4],
+ [6, 4],
+ [3, 5],
+ [2, 5],
+ [4, 1],
+ [7, 3],
+ [5, 7],
+ [6, 7],
+ [7, 2],
+ [1, 7],
+ [4, 7]]],
+ ['edgelist',
+ 'G1225',
+ 7,
+ [[2, 7],
+ [1, 2],
+ [1, 4],
+ [1, 5],
+ [6, 1],
+ [5, 7],
+ [4, 7],
+ [2, 4],
+ [2, 5],
+ [7, 1],
+ [4, 6],
+ [3, 4],
+ [3, 5],
+ [6, 7],
+ [5, 6],
+ [3, 6]]],
+ ['edgelist',
+ 'G1226',
+ 7,
+ [[4, 5],
+ [6, 2],
+ [1, 4],
+ [1, 5],
+ [6, 1],
+ [5, 7],
+ [4, 7],
+ [2, 4],
+ [2, 5],
+ [7, 1],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [6, 7],
+ [1, 2],
+ [3, 6]]],
+ ['edgelist',
+ 'G1227',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 4],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 7],
+ [5, 6]]],
+ ['edgelist',
+ 'G1228',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 7],
+ [5, 6],
+ [6, 7]]],
+ ['edgelist',
+ 'G1229',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1230',
+ 7,
+ [[3, 6],
+ [6, 2],
+ [4, 6],
+ [1, 5],
+ [1, 2],
+ [5, 4],
+ [4, 3],
+ [3, 5],
+ [2, 5],
+ [1, 6],
+ [7, 5],
+ [3, 7],
+ [7, 4],
+ [6, 7],
+ [7, 2],
+ [1, 7]]],
+ ['edgelist',
+ 'G1231',
+ 7,
+ [[6, 7],
+ [6, 2],
+ [1, 4],
+ [1, 5],
+ [1, 2],
+ [5, 7],
+ [4, 7],
+ [2, 4],
+ [2, 5],
+ [7, 1],
+ [4, 6],
+ [3, 4],
+ [3, 5],
+ [7, 3],
+ [5, 6],
+ [3, 6]]],
+ ['edgelist',
+ 'G1232',
+ 7,
+ [[4, 5],
+ [6, 2],
+ [1, 4],
+ [1, 5],
+ [1, 2],
+ [5, 7],
+ [4, 7],
+ [2, 4],
+ [2, 5],
+ [7, 1],
+ [6, 1],
+ [3, 4],
+ [3, 5],
+ [7, 3],
+ [7, 6],
+ [3, 6]]],
+ ['edgelist',
+ 'G1233',
+ 7,
+ [[6, 1],
+ [6, 2],
+ [1, 4],
+ [1, 5],
+ [7, 2],
+ [5, 7],
+ [4, 7],
+ [2, 4],
+ [2, 5],
+ [7, 1],
+ [4, 6],
+ [3, 4],
+ [3, 5],
+ [7, 3],
+ [5, 6],
+ [3, 6]]],
+ ['edgelist',
+ 'G1234',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [2, 3],
+ [2, 4],
+ [2, 5],
+ [2, 6],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [4, 5],
+ [4, 6],
+ [5, 6],
+ [7, 3],
+ [2, 7]]],
+ ['edgelist',
+ 'G1235',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 4],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [5, 6],
+ [5, 7]]],
+ ['edgelist',
+ 'G1236',
+ 7,
+ [[5, 1],
+ [5, 4],
+ [1, 2],
+ [4, 1],
+ [3, 5],
+ [4, 2],
+ [6, 4],
+ [5, 6],
+ [6, 3],
+ [7, 6],
+ [6, 1],
+ [2, 6],
+ [7, 2],
+ [1, 7],
+ [7, 5],
+ [3, 7],
+ [4, 7]]],
+ ['edgelist',
+ 'G1237',
+ 7,
+ [[1, 2],
+ [6, 2],
+ [6, 4],
+ [1, 5],
+ [6, 1],
+ [5, 4],
+ [4, 2],
+ [3, 6],
+ [2, 5],
+ [4, 1],
+ [3, 5],
+ [7, 3],
+ [6, 7],
+ [7, 4],
+ [2, 7],
+ [7, 1],
+ [5, 7]]],
+ ['edgelist',
+ 'G1238',
+ 7,
+ [[4, 5],
+ [6, 2],
+ [1, 4],
+ [1, 5],
+ [5, 6],
+ [5, 7],
+ [4, 7],
+ [2, 4],
+ [2, 5],
+ [1, 2],
+ [4, 6],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [6, 1],
+ [6, 7],
+ [7, 3]]],
+ ['edgelist',
+ 'G1239',
+ 7,
+ [[4, 3],
+ [5, 2],
+ [1, 2],
+ [4, 1],
+ [3, 5],
+ [5, 4],
+ [6, 2],
+ [5, 6],
+ [6, 3],
+ [1, 6],
+ [6, 4],
+ [7, 6],
+ [3, 7],
+ [7, 4],
+ [1, 7],
+ [2, 7],
+ [5, 7]]],
+ ['edgelist',
+ 'G1240',
+ 7,
+ [[4, 3],
+ [5, 2],
+ [5, 1],
+ [4, 1],
+ [3, 5],
+ [4, 2],
+ [6, 3],
+ [5, 6],
+ [6, 1],
+ [4, 6],
+ [6, 2],
+ [7, 6],
+ [3, 7],
+ [7, 1],
+ [4, 7],
+ [7, 5],
+ [2, 7]]],
+ ['edgelist',
+ 'G1241',
+ 7,
+ [[4, 3],
+ [6, 2],
+ [6, 1],
+ [1, 5],
+ [1, 2],
+ [5, 4],
+ [6, 4],
+ [3, 6],
+ [2, 5],
+ [4, 1],
+ [3, 5],
+ [7, 5],
+ [6, 7],
+ [7, 3],
+ [4, 7],
+ [7, 1],
+ [2, 7]]],
+ ['edgelist',
+ 'G1242',
+ 7,
+ [[4, 3],
+ [6, 2],
+ [6, 1],
+ [1, 5],
+ [5, 6],
+ [1, 2],
+ [4, 2],
+ [3, 6],
+ [2, 5],
+ [4, 1],
+ [3, 5],
+ [7, 1],
+ [4, 7],
+ [7, 2],
+ [6, 7],
+ [7, 3],
+ [5, 7]]],
+ ['edgelist',
+ 'G1243',
+ 7,
+ [[1, 4],
+ [1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 4],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7]]],
+ ['edgelist',
+ 'G1244',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [2, 3],
+ [2, 4],
+ [2, 5],
+ [2, 6],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [4, 5],
+ [4, 6],
+ [5, 6],
+ [7, 2],
+ [1, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1245',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 4],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [4, 7]]],
+ ['edgelist',
+ 'G1246',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 5],
+ [2, 6],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1247',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 4],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1248',
+ 7,
+ [[5, 1],
+ [5, 6],
+ [4, 1],
+ [4, 6],
+ [3, 1],
+ [3, 6],
+ [2, 4],
+ [2, 5],
+ [2, 6],
+ [2, 1],
+ [3, 4],
+ [3, 5],
+ [7, 1],
+ [6, 7],
+ [7, 2],
+ [3, 7],
+ [7, 5],
+ [4, 7]]],
+ ['edgelist',
+ 'G1249',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 4],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1250',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 4],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1251',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 4],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]],
+ ['edgelist',
+ 'G1252',
+ 7,
+ [[1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 3],
+ [2, 4],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 5],
+ [4, 6],
+ [4, 7],
+ [5, 6],
+ [5, 7],
+ [6, 7]]]]
+
+ GAG=[]
+
+ for i in range(1253):
+ g=make_small_graph(descr_list[i])
+ GAG.append(g)
+
+ return GAG
+
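+# Editor's sketch (not upstream code): the list built above is indexed by
+# atlas number, so the final descriptor shown ('G1252', the complete graph
+# K7) lands at index 1252. The enclosing function name graph_atlas_g() is
+# assumed from the upstream atlas module.
+#
+# >>> GAG = graph_atlas_g()
+# >>> GAG[1252].number_of_edges()   # K7 has 7*6/2 = 21 edges
+# 21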
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/generators/bipartite.py b/lib/python2.7/site-packages/setoolsgui/networkx/generators/bipartite.py
new file mode 100644
index 0000000..435ed0d
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/generators/bipartite.py
@@ -0,0 +1,529 @@
+# -*- coding: utf-8 -*-
+"""
+Generators and functions for bipartite graphs.
+
+"""
+# Copyright (C) 2006-2011 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+import math
+import random
+import networkx
+from functools import reduce
+import networkx as nx
+__author__ = """\n""".join(['Aric Hagberg (hagberg@lanl.gov)',
+ 'Pieter Swart (swart@lanl.gov)',
+                            'Dan Schult (dschult@colgate.edu)'])
+__all__=['bipartite_configuration_model',
+ 'bipartite_havel_hakimi_graph',
+ 'bipartite_reverse_havel_hakimi_graph',
+ 'bipartite_alternating_havel_hakimi_graph',
+ 'bipartite_preferential_attachment_graph',
+ 'bipartite_random_graph',
+ 'bipartite_gnmk_random_graph',
+ ]
+
+
+def bipartite_configuration_model(aseq, bseq, create_using=None, seed=None):
+ """Return a random bipartite graph from two given degree sequences.
+
+ Parameters
+ ----------
+ aseq : list or iterator
+ Degree sequence for node set A.
+ bseq : list or iterator
+ Degree sequence for node set B.
+ create_using : NetworkX graph instance, optional
+ Return graph of this type.
+ seed : integer, optional
+ Seed for random number generator.
+
+ Nodes from the set A are connected to nodes in the set B by
+ choosing randomly from the possible free stubs, one in A and
+ one in B.
+
+ Notes
+ -----
+ The sum of the two sequences must be equal: sum(aseq)=sum(bseq)
+ If no graph type is specified use MultiGraph with parallel edges.
+ If you want a graph with no parallel edges use create_using=Graph()
+ but then the resulting degree sequences might not be exact.
+
+ The nodes are assigned the attribute 'bipartite' with the value 0 or 1
+ to indicate which bipartite set the node belongs to.
+ """
+ if create_using is None:
+ create_using=networkx.MultiGraph()
+ elif create_using.is_directed():
+ raise networkx.NetworkXError(\
+ "Directed Graph not supported")
+
+
+ G=networkx.empty_graph(0,create_using)
+
+ if not seed is None:
+ random.seed(seed)
+
+ # length and sum of each sequence
+ lena=len(aseq)
+ lenb=len(bseq)
+ suma=sum(aseq)
+ sumb=sum(bseq)
+
+ if not suma==sumb:
+ raise networkx.NetworkXError(\
+ 'invalid degree sequences, sum(aseq)!=sum(bseq),%s,%s'\
+ %(suma,sumb))
+
+ G=_add_nodes_with_bipartite_label(G,lena,lenb)
+
+ if max(aseq)==0: return G # done if no edges
+
+ # build lists of degree-repeated vertex numbers
+    stubs=[[v]*aseq[v] for v in range(0,lena)]
+    astubs=[x for subseq in stubs for x in subseq]
+
+    stubs=[[v]*bseq[v-lena] for v in range(lena,lena+lenb)]
+    bstubs=[x for subseq in stubs for x in subseq]
+
+ # shuffle lists
+ random.shuffle(astubs)
+ random.shuffle(bstubs)
+
+ G.add_edges_from([[astubs[i],bstubs[i]] for i in range(suma)])
+
+ G.name="bipartite_configuration_model"
+ return G
+
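+# Editor's sketch (not upstream code): a minimal call, assuming this
+# vendored tree is importable as below. The sequences here are arbitrary
+# but must satisfy sum(aseq) == sum(bseq); since the result is a
+# MultiGraph, the requested degrees are reproduced exactly.
+#
+# >>> from setoolsgui.networkx.generators.bipartite import bipartite_configuration_model
+# >>> G = bipartite_configuration_model([2, 2, 1], [3, 1, 1], seed=42)
+# >>> sorted(n for n, d in G.nodes(data=True) if d['bipartite'] == 0)
+# [0, 1, 2]
+# >>> G.degree(0)   # matches aseq[0]
+# 2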
+
+def bipartite_havel_hakimi_graph(aseq, bseq, create_using=None):
+ """Return a bipartite graph from two given degree sequences using a
+ Havel-Hakimi style construction.
+
+ Nodes from the set A are connected to nodes in the set B by
+ connecting the highest degree nodes in set A to the highest degree
+ nodes in set B until all stubs are connected.
+
+ Parameters
+ ----------
+ aseq : list or iterator
+ Degree sequence for node set A.
+ bseq : list or iterator
+ Degree sequence for node set B.
+ create_using : NetworkX graph instance, optional
+ Return graph of this type.
+
+ Notes
+ -----
+ The sum of the two sequences must be equal: sum(aseq)=sum(bseq)
+ If no graph type is specified use MultiGraph with parallel edges.
+ If you want a graph with no parallel edges use create_using=Graph()
+ but then the resulting degree sequences might not be exact.
+
+ The nodes are assigned the attribute 'bipartite' with the value 0 or 1
+ to indicate which bipartite set the node belongs to.
+ """
+ if create_using is None:
+ create_using=networkx.MultiGraph()
+ elif create_using.is_directed():
+ raise networkx.NetworkXError(\
+ "Directed Graph not supported")
+
+ G=networkx.empty_graph(0,create_using)
+
+    # length of each sequence
+ naseq=len(aseq)
+ nbseq=len(bseq)
+
+ suma=sum(aseq)
+ sumb=sum(bseq)
+
+ if not suma==sumb:
+ raise networkx.NetworkXError(\
+ 'invalid degree sequences, sum(aseq)!=sum(bseq),%s,%s'\
+ %(suma,sumb))
+
+ G=_add_nodes_with_bipartite_label(G,naseq,nbseq)
+
+ if max(aseq)==0: return G # done if no edges
+
+ # build list of degree-repeated vertex numbers
+ astubs=[[aseq[v],v] for v in range(0,naseq)]
+ bstubs=[[bseq[v-naseq],v] for v in range(naseq,naseq+nbseq)]
+ astubs.sort()
+ while astubs:
+        (degree,u)=astubs.pop() # pop the largest-degree node in the A set
+ if degree==0: break # done, all are zero
+ # connect the source to largest degree nodes in the b set
+ bstubs.sort()
+ for target in bstubs[-degree:]:
+ v=target[1]
+ G.add_edge(u,v)
+ target[0] -= 1 # note this updates bstubs too.
+ if target[0]==0:
+ bstubs.remove(target)
+
+ G.name="bipartite_havel_hakimi_graph"
+ return G
+
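+# Editor's sketch (not upstream code): this construction is deterministic,
+# so equal-sum sequences always yield the same multigraph with the exact
+# requested degrees (continuing the session above).
+#
+# >>> G = bipartite_havel_hakimi_graph([2, 2], [2, 1, 1])
+# >>> sorted(G.degree().values())
+# [1, 1, 2, 2, 2]
+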
+def bipartite_reverse_havel_hakimi_graph(aseq, bseq, create_using=None):
+ """Return a bipartite graph from two given degree sequences using a
+ Havel-Hakimi style construction.
+
+ Nodes from set A are connected to nodes in the set B by connecting
+ the highest degree nodes in set A to the lowest degree nodes in
+ set B until all stubs are connected.
+
+ Parameters
+ ----------
+ aseq : list or iterator
+ Degree sequence for node set A.
+ bseq : list or iterator
+ Degree sequence for node set B.
+ create_using : NetworkX graph instance, optional
+ Return graph of this type.
+
+
+ Notes
+ -----
+ The sum of the two sequences must be equal: sum(aseq)=sum(bseq)
+ If no graph type is specified use MultiGraph with parallel edges.
+ If you want a graph with no parallel edges use create_using=Graph()
+ but then the resulting degree sequences might not be exact.
+
+ The nodes are assigned the attribute 'bipartite' with the value 0 or 1
+ to indicate which bipartite set the node belongs to.
+ """
+ if create_using is None:
+ create_using=networkx.MultiGraph()
+ elif create_using.is_directed():
+ raise networkx.NetworkXError(\
+ "Directed Graph not supported")
+
+ G=networkx.empty_graph(0,create_using)
+
+
+    # length of each sequence
+ lena=len(aseq)
+ lenb=len(bseq)
+ suma=sum(aseq)
+ sumb=sum(bseq)
+
+ if not suma==sumb:
+ raise networkx.NetworkXError(\
+ 'invalid degree sequences, sum(aseq)!=sum(bseq),%s,%s'\
+ %(suma,sumb))
+
+ G=_add_nodes_with_bipartite_label(G,lena,lenb)
+
+ if max(aseq)==0: return G # done if no edges
+
+ # build list of degree-repeated vertex numbers
+ astubs=[[aseq[v],v] for v in range(0,lena)]
+ bstubs=[[bseq[v-lena],v] for v in range(lena,lena+lenb)]
+ astubs.sort()
+ bstubs.sort()
+ while astubs:
+        (degree,u)=astubs.pop() # pop the largest-degree node in the A set
+ if degree==0: break # done, all are zero
+ # connect the source to the smallest degree nodes in the b set
+ for target in bstubs[0:degree]:
+ v=target[1]
+ G.add_edge(u,v)
+ target[0] -= 1 # note this updates bstubs too.
+ if target[0]==0:
+ bstubs.remove(target)
+
+ G.name="bipartite_reverse_havel_hakimi_graph"
+ return G
+
+
+def bipartite_alternating_havel_hakimi_graph(aseq, bseq,create_using=None):
+ """Return a bipartite graph from two given degree sequences using
+ an alternating Havel-Hakimi style construction.
+
+ Nodes from the set A are connected to nodes in the set B by
+ connecting the highest degree nodes in set A to alternatively the
+ highest and the lowest degree nodes in set B until all stubs are
+ connected.
+
+ Parameters
+ ----------
+ aseq : list or iterator
+ Degree sequence for node set A.
+ bseq : list or iterator
+ Degree sequence for node set B.
+ create_using : NetworkX graph instance, optional
+ Return graph of this type.
+
+
+ Notes
+ -----
+ The sum of the two sequences must be equal: sum(aseq)=sum(bseq)
+ If no graph type is specified use MultiGraph with parallel edges.
+ If you want a graph with no parallel edges use create_using=Graph()
+ but then the resulting degree sequences might not be exact.
+
+ The nodes are assigned the attribute 'bipartite' with the value 0 or 1
+ to indicate which bipartite set the node belongs to.
+ """
+ if create_using is None:
+ create_using=networkx.MultiGraph()
+ elif create_using.is_directed():
+ raise networkx.NetworkXError(\
+ "Directed Graph not supported")
+
+ G=networkx.empty_graph(0,create_using)
+
+    # length of each sequence
+ naseq=len(aseq)
+ nbseq=len(bseq)
+ suma=sum(aseq)
+ sumb=sum(bseq)
+
+ if not suma==sumb:
+ raise networkx.NetworkXError(\
+ 'invalid degree sequences, sum(aseq)!=sum(bseq),%s,%s'\
+ %(suma,sumb))
+
+ G=_add_nodes_with_bipartite_label(G,naseq,nbseq)
+
+ if max(aseq)==0: return G # done if no edges
+ # build list of degree-repeated vertex numbers
+ astubs=[[aseq[v],v] for v in range(0,naseq)]
+ bstubs=[[bseq[v-naseq],v] for v in range(naseq,naseq+nbseq)]
+ while astubs:
+ astubs.sort()
+        (degree,u)=astubs.pop() # pop the largest-degree node in the A set
+ if degree==0: break # done, all are zero
+ bstubs.sort()
+ small=bstubs[0:degree // 2] # add these low degree targets
+ large=bstubs[(-degree+degree // 2):] # and these high degree targets
+        stubs=[x for z in zip(large,small) for x in z] # interleave the large and small targets
+ if len(stubs)<len(small)+len(large): # check for zip truncation
+ stubs.append(large.pop())
+ for target in stubs:
+ v=target[1]
+ G.add_edge(u,v)
+ target[0] -= 1 # note this updates bstubs too.
+ if target[0]==0:
+ bstubs.remove(target)
+
+ G.name="bipartite_alternating_havel_hakimi_graph"
+ return G
+
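+# Editor's sketch (not upstream code): same calling convention as the
+# other Havel-Hakimi variants; only the target choice (alternating high
+# and low degree nodes in B) differs, so the degrees are still exact.
+#
+# >>> G = bipartite_alternating_havel_hakimi_graph([3, 1], [2, 1, 1])
+# >>> sorted(G.degree().values())
+# [1, 1, 1, 2, 3]
+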
+def bipartite_preferential_attachment_graph(aseq,p,create_using=None,seed=None):
+ """Create a bipartite graph with a preferential attachment model from
+ a given single degree sequence.
+
+ Parameters
+ ----------
+ aseq : list or iterator
+ Degree sequence for node set A.
+ p : float
+ Probability that a new bottom node is added.
+ create_using : NetworkX graph instance, optional
+ Return graph of this type.
+ seed : integer, optional
+ Seed for random number generator.
+
+ References
+ ----------
+ .. [1] Jean-Loup Guillaume and Matthieu Latapy,
+ Bipartite structure of all complex networks,
+ Inf. Process. Lett. 90, 2004, pg. 215-221
+ http://dx.doi.org/10.1016/j.ipl.2004.03.007
+ """
+ if create_using is None:
+ create_using=networkx.MultiGraph()
+ elif create_using.is_directed():
+ raise networkx.NetworkXError(\
+ "Directed Graph not supported")
+
+ if p > 1:
+ raise networkx.NetworkXError("probability %s > 1"%(p))
+
+ G=networkx.empty_graph(0,create_using)
+
+ if not seed is None:
+ random.seed(seed)
+
+ naseq=len(aseq)
+ G=_add_nodes_with_bipartite_label(G,naseq,0)
+ vv=[ [v]*aseq[v] for v in range(0,naseq)]
+ while vv:
+ while vv[0]:
+ source=vv[0][0]
+ vv[0].remove(source)
+ if random.random() < p or G.number_of_nodes() == naseq:
+ target=G.number_of_nodes()
+ G.add_node(target,bipartite=1)
+ G.add_edge(source,target)
+ else:
+ bb=[ [b]*G.degree(b) for b in range(naseq,G.number_of_nodes())]
+ # flatten the list of lists into a list.
+ bbstubs=reduce(lambda x,y: x+y, bb)
+ # choose preferentially a bottom node.
+ target=random.choice(bbstubs)
+ G.add_node(target,bipartite=1)
+ G.add_edge(source,target)
+ vv.remove(vv[0])
+ G.name="bipartite_preferential_attachment_model"
+ return G
+
+
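+# Editor's sketch (not upstream code): only the top degree sequence is
+# prescribed; each stub either opens a new bottom node (probability p) or
+# attaches preferentially to an existing one, so the top degrees always
+# match aseq exactly. The values below are arbitrary.
+#
+# >>> G = bipartite_preferential_attachment_graph([2, 2, 1], 0.5, seed=1)
+# >>> [G.degree(v) for v in range(3)]
+# [2, 2, 1]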
+
+def bipartite_random_graph(n, m, p, seed=None, directed=False):
+ """Return a bipartite random graph.
+
+ This is a bipartite version of the binomial (Erdős-Rényi) graph.
+
+ Parameters
+ ----------
+ n : int
+ The number of nodes in the first bipartite set.
+ m : int
+ The number of nodes in the second bipartite set.
+ p : float
+ Probability for edge creation.
+ seed : int, optional
+ Seed for random number generator (default=None).
+ directed : bool, optional (default=False)
+ If True return a directed graph
+
+ Notes
+ -----
+    The bipartite random graph algorithm chooses each of the n*m (undirected)
+    or 2*n*m (directed) possible edges with probability p.
+
+    The running time is proportional to the number of nodes plus the
+    expected number of edges, rather than to the number of candidate pairs.
+
+ The nodes are assigned the attribute 'bipartite' with the value 0 or 1
+ to indicate which bipartite set the node belongs to.
+
+ See Also
+ --------
+ gnp_random_graph, bipartite_configuration_model
+
+ References
+ ----------
+ .. [1] Vladimir Batagelj and Ulrik Brandes,
+ "Efficient generation of large random networks",
+ Phys. Rev. E, 71, 036113, 2005.
+ """
+ G=nx.Graph()
+ G=_add_nodes_with_bipartite_label(G,n,m)
+ if directed:
+ G=nx.DiGraph(G)
+    G.name="bipartite_random_graph(%s,%s,%s)"%(n,m,p)
+
+ if not seed is None:
+ random.seed(seed)
+
+ if p <= 0:
+ return G
+ if p >= 1:
+ return nx.complete_bipartite_graph(n,m)
+
+ lp = math.log(1.0 - p)
+
+ v = 0
+ w = -1
+ while v < n:
+ lr = math.log(1.0 - random.random())
+ w = w + 1 + int(lr/lp)
+ while w >= m and v < n:
+ w = w - m
+ v = v + 1
+ if v < n:
+ G.add_edge(v, n+w)
+
+ if directed:
+ # use the same algorithm to
+ # add edges from the "m" to "n" set
+ v = 0
+ w = -1
+ while v < n:
+ lr = math.log(1.0 - random.random())
+ w = w + 1 + int(lr/lp)
+ while w>= m and v < n:
+ w = w - m
+ v = v + 1
+ if v < n:
+ G.add_edge(n+w, v)
+
+ return G
+
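+# Editor's sketch (not upstream code): each of the n*m candidate pairs is
+# kept independently with probability p, so p=1.0 reproduces the complete
+# bipartite graph.
+#
+# >>> G = bipartite_random_graph(3, 4, 1.0)
+# >>> G.number_of_edges()
+# 12
+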
+def bipartite_gnmk_random_graph(n, m, k, seed=None, directed=False):
+ """Return a random bipartite graph G_{n,m,k}.
+
+ Produces a bipartite graph chosen randomly out of the set of all graphs
+ with n top nodes, m bottom nodes, and k edges.
+
+ Parameters
+ ----------
+ n : int
+ The number of nodes in the first bipartite set.
+ m : int
+ The number of nodes in the second bipartite set.
+ k : int
+ The number of edges
+ seed : int, optional
+ Seed for random number generator (default=None).
+ directed : bool, optional (default=False)
+ If True return a directed graph
+
+ Examples
+ --------
+ G = nx.bipartite_gnmk_random_graph(10,20,50)
+
+ See Also
+ --------
+ gnm_random_graph
+
+ Notes
+ -----
+    If k >= m * n then a complete bipartite graph is returned.
+
+ This graph is a bipartite version of the `G_{nm}` random graph model.
+ """
+ G = networkx.Graph()
+ G=_add_nodes_with_bipartite_label(G,n,m)
+ if directed:
+ G=nx.DiGraph(G)
+    G.name="bipartite_gnmk_random_graph(%s,%s,%s)"%(n,m,k)
+ if seed is not None:
+ random.seed(seed)
+ if n == 1 or m == 1:
+ return G
+ max_edges = n*m # max_edges for bipartite networks
+ if k >= max_edges: # Maybe we should raise an exception here
+ return networkx.complete_bipartite_graph(n, m, create_using=G)
+
+ top = [n for n,d in G.nodes(data=True) if d['bipartite']==0]
+ bottom = list(set(G) - set(top))
+ edge_count = 0
+ while edge_count < k:
+ # generate random edge,u,v
+ u = random.choice(top)
+ v = random.choice(bottom)
+ if v in G[u]:
+ continue
+ else:
+ G.add_edge(u,v)
+ edge_count += 1
+ return G
+
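+# Editor's sketch (not upstream code): here k is an exact edge count, so
+# for n, m > 1 the result has exactly min(k, n*m) edges. (Note the early
+# return above: when either side has a single node the graph comes back
+# with no edges at all.)
+#
+# >>> G = bipartite_gnmk_random_graph(4, 5, 7, seed=3)
+# >>> G.number_of_edges()
+# 7
+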
+def _add_nodes_with_bipartite_label(G, lena, lenb):
+ G.add_nodes_from(range(0,lena+lenb))
+ b=dict(zip(range(0,lena),[0]*lena))
+ b.update(dict(zip(range(lena,lena+lenb),[1]*lenb)))
+ nx.set_node_attributes(G,'bipartite',b)
+ return G
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/generators/classic.py b/lib/python2.7/site-packages/setoolsgui/networkx/generators/classic.py
new file mode 100644
index 0000000..f8ca43b
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/generators/classic.py
@@ -0,0 +1,508 @@
+"""
+Generators for some classic graphs.
+
+The typical graph generator is called as follows:
+
+>>> G=nx.complete_graph(100)
+
+returning the complete graph on 100 nodes labeled 0,...,99
+as a simple graph. Except for empty_graph, all the generators
+in this module return a Graph class (i.e. a simple, undirected graph).
+
+"""
+# Copyright (C) 2004-2010 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+import itertools
+__author__ ="""Aric Hagberg (hagberg@lanl.gov)\nPieter Swart (swart@lanl.gov)"""
+
+__all__ = [ 'balanced_tree',
+ 'barbell_graph',
+ 'complete_graph',
+ 'complete_bipartite_graph',
+ 'circular_ladder_graph',
+ 'cycle_graph',
+ 'dorogovtsev_goltsev_mendes_graph',
+ 'empty_graph',
+ 'full_rary_tree',
+ 'grid_graph',
+ 'grid_2d_graph',
+ 'hypercube_graph',
+ 'ladder_graph',
+ 'lollipop_graph',
+ 'null_graph',
+ 'path_graph',
+ 'star_graph',
+ 'trivial_graph',
+ 'wheel_graph']
+
+
+#-------------------------------------------------------------------
+# Some Classic Graphs
+#-------------------------------------------------------------------
+import networkx as nx
+from networkx.utils import is_list_of_ints, flatten
+
+def _tree_edges(n,r):
+ # helper function for trees
+ # yields edges in rooted tree at 0 with n nodes and branching ratio r
+ nodes=iter(range(n))
+ parents=[next(nodes)] # stack of max length r
+ while parents:
+ source=parents.pop(0)
+ for i in range(r):
+ try:
+ target=next(nodes)
+ parents.append(target)
+ yield source,target
+ except StopIteration:
+ break
+
+def full_rary_tree(r, n, create_using=None):
+ """Creates a full r-ary tree of n vertices.
+
+ Sometimes called a k-ary, n-ary, or m-ary tree. "... all non-leaf
+ vertices have exactly r children and all levels are full except
+ for some rightmost position of the bottom level (if a leaf at the
+ bottom level is missing, then so are all of the leaves to its
+    right.)" [1]_
+
+ Parameters
+ ----------
+ r : int
+ branching factor of the tree
+ n : int
+ Number of nodes in the tree
+ create_using : NetworkX graph type, optional
+ Use specified type to construct graph (default = networkx.Graph)
+
+ Returns
+ -------
+ G : networkx Graph
+ An r-ary tree with n nodes
+
+ References
+ ----------
+ .. [1] An introduction to data structures and algorithms,
+ James Andrew Storer, Birkhauser Boston 2001, (page 225).
+ """
+ G=nx.empty_graph(n,create_using)
+ G.add_edges_from(_tree_edges(n,r))
+ return G
+
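+# Editor's sketch (not upstream code): n caps the node count, so the
+# bottom level may be only partially filled. The import path assumes this
+# vendored tree is on sys.path.
+#
+# >>> from setoolsgui.networkx.generators.classic import full_rary_tree
+# >>> G = full_rary_tree(2, 7)   # full binary tree on 7 nodes
+# >>> sorted(G.degree().values())
+# [1, 1, 1, 1, 2, 3, 3]
+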
+def balanced_tree(r, h, create_using=None):
+    """Return the perfectly balanced r-ary tree of height h.
+
+ Parameters
+ ----------
+ r : int
+ Branching factor of the tree
+ h : int
+ Height of the tree
+ create_using : NetworkX graph type, optional
+ Use specified type to construct graph (default = networkx.Graph)
+
+ Returns
+ -------
+ G : networkx Graph
+ A tree with n nodes
+
+ Notes
+ -----
+ This is the rooted tree where all leaves are at distance h from
+ the root. The root has degree r and all other internal nodes have
+ degree r+1.
+
+ Node labels are the integers 0 (the root) up to number_of_nodes - 1.
+
+    Also referred to as a complete r-ary tree.
+ """
+ # number of nodes is n=1+r+..+r^h
+    if r==1:
+        n=h+1 # a unary "tree" is a path on h+1 nodes
+    else:
+        n = int((1-r**(h+1))/(1-r)) # sum of geometric series r!=1
+    G=nx.empty_graph(n,create_using)
+    G.add_edges_from(_tree_edges(n,r))
+    return G
+
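+# Editor's sketch (not upstream code): the node count follows the
+# geometric series n = 1 + r + ... + r**h (continuing the session above).
+#
+# >>> G = balanced_tree(2, 3)   # (2**4 - 1)/(2 - 1) = 15 nodes
+# >>> G.number_of_nodes(), G.number_of_edges()
+# (15, 14)
+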
+def barbell_graph(m1,m2,create_using=None):
+ """Return the Barbell Graph: two complete graphs connected by a path.
+
+ For m1 > 1 and m2 >= 0.
+
+ Two identical complete graphs K_{m1} form the left and right bells,
+ and are connected by a path P_{m2}.
+
+ The 2*m1+m2 nodes are numbered
+ 0,...,m1-1 for the left barbell,
+ m1,...,m1+m2-1 for the path,
+ and m1+m2,...,2*m1+m2-1 for the right barbell.
+
+ The 3 subgraphs are joined via the edges (m1-1,m1) and (m1+m2-1,m1+m2).
+ If m2=0, this is merely two complete graphs joined together.
+
+ This graph is an extremal example in David Aldous
+ and Jim Fill's etext on Random Walks on Graphs.
+
+ """
+ if create_using is not None and create_using.is_directed():
+ raise nx.NetworkXError("Directed Graph not supported")
+ if m1<2:
+ raise nx.NetworkXError(\
+ "Invalid graph description, m1 should be >=2")
+ if m2<0:
+ raise nx.NetworkXError(\
+ "Invalid graph description, m2 should be >=0")
+
+ # left barbell
+ G=complete_graph(m1,create_using)
+ G.name="barbell_graph(%d,%d)"%(m1,m2)
+
+ # connecting path
+ G.add_nodes_from([v for v in range(m1,m1+m2-1)])
+ if m2>1:
+ G.add_edges_from([(v,v+1) for v in range(m1,m1+m2-1)])
+ # right barbell
+ G.add_edges_from( (u,v) for u in range(m1+m2,2*m1+m2) for v in range(u+1,2*m1+m2))
+ # connect it up
+ G.add_edge(m1-1,m1)
+ if m2>0:
+ G.add_edge(m1+m2-1,m1+m2)
+ return G
+
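+# Editor's sketch (not upstream code): the counts follow directly from
+# the construction: 2*m1 + m2 nodes and, for m2 > 0,
+# 2*(m1 choose 2) + (m2 - 1) + 2 edges.
+#
+# >>> G = barbell_graph(4, 2)
+# >>> G.number_of_nodes(), G.number_of_edges()
+# (10, 15)
+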
+def complete_graph(n,create_using=None):
+ """ Return the complete graph K_n with n nodes.
+
+ Node labels are the integers 0 to n-1.
+ """
+ G=empty_graph(n,create_using)
+ G.name="complete_graph(%d)"%(n)
+ if n>1:
+ if G.is_directed():
+ edges=itertools.permutations(range(n),2)
+ else:
+ edges=itertools.combinations(range(n),2)
+ G.add_edges_from(edges)
+ return G
+
+
+def complete_bipartite_graph(n1,n2,create_using=None):
+    """Return the complete bipartite graph K_{n1,n2}.
+
+ Composed of two partitions with n1 nodes in the first
+ and n2 nodes in the second. Each node in the first is
+ connected to each node in the second.
+
+ Node labels are the integers 0 to n1+n2-1
+
+ """
+ if create_using is not None and create_using.is_directed():
+ raise nx.NetworkXError("Directed Graph not supported")
+ G=empty_graph(n1+n2,create_using)
+ G.name="complete_bipartite_graph(%d,%d)"%(n1,n2)
+ for v1 in range(n1):
+ for v2 in range(n2):
+ G.add_edge(v1,n1+v2)
+ return G
+
+def circular_ladder_graph(n,create_using=None):
+ """Return the circular ladder graph CL_n of length n.
+
+ CL_n consists of two concentric n-cycles in which
+ each of the n pairs of concentric nodes are joined by an edge.
+
+ Node labels are the integers 0 to n-1
+
+ """
+ G=ladder_graph(n,create_using)
+ G.name="circular_ladder_graph(%d)"%n
+ G.add_edge(0,n-1)
+ G.add_edge(n,2*n-1)
+ return G
+
+def cycle_graph(n,create_using=None):
+ """Return the cycle graph C_n over n nodes.
+
+ C_n is the n-path with two end-nodes connected.
+
+ Node labels are the integers 0 to n-1
+ If create_using is a DiGraph, the direction is in increasing order.
+
+ """
+ G=path_graph(n,create_using)
+ G.name="cycle_graph(%d)"%n
+ if n>1: G.add_edge(n-1,0)
+ return G
+
+def dorogovtsev_goltsev_mendes_graph(n,create_using=None):
+ """Return the hierarchically constructed Dorogovtsev-Goltsev-Mendes graph.
+
+ n is the generation.
+ See: arXiv:/cond-mat/0112143 by Dorogovtsev, Goltsev and Mendes.
+
+ """
+ if create_using is not None:
+ if create_using.is_directed():
+ raise nx.NetworkXError("Directed Graph not supported")
+ if create_using.is_multigraph():
+ raise nx.NetworkXError("Multigraph not supported")
+ G=empty_graph(0,create_using)
+ G.name="Dorogovtsev-Goltsev-Mendes Graph"
+ G.add_edge(0,1)
+ if n==0:
+ return G
+ new_node = 2 # next node to be added
+ for i in range(1,n+1): #iterate over number of generations.
+ last_generation_edges = G.edges()
+ number_of_edges_in_last_generation = len(last_generation_edges)
+ for j in range(0,number_of_edges_in_last_generation):
+ G.add_edge(new_node,last_generation_edges[j][0])
+ G.add_edge(new_node,last_generation_edges[j][1])
+ new_node += 1
+ return G
+
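+# Editor's sketch (not upstream code): one node is added per existing
+# edge each generation, so edge counts triple: generation n has 3**n
+# edges and (3**n + 3)/2 nodes.
+#
+# >>> G = dorogovtsev_goltsev_mendes_graph(3)
+# >>> G.number_of_nodes(), G.number_of_edges()
+# (15, 27)
+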
+def empty_graph(n=0,create_using=None):
+ """Return the empty graph with n nodes and zero edges.
+
+ Node labels are the integers 0 to n-1
+
+ For example:
+ >>> G=nx.empty_graph(10)
+ >>> G.number_of_nodes()
+ 10
+ >>> G.number_of_edges()
+ 0
+
+ The variable create_using should point to a "graph"-like object that
+ will be cleaned (nodes and edges will be removed) and refitted as
+ an empty "graph" with n nodes with integer labels. This capability
+ is useful for specifying the class-nature of the resulting empty
+ "graph" (i.e. Graph, DiGraph, MyWeirdGraphClass, etc.).
+
+ The variable create_using has two main uses:
+ Firstly, the variable create_using can be used to create an
+    empty digraph, network, etc. For example,
+
+ >>> n=10
+ >>> G=nx.empty_graph(n,create_using=nx.DiGraph())
+
+ will create an empty digraph on n nodes.
+
+ Secondly, one can pass an existing graph (digraph, pseudograph,
+ etc.) via create_using. For example, if G is an existing graph
+ (resp. digraph, pseudograph, etc.), then empty_graph(n,create_using=G)
+ will empty G (i.e. delete all nodes and edges using G.clear() in
+ base) and then add n nodes and zero edges, and return the modified
+ graph (resp. digraph, pseudograph, etc.).
+
+ See also create_empty_copy(G).
+
+ """
+ if create_using is None:
+ # default empty graph is a simple graph
+ G=nx.Graph()
+ else:
+ G=create_using
+ G.clear()
+
+ G.add_nodes_from(range(n))
+ G.name="empty_graph(%d)"%n
+ return G
+
+def grid_2d_graph(m,n,periodic=False,create_using=None):
+ """ Return the 2d grid graph of mxn nodes,
+ each connected to its nearest neighbors.
+ Optional argument periodic=True will connect
+ boundary nodes via periodic boundary conditions.
+ """
+ G=empty_graph(0,create_using)
+ G.name="grid_2d_graph"
+ rows=range(m)
+ columns=range(n)
+ G.add_nodes_from( (i,j) for i in rows for j in columns )
+ G.add_edges_from( ((i,j),(i-1,j)) for i in rows for j in columns if i>0 )
+ G.add_edges_from( ((i,j),(i,j-1)) for i in rows for j in columns if j>0 )
+ if G.is_directed():
+ G.add_edges_from( ((i,j),(i+1,j)) for i in rows for j in columns if i<m-1 )
+ G.add_edges_from( ((i,j),(i,j+1)) for i in rows for j in columns if j<n-1 )
+ if periodic:
+ if n>2:
+ G.add_edges_from( ((i,0),(i,n-1)) for i in rows )
+ if G.is_directed():
+ G.add_edges_from( ((i,n-1),(i,0)) for i in rows )
+ if m>2:
+ G.add_edges_from( ((0,j),(m-1,j)) for j in columns )
+ if G.is_directed():
+ G.add_edges_from( ((m-1,j),(0,j)) for j in columns )
+ G.name="periodic_grid_2d_graph(%d,%d)"%(m,n)
+ return G
+
+
+def grid_graph(dim,periodic=False):
+ """ Return the n-dimensional grid graph.
+
+ The dimension is the length of the list 'dim' and the
+ size in each dimension is the value of the list element.
+
+ E.g. G=grid_graph(dim=[2,3]) produces a 2x3 grid graph.
+
+ If periodic=True then join grid edges with periodic boundary conditions.
+
+ """
+ dlabel="%s"%dim
+ if dim==[]:
+ G=empty_graph(0)
+ G.name="grid_graph(%s)"%dim
+ return G
+ if not is_list_of_ints(dim):
+ raise nx.NetworkXError("dim is not a list of integers")
+ if min(dim)<=0:
+ raise nx.NetworkXError(\
+ "dim is not a list of strictly positive integers")
+ if periodic:
+ func=cycle_graph
+ else:
+ func=path_graph
+
+ dim=list(dim)
+ current_dim=dim.pop()
+ G=func(current_dim)
+ while len(dim)>0:
+ current_dim=dim.pop()
+ # order matters: copy before it is cleared during the creation of Gnew
+ Gold=G.copy()
+ Gnew=func(current_dim)
+ # explicit: create_using=None
+ # This is so that we get a new graph of Gnew's class.
+ G=nx.cartesian_product(Gnew,Gold)
+ # graph G is done but has labels of the form (1,(2,(3,1)))
+ # so relabel
+ H=nx.relabel_nodes(G, flatten)
+ H.name="grid_graph(%s)"%dlabel
+ return H
+
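+# Editor's sketch (not upstream code): the repeated Cartesian product
+# gives prod(dim) nodes; a [2, 3, 4] grid is P2 x P3 x P4.
+#
+# >>> G = grid_graph(dim=[2, 3, 4])
+# >>> G.number_of_nodes()
+# 24
+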
+def hypercube_graph(n):
+ """Return the n-dimensional hypercube.
+
+ Node labels are the integers 0 to 2**n - 1.
+
+ """
+ dim=n*[2]
+ G=grid_graph(dim)
+ G.name="hypercube_graph_(%d)"%n
+ return G
+
+def ladder_graph(n,create_using=None):
+ """Return the Ladder graph of length n.
+
+ This is two rows of n nodes, with
+ each pair connected by a single edge.
+
+ Node labels are the integers 0 to 2*n - 1.
+
+ """
+ if create_using is not None and create_using.is_directed():
+ raise nx.NetworkXError("Directed Graph not supported")
+ G=empty_graph(2*n,create_using)
+ G.name="ladder_graph_(%d)"%n
+ G.add_edges_from([(v,v+1) for v in range(n-1)])
+ G.add_edges_from([(v,v+1) for v in range(n,2*n-1)])
+ G.add_edges_from([(v,v+n) for v in range(n)])
+ return G
+
+def lollipop_graph(m,n,create_using=None):
+ """Return the Lollipop Graph; K_m connected to P_n.
+
+ This is the Barbell Graph without the right barbell.
+
+ For m>1 and n>=0, the complete graph K_m is connected to the
+ path P_n. The resulting m+n nodes are labelled 0,...,m-1 for the
+ complete graph and m,...,m+n-1 for the path. The 2 subgraphs
+ are joined via the edge (m-1,m). If n=0, this is merely a complete
+ graph.
+
+ Node labels are the integers 0 to number_of_nodes - 1.
+
+ (This graph is an extremal example in David Aldous and Jim
+ Fill's etext on Random Walks on Graphs.)
+
+ """
+ if create_using is not None and create_using.is_directed():
+ raise nx.NetworkXError("Directed Graph not supported")
+ if m<2:
+ raise nx.NetworkXError(\
+ "Invalid graph description, m should be >=2")
+ if n<0:
+ raise nx.NetworkXError(\
+ "Invalid graph description, n should be >=0")
+ # the ball
+ G=complete_graph(m,create_using)
+ # the stick
+ G.add_nodes_from([v for v in range(m,m+n)])
+ if n>1:
+ G.add_edges_from([(v,v+1) for v in range(m,m+n-1)])
+ # connect ball to stick
+ if m>0: G.add_edge(m-1,m)
+ G.name="lollipop_graph(%d,%d)"%(m,n)
+ return G
+
+def null_graph(create_using=None):
+ """ Return the Null graph with no nodes or edges.
+
+ See empty_graph for the use of create_using.
+
+ """
+ G=empty_graph(0,create_using)
+ G.name="null_graph()"
+ return G
+
+def path_graph(n,create_using=None):
+ """Return the Path graph P_n of n nodes linearly connected by n-1 edges.
+
+ Node labels are the integers 0 to n - 1.
+ If create_using is a DiGraph then the edges are directed in
+ increasing order.
+
+ """
+ G=empty_graph(n,create_using)
+ G.name="path_graph(%d)"%n
+ G.add_edges_from([(v,v+1) for v in range(n-1)])
+ return G
+
+def star_graph(n,create_using=None):
+ """ Return the Star graph with n+1 nodes: one center node, connected to n outer nodes.
+
+ Node labels are the integers 0 to n.
+
+ """
+ G=complete_bipartite_graph(1,n,create_using)
+ G.name="star_graph(%d)"%n
+ return G
+
+def trivial_graph(create_using=None):
+ """ Return the Trivial graph with one node (with integer label 0) and no edges.
+
+ """
+ G=empty_graph(1,create_using)
+ G.name="trivial_graph()"
+ return G
+
+def wheel_graph(n,create_using=None):
+ """ Return the wheel graph: a single hub node connected to each node of the (n-1)-node cycle graph.
+
+ Node labels are the integers 0 to n - 1.
+
+ """
+ G=star_graph(n-1,create_using)
+ G.name="wheel_graph(%d)"%n
+ G.add_edges_from([(v,v+1) for v in range(1,n-1)])
+ if n>2:
+ G.add_edge(1,n-1)
+ return G
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/generators/degree_seq.py b/lib/python2.7/site-packages/setoolsgui/networkx/generators/degree_seq.py
new file mode 100644
index 0000000..c5b8f95
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/generators/degree_seq.py
@@ -0,0 +1,793 @@
+# -*- coding: utf-8 -*-
+"""Generate graphs with a given degree sequence or expected degree sequence.
+"""
+# Copyright (C) 2004-2013 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+import heapq
+from itertools import combinations, permutations
+import math
+from operator import itemgetter
+import random
+import networkx as nx
+from networkx.utils import random_weighted_sample
+
+__author__ = "\n".join(['Aric Hagberg <aric.hagberg@gmail.com>',
+ 'Pieter Swart <swart@lanl.gov>',
+                        'Dan Schult <dschult@colgate.edu>',
+                        'Joel Miller <joel.c.miller.research@gmail.com>',
+                        'Nathan Lemons <nlemons@gmail.com>',
+                        'Brian Cloteaux <brian.cloteaux@nist.gov>'])
+
+__all__ = ['configuration_model',
+ 'directed_configuration_model',
+ 'expected_degree_graph',
+ 'havel_hakimi_graph',
+ 'directed_havel_hakimi_graph',
+ 'degree_sequence_tree',
+ 'random_degree_sequence_graph']
+
+
+def configuration_model(deg_sequence,create_using=None,seed=None):
+ """Return a random graph with the given degree sequence.
+
+ The configuration model generates a random pseudograph (graph with
+ parallel edges and self loops) by randomly assigning edges to
+ match the given degree sequence.
+
+ Parameters
+ ----------
+ deg_sequence : list of integers
+ Each list entry corresponds to the degree of a node.
+ create_using : graph, optional (default MultiGraph)
+ Return graph of this type. The instance will be cleared.
+ seed : hashable object, optional
+ Seed for random number generator.
+
+ Returns
+ -------
+ G : MultiGraph
+ A graph with the specified degree sequence.
+ Nodes are labeled starting at 0 with an index
+ corresponding to the position in deg_sequence.
+
+ Raises
+ ------
+ NetworkXError
+ If the degree sequence does not have an even sum.
+
+ See Also
+ --------
+ is_valid_degree_sequence
+
+ Notes
+ -----
+ As described by Newman [1]_.
+
+ A non-graphical degree sequence (not realizable by some simple
+ graph) is allowed since this function returns graphs with self
+ loops and parallel edges. An exception is raised if the degree
+ sequence does not have an even sum.
+
+ This configuration model construction process can lead to
+ duplicate edges and loops. You can remove the self-loops and
+ parallel edges (see below) which will likely result in a graph
+ that doesn't have the exact degree sequence specified. This
+ "finite-size effect" decreases as the size of the graph increases.
+
+ References
+ ----------
+ .. [1] M.E.J. Newman, "The structure and function of complex networks",
+ SIAM REVIEW 45-2, pp 167-256, 2003.
+
+ Examples
+ --------
+ >>> from networkx.utils import powerlaw_sequence
+ >>> z=nx.utils.create_degree_sequence(100,powerlaw_sequence)
+ >>> G=nx.configuration_model(z)
+
+ To remove parallel edges:
+
+ >>> G=nx.Graph(G)
+
+ To remove self loops:
+
+ >>> G.remove_edges_from(G.selfloop_edges())
+ """
+ if sum(deg_sequence) % 2 != 0:
+ raise nx.NetworkXError('Invalid degree sequence')
+
+ if create_using is None:
+ create_using = nx.MultiGraph()
+ elif create_using.is_directed():
+ raise nx.NetworkXError("Directed Graph not supported")
+
+ if seed is not None:
+ random.seed(seed)
+
+ # start with empty N-node graph
+ N=len(deg_sequence)
+
+ # allow multiedges and selfloops
+ G=nx.empty_graph(N,create_using)
+
+ if N==0 or max(deg_sequence)==0: # done if no edges
+ return G
+
+ # build stublist, a list of available degree-repeated stubs
+ # e.g. for deg_sequence=[3,2,1,1,1]
+ # initially, stublist=[0,0,0,1,1,2,3,4]
+ # i.e., node 0 has degree=3 and is repeated 3 times, etc.
+ stublist=[]
+ for n in G:
+ for i in range(deg_sequence[n]):
+ stublist.append(n)
+
+ # shuffle stublist and assign pairs by removing 2 elements at a time
+ random.shuffle(stublist)
+ while stublist:
+ n1 = stublist.pop()
+ n2 = stublist.pop()
+ G.add_edge(n1,n2)
+
+ G.name="configuration_model %d nodes %d edges"%(G.order(),G.size())
+ return G
+
+
+def directed_configuration_model(in_degree_sequence,
+ out_degree_sequence,
+ create_using=None,seed=None):
+ """Return a directed random graph with the given degree sequences.
+
+ The configuration model generates a random directed pseudograph
+ (graph with parallel edges and self loops) by randomly assigning
+ edges to match the given degree sequences.
+
+ Parameters
+ ----------
+ in_degree_sequence : list of integers
+ Each list entry corresponds to the in-degree of a node.
+ out_degree_sequence : list of integers
+ Each list entry corresponds to the out-degree of a node.
+ create_using : graph, optional (default MultiDiGraph)
+ Return graph of this type. The instance will be cleared.
+ seed : hashable object, optional
+ Seed for random number generator.
+
+ Returns
+ -------
+ G : MultiDiGraph
+ A graph with the specified degree sequences.
+ Nodes are labeled starting at 0 with an index
+ corresponding to the position in deg_sequence.
+
+ Raises
+ ------
+ NetworkXError
+ If the degree sequences do not have the same sum.
+
+ See Also
+ --------
+ configuration_model
+
+ Notes
+ -----
+ Algorithm as described by Newman [1]_.
+
+ A non-graphical degree sequence (not realizable by some simple
+ graph) is allowed since this function returns graphs with self
+ loops and parallel edges. An exception is raised if the degree
+ sequences do not have the same sum.
+
+ This configuration model construction process can lead to
+ duplicate edges and loops. You can remove the self-loops and
+ parallel edges (see below) which will likely result in a graph
+ that doesn't have the exact degree sequence specified. This
+ "finite-size effect" decreases as the size of the graph increases.
+
+ References
+ ----------
+ .. [1] Newman, M. E. J. and Strogatz, S. H. and Watts, D. J.
+ Random graphs with arbitrary degree distributions and their applications
+ Phys. Rev. E, 64, 026118 (2001)
+
+ Examples
+ --------
+ >>> D=nx.DiGraph([(0,1),(1,2),(2,3)]) # directed path graph
+ >>> din=list(D.in_degree().values())
+ >>> dout=list(D.out_degree().values())
+ >>> din.append(1)
+ >>> dout[0]=2
+ >>> D=nx.directed_configuration_model(din,dout)
+
+ To remove parallel edges:
+
+ >>> D=nx.DiGraph(D)
+
+ To remove self loops:
+
+ >>> D.remove_edges_from(D.selfloop_edges())
+ """
+ if not sum(in_degree_sequence) == sum(out_degree_sequence):
+ raise nx.NetworkXError('Invalid degree sequences. '
+ 'Sequences must have equal sums.')
+
+ if create_using is None:
+ create_using = nx.MultiDiGraph()
+
+ if seed is not None:
+ random.seed(seed)
+
+ nin=len(in_degree_sequence)
+ nout=len(out_degree_sequence)
+
+ # pad in- or out-degree sequence with zeros to match lengths
+ if nin>nout:
+ out_degree_sequence.extend((nin-nout)*[0])
+ else:
+ in_degree_sequence.extend((nout-nin)*[0])
+
+ # start with empty N-node graph
+ N=len(in_degree_sequence)
+
+ # allow multiedges and selfloops
+ G=nx.empty_graph(N,create_using)
+
+ if N==0 or max(in_degree_sequence)==0: # done if no edges
+ return G
+
+ # build stublists of available degree-repeated stubs
+ # e.g. for degree_sequence=[3,2,1,1,1]
+ # initially, stublist=[0,0,0,1,1,2,3,4]
+ # i.e., node 0 has degree=3 and is repeated 3 times, etc.
+ in_stublist=[]
+ for n in G:
+ for i in range(in_degree_sequence[n]):
+ in_stublist.append(n)
+
+ out_stublist=[]
+ for n in G:
+ for i in range(out_degree_sequence[n]):
+ out_stublist.append(n)
+
+ # shuffle stublists and assign pairs by removing 2 elements at a time
+ random.shuffle(in_stublist)
+ random.shuffle(out_stublist)
+ while in_stublist and out_stublist:
+ source = out_stublist.pop()
+ target = in_stublist.pop()
+ G.add_edge(source,target)
+
+ G.name="directed configuration_model %d nodes %d edges"%(G.order(),G.size())
+ return G
+
+
+def expected_degree_graph(w, seed=None, selfloops=True):
+ r"""Return a random graph with given expected degrees.
+
+ Given a sequence of expected degrees `W=(w_0,w_1,\ldots,w_{n-1})`
+ of length `n` this algorithm assigns an edge between node `u` and
+ node `v` with probability
+
+ .. math::
+
+ p_{uv} = \frac{w_u w_v}{\sum_k w_k} .
+
+ Parameters
+ ----------
+ w : list
+ The list of expected degrees.
+ selfloops: bool (default=True)
+ Set to False to remove the possibility of self-loop edges.
+ seed : hashable object, optional
+ The seed for the random number generator.
+
+ Returns
+ -------
+ Graph
+
+ Examples
+ --------
+ >>> z=[10 for i in range(100)]
+ >>> G=nx.expected_degree_graph(z)
+
+ Notes
+ -----
+ The nodes have integer labels corresponding to the index of the expected
+ degrees input sequence.
+
+ The complexity of this algorithm is `\mathcal{O}(n+m)` where `n` is the
+ number of nodes and `m` is the expected number of edges.
+
+ The model in [1]_ includes the possibility of self-loop edges.
+ Set selfloops=False to produce a graph without self loops.
+
+ For finite graphs this model doesn't produce exactly the given
+ expected degree sequence. Instead the expected degrees are as
+ follows.
+
+ For the case without self loops (selfloops=False),
+
+ .. math::
+
+ E[deg(u)] = \sum_{v \ne u} p_{uv}
+ = w_u \left( 1 - \frac{w_u}{\sum_k w_k} \right) .
+
+
+ NetworkX uses the standard convention that a self-loop edge counts 2
+ in the degree of a node, so with self loops (selfloops=True),
+
+ .. math::
+
+ E[deg(u)] = \sum_{v \ne u} p_{uv} + 2 p_{uu}
+ = w_u \left( 1 + \frac{w_u}{\sum_k w_k} \right) .
+
+ References
+ ----------
+ .. [1] Fan Chung and L. Lu, Connected components in random graphs with
+ given expected degree sequences, Ann. Combinatorics, 6,
+ pp. 125-145, 2002.
+ .. [2] Joel Miller and Aric Hagberg,
+ Efficient generation of networks with given expected degrees,
+ in Algorithms and Models for the Web-Graph (WAW 2011),
+ Alan Frieze, Paul Horn, and Paweł Prałat (Eds), LNCS 6732,
+ pp. 115-126, 2011.
+ """
+ n = len(w)
+ G=nx.empty_graph(n)
+ if n==0 or max(w)==0: # done if no edges
+ return G
+ if seed is not None:
+ random.seed(seed)
+ rho = 1/float(sum(w))
+ # sort weights, largest first
+ # preserve order of weights for integer node label mapping
+ order = sorted(enumerate(w),key=itemgetter(1),reverse=True)
+ mapping = dict((c,uv[0]) for c,uv in enumerate(order))
+ seq = [v for u,v in order]
+ last=n
+ if not selfloops:
+ last-=1
+ for u in range(last):
+ v = u
+ if not selfloops:
+ v += 1
+ factor = seq[u] * rho
+ p = seq[v]*factor
+ if p>1:
+ p = 1
+ while v<n and p>0:
+ if p != 1:
+ r = random.random()
+ v += int(math.floor(math.log(r)/math.log(1-p)))
+ if v < n:
+ q = seq[v]*factor
+ if q>1:
+ q = 1
+ if random.random() < q/p:
+ G.add_edge(mapping[u],mapping[v])
+ v += 1
+ p = q
+ return G
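+
+# Sketch (illustrative, not in the original source): with selfloops=False no
+# self-loop edges can occur, matching the expectation formula above.
+#   >>> G = expected_degree_graph([10]*100, seed=42, selfloops=False)
+#   >>> G.number_of_selfloops()
+#   0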
+
+def havel_hakimi_graph(deg_sequence,create_using=None):
+ """Return a simple graph with given degree sequence constructed
+ using the Havel-Hakimi algorithm.
+
+ Parameters
+ ----------
+ deg_sequence: list of integers
+ Each integer corresponds to the degree of a node (need not be sorted).
+ create_using : graph, optional (default Graph)
+ Return graph of this type. The instance will be cleared.
+ Directed graphs are not allowed.
+
+ Raises
+ ------
+ NetworkXException
+ For a non-graphical degree sequence (i.e. one
+ not realizable by some simple graph).
+
+ Notes
+ -----
+ The Havel-Hakimi algorithm constructs a simple graph by
+ successively connecting the node of highest degree to other nodes
+ of highest degree, resorting remaining nodes by degree, and
+ repeating the process. The resulting graph has a high
+ degree-assortativity. Nodes are labeled 0,..,len(deg_sequence)-1,
+ corresponding to their position in deg_sequence.
+
+ The basic algorithm is from Hakimi [1]_ and was generalized by
+ Kleitman and Wang [2]_.
+
+ References
+ ----------
+ .. [1] Hakimi S., On Realizability of a Set of Integers as
+ Degrees of the Vertices of a Linear Graph. I,
+ Journal of SIAM, 10(3), pp. 496-506 (1962)
+ .. [2] Kleitman D.J. and Wang D.L.
+ Algorithms for Constructing Graphs and Digraphs with Given Valences
+ and Factors, Discrete Mathematics, 6(1), pp. 79-88 (1973)
+ """
+ if not nx.is_valid_degree_sequence(deg_sequence):
+ raise nx.NetworkXError('Invalid degree sequence')
+ if create_using is not None:
+ if create_using.is_directed():
+ raise nx.NetworkXError("Directed graphs are not supported")
+
+ p = len(deg_sequence)
+ G=nx.empty_graph(p,create_using)
+ num_degs = []
+ for i in range(p):
+ num_degs.append([])
+ dmax, dsum, n = 0, 0, 0
+ for d in deg_sequence:
+ # Process only the non-zero integers
+ if d>0:
+ num_degs[d].append(n)
+ dmax, dsum, n = max(dmax,d), dsum+d, n+1
+ # Return graph if no edges
+ if n==0:
+ return G
+
+ modstubs = [(0,0)]*(dmax+1)
+ # Successively reduce degree sequence by removing the maximum degree
+ while n > 0:
+ # Retrieve the maximum degree in the sequence
+ while len(num_degs[dmax]) == 0:
+ dmax -= 1
+ # If there are not enough stubs to connect to, then the sequence is
+ # not graphical
+ if dmax > n-1:
+ raise nx.NetworkXError('Non-graphical integer sequence')
+
+ # Remove largest stub in list
+ source = num_degs[dmax].pop()
+ n -= 1
+ # Reduce the next dmax largest stubs
+ mslen = 0
+ k = dmax
+ for i in range(dmax):
+ while len(num_degs[k]) == 0:
+ k -= 1
+ target = num_degs[k].pop()
+ G.add_edge(source, target)
+ n -= 1
+ if k > 1:
+ modstubs[mslen] = (k-1,target)
+ mslen += 1
+ # Add back to the list any nonzero stubs that were removed
+ for i in range(mslen):
+ (stubval, stubtarget) = modstubs[i]
+ num_degs[stubval].append(stubtarget)
+ n += 1
+
+ G.name="havel_hakimi_graph %d nodes %d edges"%(G.order(),G.size())
+ return G
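+
+# Sketch (illustrative): unlike configuration_model above, on a graphical
+# sequence Havel-Hakimi realizes the requested degrees exactly.
+#   >>> G = havel_hakimi_graph([3, 3, 2, 2, 1, 1])
+#   >>> sorted(G.degree().values())
+#   [1, 1, 2, 2, 3, 3]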
+
+def directed_havel_hakimi_graph(in_deg_sequence,
+ out_deg_sequence,
+ create_using=None):
+ """Return a directed graph with the given degree sequences.
+
+ Parameters
+ ----------
+ in_deg_sequence : list of integers
+ Each list entry corresponds to the in-degree of a node.
+ out_deg_sequence : list of integers
+ Each list entry corresponds to the out-degree of a node.
+ create_using : graph, optional (default DiGraph)
+ Return graph of this type. The instance will be cleared.
+
+ Returns
+ -------
+ G : DiGraph
+ A graph with the specified degree sequences.
+ Nodes are labeled starting at 0 with an index
+ corresponding to the position in deg_sequence
+
+ Raises
+ ------
+ NetworkXError
+ If the degree sequences are not digraphical.
+
+ See Also
+ --------
+ configuration_model
+
+ Notes
+ -----
+ Algorithm as described by Kleitman and Wang [1]_.
+
+ References
+ ----------
+ .. [1] D.J. Kleitman and D.L. Wang
+ Algorithms for Constructing Graphs and Digraphs with Given Valences
+ and Factors, Discrete Mathematics, 6(1), pp. 79-88 (1973)
+ """
+ assert(nx.utils.is_list_of_ints(in_deg_sequence))
+ assert(nx.utils.is_list_of_ints(out_deg_sequence))
+
+ if create_using is None:
+ create_using = nx.DiGraph()
+
+ # Process the sequences and form two heaps to store degree pairs with
+ # either zero or nonzero out degrees
+ sumin, sumout, nin, nout = 0, 0, len(in_deg_sequence), len(out_deg_sequence)
+ maxn = max(nin, nout)
+ G = nx.empty_graph(maxn,create_using)
+ if maxn==0:
+ return G
+ maxin = 0
+ stubheap, zeroheap = [ ], [ ]
+ for n in range(maxn):
+ in_deg, out_deg = 0, 0
+ if n<nout:
+ out_deg = out_deg_sequence[n]
+ if n<nin:
+ in_deg = in_deg_sequence[n]
+ if in_deg<0 or out_deg<0:
+ raise nx.NetworkXError(
+ 'Invalid degree sequences. Sequence values must be non-negative.')
+ sumin, sumout, maxin = sumin+in_deg, sumout+out_deg, max(maxin, in_deg)
+ if in_deg > 0:
+ stubheap.append((-1*out_deg, -1*in_deg,n))
+ elif out_deg > 0:
+ zeroheap.append((-1*out_deg,n))
+ if sumin != sumout:
+ raise nx.NetworkXError(
+ 'Invalid degree sequences. Sequences must have equal sums.')
+ heapq.heapify(stubheap)
+ heapq.heapify(zeroheap)
+
+ modstubs = [(0,0,0)]*(maxin+1)
+ # Successively reduce degree sequence by removing the maximum
+ while stubheap:
+ # Remove first value in the sequence with a non-zero in degree
+ (freeout, freein, target) = heapq.heappop(stubheap)
+ freein *= -1
+ if freein > len(stubheap)+len(zeroheap):
+ raise nx.NetworkXError('Non-digraphical integer sequence')
+
+ # Attach arcs from the nodes with the most stubs
+ mslen = 0
+ for i in range(freein):
+ if zeroheap and (not stubheap or stubheap[0][0] > zeroheap[0][0]):
+ (stubout, stubsource) = heapq.heappop(zeroheap)
+ stubin = 0
+ else:
+ (stubout, stubin, stubsource) = heapq.heappop(stubheap)
+ if stubout == 0:
+ raise nx.NetworkXError('Non-digraphical integer sequence')
+ G.add_edge(stubsource, target)
+ # Check if source is now totally connected
+ if stubout+1<0 or stubin<0:
+ modstubs[mslen] = (stubout+1, stubin, stubsource)
+ mslen += 1
+
+ # Add the nodes back to the heaps that still have available stubs
+ for i in range(mslen):
+ stub = modstubs[i]
+ if stub[1] < 0:
+ heapq.heappush(stubheap, stub)
+ else:
+ heapq.heappush(zeroheap, (stub[0], stub[2]))
+ if freeout<0:
+ heapq.heappush(zeroheap, (freeout, target))
+
+ G.name="directed_havel_hakimi_graph %d nodes %d edges"%(G.order(),G.size())
+ return G
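+
+# Sketch (illustrative): equal-sum digraphical sequences are realized exactly;
+# here each node receives in-degree 1 and out-degree 1.
+#   >>> D = directed_havel_hakimi_graph([1, 1, 1], [1, 1, 1])
+#   >>> sorted(D.in_degree().values()), sorted(D.out_degree().values())
+#   ([1, 1, 1], [1, 1, 1])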
+
+def degree_sequence_tree(deg_sequence,create_using=None):
+ """Make a tree for the given degree sequence.
+
+ A tree has #nodes-#edges=1 so
+ the degree sequence must have
+ len(deg_sequence)-sum(deg_sequence)/2=1
+ """
+
+ if len(deg_sequence) - sum(deg_sequence)/2.0 != 1.0:
+ raise nx.NetworkXError("Degree sequence invalid")
+ if create_using is not None and create_using.is_directed():
+ raise nx.NetworkXError("Directed Graph not supported")
+
+ # single node tree
+ if len(deg_sequence)==1:
+ G=nx.empty_graph(0,create_using)
+ return G
+
+ # all degrees greater than 1
+ deg=[s for s in deg_sequence if s>1]
+ deg.sort(reverse=True)
+
+ # make path graph as backbone
+ n=len(deg)+2
+ G=nx.path_graph(n,create_using)
+ last=n
+
+ # add the leaves
+ for source in range(1,n-1):
+ nedges=deg.pop()-2
+ for target in range(last,last+nedges):
+ G.add_edge(source, target)
+ last+=nedges
+
+ # in case we added one too many
+ if len(G.degree())>len(deg_sequence):
+ G.remove_node(0)
+ return G
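+
+# Sketch (illustrative): [3, 2, 1, 1, 1] satisfies the tree condition
+# len(seq) - sum(seq)/2 == 1, so a tree with that degree sequence results.
+#   >>> T = degree_sequence_tree([3, 2, 1, 1, 1])
+#   >>> sorted(T.degree().values())
+#   [1, 1, 1, 2, 3]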
+
+def random_degree_sequence_graph(sequence, seed=None, tries=10):
+ r"""Return a simple random graph with the given degree sequence.
+
+ If the maximum degree `d_m` in the sequence is `O(m^{1/4})` then the
+ algorithm produces almost uniform random graphs in `O(m d_m)` time
+ where `m` is the number of edges.
+
+ Parameters
+ ----------
+ sequence : list of integers
+ Sequence of degrees
+ seed : hashable object, optional
+ Seed for random number generator
+ tries : int, optional
+ Maximum number of tries to create a graph
+
+ Returns
+ -------
+ G : Graph
+ A graph with the specified degree sequence.
+ Nodes are labeled starting at 0 with an index
+ corresponding to the position in the sequence.
+
+ Raises
+ ------
+ NetworkXUnfeasible
+ If the degree sequence is not graphical.
+ NetworkXError
+ If a graph is not produced in specified number of tries
+
+ See Also
+ --------
+ is_valid_degree_sequence, configuration_model
+
+ Notes
+ -----
+ The generator algorithm [1]_ is not guaranteed to produce a graph.
+
+ References
+ ----------
+ .. [1] Mohsen Bayati, Jeong Han Kim, and Amin Saberi,
+ A sequential algorithm for generating random graphs.
+ Algorithmica, Volume 58, Number 4, 860-910,
+ DOI: 10.1007/s00453-009-9340-1
+
+ Examples
+ --------
+ >>> sequence = [1, 2, 2, 3]
+ >>> G = nx.random_degree_sequence_graph(sequence)
+ >>> sorted(G.degree().values())
+ [1, 2, 2, 3]
+ """
+ DSRG = DegreeSequenceRandomGraph(sequence, seed=seed)
+ for try_n in range(tries):
+ try:
+ return DSRG.generate()
+ except nx.NetworkXUnfeasible:
+ pass
+ raise nx.NetworkXError('failed to generate graph in %d tries'%tries)
+
+class DegreeSequenceRandomGraph(object):
+ # class to generate random graphs with a given degree sequence
+ # use random_degree_sequence_graph()
+ def __init__(self, degree, seed=None):
+ if not nx.is_valid_degree_sequence(degree):
+ raise nx.NetworkXUnfeasible('degree sequence is not graphical')
+ if seed is not None:
+ random.seed(seed)
+ self.degree = list(degree)
+ # node labels are integers 0,...,n-1
+ self.m = sum(self.degree)/2.0 # number of edges
+ try:
+ self.dmax = max(self.degree) # maximum degree
+ except ValueError:
+ self.dmax = 0
+
+ def generate(self):
+ # remaining_degree is mapping from int->remaining degree
+ self.remaining_degree = dict(enumerate(self.degree))
+ # add all nodes to make sure we get isolated nodes
+ self.graph = nx.Graph()
+ self.graph.add_nodes_from(self.remaining_degree)
+ # remove zero degree nodes
+ for n,d in list(self.remaining_degree.items()):
+ if d == 0:
+ del self.remaining_degree[n]
+ if len(self.remaining_degree) > 0:
+ # build graph in three phases according to how many unmatched edges
+ self.phase1()
+ self.phase2()
+ self.phase3()
+ return self.graph
+
+ def update_remaining(self, u, v, aux_graph=None):
+ # decrement remaining nodes, modify auxiliary graph if in phase3
+ if aux_graph is not None:
+ # remove edges from auxiliary graph
+ aux_graph.remove_edge(u,v)
+ if self.remaining_degree[u] == 1:
+ del self.remaining_degree[u]
+ if aux_graph is not None:
+ aux_graph.remove_node(u)
+ else:
+ self.remaining_degree[u] -= 1
+ if self.remaining_degree[v] == 1:
+ del self.remaining_degree[v]
+ if aux_graph is not None:
+ aux_graph.remove_node(v)
+ else:
+ self.remaining_degree[v] -= 1
+
+ def p(self,u,v):
+ # degree probability
+ return 1 - self.degree[u]*self.degree[v]/(4.0*self.m)
+
+ def q(self,u,v):
+ # remaining degree probability
+ norm = float(max(self.remaining_degree.values()))**2
+ return self.remaining_degree[u]*self.remaining_degree[v]/norm
+
+ def suitable_edge(self):
+ # Check if there is a suitable edge that is not in the graph
+ # True if an (arbitrary) remaining node has at least one possible
+ # connection to another remaining node
+ nodes = iter(self.remaining_degree)
+ u = next(nodes) # one arbitrary node
+ for v in nodes: # loop over all other remaining nodes
+ if not self.graph.has_edge(u, v):
+ return True
+ return False
+
+ def phase1(self):
+ # choose node pairs from (degree) weighted distribution
+ while sum(self.remaining_degree.values()) >= 2 * self.dmax**2:
+ u,v = sorted(random_weighted_sample(self.remaining_degree, 2))
+ if self.graph.has_edge(u,v):
+ continue
+ if random.random() < self.p(u,v): # accept edge
+ self.graph.add_edge(u,v)
+ self.update_remaining(u,v)
+
+ def phase2(self):
+ # choose remaining nodes uniformly at random and use rejection sampling
+ while len(self.remaining_degree) >= 2 * self.dmax:
+ norm = float(max(self.remaining_degree.values()))**2
+ while True:
+ u,v = sorted(random.sample(self.remaining_degree.keys(), 2))
+ if self.graph.has_edge(u,v):
+ continue
+ if random.random() < self.q(u,v):
+ break
+ if random.random() < self.p(u,v): # accept edge
+ self.graph.add_edge(u,v)
+ self.update_remaining(u,v)
+
+ def phase3(self):
+ # build potential remaining edges and choose with rejection sampling
+ potential_edges = combinations(self.remaining_degree, 2)
+ # build auxiliary graph of potential edges not already in graph
+ H = nx.Graph([(u,v) for (u,v) in potential_edges
+ if not self.graph.has_edge(u,v)])
+ while self.remaining_degree:
+ if not self.suitable_edge():
+ raise nx.NetworkXUnfeasible('no suitable edges left')
+ while True:
+ u,v = sorted(random.choice(H.edges()))
+ if random.random() < self.q(u,v):
+ break
+ if random.random() < self.p(u,v): # accept edge
+ self.graph.add_edge(u,v)
+ self.update_remaining(u,v, aux_graph=H)
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/generators/directed.py b/lib/python2.7/site-packages/setoolsgui/networkx/generators/directed.py
new file mode 100644
index 0000000..d1dc712
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/generators/directed.py
@@ -0,0 +1,304 @@
+"""
+Generators for some directed graphs.
+
+gn_graph: growing network
+gnc_graph: growing network with copying
+gnr_graph: growing network with redirection
+scale_free_graph: scale free directed graph
+
+"""
+# Copyright (C) 2006-2009 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+__author__ ="""Aric Hagberg (hagberg@lanl.gov)\nWillem Ligtenberg (W.P.A.Ligtenberg@tue.nl)"""
+
+__all__ = ['gn_graph', 'gnc_graph', 'gnr_graph','scale_free_graph']
+
+import random
+
+import networkx as nx
+from networkx.generators.classic import empty_graph
+from networkx.utils import discrete_sequence
+
+
+def gn_graph(n,kernel=None,create_using=None,seed=None):
+ """Return the GN digraph with n nodes.
+
+ The GN (growing network) graph is built by adding nodes one at a time with
+ a link to one previously added node. The target node for the link is
+ chosen with probability based on degree. The default attachment kernel is
+ a linear function of degree.
+
+ The graph is always a (directed) tree.
+
+ Parameters
+ ----------
+ n : int
+ The number of nodes for the generated graph.
+ kernel : function
+ The attachment kernel.
+ create_using : graph, optional (default DiGraph)
+ Return graph of this type. The instance will be cleared.
+ seed : hashable object, optional
+ The seed for the random number generator.
+
+ Examples
+ --------
+ >>> D=nx.gn_graph(10) # the GN graph
+ >>> G=D.to_undirected() # the undirected version
+
+ To specify an attachment kernel use the kernel keyword
+
+ >>> D=nx.gn_graph(10,kernel=lambda x:x**1.5) # A_k=k^1.5
+
+ References
+ ----------
+ .. [1] P. L. Krapivsky and S. Redner,
+ Organization of Growing Random Networks,
+ Phys. Rev. E, 63, 066123, 2001.
+ """
+ if create_using is None:
+ create_using = nx.DiGraph()
+ elif not create_using.is_directed():
+ raise nx.NetworkXError("Directed Graph required in create_using")
+
+ if kernel is None:
+ kernel = lambda x: x
+
+ if seed is not None:
+ random.seed(seed)
+
+ G=empty_graph(1,create_using)
+ G.name="gn_graph(%s)"%(n)
+
+ if n==1:
+ return G
+
+ G.add_edge(1,0) # get started
+ ds=[1,1] # degree sequence
+
+ for source in range(2,n):
+ # compute distribution from kernel and degree
+ dist=[kernel(d) for d in ds]
+ # choose target from discrete distribution
+ target=discrete_sequence(1,distribution=dist)[0]
+ G.add_edge(source,target)
+ ds.append(1) # the source has only one link (degree one)
+ ds[target]+=1 # add one to the target link degree
+ return G
+
+
+def gnr_graph(n,p,create_using=None,seed=None):
+ """Return the GNR digraph with n nodes and redirection probability p.
+
+ The GNR (growing network with redirection) graph is built by adding nodes
+ one at a time with a link to one previously added node. The target node
+ is chosen uniformly at random from the previously added nodes. With
+ probability p the link is instead "redirected" to the successor node of
+ the target. The graph is always a (directed) tree.
+
+ Parameters
+ ----------
+ n : int
+ The number of nodes for the generated graph.
+ p : float
+ The redirection probability.
+ create_using : graph, optional (default DiGraph)
+ Return graph of this type. The instance will be cleared.
+ seed : hashable object, optional
+ The seed for the random number generator.
+
+ Examples
+ --------
+ >>> D=nx.gnr_graph(10,0.5) # the GNR graph
+ >>> G=D.to_undirected() # the undirected version
+
+ References
+ ----------
+ .. [1] P. L. Krapivsky and S. Redner,
+ Organization of Growing Random Networks,
+ Phys. Rev. E, 63, 066123, 2001.
+ """
+ if create_using is None:
+ create_using = nx.DiGraph()
+ elif not create_using.is_directed():
+ raise nx.NetworkXError("Directed Graph required in create_using")
+
+ if seed is not None:
+ random.seed(seed)
+
+ G=empty_graph(1,create_using)
+ G.name="gnr_graph(%s,%s)"%(n,p)
+
+ if n==1:
+ return G
+
+ for source in range(1,n):
+ target=random.randrange(0,source)
+ if random.random() < p and target !=0:
+ target=G.successors(target)[0]
+ G.add_edge(source,target)
+
+ return G
+
+
+def gnc_graph(n,create_using=None,seed=None):
+ """Return the GNC digraph with n nodes.
+
+ The GNC (growing network with copying) graph is built by adding nodes one
+ at a time with a link to one previously added node (chosen uniformly at
+ random) and to all of that node's successors.
+
+ Parameters
+ ----------
+ n : int
+ The number of nodes for the generated graph.
+ create_using : graph, optional (default DiGraph)
+ Return graph of this type. The instance will be cleared.
+ seed : hashable object, optional
+ The seed for the random number generator.
+
+ References
+ ----------
+ .. [1] P. L. Krapivsky and S. Redner,
+ Network Growth by Copying,
+ Phys. Rev. E, 71, 036118, 2005.
+ """
+ if create_using is None:
+ create_using = nx.DiGraph()
+ elif not create_using.is_directed():
+ raise nx.NetworkXError("Directed Graph required in create_using")
+
+ if seed is not None:
+ random.seed(seed)
+
+ G=empty_graph(1,create_using)
+ G.name="gnc_graph(%s)"%(n)
+
+ if n==1:
+ return G
+
+ for source in range(1,n):
+ target=random.randrange(0,source)
+ for succ in G.successors(target):
+ G.add_edge(source,succ)
+ G.add_edge(source,target)
+
+ return G
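+
+# Sketch (illustrative): node 1 always links to node 0 first, and copying
+# preserves a directed path from every later node back to node 0.
+#   >>> D = gnc_graph(10, seed=1)  # seed value chosen arbitrarily
+#   >>> len(D), D.in_degree(0) > 0
+#   (10, True)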
+
+
+def scale_free_graph(n,
+ alpha=0.41,
+ beta=0.54,
+ gamma=0.05,
+ delta_in=0.2,
+ delta_out=0,
+ create_using=None,
+ seed=None):
+ """Return a scale free directed graph.
+
+ Parameters
+ ----------
+ n : integer
+ Number of nodes in graph
+ alpha : float
+ Probability for adding a new node connected to an existing node
+ chosen randomly according to the in-degree distribution.
+ beta : float
+ Probability for adding an edge between two existing nodes.
+ One existing node is chosen randomly according the in-degree
+ distribution and the other chosen randomly according to the out-degree
+ distribution.
+ gamma : float
+ Probability for adding a new node connected to an existing node
+ chosen randomly according to the out-degree distribution.
+ delta_in : float
+ Bias for choosing nodes from in-degree distribution.
+ delta_out : float
+ Bias for choosing nodes from out-degree distribution.
+ create_using : graph, optional (default MultiDiGraph)
+ Use this graph instance to start the process (default=3-cycle).
+ seed : integer, optional
+ Seed for random number generator
+
+ Examples
+ --------
+ >>> G=nx.scale_free_graph(100)
+
+ Notes
+ -----
+ The sum of alpha, beta, and gamma must be 1.
+
+ References
+ ----------
+ .. [1] B. Bollob{\'a}s, C. Borgs, J. Chayes, and O. Riordan,
+ Directed scale-free graphs,
+ Proceedings of the fourteenth annual ACM-SIAM symposium on
+ Discrete algorithms, 132--139, 2003.
+ """
+
+ def _choose_node(G,distribution,delta):
+ cumsum=0.0
+ # normalization
+ psum=float(sum(distribution.values()))+float(delta)*len(distribution)
+ r=random.random()
+ for i in range(0,len(distribution)):
+ cumsum+=(distribution[i]+delta)/psum
+ if r < cumsum:
+ break
+ return i
+
+ if create_using is None:
+ # start with 3-cycle
+ G = nx.MultiDiGraph()
+ G.add_edges_from([(0,1),(1,2),(2,0)])
+ else:
+ # keep existing graph structure?
+ G = create_using
+ if not (G.is_directed() and G.is_multigraph()):
+ raise nx.NetworkXError(
+ "MultiDiGraph required in create_using")
+
+ if alpha <= 0:
+ raise ValueError('alpha must be > 0.')
+ if beta <= 0:
+ raise ValueError('beta must be > 0.')
+ if gamma <= 0:
+ raise ValueError('gamma must be > 0.')
+
+ if alpha+beta+gamma !=1.0:
+ raise ValueError('alpha+beta+gamma must equal 1.')
+
+ G.name="directed_scale_free_graph(%s,alpha=%s,beta=%s,gamma=%s,delta_in=%s,delta_out=%s)"%(n,alpha,beta,gamma,delta_in,delta_out)
+
+ # seed random number generator (uses None as default)
+ random.seed(seed)
+
+ while len(G)<n:
+ r = random.random()
+ # random choice in alpha,beta,gamma ranges
+ if r<alpha:
+ # alpha
+ # add new node v
+ v = len(G)
+ # choose w according to in-degree and delta_in
+ w = _choose_node(G, G.in_degree(),delta_in)
+ elif r < alpha+beta:
+ # beta
+ # choose v according to out-degree and delta_out
+ v = _choose_node(G, G.out_degree(),delta_out)
+ # choose w according to in-degree and delta_in
+ w = _choose_node(G, G.in_degree(),delta_in)
+ else:
+ # gamma
+ # choose v according to out-degree and delta_out
+ v = _choose_node(G, G.out_degree(),delta_out)
+ # add new node w
+ w = len(G)
+ G.add_edge(v,w)
+
+ return G
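+
+# Sketch (illustrative): the attachment probabilities may be overridden as
+# long as alpha + beta + gamma == 1, per the check above.
+#   >>> G = scale_free_graph(100, alpha=0.5, beta=0.4, gamma=0.1)
+#   >>> len(G)
+#   100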
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/generators/ego.py b/lib/python2.7/site-packages/setoolsgui/networkx/generators/ego.py
new file mode 100644
index 0000000..15e5bc2
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/generators/ego.py
@@ -0,0 +1,70 @@
+"""
+Ego graph.
+"""
+# Copyright (C) 2010 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+__author__ = """\n""".join(['Drew Conway <drew.conway@nyu.edu>',
+ 'Aric Hagberg <hagberg@lanl.gov>'])
+__all__ = ['ego_graph']
+
+import networkx as nx
+
+def ego_graph(G,n,radius=1,center=True,undirected=False,distance=None):
+ """Returns induced subgraph of neighbors centered at node n within
+ a given radius.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX Graph or DiGraph
+
+ n : node
+ A single node
+
+ radius : number, optional
+ Include all neighbors of distance<=radius from n.
+
+ center : bool, optional
+ If False, do not include center node in graph
+
+ undirected : bool, optional
+ If True use both in- and out-neighbors of directed graphs.
+
+ distance : key, optional
+ Use specified edge data key as distance. For example, setting
+ distance='weight' will use the edge weight to measure the
+ distance from the node n.
+
+ Notes
+ -----
+ For directed graphs D this produces the "out" neighborhood
+ or successors. If you want the neighborhood of predecessors
+ first reverse the graph with D.reverse(). If you want both
+ directions use the keyword argument undirected=True.
+
+ Node, edge, and graph attributes are copied to the returned subgraph.
+ """
+ if undirected:
+ if distance is not None:
+ sp,_=nx.single_source_dijkstra(G.to_undirected(),
+ n,cutoff=radius,
+ weight=distance)
+ else:
+ sp=nx.single_source_shortest_path_length(G.to_undirected(),
+ n,cutoff=radius)
+ else:
+ if distance is not None:
+ sp,_=nx.single_source_dijkstra(G,
+ n,cutoff=radius,
+ weight=distance)
+ else:
+ sp=nx.single_source_shortest_path_length(G,n,cutoff=radius)
+
+ H=G.subgraph(sp).copy()
+ if not center:
+ H.remove_node(n)
+ return H
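+
+# Sketch (illustrative): the radius-1 ego graph around the middle of a path
+# keeps the center node and its two neighbors.
+#   >>> G = nx.path_graph(5)
+#   >>> H = ego_graph(G, 2)
+#   >>> sorted(H.nodes())
+#   [1, 2, 3]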
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/generators/geometric.py b/lib/python2.7/site-packages/setoolsgui/networkx/generators/geometric.py
new file mode 100644
index 0000000..b64e0c2
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/generators/geometric.py
@@ -0,0 +1,352 @@
+# -*- coding: utf-8 -*-
+"""
+Generators for geometric graphs.
+"""
+# Copyright (C) 2004-2011 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+
+from __future__ import print_function
+
+__author__ = "\n".join(['Aric Hagberg (hagberg@lanl.gov)',
+ 'Dan Schult (dschult@colgate.edu)',
+ 'Ben Edwards (BJEdwards@gmail.com)'])
+
+__all__ = ['random_geometric_graph',
+ 'waxman_graph',
+ 'geographical_threshold_graph',
+ 'navigable_small_world_graph']
+
+from bisect import bisect_left
+from functools import reduce
+from itertools import product
+import math, random, sys
+import networkx as nx
+
+#---------------------------------------------------------------------------
+# Random Geometric Graphs
+#---------------------------------------------------------------------------
+
+def random_geometric_graph(n, radius, dim=2, pos=None):
+ r"""Return the random geometric graph in the unit cube.
+
+ The random geometric graph model places n nodes uniformly at random
+ in the unit cube. Two nodes `u,v` are connected with an edge if
+ `d(u,v)<=r` where `d` is the Euclidean distance and `r` is a radius
+ threshold.
+
+ Parameters
+ ----------
+ n : int
+ Number of nodes
+ radius: float
+ Distance threshold value
+ dim : int, optional
+ Dimension of graph
+ pos : dict, optional
+ A dictionary keyed by node with node positions as values.
+
+ Returns
+ -------
+ Graph
+
+ Examples
+ --------
+ >>> G = nx.random_geometric_graph(20,0.1)
+
+ Notes
+ -----
+ This uses an `n^2` algorithm to build the graph. A faster algorithm
+ is possible using k-d trees.
+
+ The pos keyword can be used to specify node positions so you can create
+ an arbitrary distribution and domain for positions. If you need a distance
+ function other than Euclidean you'll have to hack the algorithm.
+
+ E.g., to use a 2d Gaussian distribution of node positions with mean (0,0)
+ and std. dev. 2
+
+ >>> import random
+ >>> n=20
+ >>> p=dict((i,(random.gauss(0,2),random.gauss(0,2))) for i in range(n))
+ >>> G = nx.random_geometric_graph(n,0.2,pos=p)
+
+ References
+ ----------
+ .. [1] Penrose, Mathew, Random Geometric Graphs,
+ Oxford Studies in Probability, 5, 2003.
+ """
+ G=nx.Graph()
+ G.name="Random Geometric Graph"
+ G.add_nodes_from(range(n))
+ if pos is None:
+ # random positions
+ for n in G:
+ G.node[n]['pos']=[random.random() for i in range(0,dim)]
+ else:
+ nx.set_node_attributes(G,'pos',pos)
+ # connect nodes within "radius" of each other
+ # n^2 algorithm, could use a k-d tree implementation
+ nodes = G.nodes(data=True)
+ while nodes:
+ u,du = nodes.pop()
+ pu = du['pos']
+ for v,dv in nodes:
+ pv = dv['pos']
+ d = sum(((a-b)**2 for a,b in zip(pu,pv)))
+ if d <= radius**2:
+ G.add_edge(u,v)
+ return G
+
+def geographical_threshold_graph(n, theta, alpha=2, dim=2,
+ pos=None, weight=None):
+ r"""Return a geographical threshold graph.
+
+ The geographical threshold graph model places n nodes uniformly at random
+ in a rectangular domain. Each node `u` is assigned a weight `w_u`.
+ Two nodes `u,v` are connected with an edge if
+
+ .. math::
+
+ w_u + w_v \ge \theta r^{\alpha}
+
+ where `r` is the Euclidean distance between `u` and `v`,
+ and `\theta`, `\alpha` are parameters.
+
+ Parameters
+ ----------
+ n : int
+ Number of nodes
+ theta: float
+ Threshold value
+ alpha: float, optional
+ Exponent of distance function
+ dim : int, optional
+ Dimension of graph
+ pos : dict
+ Node positions as a dictionary of tuples keyed by node.
+ weight : dict
+ Node weights as a dictionary of numbers keyed by node.
+
+ Returns
+ -------
+ Graph
+
+ Examples
+ --------
+ >>> G = nx.geographical_threshold_graph(20,50)
+
+ Notes
+ -----
+ If weights are not specified they are assigned to nodes by drawing randomly
+ from the exponential distribution with rate parameter `\lambda=1`.
+ To specify weights from a different distribution assign them to a
+ dictionary and pass it as the weight= keyword
+
+ >>> import random
+ >>> n = 20
+ >>> w=dict((i,random.expovariate(5.0)) for i in range(n))
+ >>> G = nx.geographical_threshold_graph(20,50,weight=w)
+
+ If node positions are not specified they are randomly assigned from the
+ uniform distribution.
+
+ References
+ ----------
+ .. [1] Masuda, N., Miwa, H., Konno, N.:
+ Geographical threshold graphs with small-world and scale-free properties.
+ Physical Review E 71, 036108 (2005)
+ .. [2] Milan Bradonjić, Aric Hagberg and Allon G. Percus,
+ Giant component and connectivity in geographical threshold graphs,
+ in Algorithms and Models for the Web-Graph (WAW 2007),
+ Antony Bonato and Fan Chung (Eds), pp. 209--216, 2007
+ """
+ G=nx.Graph()
+ # add n nodes
+ G.add_nodes_from([v for v in range(n)])
+ if weight is None:
+ # choose weights from exponential distribution
+ for n in G:
+ G.node[n]['weight'] = random.expovariate(1.0)
+ else:
+ nx.set_node_attributes(G,'weight',weight)
+ if pos is None:
+ # random positions
+ for n in G:
+ G.node[n]['pos']=[random.random() for i in range(0,dim)]
+ else:
+ nx.set_node_attributes(G,'pos',pos)
+ G.add_edges_from(geographical_threshold_edges(G, theta, alpha))
+ return G
+
+def geographical_threshold_edges(G, theta, alpha=2):
+ # generate edges for a geographical threshold graph given a graph
+ # with positions and weights assigned as node attributes 'pos' and 'weight'.
+ nodes = G.nodes(data=True)
+ while nodes:
+ u,du = nodes.pop()
+ wu = du['weight']
+ pu = du['pos']
+ for v,dv in nodes:
+ wv = dv['weight']
+ pv = dv['pos']
+ r = math.sqrt(sum(((a-b)**2 for a,b in zip(pu,pv))))
+ if wu+wv >= theta*r**alpha:
+ yield(u,v)
+
+def waxman_graph(n, alpha=0.4, beta=0.1, L=None, domain=(0,0,1,1)):
+ r"""Return a Waxman random graph.
+
+ The Waxman random graph models place n nodes uniformly at random
+ in a rectangular domain. Two nodes u,v are connected with an edge
+ with probability
+
+ .. math::
+ p = \alpha \exp(-d / (\beta L)).
+
+ This function implements both Waxman models.
+
+ Waxman-1: `L` not specified
+ The distance `d` is the Euclidean distance between the nodes u and v.
+ `L` is the maximum distance between all nodes in the graph.
+
+ Waxman-2: `L` specified
+ The distance `d` is chosen randomly in `[0,L]`.
+
+ Parameters
+ ----------
+ n : int
+ Number of nodes
+ alpha: float
+ Model parameter
+ beta: float
+ Model parameter
+ L : float, optional
+ Maximum distance between nodes. If not specified the actual distance
+ is calculated.
+ domain : tuple of numbers, optional
+ Domain size (xmin, ymin, xmax, ymax)
+
+ Returns
+ -------
+ G: Graph
+
+ References
+ ----------
+ .. [1] B. M. Waxman, Routing of multipoint connections.
+ IEEE J. Select. Areas Commun. 6(9),(1988) 1617-1622.
+ """
+ # build graph of n nodes with random positions in the unit square
+ G = nx.Graph()
+ G.add_nodes_from(range(n))
+ (xmin,ymin,xmax,ymax)=domain
+ for n in G:
+ G.node[n]['pos']=(xmin + (xmax-xmin)*random.random(),
+ ymin + (ymax-ymin)*random.random())
+ if L is None:
+ # find maximum distance L between two nodes
+ l = 0
+ pos = list(nx.get_node_attributes(G,'pos').values())
+ while pos:
+ x1,y1 = pos.pop()
+ for x2,y2 in pos:
+ r2 = (x1-x2)**2 + (y1-y2)**2
+ if r2 > l:
+ l = r2
+ l=math.sqrt(l)
+ else:
+ # user specified maximum distance
+ l = L
+
+ nodes=G.nodes()
+ if L is None:
+ # Waxman-1 model
+ # try all pairs, connect randomly based on euclidean distance
+ while nodes:
+ u = nodes.pop()
+ x1,y1 = G.node[u]['pos']
+ for v in nodes:
+ x2,y2 = G.node[v]['pos']
+ r = math.sqrt((x1-x2)**2 + (y1-y2)**2)
+ if random.random() < alpha*math.exp(-r/(beta*l)):
+ G.add_edge(u,v)
+ else:
+ # Waxman-2 model
+ # try all pairs, connect randomly based on randomly chosen l
+ while nodes:
+ u = nodes.pop()
+ for v in nodes:
+ r = random.random()*l
+ if random.random() < alpha*math.exp(-r/(beta*l)):
+ G.add_edge(u,v)
+ return G
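+
+# Sketch (illustrative): Waxman-1 measures distances, Waxman-2 draws them
+# uniformly from [0, L]; both return a graph on n nodes.
+#   >>> G1 = waxman_graph(50)          # Waxman-1, L computed from positions
+#   >>> G2 = waxman_graph(50, L=1.0)   # Waxman-2, random distances
+#   >>> len(G1), len(G2)
+#   (50, 50)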
+
+
+def navigable_small_world_graph(n, p=1, q=1, r=2, dim=2, seed=None):
+ r"""Return a navigable small-world graph.
+
+ A navigable small-world graph is a directed grid with additional
+ long-range connections that are chosen randomly. From [1]_:
+
+ Begin with a set of nodes that are identified with the set of lattice
+ points in an `n \times n` square, `{(i,j): i\in {1,2,\ldots,n}, j\in {1,2,\ldots,n}}`
+ and define the lattice distance between two nodes `(i,j)` and `(k,l)`
+ to be the number of "lattice steps" separating them: `d((i,j),(k,l)) = |k-i|+|l-j|`.
+
+ For a universal constant `p`, the node `u` has a directed edge to every other
+ node within lattice distance `p` (local contacts) .
+
+ For universal constants `q\ge 0` and `r\ge 0` construct directed edges from `u` to `q`
+ other nodes (long-range contacts) using independent random trials; the i'th
+ directed edge from `u` has endpoint `v` with probability proportional to `d(u,v)^{-r}`.
+
+ Parameters
+ ----------
+ n : int
+ The number of nodes.
+ p : int
+ The diameter of short range connections. Each node is connected
+ to every other node within lattice distance p.
+ q : int
+ The number of long-range connections for each node.
+ r : float
+ Exponent for decaying probability of connections. The probability of
+ connecting to a node at lattice distance d is 1/d^r.
+ dim : int
+ Dimension of grid
+ seed : int, optional
+ Seed for random number generator (default=None).
+
+ References
+ ----------
+ .. [1] J. Kleinberg. The small-world phenomenon: An algorithmic
+ perspective. Proc. 32nd ACM Symposium on Theory of Computing, 2000.
+ """
+ if (p < 1):
+ raise nx.NetworkXException("p must be >= 1")
+ if (q < 0):
+ raise nx.NetworkXException("q must be >= 0")
+ if (r < 0):
+ raise nx.NetworkXException("r must be >= 0")
+ if seed is not None:
+ random.seed(seed)
+ G = nx.DiGraph()
+ nodes = list(product(range(n),repeat=dim))
+ for p1 in nodes:
+ probs = [0]
+ for p2 in nodes:
+ if p1==p2:
+ continue
+ d = sum((abs(b-a) for a,b in zip(p1,p2)))
+ if d <= p:
+ G.add_edge(p1,p2)
+ probs.append(d**-r)
+ cdf = list(nx.utils.cumulative_sum(probs))
+ for _ in range(q):
+ target = nodes[bisect_left(cdf,random.uniform(0, cdf[-1]))]
+ G.add_edge(p1,target)
+ return G
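+
+# Sketch (illustrative): with q=0 only local grid contacts remain, so the
+# center of a 3x3 grid points at exactly its four lattice neighbors.
+#   >>> G = navigable_small_world_graph(3, p=1, q=0)
+#   >>> sorted(G.successors((1, 1)))
+#   [(0, 1), (1, 0), (1, 2), (2, 1)]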
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/generators/hybrid.py b/lib/python2.7/site-packages/setoolsgui/networkx/generators/hybrid.py
new file mode 100644
index 0000000..b4936fa
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/generators/hybrid.py
@@ -0,0 +1,116 @@
+"""
+Hybrid
+
+"""
+__author__ = """Aric Hagberg (hagberg@lanl.gov)\nDan Schult (dschult@colgate.edu)"""
+# Copyright (C) 2004-2008 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+
+__all__ = ['kl_connected_subgraph', 'is_kl_connected']
+
+import copy
+import networkx as nx
+
+def kl_connected_subgraph(G,k,l,low_memory=False,same_as_graph=False):
+ """ Returns the maximum locally (k,l) connected subgraph of G.
+
+ (k,l)-connected subgraphs are presented by Fan Chung and Li
+ in "The Small World Phenomenon in hybrid power law graphs"
+ to appear in "Complex Networks" (Ed. E. Ben-Naim) Lecture
+ Notes in Physics, Springer (2004)
+
+ If low_memory=True, use a slightly slower but lower-memory version.
+ If same_as_graph=True, return a tuple of the subgraph and a flag
+ indicating whether G itself is (k,l)-connected.
+ """
+ H=copy.deepcopy(G) # subgraph we construct by removing from G
+
+ graphOK=True
+ deleted_some=True # hack to start off the while loop
+ while deleted_some:
+ deleted_some=False
+ for edge in H.edges():
+ (u,v)=edge
+ ### Get copy of graph needed for this search
+ if low_memory:
+ verts=set([u,v])
+ for i in range(k):
+ [verts.update(G.neighbors(w)) for w in verts.copy()]
+ G2=G.subgraph(list(verts))
+ else:
+ G2=copy.deepcopy(G)
+ ###
+ path=[u,v]
+ cnt=0
+ accept=0
+ while path:
+ cnt += 1 # Found a path
+ if cnt>=l:
+ accept=1
+ break
+ # remove edges along this path
+ prev=u
+ for w in path:
+ if prev!=w:
+ G2.remove_edge(prev,w)
+ prev=w
+# path=shortest_path(G2,u,v,k) # ??? should "Cutoff" be k+1?
+ try:
+ path=nx.shortest_path(G2,u,v) # ??? should "Cutoff" be k+1?
+ except nx.NetworkXNoPath:
+ path = False
+ # No Other Paths
+ if accept==0:
+ H.remove_edge(u,v)
+ deleted_some=True
+ graphOK=False
+ # We looked through all edges and removed none of them.
+ # So, H is the maximal (k,l)-connected subgraph of G
+ if same_as_graph:
+ return (H,graphOK)
+ return H
+
+def is_kl_connected(G,k,l,low_memory=False):
+ """Returns True if G is kl connected."""
+ graphOK=True
+ for edge in G.edges():
+ (u,v)=edge
+ ### Get copy of graph needed for this search
+ if low_memory:
+ verts=set([u,v])
+ for i in range(k):
+ [verts.update(G.neighbors(w)) for w in verts.copy()]
+ G2=G.subgraph(verts)
+ else:
+ G2=copy.deepcopy(G)
+ ###
+ path=[u,v]
+ cnt=0
+ accept=0
+ while path:
+ cnt += 1 # Found a path
+ if cnt>=l:
+ accept=1
+ break
+ # remove edges along this path
+ prev=u
+ for w in path:
+ if w!=prev:
+ G2.remove_edge(prev,w)
+ prev=w
+# path=shortest_path(G2,u,v,k) # ??? should "Cutoff" be k+1?
+ try:
+ path=nx.shortest_path(G2,u,v) # ??? should "Cutoff" be k+1?
+ except nx.NetworkXNoPath:
+ path = False
+ # No Other Paths
+ if accept==0:
+ graphOK=False
+ break
+ # return status
+ return graphOK
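+
+# Sketch (illustrative): a 4-cycle gives exactly two edge-disjoint paths
+# between the endpoints of any edge, so it is (k,2)-connected but not (k,3).
+#   >>> G = nx.cycle_graph(4)
+#   >>> is_kl_connected(G, 2, 2), is_kl_connected(G, 2, 3)
+#   (True, False)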
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/generators/intersection.py b/lib/python2.7/site-packages/setoolsgui/networkx/generators/intersection.py
new file mode 100644
index 0000000..cc7903d
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/generators/intersection.py
@@ -0,0 +1,118 @@
+# -*- coding: utf-8 -*-
+"""
+Generators for random intersection graphs.
+"""
+# Copyright (C) 2011 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+import random
+import networkx as nx
+__author__ = "\n".join(['Aric Hagberg (hagberg@lanl.gov)'])
+
+__all__ = ['uniform_random_intersection_graph',
+ 'k_random_intersection_graph',
+ 'general_random_intersection_graph',
+ ]
+
+def uniform_random_intersection_graph(n, m, p, seed=None):
+ """Return a uniform random intersection graph.
+
+ Parameters
+ ----------
+ n : int
+ The number of nodes in the first bipartite set (nodes)
+ m : int
+ The number of nodes in the second bipartite set (attributes)
+ p : float
+ Probability of connecting nodes between bipartite sets
+ seed : int, optional
+ Seed for random number generator (default=None).
+
+ See Also
+ --------
+ gnp_random_graph
+
+ References
+ ----------
+ .. [1] K.B. Singer-Cohen, Random Intersection Graphs, 1995,
+ PhD thesis, Johns Hopkins University
+ .. [2] Fill, J. A., Scheinerman, E. R., and Singer-Cohen, K. B.,
+ Random intersection graphs when m = ω(n):
+ An equivalence theorem relating the evolution of the G(n, m, p)
+ and G(n, p) models. Random Struct. Algorithms 16, 2 (2000), 156–176.
+ """
+ G=nx.bipartite_random_graph(n, m, p, seed=seed)
+ return nx.projected_graph(G, range(n))
+
+def k_random_intersection_graph(n,m,k):
+ """Return an intersection graph with randomly chosen attribute sets for
+ each node that are of equal size (k).
+
+ Parameters
+ ----------
+ n : int
+ The number of nodes in the first bipartite set (nodes)
+ m : int
+ The number of nodes in the second bipartite set (attributes)
+ k : int
+ Size of attribute set to assign to each node.
+
+ See Also
+ --------
+ gnp_random_graph, uniform_random_intersection_graph
+
+ References
+ ----------
+ .. [1] Godehardt, E., and Jaworski, J.
+ Two models of random intersection graphs and their applications.
+ Electronic Notes in Discrete Mathematics 10 (2001), 129--132.
+ """
+ G = nx.empty_graph(n + m)
+ mset = range(n,n+m)
+ for v in range(n):
+ targets = random.sample(mset, k)
+ G.add_edges_from(zip([v]*len(targets), targets))
+ return nx.projected_graph(G, range(n))
+
+def general_random_intersection_graph(n,m,p):
+ """Return a random intersection graph with independent probabilities
+ for connections between node and attribute sets.
+
+ Parameters
+ ----------
+ n : int
+ The number of nodes in the first bipartite set (nodes)
+ m : int
+ The number of nodes in the second bipartite set (attributes)
+ p : list of floats of length m
+ Probabilities for connecting nodes to each attribute
+
+ See Also
+ --------
+ gnp_random_graph, uniform_random_intersection_graph
+
+ References
+ ----------
+ .. [1] Nikoletseas, S. E., Raptopoulos, C., and Spirakis, P. G.
+ The existence and efficient construction of large independent sets
+ in general random intersection graphs. In ICALP (2004), J. Díaz,
+ J. Karhumäki, A. Lepistö, and D. Sannella, Eds., vol. 3142
+ of Lecture Notes in Computer Science, Springer, pp. 1029–1040.
+ """
+ if len(p)!=m:
+ raise ValueError("Probability list p must have m elements.")
+ G = nx.empty_graph(n + m)
+ mset = range(n,n+m)
+ for u in range(n):
+ for v,q in zip(mset,p):
+ if random.random()<q:
+ G.add_edge(u,v)
+ return nx.projected_graph(G, range(n))
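+
+# Sketch (illustrative): all three generators build an (n+m)-node bipartite
+# structure and project it onto the first n nodes.
+#   >>> G = uniform_random_intersection_graph(10, 5, 0.3, seed=7)
+#   >>> H = general_random_intersection_graph(10, 5, [0.2]*5)
+#   >>> len(G), len(H)
+#   (10, 10)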
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/generators/line.py b/lib/python2.7/site-packages/setoolsgui/networkx/generators/line.py
new file mode 100644
index 0000000..4d6c14d
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/generators/line.py
@@ -0,0 +1,69 @@
+"""
+Line graphs.
+
+"""
+# Copyright (C) 2010 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+__author__ = """Aric Hagberg (hagberg@lanl.gov)\nPieter Swart (swart@lanl.gov)\nDan Schult(dschult@colgate.edu)"""
+
+__all__ = ['line_graph']
+
+import networkx as nx
+
+def line_graph(G):
+ """Return the line graph of the graph or digraph G.
+
+ The line graph of a graph G has a node for each edge
+ in G and an edge between those nodes if the two edges
+ in G share a common node.
+
+ For DiGraphs, an edge represents a directed path of length 2.
+
+ The original node labels are kept as two-tuple node labels
+ in the line graph.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX Graph or DiGraph
+
+ Examples
+ --------
+ >>> G=nx.star_graph(3)
+ >>> L=nx.line_graph(G)
+ >>> print(sorted(L.edges())) # makes a clique, K3
+ [((0, 1), (0, 2)), ((0, 1), (0, 3)), ((0, 3), (0, 2))]
+
+ Notes
+ -----
+ Not implemented for MultiGraph or MultiDiGraph classes.
+
+ Graph, node, and edge data are not propagated to the new graph.
+
+ """
+ if type(G) == nx.MultiGraph or type(G) == nx.MultiDiGraph:
+ raise Exception("Line graph not implemented for Multi(Di)Graphs")
+ L=G.__class__()
+ if G.is_directed():
+ for u,nlist in G.adjacency_iter(): # same as successors for digraph
+ # look for directed path of length two
+ for n in nlist:
+ nbrs=G[n] # successors
+ for nbr in nbrs:
+ if nbr!=u:
+ L.add_edge((u,n),(n,nbr))
+ else:
+ for u,nlist in G.adjacency_iter():
+ # label nodes as tuple of edge endpoints in original graph
+ # "node tuple" must be in lexicographical order
+ nodes=[tuple(sorted(n)) for n in zip([u]*len(nlist),nlist)]
+ # add clique of nodes to graph
+ while nodes:
+ u=nodes.pop()
+ L.add_edges_from((u,v) for v in nodes)
+ return L
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/generators/random_clustered.py b/lib/python2.7/site-packages/setoolsgui/networkx/generators/random_clustered.py
new file mode 100644
index 0000000..fe294e6
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/generators/random_clustered.py
@@ -0,0 +1,125 @@
+# -*- coding: utf-8 -*-
+"""Generate graphs with given degree and triangle sequence.
+"""
+# Copyright (C) 2004-2011 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+import random
+import networkx as nx
+__author__ = "\n".join(['Aric Hagberg (hagberg@lanl.gov)',
+ 'Joel Miller (joel.c.miller.research@gmail.com)'])
+
+__all__ = ['random_clustered_graph']
+
+
+def random_clustered_graph(joint_degree_sequence, create_using=None, seed=None):
+ """Generate a random graph with the given joint degree and triangle
+ degree sequence.
+
+ This uses a configuration model-like approach to generate a
+ random pseudograph (graph with parallel edges and self loops) by
+ randomly assigning edges to match the given independent edge
+ and triangle degree sequence.
+
+ Parameters
+ ----------
+ joint_degree_sequence : list of integer pairs
+ Each list entry corresponds to the independent edge degree and
+ triangle degree of a node.
+ create_using : graph, optional (default MultiGraph)
+ Return graph of this type. The instance will be cleared.
+ seed : hashable object, optional
+ The seed for the random number generator.
+
+ Returns
+ -------
+ G : MultiGraph
+ A graph with the specified degree sequence. Nodes are labeled
+ starting at 0 with an index corresponding to the position in
+ deg_sequence.
+
+ Raises
+ ------
+ NetworkXError
+ If the independent edge degree sequence sum is not even
+ or the triangle degree sequence sum is not divisible by 3.
+
+ Notes
+ -----
+ As described by Miller [1]_ (see also Newman [2]_ for an equivalent
+ description).
+
+ A non-graphical degree sequence (not realizable by some simple
+ graph) is allowed since this function returns graphs with self
+ loops and parallel edges. An exception is raised if the
+ independent degree sequence does not have an even sum or the
+ triangle degree sequence sum is not divisible by 3.
+
+ This configuration model-like construction process can lead to
+ duplicate edges and loops. You can remove the self-loops and
+ parallel edges (see below) which will likely result in a graph
+ that doesn't have the exact degree sequence specified. This
+ "finite-size effect" decreases as the size of the graph increases.
+
+ References
+ ----------
+ .. [1] J. C. Miller "Percolation and Epidemics on Random Clustered Graphs."
+ Physical Review E, Rapid Communication (to appear).
+ .. [2] M.E.J. Newman, "Random clustered networks".
+ Physical Review Letters (to appear).
+
+ Examples
+ --------
+ >>> deg_tri=[[1,0],[1,0],[1,0],[2,0],[1,0],[2,1],[0,1],[0,1]]
+ >>> G = nx.random_clustered_graph(deg_tri)
+
+ To remove parallel edges:
+
+ >>> G=nx.Graph(G)
+
+ To remove self loops:
+
+ >>> G.remove_edges_from(G.selfloop_edges())
+
+ """
+ if create_using is None:
+ create_using = nx.MultiGraph()
+ elif create_using.is_directed():
+ raise nx.NetworkXError("Directed Graph not supported")
+
+ if seed is not None:
+ random.seed(seed)
+
+ # In Python 3, zip() returns an iterator. Make this into a list.
+ joint_degree_sequence = list(joint_degree_sequence)
+
+ N = len(joint_degree_sequence)
+ G = nx.empty_graph(N,create_using)
+
+ ilist = []
+ tlist = []
+ for n in G:
+ degrees = joint_degree_sequence[n]
+ for icount in range(degrees[0]):
+ ilist.append(n)
+ for tcount in range(degrees[1]):
+ tlist.append(n)
+
+ if len(ilist)%2 != 0 or len(tlist)%3 != 0:
+ raise nx.NetworkXError('Invalid degree sequence')
+
+ random.shuffle(ilist)
+ random.shuffle(tlist)
+ while ilist:
+ G.add_edge(ilist.pop(),ilist.pop())
+ while tlist:
+ n1 = tlist.pop()
+ n2 = tlist.pop()
+ n3 = tlist.pop()
+ G.add_edges_from([(n1,n2),(n1,n3),(n2,n3)])
+ G.name = "random_clustered %d nodes %d edges"%(G.order(),G.size())
+ return G
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/generators/random_graphs.py b/lib/python2.7/site-packages/setoolsgui/networkx/generators/random_graphs.py
new file mode 100644
index 0000000..81c20ab
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/generators/random_graphs.py
@@ -0,0 +1,890 @@
+# -*- coding: utf-8 -*-
+"""
+Generators for random graphs.
+
+"""
+# Copyright (C) 2004-2011 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+__author__ = "\n".join(['Aric Hagberg (hagberg@lanl.gov)',
+ 'Pieter Swart (swart@lanl.gov)',
+ 'Dan Schult (dschult@colgate.edu)'])
+import itertools
+import random
+import math
+import networkx as nx
+from networkx.generators.classic import empty_graph, path_graph, complete_graph
+
+from collections import defaultdict
+
+__all__ = ['fast_gnp_random_graph',
+ 'gnp_random_graph',
+ 'dense_gnm_random_graph',
+ 'gnm_random_graph',
+ 'erdos_renyi_graph',
+ 'binomial_graph',
+ 'newman_watts_strogatz_graph',
+ 'watts_strogatz_graph',
+ 'connected_watts_strogatz_graph',
+ 'random_regular_graph',
+ 'barabasi_albert_graph',
+ 'powerlaw_cluster_graph',
+ 'random_lobster',
+ 'random_shell_graph',
+ 'random_powerlaw_tree',
+ 'random_powerlaw_tree_sequence']
+
+
+#-------------------------------------------------------------------------
+# Some Famous Random Graphs
+#-------------------------------------------------------------------------
+
+
+def fast_gnp_random_graph(n, p, seed=None, directed=False):
+ """Return a random graph G_{n,p} (Erdős-Rényi graph, binomial graph).
+
+ Parameters
+ ----------
+ n : int
+ The number of nodes.
+ p : float
+ Probability for edge creation.
+ seed : int, optional
+ Seed for random number generator (default=None).
+ directed : bool, optional (default=False)
+ If True return a directed graph
+
+ Notes
+ -----
+ The G_{n,p} graph algorithm chooses each of the [n(n-1)]/2
+ (undirected) or n(n-1) (directed) possible edges with probability p.
+
+    This algorithm runs in O(n+m) expected time, where
+    m = p*n*(n-1)/2 is the expected number of edges.
+
+ It should be faster than gnp_random_graph when p is small and
+ the expected number of edges is small (sparse graph).
+
+ See Also
+ --------
+ gnp_random_graph
+
+ References
+ ----------
+ .. [1] Vladimir Batagelj and Ulrik Brandes,
+ "Efficient generation of large random networks",
+ Phys. Rev. E, 71, 036113, 2005.
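+
+    Examples
+    --------
+    A minimal usage sketch; the parameter values are illustrative:
+
+    >>> G = nx.fast_gnp_random_graph(1000, 0.004, seed=42)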
+ """
+ G = empty_graph(n)
+ G.name="fast_gnp_random_graph(%s,%s)"%(n,p)
+
+    if seed is not None:
+ random.seed(seed)
+
+ if p <= 0 or p >= 1:
+ return nx.gnp_random_graph(n,p,directed=directed)
+
+ v = 1 # Nodes in graph are from 0,n-1 (this is the second node index).
+ w = -1
+ lp = math.log(1.0 - p)
+
+ if directed:
+ G=nx.DiGraph(G)
+ while v < n:
+ lr = math.log(1.0 - random.random())
+ w = w + 1 + int(lr/lp)
+ if v == w: # avoid self loops
+ w = w + 1
+ while w >= n and v < n:
+ w = w - n
+ v = v + 1
+ if v == w: # avoid self loops
+ w = w + 1
+ if v < n:
+ G.add_edge(v, w)
+ else:
+ while v < n:
+ lr = math.log(1.0 - random.random())
+ w = w + 1 + int(lr/lp)
+ while w >= v and v < n:
+ w = w - v
+ v = v + 1
+ if v < n:
+ G.add_edge(v, w)
+ return G
+
+
+def gnp_random_graph(n, p, seed=None, directed=False):
+ """Return a random graph G_{n,p} (Erdős-Rényi graph, binomial graph).
+
+ Chooses each of the possible edges with probability p.
+
+ This is also called binomial_graph and erdos_renyi_graph.
+
+ Parameters
+ ----------
+ n : int
+ The number of nodes.
+ p : float
+ Probability for edge creation.
+ seed : int, optional
+ Seed for random number generator (default=None).
+ directed : bool, optional (default=False)
+ If True return a directed graph
+
+ See Also
+ --------
+ fast_gnp_random_graph
+
+ Notes
+ -----
+ This is an O(n^2) algorithm. For sparse graphs (small p) see
+ fast_gnp_random_graph for a faster algorithm.
+
+ References
+ ----------
+ .. [1] P. Erdős and A. Rényi, On Random Graphs, Publ. Math. 6, 290 (1959).
+ .. [2] E. N. Gilbert, Random Graphs, Ann. Math. Stat., 30, 1141 (1959).
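+
+    Examples
+    --------
+    A minimal usage sketch; the parameter values are illustrative:
+
+    >>> G = nx.gnp_random_graph(10, 0.5, seed=1)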
+ """
+ if directed:
+ G=nx.DiGraph()
+ else:
+ G=nx.Graph()
+ G.add_nodes_from(range(n))
+ G.name="gnp_random_graph(%s,%s)"%(n,p)
+ if p<=0:
+ return G
+ if p>=1:
+ return complete_graph(n,create_using=G)
+
+    if seed is not None:
+ random.seed(seed)
+
+ if G.is_directed():
+ edges=itertools.permutations(range(n),2)
+ else:
+ edges=itertools.combinations(range(n),2)
+
+ for e in edges:
+ if random.random() < p:
+ G.add_edge(*e)
+ return G
+
+
+# add some aliases to common names
+binomial_graph=gnp_random_graph
+erdos_renyi_graph=gnp_random_graph
+
+def dense_gnm_random_graph(n, m, seed=None):
+ """Return the random graph G_{n,m}.
+
+ Gives a graph picked randomly out of the set of all graphs
+ with n nodes and m edges.
+ This algorithm should be faster than gnm_random_graph for dense graphs.
+
+ Parameters
+ ----------
+ n : int
+ The number of nodes.
+ m : int
+ The number of edges.
+ seed : int, optional
+ Seed for random number generator (default=None).
+
+ See Also
+ --------
+ gnm_random_graph()
+
+ Notes
+ -----
+ Algorithm by Keith M. Briggs Mar 31, 2006.
+ Inspired by Knuth's Algorithm S (Selection sampling technique),
+ in section 3.4.2 of [1]_.
+
+ References
+ ----------
+ .. [1] Donald E. Knuth, The Art of Computer Programming,
+ Volume 2/Seminumerical algorithms, Third Edition, Addison-Wesley, 1997.
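+
+    Examples
+    --------
+    A minimal usage sketch; the parameter values are illustrative:
+
+    >>> G = nx.dense_gnm_random_graph(20, 80, seed=7)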
+ """
+ mmax=n*(n-1)/2
+ if m>=mmax:
+ G=complete_graph(n)
+ else:
+ G=empty_graph(n)
+ G.name="dense_gnm_random_graph(%s,%s)"%(n,m)
+
+ if n==1 or m>=mmax:
+ return G
+
+ if seed is not None:
+ random.seed(seed)
+
+ u=0
+ v=1
+ t=0
+ k=0
+ while True:
+ if random.randrange(mmax-t)<m-k:
+ G.add_edge(u,v)
+ k+=1
+ if k==m: return G
+ t+=1
+ v+=1
+ if v==n: # go to next row of adjacency matrix
+ u+=1
+ v=u+1
+
+def gnm_random_graph(n, m, seed=None, directed=False):
+ """Return the random graph G_{n,m}.
+
+ Produces a graph picked randomly out of the set of all graphs
+ with n nodes and m edges.
+
+ Parameters
+ ----------
+ n : int
+ The number of nodes.
+ m : int
+ The number of edges.
+ seed : int, optional
+ Seed for random number generator (default=None).
+ directed : bool, optional (default=False)
+ If True return a directed graph
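+
+    Examples
+    --------
+    A minimal usage sketch; the parameter values are illustrative:
+
+    >>> G = nx.gnm_random_graph(10, 15, seed=3)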
+ """
+ if directed:
+ G=nx.DiGraph()
+ else:
+ G=nx.Graph()
+ G.add_nodes_from(range(n))
+ G.name="gnm_random_graph(%s,%s)"%(n,m)
+
+ if seed is not None:
+ random.seed(seed)
+
+ if n==1:
+ return G
+ max_edges=n*(n-1)
+ if not directed:
+ max_edges/=2.0
+ if m>=max_edges:
+ return complete_graph(n,create_using=G)
+
+ nlist=G.nodes()
+ edge_count=0
+ while edge_count < m:
+ # generate random edge,u,v
+ u = random.choice(nlist)
+ v = random.choice(nlist)
+ if u==v or G.has_edge(u,v):
+ continue
+ else:
+ G.add_edge(u,v)
+ edge_count=edge_count+1
+ return G
+
+
+def newman_watts_strogatz_graph(n, k, p, seed=None):
+ """Return a Newman-Watts-Strogatz small world graph.
+
+ Parameters
+ ----------
+ n : int
+ The number of nodes
+ k : int
+ Each node is connected to k nearest neighbors in ring topology
+ p : float
+ The probability of adding a new edge for each edge
+ seed : int, optional
+ seed for random number generator (default=None)
+
+ Notes
+ -----
+ First create a ring over n nodes. Then each node in the ring is
+ connected with its k nearest neighbors (k-1 neighbors if k is odd).
+ Then shortcuts are created by adding new edges as follows:
+ for each edge u-v in the underlying "n-ring with k nearest neighbors"
+ with probability p add a new edge u-w with randomly-chosen existing
+ node w. In contrast with watts_strogatz_graph(), no edges are removed.
+
+ See Also
+ --------
+ watts_strogatz_graph()
+
+ References
+ ----------
+ .. [1] M. E. J. Newman and D. J. Watts,
+ Renormalization group analysis of the small-world network model,
+ Physics Letters A, 263, 341, 1999.
+ http://dx.doi.org/10.1016/S0375-9601(99)00757-4
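+
+    Examples
+    --------
+    A minimal usage sketch; the parameter values are illustrative:
+
+    >>> G = nx.newman_watts_strogatz_graph(20, 4, 0.1, seed=5)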
+ """
+ if seed is not None:
+ random.seed(seed)
+ if k>=n:
+ raise nx.NetworkXError("k>=n, choose smaller k or larger n")
+ G=empty_graph(n)
+ G.name="newman_watts_strogatz_graph(%s,%s,%s)"%(n,k,p)
+ nlist = G.nodes()
+ fromv = nlist
+ # connect the k/2 neighbors
+ for j in range(1, k // 2+1):
+ tov = fromv[j:] + fromv[0:j] # the first j are now last
+ for i in range(len(fromv)):
+ G.add_edge(fromv[i], tov[i])
+ # for each edge u-v, with probability p, randomly select existing
+ # node w and add new edge u-w
+ e = G.edges()
+ for (u, v) in e:
+ if random.random() < p:
+ w = random.choice(nlist)
+ # no self-loops and reject if edge u-w exists
+ # is that the correct NWS model?
+ while w == u or G.has_edge(u, w):
+ w = random.choice(nlist)
+ if G.degree(u) >= n-1:
+ break # skip this rewiring
+ else:
+ G.add_edge(u,w)
+ return G
+
+
+def watts_strogatz_graph(n, k, p, seed=None):
+ """Return a Watts-Strogatz small-world graph.
+
+
+ Parameters
+ ----------
+ n : int
+ The number of nodes
+ k : int
+ Each node is connected to k nearest neighbors in ring topology
+ p : float
+ The probability of rewiring each edge
+ seed : int, optional
+ Seed for random number generator (default=None)
+
+ See Also
+ --------
+ newman_watts_strogatz_graph()
+ connected_watts_strogatz_graph()
+
+ Notes
+ -----
+ First create a ring over n nodes. Then each node in the ring is
+ connected with its k nearest neighbors (k-1 neighbors if k is odd).
+ Then shortcuts are created by replacing some edges as follows:
+ for each edge u-v in the underlying "n-ring with k nearest neighbors"
+ with probability p replace it with a new edge u-w with uniformly
+ random choice of existing node w.
+
+ In contrast with newman_watts_strogatz_graph(), the random
+ rewiring does not increase the number of edges. The rewired graph
+ is not guaranteed to be connected as in connected_watts_strogatz_graph().
+
+ References
+ ----------
+ .. [1] Duncan J. Watts and Steven H. Strogatz,
+ Collective dynamics of small-world networks,
+ Nature, 393, pp. 440--442, 1998.
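+
+    Examples
+    --------
+    A minimal usage sketch; the parameter values are illustrative:
+
+    >>> G = nx.watts_strogatz_graph(20, 4, 0.1, seed=5)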
+ """
+ if k>=n:
+ raise nx.NetworkXError("k>=n, choose smaller k or larger n")
+ if seed is not None:
+ random.seed(seed)
+
+ G = nx.Graph()
+ G.name="watts_strogatz_graph(%s,%s,%s)"%(n,k,p)
+ nodes = list(range(n)) # nodes are labeled 0 to n-1
+ # connect each node to k/2 neighbors
+ for j in range(1, k // 2+1):
+ targets = nodes[j:] + nodes[0:j] # first j nodes are now last in list
+ G.add_edges_from(zip(nodes,targets))
+ # rewire edges from each node
+ # loop over all nodes in order (label) and neighbors in order (distance)
+ # no self loops or multiple edges allowed
+ for j in range(1, k // 2+1): # outer loop is neighbors
+ targets = nodes[j:] + nodes[0:j] # first j nodes are now last in list
+ # inner loop in node order
+ for u,v in zip(nodes,targets):
+ if random.random() < p:
+ w = random.choice(nodes)
+ # Enforce no self-loops or multiple edges
+ while w == u or G.has_edge(u, w):
+ w = random.choice(nodes)
+ if G.degree(u) >= n-1:
+ break # skip this rewiring
+ else:
+ G.remove_edge(u,v)
+ G.add_edge(u,w)
+ return G
+
+def connected_watts_strogatz_graph(n, k, p, tries=100, seed=None):
+ """Return a connected Watts-Strogatz small-world graph.
+
+ Attempt to generate a connected realization by repeated
+ generation of Watts-Strogatz small-world graphs.
+ An exception is raised if the maximum number of tries is exceeded.
+
+ Parameters
+ ----------
+ n : int
+ The number of nodes
+ k : int
+ Each node is connected to k nearest neighbors in ring topology
+ p : float
+ The probability of rewiring each edge
+ tries : int
+ Number of attempts to generate a connected graph.
+ seed : int, optional
+ The seed for random number generator.
+
+ See Also
+ --------
+ newman_watts_strogatz_graph()
+ watts_strogatz_graph()
+
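+    Examples
+    --------
+    A minimal usage sketch; the parameter values are illustrative:
+
+    >>> G = nx.connected_watts_strogatz_graph(10, 4, 0.1)
+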
+ """
+ G = watts_strogatz_graph(n, k, p, seed)
+ t=1
+ while not nx.is_connected(G):
+ G = watts_strogatz_graph(n, k, p, seed)
+ t=t+1
+ if t>tries:
+ raise nx.NetworkXError("Maximum number of tries exceeded")
+ return G
+
+
+def random_regular_graph(d, n, seed=None):
+ """Return a random regular graph of n nodes each with degree d.
+
+ The resulting graph G has no self-loops or parallel edges.
+
+ Parameters
+ ----------
+ d : int
+ Degree
+ n : integer
+ Number of nodes. The value of n*d must be even.
+ seed : hashable object
+ The seed for random number generator.
+
+ Notes
+ -----
+    The nodes are numbered from 0 to n-1.
+
+ Kim and Vu's paper [2]_ shows that this algorithm samples in an
+ asymptotically uniform way from the space of random graphs when
+ d = O(n**(1/3-epsilon)).
+
+ References
+ ----------
+ .. [1] A. Steger and N. Wormald,
+ Generating random regular graphs quickly,
+ Probability and Computing 8 (1999), 377-396, 1999.
+ http://citeseer.ist.psu.edu/steger99generating.html
+
+ .. [2] Jeong Han Kim and Van H. Vu,
+ Generating random regular graphs,
+ Proceedings of the thirty-fifth ACM symposium on Theory of computing,
+ San Diego, CA, USA, pp 213--222, 2003.
+ http://portal.acm.org/citation.cfm?id=780542.780576
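+
+    Examples
+    --------
+    A minimal usage sketch; note that n*d must be even:
+
+    >>> G = nx.random_regular_graph(3, 10, seed=42)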
+ """
+ if (n * d) % 2 != 0:
+ raise nx.NetworkXError("n * d must be even")
+
+ if not 0 <= d < n:
+ raise nx.NetworkXError("the 0 <= d < n inequality must be satisfied")
+
+ if seed is not None:
+ random.seed(seed)
+
+ def _suitable(edges, potential_edges):
+ # Helper subroutine to check if there are suitable edges remaining
+ # If False, the generation of the graph has failed
+ if not potential_edges:
+ return True
+ for s1 in potential_edges:
+ for s2 in potential_edges:
+ # Two iterators on the same dictionary are guaranteed
+ # to visit it in the same order if there are no
+ # intervening modifications.
+ if s1 == s2:
+ # Only need to consider s1-s2 pair one time
+ break
+ if s1 > s2:
+ s1, s2 = s2, s1
+ if (s1, s2) not in edges:
+ return True
+ return False
+
+ def _try_creation():
+ # Attempt to create an edge set
+
+ edges = set()
+ stubs = list(range(n)) * d
+
+ while stubs:
+ potential_edges = defaultdict(lambda: 0)
+ random.shuffle(stubs)
+ stubiter = iter(stubs)
+ for s1, s2 in zip(stubiter, stubiter):
+ if s1 > s2:
+ s1, s2 = s2, s1
+ if s1 != s2 and ((s1, s2) not in edges):
+ edges.add((s1, s2))
+ else:
+ potential_edges[s1] += 1
+ potential_edges[s2] += 1
+
+ if not _suitable(edges, potential_edges):
+ return None # failed to find suitable edge set
+
+ stubs = [node for node, potential in potential_edges.items()
+ for _ in range(potential)]
+ return edges
+
+ # Even though a suitable edge set exists,
+ # the generation of such a set is not guaranteed.
+ # Try repeatedly to find one.
+ edges = _try_creation()
+ while edges is None:
+ edges = _try_creation()
+
+ G = nx.Graph()
+ G.name = "random_regular_graph(%s, %s)" % (d, n)
+ G.add_edges_from(edges)
+
+ return G
+
+def _random_subset(seq,m):
+ """ Return m unique elements from seq.
+
+ This differs from random.sample which can return repeated
+ elements if seq holds repeated elements.
+ """
+ targets=set()
+ while len(targets)<m:
+ x=random.choice(seq)
+ targets.add(x)
+ return targets
+
+def barabasi_albert_graph(n, m, seed=None):
+ """Return random graph using Barabási-Albert preferential attachment model.
+
+ A graph of n nodes is grown by attaching new nodes each with m
+ edges that are preferentially attached to existing nodes with high
+ degree.
+
+ Parameters
+ ----------
+ n : int
+ Number of nodes
+ m : int
+ Number of edges to attach from a new node to existing nodes
+ seed : int, optional
+ Seed for random number generator (default=None).
+
+ Returns
+ -------
+ G : Graph
+
+ Notes
+ -----
+    The initialization is a graph with m nodes and no edges.
+
+ References
+ ----------
+ .. [1] A. L. Barabási and R. Albert "Emergence of scaling in
+ random networks", Science 286, pp 509-512, 1999.
+ """
+
+ if m < 1 or m >=n:
+ raise nx.NetworkXError(\
+ "Barabási-Albert network must have m>=1 and m<n, m=%d,n=%d"%(m,n))
+ if seed is not None:
+ random.seed(seed)
+
+ # Add m initial nodes (m0 in barabasi-speak)
+ G=empty_graph(m)
+ G.name="barabasi_albert_graph(%s,%s)"%(n,m)
+ # Target nodes for new edges
+ targets=list(range(m))
+ # List of existing nodes, with nodes repeated once for each adjacent edge
+ repeated_nodes=[]
+ # Start adding the other n-m nodes. The first node is m.
+ source=m
+ while source<n:
+ # Add edges to m nodes from the source.
+ G.add_edges_from(zip([source]*m,targets))
+ # Add one node to the list for each new edge just created.
+ repeated_nodes.extend(targets)
+ # And the new node "source" has m edges to add to the list.
+ repeated_nodes.extend([source]*m)
+ # Now choose m unique nodes from the existing nodes
+        # Pick uniformly from repeated_nodes (preferential attachment)
+ targets = _random_subset(repeated_nodes,m)
+ source += 1
+ return G
+
+def powerlaw_cluster_graph(n, m, p, seed=None):
+ """Holme and Kim algorithm for growing graphs with powerlaw
+ degree distribution and approximate average clustering.
+
+ Parameters
+ ----------
+ n : int
+ the number of nodes
+ m : int
+ the number of random edges to add for each new node
+ p : float,
+ Probability of adding a triangle after adding a random edge
+ seed : int, optional
+ Seed for random number generator (default=None).
+
+ Notes
+ -----
+ The average clustering has a hard time getting above
+ a certain cutoff that depends on m. This cutoff is often quite low.
+ Note that the transitivity (fraction of triangles to possible
+ triangles) seems to go down with network size.
+
+ It is essentially the Barabási-Albert (B-A) growth model with an
+ extra step that each random edge is followed by a chance of
+ making an edge to one of its neighbors too (and thus a triangle).
+
+ This algorithm improves on B-A in the sense that it enables a
+ higher average clustering to be attained if desired.
+
+ It seems possible to have a disconnected graph with this algorithm
+ since the initial m nodes may not be all linked to a new node
+ on the first iteration like the B-A model.
+
+ References
+ ----------
+ .. [1] P. Holme and B. J. Kim,
+ "Growing scale-free networks with tunable clustering",
+ Phys. Rev. E, 65, 026107, 2002.
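+
+    Examples
+    --------
+    A minimal usage sketch; the parameter values are illustrative:
+
+    >>> G = nx.powerlaw_cluster_graph(100, 3, 0.1, seed=4)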
+ """
+
+ if m < 1 or n < m:
+        raise nx.NetworkXError(\
+              "powerlaw_cluster_graph must have m >= 1 and m <= n, m=%d, n=%d"%(m,n))
+
+ if p > 1 or p < 0:
+        raise nx.NetworkXError(\
+              "p must be in [0,1], p=%f"%(p))
+ if seed is not None:
+ random.seed(seed)
+
+ G=empty_graph(m) # add m initial nodes (m0 in barabasi-speak)
+ G.name="Powerlaw-Cluster Graph"
+ repeated_nodes=G.nodes() # list of existing nodes to sample from
+ # with nodes repeated once for each adjacent edge
+ source=m # next node is m
+ while source<n: # Now add the other n-1 nodes
+ possible_targets = _random_subset(repeated_nodes,m)
+ # do one preferential attachment for new node
+ target=possible_targets.pop()
+ G.add_edge(source,target)
+ repeated_nodes.append(target) # add one node to list for each new link
+ count=1
+ while count<m: # add m-1 more new links
+ if random.random()<p: # clustering step: add triangle
+ neighborhood=[nbr for nbr in G.neighbors(target) \
+ if not G.has_edge(source,nbr) \
+ and not nbr==source]
+ if neighborhood: # if there is a neighbor without a link
+ nbr=random.choice(neighborhood)
+ G.add_edge(source,nbr) # add triangle
+ repeated_nodes.append(nbr)
+ count=count+1
+ continue # go to top of while loop
+ # else do preferential attachment step if above fails
+ target=possible_targets.pop()
+ G.add_edge(source,target)
+ repeated_nodes.append(target)
+ count=count+1
+
+ repeated_nodes.extend([source]*m) # add source node to list m times
+ source += 1
+ return G
+
+def random_lobster(n, p1, p2, seed=None):
+ """Return a random lobster.
+
+ A lobster is a tree that reduces to a caterpillar when pruning all
+ leaf nodes.
+
+ A caterpillar is a tree that reduces to a path graph when pruning
+ all leaf nodes (p2=0).
+
+ Parameters
+ ----------
+ n : int
+ The expected number of nodes in the backbone
+ p1 : float
+ Probability of adding an edge to the backbone
+ p2 : float
+ Probability of adding an edge one level beyond backbone
+ seed : int, optional
+ Seed for random number generator (default=None).
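+
+    Examples
+    --------
+    A minimal usage sketch; the parameter values are illustrative:
+
+    >>> G = nx.random_lobster(10, 0.5, 0.5, seed=2)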
+ """
+ # a necessary ingredient in any self-respecting graph library
+ if seed is not None:
+ random.seed(seed)
+ llen=int(2*random.random()*n + 0.5)
+ L=path_graph(llen)
+ L.name="random_lobster(%d,%s,%s)"%(n,p1,p2)
+ # build caterpillar: add edges to path graph with probability p1
+ current_node=llen-1
+ for n in range(llen):
+ if random.random()<p1: # add fuzzy caterpillar parts
+ current_node+=1
+ L.add_edge(n,current_node)
+ if random.random()<p2: # add crunchy lobster bits
+ current_node+=1
+ L.add_edge(current_node-1,current_node)
+ return L # voila, un lobster!
+
+def random_shell_graph(constructor, seed=None):
+ """Return a random shell graph for the constructor given.
+
+ Parameters
+ ----------
+ constructor: a list of three-tuples
+ (n,m,d) for each shell starting at the center shell.
+ n : int
+ The number of nodes in the shell
+ m : int
+ The number or edges in the shell
+ d : float
+ The ratio of inter-shell (next) edges to intra-shell edges.
+ d=0 means no intra shell edges, d=1 for the last shell
+ seed : int, optional
+ Seed for random number generator (default=None).
+
+ Examples
+ --------
+ >>> constructor=[(10,20,0.8),(20,40,0.8)]
+ >>> G=nx.random_shell_graph(constructor)
+
+ """
+ G=empty_graph(0)
+ G.name="random_shell_graph(constructor)"
+
+ if seed is not None:
+ random.seed(seed)
+
+ glist=[]
+ intra_edges=[]
+ nnodes=0
+ # create gnm graphs for each shell
+ for (n,m,d) in constructor:
+ inter_edges=int(m*d)
+ intra_edges.append(m-inter_edges)
+ g=nx.convert_node_labels_to_integers(
+ gnm_random_graph(n,inter_edges),
+ first_label=nnodes)
+ glist.append(g)
+ nnodes+=n
+ G=nx.operators.union(G,g)
+
+ # connect the shells randomly
+ for gi in range(len(glist)-1):
+ nlist1=glist[gi].nodes()
+ nlist2=glist[gi+1].nodes()
+ total_edges=intra_edges[gi]
+ edge_count=0
+ while edge_count < total_edges:
+ u = random.choice(nlist1)
+ v = random.choice(nlist2)
+ if u==v or G.has_edge(u,v):
+ continue
+ else:
+ G.add_edge(u,v)
+ edge_count=edge_count+1
+ return G
+
+
+def random_powerlaw_tree(n, gamma=3, seed=None, tries=100):
+ """Return a tree with a powerlaw degree distribution.
+
+ Parameters
+ ----------
+ n : int,
+ The number of nodes
+ gamma : float
+ Exponent of the power-law
+ seed : int, optional
+ Seed for random number generator (default=None).
+ tries : int
+ Number of attempts to adjust sequence to make a tree
+
+ Notes
+ -----
+ A trial powerlaw degree sequence is chosen and then elements are
+ swapped with new elements from a powerlaw distribution until
+ the sequence makes a tree (#edges=#nodes-1).
+
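+    Examples
+    --------
+    A usage sketch; generation can fail if no valid sequence is found
+    within `tries`, so the doctest is skipped and the values are
+    illustrative:
+
+    >>> G = nx.random_powerlaw_tree(50, seed=42, tries=10000)  # doctest: +SKIP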
+ """
+ from networkx.generators.degree_seq import degree_sequence_tree
+ try:
+ s=random_powerlaw_tree_sequence(n,
+ gamma=gamma,
+ seed=seed,
+ tries=tries)
+    except nx.NetworkXError:
+ raise nx.NetworkXError(\
+ "Exceeded max (%d) attempts for a valid tree sequence."%tries)
+ G=degree_sequence_tree(s)
+ G.name="random_powerlaw_tree(%s,%s)"%(n,gamma)
+ return G
+
+
+def random_powerlaw_tree_sequence(n, gamma=3, seed=None, tries=100):
+ """ Return a degree sequence for a tree with a powerlaw distribution.
+
+ Parameters
+ ----------
+ n : int,
+ The number of nodes
+ gamma : float
+ Exponent of the power-law
+ seed : int, optional
+ Seed for random number generator (default=None).
+ tries : int
+ Number of attempts to adjust sequence to make a tree
+
+ Notes
+ -----
+ A trial powerlaw degree sequence is chosen and then elements are
+ swapped with new elements from a powerlaw distribution until
+ the sequence makes a tree (#edges=#nodes-1).
+
+
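+    Examples
+    --------
+    A usage sketch; like random_powerlaw_tree, this can fail for
+    unlucky draws, so the doctest is skipped:
+
+    >>> s = nx.random_powerlaw_tree_sequence(100, seed=42, tries=5000)  # doctest: +SKIP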
+ """
+ if seed is not None:
+ random.seed(seed)
+
+ # get trial sequence
+ z=nx.utils.powerlaw_sequence(n,exponent=gamma)
+ # round to integer values in the range [0,n]
+ zseq=[min(n, max( int(round(s)),0 )) for s in z]
+
+ # another sequence to swap values from
+ z=nx.utils.powerlaw_sequence(tries,exponent=gamma)
+ # round to integer values in the range [0,n]
+ swap=[min(n, max( int(round(s)),0 )) for s in z]
+
+ for deg in swap:
+ if n-sum(zseq)/2.0 == 1.0: # is a tree, return sequence
+ return zseq
+ index=random.randint(0,n-1)
+ zseq[index]=swap.pop()
+
+    raise nx.NetworkXError(\
+          "Exceeded max (%d) attempts for a valid tree sequence."%tries)
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/generators/small.py b/lib/python2.7/site-packages/setoolsgui/networkx/generators/small.py
new file mode 100644
index 0000000..f41f8d0
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/generators/small.py
@@ -0,0 +1,412 @@
+# -*- coding: utf-8 -*-
+"""
+Various small and named graphs, together with some compact generators.
+
+"""
+__author__ ="""Aric Hagberg (hagberg@lanl.gov)\nPieter Swart (swart@lanl.gov)"""
+# Copyright (C) 2004-2008 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+
+__all__ = ['make_small_graph',
+ 'LCF_graph',
+ 'bull_graph',
+ 'chvatal_graph',
+ 'cubical_graph',
+ 'desargues_graph',
+ 'diamond_graph',
+ 'dodecahedral_graph',
+ 'frucht_graph',
+ 'heawood_graph',
+ 'house_graph',
+ 'house_x_graph',
+ 'icosahedral_graph',
+ 'krackhardt_kite_graph',
+ 'moebius_kantor_graph',
+ 'octahedral_graph',
+ 'pappus_graph',
+ 'petersen_graph',
+ 'sedgewick_maze_graph',
+ 'tetrahedral_graph',
+ 'truncated_cube_graph',
+ 'truncated_tetrahedron_graph',
+ 'tutte_graph']
+
+import networkx as nx
+from networkx.generators.classic import empty_graph, cycle_graph, path_graph, complete_graph
+from networkx.exception import NetworkXError
+
+#------------------------------------------------------------------------------
+# Tools for creating small graphs
+#------------------------------------------------------------------------------
+def make_small_undirected_graph(graph_description, create_using=None):
+ """
+ Return a small undirected graph described by graph_description.
+
+ See make_small_graph.
+ """
+ if create_using is not None and create_using.is_directed():
+ raise NetworkXError("Directed Graph not supported")
+ return make_small_graph(graph_description, create_using)
+
+def make_small_graph(graph_description, create_using=None):
+ """
+ Return the small graph described by graph_description.
+
+ graph_description is a list of the form [ltype,name,n,xlist]
+
+ Here ltype is one of "adjacencylist" or "edgelist",
+ name is the name of the graph and n the number of nodes.
+ This constructs a graph of n nodes with integer labels 0,..,n-1.
+
+ If ltype="adjacencylist" then xlist is an adjacency list
+ with exactly n entries, in with the j'th entry (which can be empty)
+ specifies the nodes connected to vertex j.
+ e.g. the "square" graph C_4 can be obtained by
+
+ >>> G=nx.make_small_graph(["adjacencylist","C_4",4,[[2,4],[1,3],[2,4],[1,3]]])
+
+ or, since we do not need to add edges twice,
+
+ >>> G=nx.make_small_graph(["adjacencylist","C_4",4,[[2,4],[3],[4],[]]])
+
+ If ltype="edgelist" then xlist is an edge list
+    written as [[v1,w1],[v2,w2],...,[vk,wk]],
+    where vj and wj are integers in the range 1,..,n
+ e.g. the "square" graph C_4 can be obtained by
+
+ >>> G=nx.make_small_graph(["edgelist","C_4",4,[[1,2],[3,4],[2,3],[4,1]]])
+
+ Use the create_using argument to choose the graph class/type.
+ """
+ ltype=graph_description[0]
+ name=graph_description[1]
+ n=graph_description[2]
+
+ G=empty_graph(n, create_using)
+ nodes=G.nodes()
+
+ if ltype=="adjacencylist":
+ adjlist=graph_description[3]
+ if len(adjlist) != n:
+ raise NetworkXError("invalid graph_description")
+ G.add_edges_from([(u-1,v) for v in nodes for u in adjlist[v]])
+ elif ltype=="edgelist":
+ edgelist=graph_description[3]
+ for e in edgelist:
+ v1=e[0]-1
+ v2=e[1]-1
+ if v1<0 or v1>n-1 or v2<0 or v2>n-1:
+ raise NetworkXError("invalid graph_description")
+ else:
+ G.add_edge(v1,v2)
+ G.name=name
+ return G
+
+
+def LCF_graph(n,shift_list,repeats,create_using=None):
+ """
+ Return the cubic graph specified in LCF notation.
+
+    LCF notation (LCF = Lederberg-Coxeter-Frucht) is a compressed
+ notation used in the generation of various cubic Hamiltonian
+ graphs of high symmetry. See, for example, dodecahedral_graph,
+ desargues_graph, heawood_graph and pappus_graph below.
+
+ n (number of nodes)
+ The starting graph is the n-cycle with nodes 0,...,n-1.
+ (The null graph is returned if n < 0.)
+
+ shift_list = [s1,s2,..,sk], a list of integer shifts mod n,
+
+ repeats
+ integer specifying the number of times that shifts in shift_list
+ are successively applied to each v_current in the n-cycle
+ to generate an edge between v_current and v_current+shift mod n.
+
+    For v1 cycling through the n-cycle a total of k*repeats times,
+    with shift cycling through shift_list repeats times, connect
+    v1 with (v1 + shift) mod n.
+
+ The utility graph K_{3,3}
+
+ >>> G=nx.LCF_graph(6,[3,-3],3)
+
+ The Heawood graph
+
+ >>> G=nx.LCF_graph(14,[5,-5],7)
+
+ See http://mathworld.wolfram.com/LCFNotation.html for a description
+ and references.
+
+ """
+ if create_using is not None and create_using.is_directed():
+ raise NetworkXError("Directed Graph not supported")
+
+ if n <= 0:
+ return empty_graph(0, create_using)
+
+ # start with the n-cycle
+ G=cycle_graph(n, create_using)
+ G.name="LCF_graph"
+ nodes=G.nodes()
+
+ n_extra_edges=repeats*len(shift_list)
+ # edges are added n_extra_edges times
+ # (not all of these need be new)
+ if n_extra_edges < 1:
+ return G
+
+ for i in range(n_extra_edges):
+ shift=shift_list[i%len(shift_list)] #cycle through shift_list
+ v1=nodes[i%n] # cycle repeatedly through nodes
+ v2=nodes[(i + shift)%n]
+ G.add_edge(v1, v2)
+ return G
+
+
+#-------------------------------------------------------------------------------
+# Various small and named graphs
+#-------------------------------------------------------------------------------
+
+def bull_graph(create_using=None):
+ """Return the Bull graph. """
+ description=[
+ "adjacencylist",
+ "Bull Graph",
+ 5,
+ [[2,3],[1,3,4],[1,2,5],[2],[3]]
+ ]
+ G=make_small_undirected_graph(description, create_using)
+ return G
+
+def chvatal_graph(create_using=None):
+ """Return the Chvátal graph."""
+ description=[
+ "adjacencylist",
+ "Chvatal Graph",
+ 12,
+ [[2,5,7,10],[3,6,8],[4,7,9],[5,8,10],
+ [6,9],[11,12],[11,12],[9,12],
+ [11],[11,12],[],[]]
+ ]
+ G=make_small_undirected_graph(description, create_using)
+ return G
+
+def cubical_graph(create_using=None):
+ """Return the 3-regular Platonic Cubical graph."""
+ description=[
+ "adjacencylist",
+ "Platonic Cubical Graph",
+ 8,
+ [[2,4,5],[1,3,8],[2,4,7],[1,3,6],
+ [1,6,8],[4,5,7],[3,6,8],[2,5,7]]
+ ]
+ G=make_small_undirected_graph(description, create_using)
+ return G
+
+def desargues_graph(create_using=None):
+ """ Return the Desargues graph."""
+ G=LCF_graph(20, [5,-5,9,-9], 5, create_using)
+ G.name="Desargues Graph"
+ return G
+
+def diamond_graph(create_using=None):
+ """Return the Diamond graph. """
+ description=[
+ "adjacencylist",
+ "Diamond Graph",
+ 4,
+ [[2,3],[1,3,4],[1,2,4],[2,3]]
+ ]
+ G=make_small_undirected_graph(description, create_using)
+ return G
+
+def dodecahedral_graph(create_using=None):
+ """ Return the Platonic Dodecahedral graph. """
+ G=LCF_graph(20, [10,7,4,-4,-7,10,-4,7,-7,4], 2, create_using)
+ G.name="Dodecahedral Graph"
+ return G
+
+def frucht_graph(create_using=None):
+ """Return the Frucht Graph.
+
+ The Frucht Graph is the smallest cubical graph whose
+ automorphism group consists only of the identity element.
+
+ """
+ G=cycle_graph(7, create_using)
+ G.add_edges_from([[0,7],[1,7],[2,8],[3,9],[4,9],[5,10],[6,10],
+ [7,11],[8,11],[8,9],[10,11]])
+
+ G.name="Frucht Graph"
+ return G
+
+def heawood_graph(create_using=None):
+ """ Return the Heawood graph, a (3,6) cage. """
+ G=LCF_graph(14, [5,-5], 7, create_using)
+ G.name="Heawood Graph"
+ return G
+
+def house_graph(create_using=None):
+ """Return the House graph (square with triangle on top)."""
+ description=[
+ "adjacencylist",
+ "House Graph",
+ 5,
+ [[2,3],[1,4],[1,4,5],[2,3,5],[3,4]]
+ ]
+ G=make_small_undirected_graph(description, create_using)
+ return G
+
+def house_x_graph(create_using=None):
+ """Return the House graph with a cross inside the house square."""
+ description=[
+ "adjacencylist",
+ "House-with-X-inside Graph",
+ 5,
+ [[2,3,4],[1,3,4],[1,2,4,5],[1,2,3,5],[3,4]]
+ ]
+ G=make_small_undirected_graph(description, create_using)
+ return G
+
+def icosahedral_graph(create_using=None):
+ """Return the Platonic Icosahedral graph."""
+ description=[
+ "adjacencylist",
+ "Platonic Icosahedral Graph",
+ 12,
+ [[2,6,8,9,12],[3,6,7,9],[4,7,9,10],[5,7,10,11],
+ [6,7,11,12],[7,12],[],[9,10,11,12],
+ [10],[11],[12],[]]
+ ]
+ G=make_small_undirected_graph(description, create_using)
+ return G
+
+
+def krackhardt_kite_graph(create_using=None):
+ """
+ Return the Krackhardt Kite Social Network.
+
+ A 10 actor social network introduced by David Krackhardt
+ to illustrate: degree, betweenness, centrality, closeness, etc.
+ The traditional labeling is:
+ Andre=1, Beverley=2, Carol=3, Diane=4,
+ Ed=5, Fernando=6, Garth=7, Heather=8, Ike=9, Jane=10.
+
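+    Examples
+    --------
+    The ten actors above map to nodes 0 through 9:
+
+    >>> G = nx.krackhardt_kite_graph()
+    >>> G.number_of_nodes()
+    10
+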
+ """
+ description=[
+ "adjacencylist",
+ "Krackhardt Kite Social Network",
+ 10,
+ [[2,3,4,6],[1,4,5,7],[1,4,6],[1,2,3,5,6,7],[2,4,7],
+ [1,3,4,7,8],[2,4,5,6,8],[6,7,9],[8,10],[9]]
+ ]
+ G=make_small_undirected_graph(description, create_using)
+ return G
+
+def moebius_kantor_graph(create_using=None):
+ """Return the Moebius-Kantor graph."""
+ G=LCF_graph(16, [5,-5], 8, create_using)
+ G.name="Moebius-Kantor Graph"
+ return G
+
+def octahedral_graph(create_using=None):
+ """Return the Platonic Octahedral graph."""
+ description=[
+ "adjacencylist",
+ "Platonic Octahedral Graph",
+ 6,
+ [[2,3,4,5],[3,4,6],[5,6],[5,6],[6],[]]
+ ]
+ G=make_small_undirected_graph(description, create_using)
+ return G
+
+def pappus_graph():
+ """ Return the Pappus graph."""
+ G=LCF_graph(18,[5,7,-7,7,-7,-5],3)
+ G.name="Pappus Graph"
+ return G
+
+def petersen_graph(create_using=None):
+ """Return the Petersen graph."""
+ description=[
+ "adjacencylist",
+ "Petersen Graph",
+ 10,
+ [[2,5,6],[1,3,7],[2,4,8],[3,5,9],[4,1,10],[1,8,9],[2,9,10],
+ [3,6,10],[4,6,7],[5,7,8]]
+ ]
+ G=make_small_undirected_graph(description, create_using)
+ return G
+
+
+def sedgewick_maze_graph(create_using=None):
+ """
+ Return a small maze with a cycle.
+
+    This is the maze used in Sedgewick, 3rd Edition, Part 5, Graph
+ Algorithms, Chapter 18, e.g. Figure 18.2 and following.
+ Nodes are numbered 0,..,7
+ """
+ G=empty_graph(0, create_using)
+ G.add_nodes_from(range(8))
+ G.add_edges_from([[0,2],[0,7],[0,5]])
+ G.add_edges_from([[1,7],[2,6]])
+ G.add_edges_from([[3,4],[3,5]])
+ G.add_edges_from([[4,5],[4,7],[4,6]])
+ G.name="Sedgewick Maze"
+ return G
+
+def tetrahedral_graph(create_using=None):
+ """ Return the 3-regular Platonic Tetrahedral graph."""
+ G=complete_graph(4, create_using)
+ G.name="Platonic Tetrahedral graph"
+ return G
+
+def truncated_cube_graph(create_using=None):
+ """Return the skeleton of the truncated cube."""
+ description=[
+ "adjacencylist",
+ "Truncated Cube Graph",
+ 24,
+ [[2,3,5],[12,15],[4,5],[7,9],
+ [6],[17,19],[8,9],[11,13],
+ [10],[18,21],[12,13],[15],
+ [14],[22,23],[16],[20,24],
+ [18,19],[21],[20],[24],
+ [22],[23],[24],[]]
+ ]
+ G=make_small_undirected_graph(description, create_using)
+ return G
+
+def truncated_tetrahedron_graph(create_using=None):
+ """Return the skeleton of the truncated Platonic tetrahedron."""
+ G=path_graph(12, create_using)
+# G.add_edges_from([(1,3),(1,10),(2,7),(4,12),(5,12),(6,8),(9,11)])
+ G.add_edges_from([(0,2),(0,9),(1,6),(3,11),(4,11),(5,7),(8,10)])
+ G.name="Truncated Tetrahedron Graph"
+ return G
+
+def tutte_graph(create_using=None):
+ """Return the Tutte graph."""
+ description=[
+ "adjacencylist",
+ "Tutte's Graph",
+ 46,
+ [[2,3,4],[5,27],[11,12],[19,20],[6,34],
+ [7,30],[8,28],[9,15],[10,39],[11,38],
+ [40],[13,40],[14,36],[15,16],[35],
+ [17,23],[18,45],[19,44],[46],[21,46],
+ [22,42],[23,24],[41],[25,28],[26,33],
+ [27,32],[34],[29],[30,33],[31],
+ [32,34],[33],[],[],[36,39],
+ [37],[38,40],[39],[],[],
+ [42,45],[43],[44,46],[45],[],[]]
+ ]
+ G=make_small_undirected_graph(description, create_using)
+ return G
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/generators/social.py b/lib/python2.7/site-packages/setoolsgui/networkx/generators/social.py
new file mode 100644
index 0000000..212dd9c
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/generators/social.py
@@ -0,0 +1,280 @@
+"""
+Famous social networks.
+"""
+import networkx as nx
+__author__ = """\n""".join(['Jordi Torrents <jtorrents@milnou.net>',
+ 'Katy Bold <kbold@princeton.edu>',
+                            'Aric Hagberg <aric.hagberg@gmail.com>'])
+
+__all__ = ['karate_club_graph','davis_southern_women_graph',
+ 'florentine_families_graph']
+
+def karate_club_graph():
+ """Return Zachary's Karate club graph.
+
+ References
+ ----------
+ .. [1] Zachary W.
+ An information flow model for conflict and fission in small groups.
+ Journal of Anthropological Research, 33, 452-473, (1977).
+
+ .. [2] Data file from:
+ http://vlado.fmf.uni-lj.si/pub/networks/data/Ucinet/UciData.htm
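+
+    Examples
+    --------
+    Each node carries a 'club' attribute, either 'Mr. Hi' or 'Officer':
+
+    >>> G = nx.karate_club_graph()
+    >>> G.number_of_nodes()
+    34
+    >>> G.node[0]['club']
+    'Mr. Hi'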
+ """
+ G=nx.Graph()
+ G.add_nodes_from(range(34))
+ G.name="Zachary's Karate Club"
+
+ zacharydat="""\
+0 1 1 1 1 1 1 1 1 0 1 1 1 1 0 0 0 1 0 1 0 1 0 0 0 0 0 0 0 0 0 1 0 0
+1 0 1 1 0 0 0 1 0 0 0 0 0 1 0 0 0 1 0 1 0 1 0 0 0 0 0 0 0 0 1 0 0 0
+1 1 0 1 0 0 0 1 1 1 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 1 0
+1 1 1 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+1 0 0 0 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+1 0 0 0 0 0 1 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+1 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 1
+0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1
+1 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1
+0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1
+1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1
+1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 1 0 0 1 1
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 1 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 1 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 1
+0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 1
+0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 1 1
+0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1
+1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 1 0 0 0 1 1
+0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 1 0 0 1 0 1 0 1 1 0 0 0 0 0 1 1 1 0 1
+0 0 0 0 0 0 0 0 1 1 0 0 0 1 1 1 0 0 1 1 1 0 1 1 0 0 1 1 1 1 1 1 1 0"""
+ row=0
+ for line in zacharydat.split('\n'):
+ thisrow=list(map(int,line.split(' ')))
+ for col in range(0,len(thisrow)):
+ if thisrow[col]==1:
+ G.add_edge(row,col) # col goes from 0,33
+ row+=1
+ club1 = 'Mr. Hi'
+ club2 = 'Officer'
+ G.node[0]['club'] = club1
+ G.node[1]['club'] = club1
+ G.node[2]['club'] = club1
+ G.node[3]['club'] = club1
+ G.node[4]['club'] = club1
+ G.node[5]['club'] = club1
+ G.node[6]['club'] = club1
+ G.node[7]['club'] = club1
+ G.node[8]['club'] = club1
+ G.node[9]['club'] = club2
+ G.node[10]['club'] = club1
+ G.node[11]['club'] = club1
+ G.node[12]['club'] = club1
+ G.node[13]['club'] = club1
+ G.node[14]['club'] = club2
+ G.node[15]['club'] = club2
+ G.node[16]['club'] = club1
+ G.node[17]['club'] = club1
+ G.node[18]['club'] = club2
+ G.node[19]['club'] = club1
+ G.node[20]['club'] = club2
+ G.node[21]['club'] = club1
+ G.node[22]['club'] = club2
+ G.node[23]['club'] = club2
+ G.node[24]['club'] = club2
+ G.node[25]['club'] = club2
+ G.node[26]['club'] = club2
+ G.node[27]['club'] = club2
+ G.node[28]['club'] = club2
+ G.node[29]['club'] = club2
+ G.node[30]['club'] = club2
+ G.node[31]['club'] = club2
+ G.node[32]['club'] = club2
+ G.node[33]['club'] = club2
+ return G
+
+
+
+def davis_southern_women_graph():
+ """Return Davis Southern women social network.
+
+ This is a bipartite graph.
+
+ References
+ ----------
+ .. [1] A. Davis, Gardner, B. B., Gardner, M. R., 1941. Deep South.
+ University of Chicago Press, Chicago, IL.
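+
+    Examples
+    --------
+    The 18 women form one node set (bipartite=0) and the 14 events
+    the other (bipartite=1):
+
+    >>> G = nx.davis_southern_women_graph()
+    >>> len(G)
+    32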
+ """
+ G = nx.Graph()
+ # Top nodes
+ G.add_nodes_from(["Evelyn Jefferson",
+ "Laura Mandeville",
+ "Theresa Anderson",
+ "Brenda Rogers",
+ "Charlotte McDowd",
+ "Frances Anderson",
+ "Eleanor Nye",
+ "Pearl Oglethorpe",
+ "Ruth DeSand",
+ "Verne Sanderson",
+ "Myra Liddel",
+ "Katherina Rogers",
+ "Sylvia Avondale",
+ "Nora Fayette",
+ "Helen Lloyd",
+ "Dorothy Murchison",
+ "Olivia Carleton",
+ "Flora Price"],
+ bipartite=0)
+ # Bottom nodes
+ G.add_nodes_from(["E1",
+ "E2",
+ "E3",
+ "E4",
+ "E5",
+ "E6",
+ "E7",
+ "E8",
+ "E9",
+ "E10",
+ "E11",
+ "E12",
+ "E13",
+ "E14"],
+ bipartite=1)
+
+ G.add_edges_from([("Evelyn Jefferson","E1"),
+ ("Evelyn Jefferson","E2"),
+ ("Evelyn Jefferson","E3"),
+ ("Evelyn Jefferson","E4"),
+ ("Evelyn Jefferson","E5"),
+ ("Evelyn Jefferson","E6"),
+ ("Evelyn Jefferson","E8"),
+ ("Evelyn Jefferson","E9"),
+ ("Laura Mandeville","E1"),
+ ("Laura Mandeville","E2"),
+ ("Laura Mandeville","E3"),
+ ("Laura Mandeville","E5"),
+ ("Laura Mandeville","E6"),
+ ("Laura Mandeville","E7"),
+ ("Laura Mandeville","E8"),
+ ("Theresa Anderson","E2"),
+ ("Theresa Anderson","E3"),
+ ("Theresa Anderson","E4"),
+ ("Theresa Anderson","E5"),
+ ("Theresa Anderson","E6"),
+ ("Theresa Anderson","E7"),
+ ("Theresa Anderson","E8"),
+ ("Theresa Anderson","E9"),
+ ("Brenda Rogers","E1"),
+ ("Brenda Rogers","E3"),
+ ("Brenda Rogers","E4"),
+ ("Brenda Rogers","E5"),
+ ("Brenda Rogers","E6"),
+ ("Brenda Rogers","E7"),
+ ("Brenda Rogers","E8"),
+ ("Charlotte McDowd","E3"),
+ ("Charlotte McDowd","E4"),
+ ("Charlotte McDowd","E5"),
+ ("Charlotte McDowd","E7"),
+ ("Frances Anderson","E3"),
+ ("Frances Anderson","E5"),
+ ("Frances Anderson","E6"),
+ ("Frances Anderson","E8"),
+ ("Eleanor Nye","E5"),
+ ("Eleanor Nye","E6"),
+ ("Eleanor Nye","E7"),
+ ("Eleanor Nye","E8"),
+ ("Pearl Oglethorpe","E6"),
+ ("Pearl Oglethorpe","E8"),
+ ("Pearl Oglethorpe","E9"),
+ ("Ruth DeSand","E5"),
+ ("Ruth DeSand","E7"),
+ ("Ruth DeSand","E8"),
+ ("Ruth DeSand","E9"),
+ ("Verne Sanderson","E7"),
+ ("Verne Sanderson","E8"),
+ ("Verne Sanderson","E9"),
+ ("Verne Sanderson","E12"),
+ ("Myra Liddel","E8"),
+ ("Myra Liddel","E9"),
+ ("Myra Liddel","E10"),
+ ("Myra Liddel","E12"),
+ ("Katherina Rogers","E8"),
+ ("Katherina Rogers","E9"),
+ ("Katherina Rogers","E10"),
+ ("Katherina Rogers","E12"),
+ ("Katherina Rogers","E13"),
+ ("Katherina Rogers","E14"),
+ ("Sylvia Avondale","E7"),
+ ("Sylvia Avondale","E8"),
+ ("Sylvia Avondale","E9"),
+ ("Sylvia Avondale","E10"),
+ ("Sylvia Avondale","E12"),
+ ("Sylvia Avondale","E13"),
+ ("Sylvia Avondale","E14"),
+ ("Nora Fayette","E6"),
+ ("Nora Fayette","E7"),
+ ("Nora Fayette","E9"),
+ ("Nora Fayette","E10"),
+ ("Nora Fayette","E11"),
+ ("Nora Fayette","E12"),
+ ("Nora Fayette","E13"),
+ ("Nora Fayette","E14"),
+ ("Helen Lloyd","E7"),
+ ("Helen Lloyd","E8"),
+ ("Helen Lloyd","E10"),
+ ("Helen Lloyd","E11"),
+ ("Helen Lloyd","E12"),
+ ("Dorothy Murchison","E8"),
+ ("Dorothy Murchison","E9"),
+ ("Olivia Carleton","E9"),
+ ("Olivia Carleton","E11"),
+ ("Flora Price","E9"),
+ ("Flora Price","E11")])
+ return G
+
+def florentine_families_graph():
+ """Return Florentine families graph.
+
+ References
+ ----------
+ .. [1] Ronald L. Breiger and Philippa E. Pattison
+ Cumulated social roles: The duality of persons and their algebras,1
+ Social Networks, Volume 8, Issue 3, September 1986, Pages 215-256
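+
+    Examples
+    --------
+    A small sketch; the graph has one node per family:
+
+    >>> G = nx.florentine_families_graph()
+    >>> G.number_of_nodes()
+    15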
+ """
+ G=nx.Graph()
+ G.add_edge('Acciaiuoli','Medici')
+ G.add_edge('Castellani','Peruzzi')
+ G.add_edge('Castellani','Strozzi')
+ G.add_edge('Castellani','Barbadori')
+ G.add_edge('Medici','Barbadori')
+ G.add_edge('Medici','Ridolfi')
+ G.add_edge('Medici','Tornabuoni')
+ G.add_edge('Medici','Albizzi')
+ G.add_edge('Medici','Salviati')
+ G.add_edge('Salviati','Pazzi')
+ G.add_edge('Peruzzi','Strozzi')
+ G.add_edge('Peruzzi','Bischeri')
+ G.add_edge('Strozzi','Ridolfi')
+ G.add_edge('Strozzi','Bischeri')
+ G.add_edge('Ridolfi','Tornabuoni')
+ G.add_edge('Tornabuoni','Guadagni')
+ G.add_edge('Albizzi','Ginori')
+ G.add_edge('Albizzi','Guadagni')
+ G.add_edge('Bischeri','Guadagni')
+ G.add_edge('Guadagni','Lamberteschi')
+ return G
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/generators/stochastic.py b/lib/python2.7/site-packages/setoolsgui/networkx/generators/stochastic.py
new file mode 100644
index 0000000..5553f5f
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/generators/stochastic.py
@@ -0,0 +1,46 @@
+"""Stocastic graph."""
+# Copyright (C) 2010-2013 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+import networkx as nx
+__author__ = "Aric Hagberg <aric.hagberg@gmail.com>"
+__all__ = ['stochastic_graph']
+
+def stochastic_graph(G, copy=True, weight='weight'):
+ """Return a right-stochastic representation of G.
+
+    A right-stochastic graph is a weighted digraph in which, for each
+    node, the weights of its outgoing edges sum to 1.
+
+    Parameters
+    ----------
+ G : graph
+ A NetworkX graph
+
+ copy : boolean, optional
+ If True make a copy of the graph, otherwise modify the original graph
+
+ weight : edge attribute key (optional, default='weight')
+ Edge data key used for weight. If no attribute is found for an edge
+ the edge weight is set to 1.
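+
+    Examples
+    --------
+    A minimal sketch: each out-edge weight is divided by the node's
+    weighted out-degree.
+
+    >>> G = nx.DiGraph()
+    >>> G.add_weighted_edges_from([(0, 1, 1.0), (0, 2, 3.0)])
+    >>> W = nx.stochastic_graph(G)
+    >>> W[0][1]['weight']
+    0.25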
+ """
+ if type(G) == nx.MultiGraph or type(G) == nx.MultiDiGraph:
+ raise nx.NetworkXError('stochastic_graph not implemented '
+ 'for multigraphs')
+
+ if not G.is_directed():
+ raise nx.NetworkXError('stochastic_graph not implemented '
+ 'for undirected graphs')
+
+ if copy:
+ W = nx.DiGraph(G)
+ else:
+ W = G # reference original graph, no copy
+
+ degree = W.out_degree(weight=weight)
+ for (u,v,d) in W.edges(data=True):
+ d[weight] = float(d.get(weight,1.0))/degree[u]
+ return W
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_atlas.py b/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_atlas.py
new file mode 100644
index 0000000..2428385
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_atlas.py
@@ -0,0 +1,55 @@
+from nose.tools import *
+import networkx as nx
+
+
+class TestAtlas(object):
+ def setUp(self):
+ self.GAG=nx.graph_atlas_g()
+
+ def test_sizes(self):
+ G=self.GAG[0]
+ assert_equal(G.number_of_nodes(),0)
+ assert_equal(G.number_of_edges(),0)
+
+ G=self.GAG[7]
+ assert_equal(G.number_of_nodes(),3)
+ assert_equal(G.number_of_edges(),3)
+
+ def test_names(self):
+ i=0
+ for g in self.GAG:
+ name=g.name
+ assert_equal(int(name[1:]),i)
+ i+=1
+
+ def test_monotone_nodes(self):
+ # check for monotone increasing number of nodes
+ previous=self.GAG[0]
+ for g in self.GAG:
+ assert_false(len(g)-len(previous) > 1)
+ previous=g.copy()
+
+    def test_monotone_edges(self):
+ # check for monotone increasing number of edges
+ # (for fixed number of nodes)
+ previous=self.GAG[0]
+ for g in self.GAG:
+ if len(g)==len(previous):
+ assert_false(g.size()-previous.size() > 1)
+ previous=g.copy()
+
+ def test_monotone_degree_sequence(self):
+ # check for monotone increasing degree sequence
+        # (for fixed number of nodes and edges)
+ # note that 111223 < 112222
+ previous=self.GAG[0]
+ for g in self.GAG:
+ if len(g)==0:
+ continue
+ if len(g)==len(previous) & g.size()==previous.size():
+ deg_seq=sorted(g.degree().values())
+ previous_deg_seq=sorted(previous.degree().values())
+ assert_true(previous_deg_seq < deg_seq)
+ previous=g.copy()
+
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_bipartite.py b/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_bipartite.py
new file mode 100644
index 0000000..63d336e
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_bipartite.py
@@ -0,0 +1,176 @@
+#!/usr/bin/env python
+
+from nose.tools import *
+from networkx import *
+from networkx.generators.bipartite import *
+
+"""Generators - Bipartite
+----------------------
+"""
+
+class TestGeneratorsBipartite():
+ def test_configuration_model(self):
+ aseq=[3,3,3,3]
+ bseq=[2,2,2,2,2]
+ assert_raises(networkx.exception.NetworkXError,
+ bipartite_configuration_model, aseq, bseq)
+
+ aseq=[3,3,3,3]
+ bseq=[2,2,2,2,2,2]
+ G=bipartite_configuration_model(aseq,bseq)
+ assert_equal(sorted(G.degree().values()),
+ [2, 2, 2, 2, 2, 2, 3, 3, 3, 3])
+
+ aseq=[2,2,2,2,2,2]
+ bseq=[3,3,3,3]
+ G=bipartite_configuration_model(aseq,bseq)
+ assert_equal(sorted(G.degree().values()),
+ [2, 2, 2, 2, 2, 2, 3, 3, 3, 3])
+
+ aseq=[2,2,2,1,1,1]
+ bseq=[3,3,3]
+ G=bipartite_configuration_model(aseq,bseq)
+ assert_equal(sorted(G.degree().values()),
+ [1, 1, 1, 2, 2, 2, 3, 3, 3])
+
+ GU=project(Graph(G),range(len(aseq)))
+ assert_equal(GU.number_of_nodes(), 6)
+
+ GD=project(Graph(G),range(len(aseq),len(aseq)+len(bseq)))
+ assert_equal(GD.number_of_nodes(), 3)
+
+ assert_raises(networkx.exception.NetworkXError,
+ bipartite_configuration_model, aseq, bseq,
+ create_using=DiGraph())
+
+ def test_havel_hakimi_graph(self):
+ aseq=[3,3,3,3]
+ bseq=[2,2,2,2,2]
+ assert_raises(networkx.exception.NetworkXError,
+ bipartite_havel_hakimi_graph, aseq, bseq)
+
+ bseq=[2,2,2,2,2,2]
+ G=bipartite_havel_hakimi_graph(aseq,bseq)
+ assert_equal(sorted(G.degree().values()),
+ [2, 2, 2, 2, 2, 2, 3, 3, 3, 3])
+
+ aseq=[2,2,2,2,2,2]
+ bseq=[3,3,3,3]
+ G=bipartite_havel_hakimi_graph(aseq,bseq)
+ assert_equal(sorted(G.degree().values()),
+ [2, 2, 2, 2, 2, 2, 3, 3, 3, 3])
+
+ GU=project(Graph(G),range(len(aseq)))
+ assert_equal(GU.number_of_nodes(), 6)
+
+ GD=project(Graph(G),range(len(aseq),len(aseq)+len(bseq)))
+ assert_equal(GD.number_of_nodes(), 4)
+ assert_raises(networkx.exception.NetworkXError,
+ bipartite_havel_hakimi_graph, aseq, bseq,
+ create_using=DiGraph())
+
+ def test_reverse_havel_hakimi_graph(self):
+ aseq=[3,3,3,3]
+ bseq=[2,2,2,2,2]
+ assert_raises(networkx.exception.NetworkXError,
+ bipartite_reverse_havel_hakimi_graph, aseq, bseq)
+
+ bseq=[2,2,2,2,2,2]
+ G=bipartite_reverse_havel_hakimi_graph(aseq,bseq)
+ assert_equal(sorted(G.degree().values()),
+ [2, 2, 2, 2, 2, 2, 3, 3, 3, 3])
+
+ aseq=[2,2,2,2,2,2]
+ bseq=[3,3,3,3]
+ G=bipartite_reverse_havel_hakimi_graph(aseq,bseq)
+ assert_equal(sorted(G.degree().values()),
+ [2, 2, 2, 2, 2, 2, 3, 3, 3, 3])
+
+ aseq=[2,2,2,1,1,1]
+ bseq=[3,3,3]
+ G=bipartite_reverse_havel_hakimi_graph(aseq,bseq)
+ assert_equal(sorted(G.degree().values()),
+ [1, 1, 1, 2, 2, 2, 3, 3, 3])
+
+ GU=project(Graph(G),range(len(aseq)))
+ assert_equal(GU.number_of_nodes(), 6)
+
+ GD=project(Graph(G),range(len(aseq),len(aseq)+len(bseq)))
+ assert_equal(GD.number_of_nodes(), 3)
+ assert_raises(networkx.exception.NetworkXError,
+ bipartite_reverse_havel_hakimi_graph, aseq, bseq,
+ create_using=DiGraph())
+
+ def test_alternating_havel_hakimi_graph(self):
+ aseq=[3,3,3,3]
+ bseq=[2,2,2,2,2]
+ assert_raises(networkx.exception.NetworkXError,
+ bipartite_alternating_havel_hakimi_graph, aseq, bseq)
+
+ bseq=[2,2,2,2,2,2]
+ G=bipartite_alternating_havel_hakimi_graph(aseq,bseq)
+ assert_equal(sorted(G.degree().values()),
+ [2, 2, 2, 2, 2, 2, 3, 3, 3, 3])
+
+ aseq=[2,2,2,2,2,2]
+ bseq=[3,3,3,3]
+ G=bipartite_alternating_havel_hakimi_graph(aseq,bseq)
+ assert_equal(sorted(G.degree().values()),
+ [2, 2, 2, 2, 2, 2, 3, 3, 3, 3])
+
+ aseq=[2,2,2,1,1,1]
+ bseq=[3,3,3]
+ G=bipartite_alternating_havel_hakimi_graph(aseq,bseq)
+ assert_equal(sorted(G.degree().values()),
+ [1, 1, 1, 2, 2, 2, 3, 3, 3])
+
+ GU=project(Graph(G),range(len(aseq)))
+ assert_equal(GU.number_of_nodes(), 6)
+
+ GD=project(Graph(G),range(len(aseq),len(aseq)+len(bseq)))
+ assert_equal(GD.number_of_nodes(), 3)
+
+ assert_raises(networkx.exception.NetworkXError,
+ bipartite_alternating_havel_hakimi_graph, aseq, bseq,
+ create_using=DiGraph())
+
+ def test_preferential_attachment(self):
+ aseq=[3,2,1,1]
+ G=bipartite_preferential_attachment_graph(aseq,0.5)
+ assert_raises(networkx.exception.NetworkXError,
+ bipartite_preferential_attachment_graph, aseq, 0.5,
+ create_using=DiGraph())
+
+ def test_bipartite_random_graph(self):
+ n=10
+ m=20
+ G=bipartite_random_graph(n,m,0.9)
+ assert_equal(len(G),30)
+ assert_true(is_bipartite(G))
+ X,Y=nx.algorithms.bipartite.sets(G)
+ assert_equal(set(range(n)),X)
+ assert_equal(set(range(n,n+m)),Y)
+
+ def test_directed_bipartite_random_graph(self):
+ n=10
+ m=20
+ G=bipartite_random_graph(n,m,0.9,directed=True)
+ assert_equal(len(G),30)
+ assert_true(is_bipartite(G))
+ X,Y=nx.algorithms.bipartite.sets(G)
+ assert_equal(set(range(n)),X)
+ assert_equal(set(range(n,n+m)),Y)
+
+ def test_bipartite_gnmk_random_graph(self):
+ n = 10
+ m = 20
+ edges = 100
+ G = bipartite_gnmk_random_graph(n, m, edges)
+ assert_equal(len(G),30)
+ assert_true(is_bipartite(G))
+ X,Y=nx.algorithms.bipartite.sets(G)
+ print(X)
+ assert_equal(set(range(n)),X)
+ assert_equal(set(range(n,n+m)),Y)
+ assert_equal(edges, len(G.edges()))
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_classic.py b/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_classic.py
new file mode 100644
index 0000000..96ca367
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_classic.py
@@ -0,0 +1,408 @@
+#!/usr/bin/env python
+"""
+====================
+Generators - Classic
+====================
+
+Unit tests for various classic graph generators in generators/classic.py
+"""
+from nose.tools import *
+from networkx import *
+from networkx.algorithms.isomorphism.isomorph import graph_could_be_isomorphic
+is_isomorphic=graph_could_be_isomorphic
+
+class TestGeneratorClassic():
+ def test_balanced_tree(self):
+        # balanced_tree(r,h) is a tree with (r**(h+1)-1)/(r-1) nodes
+ for r,h in [(2,2),(3,3),(6,2)]:
+ t=balanced_tree(r,h)
+ order=t.order()
+ assert_true(order==(r**(h+1)-1)/(r-1))
+ assert_true(is_connected(t))
+ assert_true(t.size()==order-1)
+ dh = degree_histogram(t)
+            assert_equal(dh[0],0) # no nodes of degree 0
+ assert_equal(dh[1],r**h) # nodes of degree 1 are leaves
+ assert_equal(dh[r],1) # root is degree r
+ assert_equal(dh[r+1],order-r**h-1)# everyone else is degree r+1
+ assert_equal(len(dh),r+2)
+
+ def test_balanced_tree_star(self):
+ # balanced_tree(r,1) is the r-star
+ t=balanced_tree(r=2,h=1)
+ assert_true(is_isomorphic(t,star_graph(2)))
+ t=balanced_tree(r=5,h=1)
+ assert_true(is_isomorphic(t,star_graph(5)))
+ t=balanced_tree(r=10,h=1)
+ assert_true(is_isomorphic(t,star_graph(10)))
+
+ def test_full_rary_tree(self):
+ r=2
+ n=9
+ t=full_rary_tree(r,n)
+ assert_equal(t.order(),n)
+ assert_true(is_connected(t))
+ dh = degree_histogram(t)
+        assert_equal(dh[0],0) # no nodes of degree 0
+ assert_equal(dh[1],5) # nodes of degree 1 are leaves
+ assert_equal(dh[r],1) # root is degree r
+ assert_equal(dh[r+1],9-5-1) # everyone else is degree r+1
+ assert_equal(len(dh),r+2)
+
+ def test_full_rary_tree_balanced(self):
+ t=full_rary_tree(2,15)
+ th=balanced_tree(2,3)
+ assert_true(is_isomorphic(t,th))
+
+ def test_full_rary_tree_path(self):
+ t=full_rary_tree(1,10)
+ assert_true(is_isomorphic(t,path_graph(10)))
+
+ def test_full_rary_tree_empty(self):
+ t=full_rary_tree(0,10)
+ assert_true(is_isomorphic(t,empty_graph(10)))
+ t=full_rary_tree(3,0)
+ assert_true(is_isomorphic(t,empty_graph(0)))
+
+ def test_full_rary_tree_3_20(self):
+ t=full_rary_tree(3,20)
+ assert_equal(t.order(),20)
+
+ def test_barbell_graph(self):
+ # number of nodes = 2*m1 + m2 (2 m1-complete graphs + m2-path + 2 edges)
+        # number of edges = 2*number_of_edges(m1-complete graph) + m2 + 1
+ m1=3; m2=5
+ b=barbell_graph(m1,m2)
+ assert_true(number_of_nodes(b)==2*m1+m2)
+ assert_true(number_of_edges(b)==m1*(m1-1) + m2 + 1)
+ assert_equal(b.name, 'barbell_graph(3,5)')
+
+ m1=4; m2=10
+ b=barbell_graph(m1,m2)
+ assert_true(number_of_nodes(b)==2*m1+m2)
+ assert_true(number_of_edges(b)==m1*(m1-1) + m2 + 1)
+ assert_equal(b.name, 'barbell_graph(4,10)')
+
+ m1=3; m2=20
+ b=barbell_graph(m1,m2)
+ assert_true(number_of_nodes(b)==2*m1+m2)
+ assert_true(number_of_edges(b)==m1*(m1-1) + m2 + 1)
+ assert_equal(b.name, 'barbell_graph(3,20)')
+
+ # Raise NetworkXError if m1<2
+ m1=1; m2=20
+ assert_raises(networkx.exception.NetworkXError, barbell_graph, m1, m2)
+
+ # Raise NetworkXError if m2<0
+ m1=5; m2=-2
+ assert_raises(networkx.exception.NetworkXError, barbell_graph, m1, m2)
+
+ # barbell_graph(2,m) = path_graph(m+4)
+ m1=2; m2=5
+ b=barbell_graph(m1,m2)
+ assert_true(is_isomorphic(b, path_graph(m2+4)))
+
+ m1=2; m2=10
+ b=barbell_graph(m1,m2)
+ assert_true(is_isomorphic(b, path_graph(m2+4)))
+
+ m1=2; m2=20
+ b=barbell_graph(m1,m2)
+ assert_true(is_isomorphic(b, path_graph(m2+4)))
+
+ assert_raises(networkx.exception.NetworkXError, barbell_graph, m1, m2,
+ create_using=DiGraph())
+
+ mb=barbell_graph(m1, m2, create_using=MultiGraph())
+ assert_true(mb.edges()==b.edges())
+
+ def test_complete_graph(self):
+ # complete_graph(m) is a connected graph with
+ # m nodes and m*(m-1)/2 edges
+ for m in [0, 1, 3, 5]:
+ g = complete_graph(m)
+ assert_true(number_of_nodes(g) == m)
+ assert_true(number_of_edges(g) == m * (m - 1) // 2)
+
+
+ mg=complete_graph(m, create_using=MultiGraph())
+ assert_true(mg.edges()==g.edges())
+
+ def test_complete_digraph(self):
+ # complete_graph(m, create_using=DiGraph()) is a connected digraph
+ # with m nodes and m*(m-1) edges
+ for m in [0, 1, 3, 5]:
+ g = complete_graph(m,create_using=nx.DiGraph())
+ assert_true(number_of_nodes(g) == m)
+ assert_true(number_of_edges(g) == m * (m - 1))
+
+ def test_complete_bipartite_graph(self):
+ G=complete_bipartite_graph(0,0)
+ assert_true(is_isomorphic( G, null_graph() ))
+
+ for i in [1, 5]:
+ G=complete_bipartite_graph(i,0)
+ assert_true(is_isomorphic( G, empty_graph(i) ))
+ G=complete_bipartite_graph(0,i)
+ assert_true(is_isomorphic( G, empty_graph(i) ))
+
+ G=complete_bipartite_graph(2,2)
+ assert_true(is_isomorphic( G, cycle_graph(4) ))
+
+ G=complete_bipartite_graph(1,5)
+ assert_true(is_isomorphic( G, star_graph(5) ))
+
+ G=complete_bipartite_graph(5,1)
+ assert_true(is_isomorphic( G, star_graph(5) ))
+
+ # complete_bipartite_graph(m1,m2) is a connected graph with
+ # m1+m2 nodes and m1*m2 edges
+ for m1, m2 in [(5, 11), (7, 3)]:
+ G=complete_bipartite_graph(m1,m2)
+ assert_equal(number_of_nodes(G), m1 + m2)
+ assert_equal(number_of_edges(G), m1 * m2)
+
+ assert_raises(networkx.exception.NetworkXError,
+ complete_bipartite_graph, 7, 3, create_using=DiGraph())
+
+ mG=complete_bipartite_graph(7, 3, create_using=MultiGraph())
+ assert_equal(mG.edges(), G.edges())
+
+ def test_circular_ladder_graph(self):
+ G=circular_ladder_graph(5)
+ assert_raises(networkx.exception.NetworkXError, circular_ladder_graph,
+ 5, create_using=DiGraph())
+ mG=circular_ladder_graph(5, create_using=MultiGraph())
+ assert_equal(mG.edges(), G.edges())
+
+ def test_cycle_graph(self):
+ G=cycle_graph(4)
+ assert_equal(sorted(G.edges()), [(0, 1), (0, 3), (1, 2), (2, 3)])
+ mG=cycle_graph(4, create_using=MultiGraph())
+ assert_equal(sorted(mG.edges()), [(0, 1), (0, 3), (1, 2), (2, 3)])
+ G=cycle_graph(4, create_using=DiGraph())
+ assert_false(G.has_edge(2,1))
+ assert_true(G.has_edge(1,2))
+
+ def test_dorogovtsev_goltsev_mendes_graph(self):
+ G=dorogovtsev_goltsev_mendes_graph(0)
+ assert_equal(G.edges(), [(0, 1)])
+ assert_equal(G.nodes(), [0, 1])
+ G=dorogovtsev_goltsev_mendes_graph(1)
+ assert_equal(G.edges(), [(0, 1), (0, 2), (1, 2)])
+ assert_equal(average_clustering(G), 1.0)
+ assert_equal(list(triangles(G).values()), [1, 1, 1])
+ G=dorogovtsev_goltsev_mendes_graph(10)
+ assert_equal(number_of_nodes(G), 29526)
+ assert_equal(number_of_edges(G), 59049)
+ assert_equal(G.degree(0), 1024)
+ assert_equal(G.degree(1), 1024)
+ assert_equal(G.degree(2), 1024)
+
+ assert_raises(networkx.exception.NetworkXError,
+ dorogovtsev_goltsev_mendes_graph, 7,
+ create_using=DiGraph())
+ assert_raises(networkx.exception.NetworkXError,
+ dorogovtsev_goltsev_mendes_graph, 7,
+ create_using=MultiGraph())
+
+ def test_empty_graph(self):
+ G=empty_graph()
+ assert_equal(number_of_nodes(G), 0)
+ G=empty_graph(42)
+ assert_equal(number_of_nodes(G), 42)
+ assert_equal(number_of_edges(G), 0)
+ assert_equal(G.name, 'empty_graph(42)')
+
+ # create empty digraph
+ G=empty_graph(42,create_using=DiGraph(name="duh"))
+ assert_equal(number_of_nodes(G), 42)
+ assert_equal(number_of_edges(G), 0)
+ assert_equal(G.name, 'empty_graph(42)')
+ assert_true(isinstance(G,DiGraph))
+
+ # create empty multigraph
+ G=empty_graph(42,create_using=MultiGraph(name="duh"))
+ assert_equal(number_of_nodes(G), 42)
+ assert_equal(number_of_edges(G), 0)
+ assert_equal(G.name, 'empty_graph(42)')
+ assert_true(isinstance(G,MultiGraph))
+
+ # create empty graph from another
+ pete=petersen_graph()
+ G=empty_graph(42,create_using=pete)
+ assert_equal(number_of_nodes(G), 42)
+ assert_equal(number_of_edges(G), 0)
+ assert_equal(G.name, 'empty_graph(42)')
+ assert_true(isinstance(G,Graph))
+
+ def test_grid_2d_graph(self):
+ n=5;m=6
+ G=grid_2d_graph(n,m)
+ assert_equal(number_of_nodes(G), n*m)
+ assert_equal(degree_histogram(G), [0,0,4,2*(n+m)-8,(n-2)*(m-2)])
+ DG=grid_2d_graph(n,m, create_using=DiGraph())
+ assert_equal(DG.succ, G.adj)
+ assert_equal(DG.pred, G.adj)
+ MG=grid_2d_graph(n,m, create_using=MultiGraph())
+ assert_equal(MG.edges(), G.edges())
+
+ def test_grid_graph(self):
+ """grid_graph([n,m]) is a connected simple graph with the
+ following properties:
+ number_of_nodes=n*m
+ degree_histogram=[0,0,4,2*(n+m)-8,(n-2)*(m-2)]
+ """
+ for n, m in [(3, 5), (5, 3), (4, 5), (5, 4)]:
+ dim=[n,m]
+ g=grid_graph(dim)
+ assert_equal(number_of_nodes(g), n*m)
+ assert_equal(degree_histogram(g), [0,0,4,2*(n+m)-8,(n-2)*(m-2)])
+ assert_equal(dim,[n,m])
+
+ for n, m in [(1, 5), (5, 1)]:
+ dim=[n,m]
+ g=grid_graph(dim)
+ assert_equal(number_of_nodes(g), n*m)
+ assert_true(is_isomorphic(g,path_graph(5)))
+ assert_equal(dim,[n,m])
+
+# mg=grid_graph([n,m], create_using=MultiGraph())
+# assert_equal(mg.edges(), g.edges())
+
+ def test_hypercube_graph(self):
+ for n, G in [(0, null_graph()), (1, path_graph(2)),
+ (2, cycle_graph(4)), (3, cubical_graph())]:
+ g=hypercube_graph(n)
+ assert_true(is_isomorphic(g, G))
+
+ g=hypercube_graph(4)
+ assert_equal(degree_histogram(g), [0, 0, 0, 0, 16])
+ g=hypercube_graph(5)
+ assert_equal(degree_histogram(g), [0, 0, 0, 0, 0, 32])
+ g=hypercube_graph(6)
+ assert_equal(degree_histogram(g), [0, 0, 0, 0, 0, 0, 64])
+
+# mg=hypercube_graph(6, create_using=MultiGraph())
+# assert_equal(mg.edges(), g.edges())
+
+ def test_ladder_graph(self):
+ for i, G in [(0, empty_graph(0)), (1, path_graph(2)),
+ (2, hypercube_graph(2)), (10, grid_graph([2,10]))]:
+ assert_true(is_isomorphic(ladder_graph(i), G))
+
+ assert_raises(networkx.exception.NetworkXError,
+ ladder_graph, 2, create_using=DiGraph())
+
+ g = ladder_graph(2)
+ mg=ladder_graph(2, create_using=MultiGraph())
+ assert_equal(mg.edges(), g.edges())
+
+ def test_lollipop_graph(self):
+ # number of nodes = m1 + m2
+ # number of edges = number_of_edges(complete_graph(m1)) + m2
+ for m1, m2 in [(3, 5), (4, 10), (3, 20)]:
+ b=lollipop_graph(m1,m2)
+ assert_equal(number_of_nodes(b), m1+m2)
+ assert_equal(number_of_edges(b), m1*(m1-1)/2 + m2)
+ assert_equal(b.name,
+ 'lollipop_graph(' + str(m1) + ',' + str(m2) + ')')
+
+ # Raise NetworkXError if m<2
+ assert_raises(networkx.exception.NetworkXError,
+ lollipop_graph, 1, 20)
+
+ # Raise NetworkXError if n<0
+ assert_raises(networkx.exception.NetworkXError,
+ lollipop_graph, 5, -2)
+
+ # lollipop_graph(2,m) = path_graph(m+2)
+ for m1, m2 in [(2, 5), (2, 10), (2, 20)]:
+ b=lollipop_graph(m1,m2)
+ assert_true(is_isomorphic(b, path_graph(m2+2)))
+
+ assert_raises(networkx.exception.NetworkXError,
+ lollipop_graph, m1, m2, create_using=DiGraph())
+
+ mb=lollipop_graph(m1, m2, create_using=MultiGraph())
+ assert_equal(mb.edges(), b.edges())
+
+ def test_null_graph(self):
+ assert_equal(number_of_nodes(null_graph()), 0)
+
+ def test_path_graph(self):
+ p=path_graph(0)
+ assert_true(is_isomorphic(p, null_graph()))
+ assert_equal(p.name, 'path_graph(0)')
+
+ p=path_graph(1)
+ assert_true(is_isomorphic( p, empty_graph(1)))
+ assert_equal(p.name, 'path_graph(1)')
+
+ p=path_graph(10)
+ assert_true(is_connected(p))
+ assert_equal(sorted(list(p.degree().values())),
+ [1, 1, 2, 2, 2, 2, 2, 2, 2, 2])
+ assert_equal(p.order()-1, p.size())
+
+ dp=path_graph(3, create_using=DiGraph())
+ assert_true(dp.has_edge(0,1))
+ assert_false(dp.has_edge(1,0))
+
+ mp=path_graph(10, create_using=MultiGraph())
+ assert_true(mp.edges()==p.edges())
+
+ def test_periodic_grid_2d_graph(self):
+ g=grid_2d_graph(0,0, periodic=True)
+ assert_equal(g.degree(), {})
+
+ for m, n, G in [(2, 2, cycle_graph(4)), (1, 7, cycle_graph(7)),
+ (7, 1, cycle_graph(7)), (2, 5, circular_ladder_graph(5)),
+ (5, 2, circular_ladder_graph(5)), (2, 4, cubical_graph()),
+ (4, 2, cubical_graph())]:
+ g=grid_2d_graph(m,n, periodic=True)
+ assert_true(is_isomorphic(g, G))
+
+ DG=grid_2d_graph(4, 2, periodic=True, create_using=DiGraph())
+ assert_equal(DG.succ,g.adj)
+ assert_equal(DG.pred,g.adj)
+ MG=grid_2d_graph(4, 2, periodic=True, create_using=MultiGraph())
+ assert_equal(MG.edges(),g.edges())
+
+ def test_star_graph(self):
+ assert_true(is_isomorphic(star_graph(0), empty_graph(1)))
+ assert_true(is_isomorphic(star_graph(1), path_graph(2)))
+ assert_true(is_isomorphic(star_graph(2), path_graph(3)))
+
+ s=star_graph(10)
+ assert_equal(sorted(list(s.degree().values())),
+ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 10])
+
+ assert_raises(networkx.exception.NetworkXError,
+ star_graph, 10, create_using=DiGraph())
+
+ ms=star_graph(10, create_using=MultiGraph())
+ assert_true(ms.edges()==s.edges())
+
+ def test_trivial_graph(self):
+ assert_equal(number_of_nodes(trivial_graph()), 1)
+
+ def test_wheel_graph(self):
+ for n, G in [(0, null_graph()), (1, empty_graph(1)),
+ (2, path_graph(2)), (3, complete_graph(3)),
+ (4, complete_graph(4))]:
+ g=wheel_graph(n)
+ assert_true(is_isomorphic( g, G))
+
+ assert_equal(g.name, 'wheel_graph(4)')
+
+ g=wheel_graph(10)
+ assert_equal(sorted(list(g.degree().values())),
+ [3, 3, 3, 3, 3, 3, 3, 3, 3, 9])
+
+ assert_raises(networkx.exception.NetworkXError,
+ wheel_graph, 10, create_using=DiGraph())
+
+ mg=wheel_graph(10, create_using=MultiGraph())
+ assert_equal(mg.edges(), g.edges())
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_degree_seq.py b/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_degree_seq.py
new file mode 100644
index 0000000..0d09855
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_degree_seq.py
@@ -0,0 +1,169 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx
+from networkx import *
+from networkx.generators.degree_seq import *
+from networkx.utils import uniform_sequence,powerlaw_sequence
+
+def test_configuration_model_empty():
+ # empty graph has empty degree sequence
+ deg_seq=[]
+ G=configuration_model(deg_seq)
+ assert_equal(G.degree(), {})
+
+def test_configuration_model():
+ deg_seq=[5,3,3,3,3,2,2,2,1,1,1]
+ G=configuration_model(deg_seq,seed=12345678)
+ assert_equal(sorted(G.degree().values(),reverse=True),
+ [5, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1])
+ assert_equal(sorted(G.degree(range(len(deg_seq))).values(),
+ reverse=True),
+ [5, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1])
+
+ # test that fixed seed delivers the same graph
+ deg_seq=[3,3,3,3,3,3,3,3,3,3,3,3]
+ G1=configuration_model(deg_seq,seed=1000)
+ G2=configuration_model(deg_seq,seed=1000)
+ assert_true(is_isomorphic(G1,G2))
+ G1=configuration_model(deg_seq,seed=10)
+ G2=configuration_model(deg_seq,seed=10)
+ assert_true(is_isomorphic(G1,G2))
+
+@raises(NetworkXError)
+def test_configuration_raise():
+ z=[5,3,3,3,3,2,2,2,1,1,1]
+ G = configuration_model(z, create_using=DiGraph())
+
+@raises(NetworkXError)
+def test_configuration_raise_odd():
+ z=[5,3,3,3,3,2,2,2,1,1]
+ G = configuration_model(z, create_using=DiGraph())
+
+@raises(NetworkXError)
+def test_directed_configuration_raise_unequal():
+ zin = [5,3,3,3,3,2,2,2,1,1]
+ zout = [5,3,3,3,3,2,2,2,1,2]
+ G = directed_configuration_model(zin, zout)
+
+def test_directed_configuration_model():
+ G = directed_configuration_model([],[],seed=0)
+ assert_equal(len(G),0)
+
+
+def test_expected_degree_graph_empty():
+ # empty graph has empty degree sequence
+ deg_seq=[]
+ G=expected_degree_graph(deg_seq)
+ assert_equal(G.degree(), {})
+
+
+def test_expected_degree_graph():
+ # test that fixed seed delivers the same graph
+ deg_seq=[3,3,3,3,3,3,3,3,3,3,3,3]
+ G1=expected_degree_graph(deg_seq,seed=1000)
+ G2=expected_degree_graph(deg_seq,seed=1000)
+ assert_true(is_isomorphic(G1,G2))
+
+ G1=expected_degree_graph(deg_seq,seed=10)
+ G2=expected_degree_graph(deg_seq,seed=10)
+ assert_true(is_isomorphic(G1,G2))
+
+
+def test_expected_degree_graph_selfloops():
+ deg_seq=[3,3,3,3,3,3,3,3,3,3,3,3]
+ G1=expected_degree_graph(deg_seq,seed=1000, selfloops=False)
+ G2=expected_degree_graph(deg_seq,seed=1000, selfloops=False)
+ assert_true(is_isomorphic(G1,G2))
+
+def test_expected_degree_graph_skew():
+ deg_seq=[10,2,2,2,2]
+ G1=expected_degree_graph(deg_seq,seed=1000)
+ G2=expected_degree_graph(deg_seq,seed=1000)
+ assert_true(is_isomorphic(G1,G2))
+
+
+def test_havel_hakimi_construction():
+ G = havel_hakimi_graph([])
+ assert_equal(len(G),0)
+
+ z=[1000,3,3,3,3,2,2,2,1,1,1]
+ assert_raises(networkx.exception.NetworkXError,
+ havel_hakimi_graph, z)
+ z=["A",3,3,3,3,2,2,2,1,1,1]
+ assert_raises(networkx.exception.NetworkXError,
+ havel_hakimi_graph, z)
+
+ z=[5,4,3,3,3,2,2,2]
+ G=havel_hakimi_graph(z)
+ G=configuration_model(z)
+ z=[6,5,4,4,2,1,1,1]
+ assert_raises(networkx.exception.NetworkXError,
+ havel_hakimi_graph, z)
+
+ z=[10,3,3,3,3,2,2,2,2,2,2]
+
+ G=havel_hakimi_graph(z)
+
+ assert_raises(networkx.exception.NetworkXError,
+ havel_hakimi_graph, z, create_using=DiGraph())
+
+def test_directed_havel_hakimi():
+ # Test range of valid directed degree sequences
+ n, r = 100, 10
+ p = 1.0 / r
+ for i in range(r):
+ G1 = nx.erdos_renyi_graph(n,p*(i+1),None,True)
+ din = list(G1.in_degree().values())
+ dout = list(G1.out_degree().values())
+ G2 = nx.directed_havel_hakimi_graph(din, dout)
+ assert_true(din == list(G2.in_degree().values()))
+ assert_true(dout == list(G2.out_degree().values()))
+
+ # Test non-graphical sequence
+ dout = [1000,3,3,3,3,2,2,2,1,1,1]
+ din=[103,102,102,102,102,102,102,102,102,102]
+ assert_raises(nx.exception.NetworkXError,
+ nx.directed_havel_hakimi_graph, din, dout)
+ # Test valid sequences
+ dout=[1, 1, 1, 1, 1, 2, 2, 2, 3, 4]
+ din=[2, 2, 2, 2, 2, 2, 2, 2, 0, 2]
+ G2 = nx.directed_havel_hakimi_graph(din, dout)
+ assert_true(din == list(G2.in_degree().values()))
+ assert_true(dout == list(G2.out_degree().values()))
+ # Test unequal sums
+ din=[2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
+ assert_raises(nx.exception.NetworkXError,
+ nx.directed_havel_hakimi_graph, din, dout)
+ # Test for negative values
+ din=[2, 2, 2, 2, 2, 2, 2, 2, 2, 2, -2]
+ assert_raises(nx.exception.NetworkXError,
+ nx.directed_havel_hakimi_graph, din, dout)
+
+def test_degree_sequence_tree():
+ z=[1, 1, 1, 1, 1, 2, 2, 2, 3, 4]
+ G=degree_sequence_tree(z)
+ assert_true(len(G.nodes())==len(z))
+ assert_true(len(G.edges())==sum(z)/2)
+
+ assert_raises(networkx.exception.NetworkXError,
+ degree_sequence_tree, z, create_using=DiGraph())
+
+ z=[1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 4]
+ assert_raises(networkx.exception.NetworkXError,
+ degree_sequence_tree, z)
+
+def test_random_degree_sequence_graph():
+ d=[1,2,2,3]
+ G = nx.random_degree_sequence_graph(d)
+ assert_equal(d, list(G.degree().values()))
+
+def test_random_degree_sequence_graph_raise():
+ z=[1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 4]
+ assert_raises(networkx.exception.NetworkXUnfeasible,
+ random_degree_sequence_graph, z)
+
+def test_random_degree_sequence_large():
+ G = nx.fast_gnp_random_graph(100,0.1)
+ d = G.degree().values()
+ G = nx.random_degree_sequence_graph(d, seed=0)
+ assert_equal(sorted(d), sorted(list(G.degree().values())))
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_directed.py b/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_directed.py
new file mode 100644
index 0000000..45b9ab1
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_directed.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+
+"""Generators - Directed Graphs
+----------------------------
+"""
+
+from nose.tools import *
+from networkx import *
+from networkx.generators.directed import *
+
+class TestGeneratorsDirected():
+ def test_smoke_test_random_graphs(self):
+ G=gn_graph(100)
+ G=gnr_graph(100,0.5)
+ G=gnc_graph(100)
+ G=scale_free_graph(100)
+
+ def test_create_using_keyword_arguments(self):
+ assert_raises(networkx.exception.NetworkXError,
+ gn_graph, 100, create_using=Graph())
+ assert_raises(networkx.exception.NetworkXError,
+ gnr_graph, 100, 0.5, create_using=Graph())
+ assert_raises(networkx.exception.NetworkXError,
+ gnc_graph, 100, create_using=Graph())
+ assert_raises(networkx.exception.NetworkXError,
+ scale_free_graph, 100, create_using=Graph())
+ G=gn_graph(100,seed=1)
+ MG=gn_graph(100,create_using=MultiDiGraph(),seed=1)
+ assert_equal(G.edges(), MG.edges())
+ G=gnr_graph(100,0.5,seed=1)
+ MG=gnr_graph(100,0.5,create_using=MultiDiGraph(),seed=1)
+ assert_equal(G.edges(), MG.edges())
+ G=gnc_graph(100,seed=1)
+ MG=gnc_graph(100,create_using=MultiDiGraph(),seed=1)
+ assert_equal(G.edges(), MG.edges())
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_ego.py b/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_ego.py
new file mode 100644
index 0000000..da15b60
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_ego.py
@@ -0,0 +1,42 @@
+#!/usr/bin/env python
+"""
+ego graph
+---------
+"""
+
+from nose.tools import assert_true, assert_equal
+import networkx as nx
+
+class TestGeneratorEgo():
+ def test_ego(self):
+ G=nx.star_graph(3)
+ H=nx.ego_graph(G,0)
+ assert_true(nx.is_isomorphic(G,H))
+ G.add_edge(1,11)
+ G.add_edge(2,22)
+ G.add_edge(3,33)
+ H=nx.ego_graph(G,0)
+ assert_true(nx.is_isomorphic(nx.star_graph(3),H))
+ G=nx.path_graph(3)
+ H=nx.ego_graph(G,0)
+ assert_equal(H.edges(), [(0, 1)])
+ H=nx.ego_graph(G,0,undirected=True)
+ assert_equal(H.edges(), [(0, 1)])
+ H=nx.ego_graph(G,0,center=False)
+ assert_equal(H.edges(), [])
+
+
+ def test_ego_distance(self):
+ G=nx.Graph()
+ G.add_edge(0,1,weight=2,distance=1)
+ G.add_edge(1,2,weight=2,distance=2)
+ G.add_edge(2,3,weight=2,distance=1)
+ assert_equal(sorted(nx.ego_graph(G,0,radius=3).nodes()),[0,1,2,3])
+ eg=nx.ego_graph(G,0,radius=3,distance='weight')
+ assert_equal(sorted(eg.nodes()),[0,1])
+ eg=nx.ego_graph(G,0,radius=3,distance='weight',undirected=True)
+ assert_equal(sorted(eg.nodes()),[0,1])
+ eg=nx.ego_graph(G,0,radius=3,distance='distance')
+ assert_equal(sorted(eg.nodes()),[0,1,2])
+
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_geometric.py b/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_geometric.py
new file mode 100644
index 0000000..5e29e25
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_geometric.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx as nx
+
+class TestGeneratorsGeometric():
+ def test_random_geometric_graph(self):
+ G=nx.random_geometric_graph(50,0.25)
+ assert_equal(len(G),50)
+
+ def test_geographical_threshold_graph(self):
+ G=nx.geographical_threshold_graph(50,100)
+ assert_equal(len(G),50)
+
+ def test_waxman_graph(self):
+ G=nx.waxman_graph(50,0.5,0.1)
+ assert_equal(len(G),50)
+ G=nx.waxman_graph(50,0.5,0.1,L=1)
+ assert_equal(len(G),50)
+
+ def test_navigable_small_world(self):
+ G = nx.navigable_small_world_graph(5,p=1,q=0)
+ gg = nx.grid_2d_graph(5,5).to_directed()
+ assert_true(nx.is_isomorphic(G,gg))
+
+ G = nx.navigable_small_world_graph(5,p=1,q=0,dim=3)
+ gg = nx.grid_graph([5,5,5]).to_directed()
+ assert_true(nx.is_isomorphic(G,gg))
+
+ G = nx.navigable_small_world_graph(5,p=1,q=0,dim=1)
+ gg = nx.grid_graph([5]).to_directed()
+ assert_true(nx.is_isomorphic(G,gg))
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_hybrid.py b/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_hybrid.py
new file mode 100644
index 0000000..88c7505
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_hybrid.py
@@ -0,0 +1,24 @@
+from nose.tools import *
+import networkx as nx
+
+def test_2d_grid_graph():
+ # FC article claims 2d grid graph of size n is (3,3)-connected
+ # and (5,9)-connected, but I don't think it is (5,9)-connected
+ G=nx.grid_2d_graph(8,8,periodic=True)
+ assert_true(nx.is_kl_connected(G,3,3))
+ assert_false(nx.is_kl_connected(G,5,9))
+ (H,graphOK)=nx.kl_connected_subgraph(G,5,9,same_as_graph=True)
+ assert_false(graphOK)
+
+def test_small_graph():
+ G=nx.Graph()
+ G.add_edge(1,2)
+ G.add_edge(1,3)
+ G.add_edge(2,3)
+ assert_true(nx.is_kl_connected(G,2,2))
+ H=nx.kl_connected_subgraph(G,2,2)
+ (H,graphOK)=nx.kl_connected_subgraph(G,2,2,
+ low_memory=True,
+ same_as_graph=True)
+ assert_true(graphOK)
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_intersection.py b/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_intersection.py
new file mode 100644
index 0000000..26bbcf6
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_intersection.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx as nx
+
+class TestIntersectionGraph():
+ def test_random_intersection_graph(self):
+ G=nx.uniform_random_intersection_graph(10,5,0.5)
+ assert_equal(len(G),10)
+
+ def test_k_random_intersection_graph(self):
+ G=nx.k_random_intersection_graph(10,5,2)
+ assert_equal(len(G),10)
+
+ def test_general_random_intersection_graph(self):
+ G=nx.general_random_intersection_graph(10,5,[0.1,0.2,0.2,0.1,0.1])
+ assert_equal(len(G),10)
+ assert_raises(ValueError, nx.general_random_intersection_graph,10,5,
+ [0.1,0.2,0.2,0.1])
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_line.py b/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_line.py
new file mode 100644
index 0000000..35b71be
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_line.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+
+"""line graph
+----------
+"""
+
+import networkx as nx
+from nose.tools import *
+
+
+class TestGeneratorLine():
+ def test_line(self):
+ G=nx.star_graph(5)
+ L=nx.line_graph(G)
+ assert_true(nx.is_isomorphic(L,nx.complete_graph(5)))
+ G=nx.path_graph(5)
+ L=nx.line_graph(G)
+ assert_true(nx.is_isomorphic(L,nx.path_graph(4)))
+ G=nx.cycle_graph(5)
+ L=nx.line_graph(G)
+ assert_true(nx.is_isomorphic(L,G))
+ G=nx.DiGraph()
+ G.add_edges_from([(0,1),(0,2),(0,3)])
+ L=nx.line_graph(G)
+ assert_equal(L.adj, {})
+ G=nx.DiGraph()
+ G.add_edges_from([(0,1),(1,2),(2,3)])
+ L=nx.line_graph(G)
+ assert_equal(sorted(L.edges()), [((0, 1), (1, 2)), ((1, 2), (2, 3))])
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_random_clustered.py b/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_random_clustered.py
new file mode 100644
index 0000000..f052f2c
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_random_clustered.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx
+
+class TestRandomClusteredGraph:
+
+ def test_valid(self):
+ node=[1,1,1,2,1,2,0,0]
+ tri=[0,0,0,0,0,1,1,1]
+ joint_degree_sequence=zip(node,tri)
+ G = networkx.random_clustered_graph(joint_degree_sequence)
+ assert_equal(G.number_of_nodes(),8)
+ assert_equal(G.number_of_edges(),7)
+
+ def test_valid2(self):
+ G = networkx.random_clustered_graph(\
+ [(1,2),(2,1),(1,1),(1,1),(1,1),(2,0)])
+ assert_equal(G.number_of_nodes(),6)
+ assert_equal(G.number_of_edges(),10)
+
+ def test_invalid1(self):
+ assert_raises((TypeError,networkx.NetworkXError),
+ networkx.random_clustered_graph,[[1,1],[2,1],[0,1]])
+
+ def test_invalid2(self):
+ assert_raises((TypeError,networkx.NetworkXError),
+ networkx.random_clustered_graph,[[1,1],[1,2],[0,1]])
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_random_graphs.py b/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_random_graphs.py
new file mode 100644
index 0000000..c49d74d
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_random_graphs.py
@@ -0,0 +1,129 @@
+#!/usr/bin/env python
+from nose.tools import *
+from networkx import *
+from networkx.generators.random_graphs import *
+
+class TestGeneratorsRandom():
+ def smoke_test_random_graph(self):
+ seed = 42
+ G=gnp_random_graph(100,0.25,seed)
+ G=binomial_graph(100,0.25,seed)
+ G=erdos_renyi_graph(100,0.25,seed)
+ G=fast_gnp_random_graph(100,0.25,seed)
+ G=gnm_random_graph(100,20,seed)
+ G=dense_gnm_random_graph(100,20,seed)
+
+ G=watts_strogatz_graph(10,2,0.25,seed)
+ assert_equal(len(G), 10)
+ assert_equal(G.number_of_edges(), 10)
+
+ G=connected_watts_strogatz_graph(10,2,0.1,seed)
+ assert_equal(len(G), 10)
+ assert_equal(G.number_of_edges(), 10)
+
+ G=watts_strogatz_graph(10,4,0.25,seed)
+ assert_equal(len(G), 10)
+ assert_equal(G.number_of_edges(), 20)
+
+ G=newman_watts_strogatz_graph(10,2,0.0,seed)
+ assert_equal(len(G), 10)
+ assert_equal(G.number_of_edges(), 10)
+
+ G=newman_watts_strogatz_graph(10,4,0.25,seed)
+ assert_equal(len(G), 10)
+ assert_true(G.number_of_edges() >= 20)
+
+ G=barabasi_albert_graph(100,1,seed)
+ G=barabasi_albert_graph(100,3,seed)
+ assert_equal(G.number_of_edges(),(97*3))
+
+ G=powerlaw_cluster_graph(100,1,1.0,seed)
+ G=powerlaw_cluster_graph(100,3,0.0,seed)
+ assert_equal(G.number_of_edges(),(97*3))
+
+ G=random_regular_graph(10,20,seed)
+
+ assert_raises(networkx.exception.NetworkXError,
+ random_regular_graph, 3, 21)
+
+ constructor=[(10,20,0.8),(20,40,0.8)]
+ G=random_shell_graph(constructor,seed)
+
+ G=nx.random_lobster(10,0.1,0.5,seed)
+
+ def test_gnp(self):
+ G=gnp_random_graph(10,0.1)
+ assert_equal(len(G),10)
+
+ G=gnp_random_graph(10,0.1,seed=42)
+ assert_equal(len(G),10)
+
+ G=gnp_random_graph(10,1.1)
+ assert_equal(len(G),10)
+ assert_equal(len(G.edges()),45)
+
+ G=gnp_random_graph(10,1.1,directed=True)
+ assert_equal(len(G),10)
+ assert_equal(len(G.edges()),90)
+
+ G=gnp_random_graph(10,-1.1)
+ assert_equal(len(G),10)
+ assert_equal(len(G.edges()),0)
+
+ G=binomial_graph(10,0.1)
+ assert_equal(len(G),10)
+
+ G=erdos_renyi_graph(10,0.1)
+ assert_equal(len(G),10)
+
+
+ def test_fast_gnp(self):
+ G=fast_gnp_random_graph(10,0.1)
+ assert_equal(len(G),10)
+
+ G=fast_gnp_random_graph(10,0.1,seed=42)
+ assert_equal(len(G),10)
+
+ G=fast_gnp_random_graph(10,1.1)
+ assert_equal(len(G),10)
+ assert_equal(len(G.edges()),45)
+
+ G=fast_gnp_random_graph(10,-1.1)
+ assert_equal(len(G),10)
+ assert_equal(len(G.edges()),0)
+
+ G=fast_gnp_random_graph(10,0.1,directed=True)
+ assert_true(G.is_directed())
+ assert_equal(len(G),10)
+
+
+ def test_gnm(self):
+ G=gnm_random_graph(10,3)
+ assert_equal(len(G),10)
+ assert_equal(len(G.edges()),3)
+
+ G=gnm_random_graph(10,3,seed=42)
+ assert_equal(len(G),10)
+ assert_equal(len(G.edges()),3)
+
+ G=gnm_random_graph(10,100)
+ assert_equal(len(G),10)
+ assert_equal(len(G.edges()),45)
+
+ G=gnm_random_graph(10,100,directed=True)
+ assert_equal(len(G),10)
+ assert_equal(len(G.edges()),90)
+
+ G=gnm_random_graph(10,-1.1)
+ assert_equal(len(G),10)
+ assert_equal(len(G.edges()),0)
+
+ def test_watts_strogatz_big_k(self):
+ assert_raises(networkx.exception.NetworkXError,
+ watts_strogatz_graph, 10, 10, 0.25)
+ assert_raises(networkx.exception.NetworkXError,
+ newman_watts_strogatz_graph, 10, 10, 0.25)
+ # an infinite loop used to occur when a node of degree n-1 needed
+ # rewiring; these calls verify it no longer happens
+ watts_strogatz_graph(10, 9, 0.25, seed=0)
+ newman_watts_strogatz_graph(10, 9, 0.5, seed=0)
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_small.py b/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_small.py
new file mode 100644
index 0000000..5228d3a
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_small.py
@@ -0,0 +1,181 @@
+#!/usr/bin/env python
+
+from nose.tools import *
+from networkx import *
+from networkx.algorithms.isomorphism.isomorph import graph_could_be_isomorphic
+is_isomorphic=graph_could_be_isomorphic
+
+"""Generators - Small
+=====================
+
+Some small graphs
+"""
+
+null=null_graph()
+
+class TestGeneratorsSmall():
+ def test_make_small_graph(self):
+ d=["adjacencylist","Bull Graph",5,[[2,3],[1,3,4],[1,2,5],[2],[3]]]
+ G=make_small_graph(d)
+ assert_true(is_isomorphic(G, bull_graph()))
+
+ def test__LCF_graph(self):
+ # If n<=0, then return the null_graph
+ G=LCF_graph(-10,[1,2],100)
+ assert_true(is_isomorphic(G,null))
+ G=LCF_graph(0,[1,2],3)
+ assert_true(is_isomorphic(G,null))
+ G=LCF_graph(0,[1,2],10)
+ assert_true(is_isomorphic(G,null))
+
+ # Test that LCF(n,[],0) == cycle_graph(n)
+ for a, b, c in [(5, [], 0), (10, [], 0), (5, [], 1), (10, [], 10)]:
+ G=LCF_graph(a, b, c)
+ assert_true(is_isomorphic(G,cycle_graph(a)))
+
+ # Generate the utility graph K_{3,3}
+ G=LCF_graph(6,[3,-3],3)
+ utility_graph=complete_bipartite_graph(3,3)
+ assert_true(is_isomorphic(G, utility_graph))
+
+ def test_properties_named_small_graphs(self):
+ G=bull_graph()
+ assert_equal(G.number_of_nodes(), 5)
+ assert_equal(G.number_of_edges(), 5)
+ assert_equal(sorted(G.degree().values()), [1, 1, 2, 3, 3])
+ assert_equal(diameter(G), 3)
+ assert_equal(radius(G), 2)
+
+ G=chvatal_graph()
+ assert_equal(G.number_of_nodes(), 12)
+ assert_equal(G.number_of_edges(), 24)
+ assert_equal(list(G.degree().values()), 12 * [4])
+ assert_equal(diameter(G), 2)
+ assert_equal(radius(G), 2)
+
+ G=cubical_graph()
+ assert_equal(G.number_of_nodes(), 8)
+ assert_equal(G.number_of_edges(), 12)
+ assert_equal(list(G.degree().values()), 8*[3])
+ assert_equal(diameter(G), 3)
+ assert_equal(radius(G), 3)
+
+ G=desargues_graph()
+ assert_equal(G.number_of_nodes(), 20)
+ assert_equal(G.number_of_edges(), 30)
+ assert_equal(list(G.degree().values()), 20*[3])
+
+ G=diamond_graph()
+ assert_equal(G.number_of_nodes(), 4)
+ assert_equal(sorted(G.degree().values()), [2, 2, 3, 3])
+ assert_equal(diameter(G), 2)
+ assert_equal(radius(G), 1)
+
+ G=dodecahedral_graph()
+ assert_equal(G.number_of_nodes(), 20)
+ assert_equal(G.number_of_edges(), 30)
+ assert_equal(list(G.degree().values()), 20*[3])
+ assert_equal(diameter(G), 5)
+ assert_equal(radius(G), 5)
+
+ G=frucht_graph()
+ assert_equal(G.number_of_nodes(), 12)
+ assert_equal(G.number_of_edges(), 18)
+ assert_equal(list(G.degree().values()), 12*[3])
+ assert_equal(diameter(G), 4)
+ assert_equal(radius(G), 3)
+
+ G=heawood_graph()
+ assert_equal(G.number_of_nodes(), 14)
+ assert_equal(G.number_of_edges(), 21)
+ assert_equal(list(G.degree().values()), 14*[3])
+ assert_equal(diameter(G), 3)
+ assert_equal(radius(G), 3)
+
+ G=house_graph()
+ assert_equal(G.number_of_nodes(), 5)
+ assert_equal(G.number_of_edges(), 6)
+ assert_equal(sorted(G.degree().values()), [2, 2, 2, 3, 3])
+ assert_equal(diameter(G), 2)
+ assert_equal(radius(G), 2)
+
+ G=house_x_graph()
+ assert_equal(G.number_of_nodes(), 5)
+ assert_equal(G.number_of_edges(), 8)
+ assert_equal(sorted(G.degree().values()), [2, 3, 3, 4, 4])
+ assert_equal(diameter(G), 2)
+ assert_equal(radius(G), 1)
+
+ G=icosahedral_graph()
+ assert_equal(G.number_of_nodes(), 12)
+ assert_equal(G.number_of_edges(), 30)
+ assert_equal(list(G.degree().values()),
+ [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5])
+ assert_equal(diameter(G), 3)
+ assert_equal(radius(G), 3)
+
+ G=krackhardt_kite_graph()
+ assert_equal(G.number_of_nodes(), 10)
+ assert_equal(G.number_of_edges(), 18)
+ assert_equal(sorted(G.degree().values()),
+ [1, 2, 3, 3, 3, 4, 4, 5, 5, 6])
+
+ G=moebius_kantor_graph()
+ assert_equal(G.number_of_nodes(), 16)
+ assert_equal(G.number_of_edges(), 24)
+ assert_equal(list(G.degree().values()), 16*[3])
+ assert_equal(diameter(G), 4)
+
+ G=octahedral_graph()
+ assert_equal(G.number_of_nodes(), 6)
+ assert_equal(G.number_of_edges(), 12)
+ assert_equal(list(G.degree().values()), 6*[4])
+ assert_equal(diameter(G), 2)
+ assert_equal(radius(G), 2)
+
+ G=pappus_graph()
+ assert_equal(G.number_of_nodes(), 18)
+ assert_equal(G.number_of_edges(), 27)
+ assert_equal(list(G.degree().values()), 18*[3])
+ assert_equal(diameter(G), 4)
+
+ G=petersen_graph()
+ assert_equal(G.number_of_nodes(), 10)
+ assert_equal(G.number_of_edges(), 15)
+ assert_equal(list(G.degree().values()), 10*[3])
+ assert_equal(diameter(G), 2)
+ assert_equal(radius(G), 2)
+
+ G=sedgewick_maze_graph()
+ assert_equal(G.number_of_nodes(), 8)
+ assert_equal(G.number_of_edges(), 10)
+ assert_equal(sorted(G.degree().values()), [1, 2, 2, 2, 3, 3, 3, 4])
+
+ G=tetrahedral_graph()
+ assert_equal(G.number_of_nodes(), 4)
+ assert_equal(G.number_of_edges(), 6)
+ assert_equal(list(G.degree().values()), [3, 3, 3, 3])
+ assert_equal(diameter(G), 1)
+ assert_equal(radius(G), 1)
+
+ G=truncated_cube_graph()
+ assert_equal(G.number_of_nodes(), 24)
+ assert_equal(G.number_of_edges(), 36)
+ assert_equal(list(G.degree().values()), 24*[3])
+
+ G=truncated_tetrahedron_graph()
+ assert_equal(G.number_of_nodes(), 12)
+ assert_equal(G.number_of_edges(), 18)
+ assert_equal(list(G.degree().values()), 12*[3])
+
+ G=tutte_graph()
+ assert_equal(G.number_of_nodes(), 46)
+ assert_equal(G.number_of_edges(), 69)
+ assert_equal(list(G.degree().values()), 46*[3])
+
+ # Test create_using with directed or multigraphs on small graphs
+ assert_raises(networkx.exception.NetworkXError, tutte_graph,
+ create_using=DiGraph())
+ MG=tutte_graph(create_using=MultiGraph())
+ assert_equal(MG.edges(), G.edges())
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_stochastic.py b/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_stochastic.py
new file mode 100644
index 0000000..15c6fef
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_stochastic.py
@@ -0,0 +1,33 @@
+from nose.tools import assert_true, assert_equal, raises
+import networkx as nx
+
+def test_stochastic():
+ G=nx.DiGraph()
+ G.add_edge(0,1)
+ G.add_edge(0,2)
+ S=nx.stochastic_graph(G)
+ assert_true(nx.is_isomorphic(G,S))
+ assert_equal(sorted(S.edges(data=True)),
+ [(0, 1, {'weight': 0.5}),
+ (0, 2, {'weight': 0.5})])
+ S=nx.stochastic_graph(G,copy=True)
+ assert_equal(sorted(S.edges(data=True)),
+ [(0, 1, {'weight': 0.5}),
+ (0, 2, {'weight': 0.5})])
+
+def test_stochastic_ints():
+ G=nx.DiGraph()
+ G.add_edge(0,1,weight=1)
+ G.add_edge(0,2,weight=1)
+ S=nx.stochastic_graph(G)
+ assert_equal(sorted(S.edges(data=True)),
+ [(0, 1, {'weight': 0.5}),
+ (0, 2, {'weight': 0.5})])
+
+@raises(nx.NetworkXError)
+def test_stochastic_graph_input():
+ S = nx.stochastic_graph(nx.Graph())
+
+@raises(nx.NetworkXError)
+def test_stochastic_multigraph_input():
+ S = nx.stochastic_graph(nx.MultiGraph())
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_threshold.py b/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_threshold.py
new file mode 100644
index 0000000..198d7cc
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/generators/tests/test_threshold.py
@@ -0,0 +1,183 @@
+#!/usr/bin/env python
+
+"""Threshold Graphs
+================
+"""
+
+from nose.tools import *
+from nose import SkipTest
+from nose.plugins.attrib import attr
+import networkx as nx
+import networkx.generators.threshold as nxt
+from networkx.algorithms.isomorphism.isomorph import graph_could_be_isomorphic
+
+cnlti = nx.convert_node_labels_to_integers
+
+
+class TestGeneratorThreshold():
+ def test_threshold_sequence_graph_test(self):
+ G=nx.star_graph(10)
+ assert_true(nxt.is_threshold_graph(G))
+ assert_true(nxt.is_threshold_sequence(list(G.degree().values())))
+
+ G=nx.complete_graph(10)
+ assert_true(nxt.is_threshold_graph(G))
+ assert_true(nxt.is_threshold_sequence(list(G.degree().values())))
+
+ deg=[3,2,2,1,1,1]
+ assert_false(nxt.is_threshold_sequence(deg))
+
+ deg=[3,2,2,1]
+ assert_true(nxt.is_threshold_sequence(deg))
+
+ G=nx.generators.havel_hakimi_graph(deg)
+ assert_true(nxt.is_threshold_graph(G))
+
+ def test_creation_sequences(self):
+ deg=[3,2,2,1]
+ G=nx.generators.havel_hakimi_graph(deg)
+ cs0=nxt.creation_sequence(deg)
+ H0=nxt.threshold_graph(cs0)
+ assert_equal(''.join(cs0), 'ddid')
+
+ cs1=nxt.creation_sequence(deg, with_labels=True)
+ H1=nxt.threshold_graph(cs1)
+ assert_equal(cs1, [(1, 'd'), (2, 'd'), (3, 'i'), (0, 'd')])
+
+ cs2=nxt.creation_sequence(deg, compact=True)
+ H2=nxt.threshold_graph(cs2)
+ assert_equal(cs2, [2, 1, 1])
+ assert_equal(''.join(nxt.uncompact(cs2)), 'ddid')
+ assert_true(graph_could_be_isomorphic(H0,G))
+ assert_true(graph_could_be_isomorphic(H0,H1))
+ assert_true(graph_could_be_isomorphic(H0,H2))
+
+ def test_shortest_path(self):
+ deg=[3,2,2,1]
+ G=nx.generators.havel_hakimi_graph(deg)
+ cs1=nxt.creation_sequence(deg, with_labels=True)
+ for n, m in [(3, 0), (0, 3), (0, 2), (0, 1), (1, 3),
+ (3, 1), (1, 2), (2, 3)]:
+ assert_equal(nxt.shortest_path(cs1,n,m),
+ nx.shortest_path(G, n, m))
+
+ spl=nxt.shortest_path_length(cs1,3)
+ spl2=nxt.shortest_path_length([ t for v,t in cs1],2)
+ assert_equal(spl, spl2)
+
+ spld={}
+ for j,pl in enumerate(spl):
+ n=cs1[j][0]
+ spld[n]=pl
+ assert_equal(spld, nx.single_source_shortest_path_length(G, 3))
+
+ def test_weights_thresholds(self):
+ wseq=[3,4,3,3,5,6,5,4,5,6]
+ cs=nxt.weights_to_creation_sequence(wseq,threshold=10)
+ wseq=nxt.creation_sequence_to_weights(cs)
+ cs2=nxt.weights_to_creation_sequence(wseq)
+ assert_equal(cs, cs2)
+
+ wseq=nxt.creation_sequence_to_weights(nxt.uncompact([3,1,2,3,3,2,3]))
+ assert_equal(wseq,
+ [s*0.125 for s in [4,4,4,3,5,5,2,2,2,6,6,6,1,1,7,7,7]])
+
+ wseq=nxt.creation_sequence_to_weights([3,1,2,3,3,2,3])
+ assert_equal(wseq,
+ [s*0.125 for s in [4,4,4,3,5,5,2,2,2,6,6,6,1,1,7,7,7]])
+
+ wseq=nxt.creation_sequence_to_weights(list(enumerate('ddidiiidididi')))
+ assert_equal(wseq,
+ [s*0.1 for s in [5,5,4,6,3,3,3,7,2,8,1,9,0]])
+
+ wseq=nxt.creation_sequence_to_weights('ddidiiidididi')
+ assert_equal(wseq,
+ [s*0.1 for s in [5,5,4,6,3,3,3,7,2,8,1,9,0]])
+
+ wseq=nxt.creation_sequence_to_weights('ddidiiidididid')
+ ws=[s/float(12) for s in [6,6,5,7,4,4,4,8,3,9,2,10,1,11]]
+ assert_true(sum([abs(c-d) for c,d in zip(wseq,ws)]) < 1e-14)
+
+ def test_finding_routines(self):
+ G=nx.Graph({1:[2],2:[3],3:[4],4:[5],5:[6]})
+ G.add_edge(2,4)
+ G.add_edge(2,5)
+ G.add_edge(2,7)
+ G.add_edge(3,6)
+ G.add_edge(4,6)
+
+ # Alternating 4 cycle
+ assert_equal(nxt.find_alternating_4_cycle(G), [1, 2, 3, 6])
+
+ # Threshold graph
+ TG=nxt.find_threshold_graph(G)
+ assert_true(nxt.is_threshold_graph(TG))
+ assert_equal(sorted(TG.nodes()), [1, 2, 3, 4, 5, 7])
+
+ cs=nxt.creation_sequence(TG.degree(),with_labels=True)
+ assert_equal(nxt.find_creation_sequence(G), cs)
+
+ def test_fast_versions_properties_threshold_graphs(self):
+ cs='ddiiddid'
+ G=nxt.threshold_graph(cs)
+ assert_equal(nxt.density('ddiiddid'), nx.density(G))
+ assert_equal(sorted(nxt.degree_sequence(cs)),
+ sorted(G.degree().values()))
+
+ ts=nxt.triangle_sequence(cs)
+ assert_equal(ts, list(nx.triangles(G).values()))
+ assert_equal(sum(ts) // 3, nxt.triangles(cs))
+
+ c1=nxt.cluster_sequence(cs)
+ c2=list(nx.clustering(G).values())
+ assert_almost_equal(sum([abs(c-d) for c,d in zip(c1,c2)]), 0)
+
+ b1=nx.betweenness_centrality(G).values()
+ b2=nxt.betweenness_sequence(cs)
+ assert_true(sum([abs(c-d) for c,d in zip(b1,b2)]) < 1e-14)
+
+ assert_equal(nxt.eigenvalues(cs), [0, 1, 3, 3, 5, 7, 7, 8])
+
+ # Degree Correlation
+ assert_true(abs(nxt.degree_correlation(cs)+0.593038821954) < 1e-12)
+ assert_equal(nxt.degree_correlation('diiiddi'), -0.8)
+ assert_equal(nxt.degree_correlation('did'), -1.0)
+ assert_equal(nxt.degree_correlation('ddd'), 1.0)
+ assert_equal(nxt.eigenvalues('dddiii'), [0, 0, 0, 0, 3, 3])
+ assert_equal(nxt.eigenvalues('dddiiid'), [0, 1, 1, 1, 4, 4, 7])
+
+ def test_tg_creation_routines(self):
+ s=nxt.left_d_threshold_sequence(5,7)
+ s=nxt.right_d_threshold_sequence(5,7)
+ s1=nxt.swap_d(s,1.0,1.0)
+
+
+ @attr('numpy')
+ def test_eigenvectors(self):
+ try:
+ import numpy as N
+ eigenval=N.linalg.eigvals
+ except ImportError:
+ raise SkipTest('NumPy not available.')
+
+ cs='ddiiddid'
+ G=nxt.threshold_graph(cs)
+ (tgeval,tgevec)=nxt.eigenvectors(cs)
+ dot=N.dot
+ assert_equal([ abs(dot(lv,lv)-1.0)<1e-9 for lv in tgevec ], [True]*8)
+ lapl=nx.laplacian_matrix(G)
+# tgev=[ dot(lv,dot(lapl,lv)) for lv in tgevec ]
+# assert_true(sum([abs(c-d) for c,d in zip(tgev,tgeval)]) < 1e-9)
+# tgev.sort()
+# lev=list(eigenval(lapl))
+# lev.sort()
+# assert_true(sum([abs(c-d) for c,d in zip(tgev,lev)]) < 1e-9)
+
+ def test_create_using(self):
+ cs='ddiiddid'
+ G=nxt.threshold_graph(cs)
+ assert_raises(nx.exception.NetworkXError,
+ nxt.threshold_graph, cs, create_using=nx.DiGraph())
+ MG=nxt.threshold_graph(cs,create_using=nx.MultiGraph())
+ assert_equal(MG.edges(), G.edges())
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/generators/threshold.py b/lib/python2.7/site-packages/setoolsgui/networkx/generators/threshold.py
new file mode 100644
index 0000000..68a565e
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/generators/threshold.py
@@ -0,0 +1,906 @@
+"""
+Threshold Graphs - Creation, manipulation and identification.
+"""
+__author__ = """Aric Hagberg (hagberg@lanl.gov)\nPieter Swart (swart@lanl.gov)\nDan Schult (dschult@colgate.edu)"""
+# Copyright (C) 2004-2008 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+#
+
+__all__=[]
+
+import random # for swap_d
+from math import sqrt
+import networkx
+
+def is_threshold_graph(G):
+ """
+ Returns True if G is a threshold graph.
+ """
+ return is_threshold_sequence(list(G.degree().values()))
+
+def is_threshold_sequence(degree_sequence):
+ """
+ Returns True if the sequence is a threshold degree sequence.
+
+ Uses the property that a threshold graph must be constructed by
+ adding either dominating or isolated nodes. Thus, it can be
+ deconstructed iteratively by removing a node of degree zero or a
+ node that connects to the remaining nodes. If this deconstruction
+ fails then the sequence is not a threshold sequence.
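+
+ Illustrative sketch (values match the accompanying test suite):
+
+ >>> is_threshold_sequence([3, 2, 2, 1])
+ True
+ >>> is_threshold_sequence([3, 2, 2, 1, 1, 1])
+ False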
+ """
+ ds=degree_sequence[:] # get a copy so we don't destroy original
+ ds.sort()
+ while ds:
+ if ds[0]==0: # if isolated node
+ ds.pop(0) # remove it
+ continue
+ if ds[-1]!=len(ds)-1: # is the largest degree node dominating?
+ return False # no, not a threshold degree sequence
+ ds.pop() # yes, largest is the dominating node
+ ds=[ d-1 for d in ds ] # remove it and decrement all degrees
+ return True
+
+
+def creation_sequence(degree_sequence,with_labels=False,compact=False):
+ """
+ Determines the creation sequence for the given threshold degree sequence.
+
+ The creation sequence is a list of single characters 'd'
+ or 'i': 'd' for dominating or 'i' for isolated vertices.
+ Dominating vertices are connected to all vertices present when it
+ is added. The first node added is by convention 'd'.
+ This list can be converted to a string if desired using "".join(cs)
+
+ If with_labels==True:
+ Returns a list of 2-tuples containing the vertex number
+ and a character 'd' or 'i' which describes the type of vertex.
+
+ If compact==True:
+ Returns the creation sequence in a compact form that is the number
+ of 'i's and 'd's alternating.
+ Examples:
+ [1,2,2,3] represents d,i,i,d,d,i,i,i
+ [3,1,2] represents d,d,d,i,d,d
+
+ Notice that the first number is the first vertex to be used for
+ construction and so is always 'd'.
+
+ with_labels and compact cannot both be True.
+
+ Returns None if the sequence is not a threshold sequence
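+
+ Illustrative sketch (degree sequence taken from the accompanying
+ test suite):
+
+ >>> creation_sequence([3, 2, 2, 1])
+ ['d', 'd', 'i', 'd']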
+ """
+ if with_labels and compact:
+ raise ValueError("compact sequences cannot be labeled")
+
+ # make an indexed copy
+ if isinstance(degree_sequence,dict): # labeled degree sequence
+ ds = [ [degree,label] for (label,degree) in degree_sequence.items() ]
+ else:
+ ds=[ [d,i] for i,d in enumerate(degree_sequence) ]
+ ds.sort()
+ cs=[] # creation sequence
+ while ds:
+ if ds[0][0]==0: # isolated node
+ (d,v)=ds.pop(0)
+ if len(ds)>0: # make sure we start with a d
+ cs.insert(0,(v,'i'))
+ else:
+ cs.insert(0,(v,'d'))
+ continue
+ if ds[-1][0]!=len(ds)-1: # Not dominating node
+ return None # not a threshold degree sequence
+ (d,v)=ds.pop()
+ cs.insert(0,(v,'d'))
+ ds=[ [d[0]-1,d[1]] for d in ds ] # decrement due to removing node
+
+ if with_labels: return cs
+ if compact: return make_compact(cs)
+ return [ v[1] for v in cs ] # not labeled
+
+def make_compact(creation_sequence):
+ """
+ Returns the creation sequence in a compact form
+ that is the number of 'i's and 'd's alternating.
+ Examples:
+ [1,2,2,3] represents d,i,i,d,d,i,i,i.
+ [3,1,2] represents d,d,d,i,d,d.
+ Notice that the first number is the first vertex
+ to be used for construction and so is always 'd'.
+
+ Labeled creation sequences lose their labels in the
+ compact representation.
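+
+ A small hand-checked sketch, consistent with the examples above:
+
+ >>> make_compact(['d', 'd', 'i', 'd'])
+ [2, 1, 1]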
+ """
+ first=creation_sequence[0]
+ if isinstance(first,str): # creation sequence
+ cs = creation_sequence[:]
+ elif isinstance(first,tuple): # labeled creation sequence
+ cs = [ s[1] for s in creation_sequence ]
+ elif isinstance(first,int): # compact creation sequence
+ return creation_sequence
+ else:
+ raise TypeError("Not a valid creation sequence type")
+
+ ccs=[]
+ count=1 # count the run lengths of d's or i's.
+ for i in range(1,len(cs)):
+ if cs[i]==cs[i-1]:
+ count+=1
+ else:
+ ccs.append(count)
+ count=1
+ ccs.append(count) # don't forget the last one
+ return ccs
+
+def uncompact(creation_sequence):
+ """
+ Converts a compact creation sequence for a threshold
+ graph to a standard creation sequence (unlabeled).
+ If the creation_sequence is already standard, return it.
+ See creation_sequence.
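+
+ Hand-checked sketch (the inverse of the make_compact example):
+
+ >>> uncompact([2, 1, 1])
+ ['d', 'd', 'i', 'd']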
+ """
+ first=creation_sequence[0]
+ if isinstance(first,str): # creation sequence
+ return creation_sequence
+ elif isinstance(first,tuple): # labeled creation sequence
+ return creation_sequence
+ elif isinstance(first,int): # compact creation sequence
+ ccscopy=creation_sequence[:]
+ else:
+ raise TypeError("Not a valid creation sequence type")
+ cs = []
+ while ccscopy:
+ cs.extend(ccscopy.pop(0)*['d'])
+ if ccscopy:
+ cs.extend(ccscopy.pop(0)*['i'])
+ return cs
+
+def creation_sequence_to_weights(creation_sequence):
+ """
+ Returns a list of node weights which create the threshold
+ graph designated by the creation sequence. The weights
+ are scaled so that the threshold is 1.0. The order of the
+ nodes is the same as that in the creation sequence.
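+
+ Hand-worked sketch: for 'ddi' the two dominating nodes each get
+ weight 0.5, so their sum reaches the threshold of 1.0, while the
+ isolated node gets weight 0:
+
+ >>> creation_sequence_to_weights('ddi')
+ [0.5, 0.5, 0.0]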
+ """
+ # Turn input sequence into a labeled creation sequence
+ first=creation_sequence[0]
+ if isinstance(first,str): # creation sequence
+ if isinstance(creation_sequence,list):
+ wseq = creation_sequence[:]
+ else:
+ wseq = list(creation_sequence) # string like 'ddidid'
+ elif isinstance(first,tuple): # labeled creation sequence
+ wseq = [ v[1] for v in creation_sequence]
+ elif isinstance(first,int): # compact creation sequence
+ wseq = uncompact(creation_sequence)
+ else:
+ raise TypeError("Not a valid creation sequence type")
+ # pass through twice--first backwards
+ wseq.reverse()
+ w=0
+ prev='i'
+ for j,s in enumerate(wseq):
+ if s=='i':
+ wseq[j]=w
+ prev=s
+ elif prev=='i':
+ prev=s
+ w+=1
+ wseq.reverse() # now pass through forwards
+ for j,s in enumerate(wseq):
+ if s=='d':
+ wseq[j]=w
+ prev=s
+ elif prev=='d':
+ prev=s
+ w+=1
+ # Now scale weights
+ if prev=='d': w+=1
+ wscale=1./float(w)
+ return [ ww*wscale for ww in wseq]
+ #return wseq
+
+def weights_to_creation_sequence(weights,threshold=1,with_labels=False,compact=False):
+ """
+ Returns a creation sequence for a threshold graph
+ determined by the weights and threshold given as input.
+ If the sum of two node weights is greater than the
+ threshold value, an edge is created between these nodes.
+
+ The creation sequence is a list of single characters 'd'
+ or 'i': 'd' for dominating or 'i' for isolated vertices.
+ Dominating vertices are connected to all vertices present
+ when it is added. The first node added is by convention 'd'.
+
+ If with_labels==True:
+ Returns a list of 2-tuples containing the vertex number
+ and a character 'd' or 'i' which describes the type of vertex.
+
+ If compact==True:
+ Returns the creation sequence in a compact form that is the number
+ of 'i's and 'd's alternating.
+ Examples:
+ [1,2,2,3] represents d,i,i,d,d,i,i,i
+ [3,1,2] represents d,d,d,i,d,d
+
+ Notice that the first number is the first vertex to be used for
+ construction and so is always 'd'.
+
+ with_labels and compact cannot both be True.
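+
+ Hand-worked sketch (the round trip of the sketch in
+ creation_sequence_to_weights):
+
+ >>> weights_to_creation_sequence([0.5, 0.5, 0.0])
+ ['d', 'd', 'i']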
+ """
+ if with_labels and compact:
+ raise ValueError("compact sequences cannot be labeled")
+
+ # make an indexed copy
+ if isinstance(weights,dict): # labeled weights
+ wseq = [ [w,label] for (label,w) in weights.items() ]
+ else:
+ wseq = [ [w,i] for i,w in enumerate(weights) ]
+ wseq.sort()
+ cs=[] # creation sequence
+ cutoff=threshold-wseq[-1][0]
+ while wseq:
+ if wseq[0][0]<cutoff: # isolated node
+ (w,label)=wseq.pop(0)
+ cs.append((label,'i'))
+ else:
+ (w,label)=wseq.pop()
+ cs.append((label,'d'))
+ cutoff=threshold-wseq[-1][0]
+ if len(wseq)==1: # make sure we start with a d
+ (w,label)=wseq.pop()
+ cs.append((label,'d'))
+ # put in correct order
+ cs.reverse()
+
+ if with_labels: return cs
+ if compact: return make_compact(cs)
+ return [ v[1] for v in cs ] # not labeled
+
+
+# Manipulating NetworkX.Graphs in context of threshold graphs
+def threshold_graph(creation_sequence, create_using=None):
+ """
+ Create a threshold graph from the creation sequence or compact
+ creation_sequence.
+
+ The input sequence can be a
+
+ creation sequence (e.g. ['d','i','d','d','d','i'])
+ labeled creation sequence (e.g. [(0,'d'),(2,'d'),(1,'i')])
+ compact creation sequence (e.g. [2,1,1,2,0])
+
+ Use cs=creation_sequence(degree_sequence,with_labels=True)
+ to convert a degree sequence to a creation sequence.
+
+ Returns None if the sequence is not valid
+ """
+ # Turn input sequence into a labeled creation sequence
+ first=creation_sequence[0]
+ if isinstance(first,str): # creation sequence
+ ci = list(enumerate(creation_sequence))
+ elif isinstance(first,tuple): # labeled creation sequence
+ ci = creation_sequence[:]
+ elif isinstance(first,int): # compact creation sequence
+ cs = uncompact(creation_sequence)
+ ci = list(enumerate(cs))
+ else:
+ print("not a valid creation sequence type")
+ return None
+
+ if create_using is None:
+ G = networkx.Graph()
+ elif create_using.is_directed():
+ raise networkx.NetworkXError("Directed Graph not supported")
+ else:
+ G = create_using
+ G.clear()
+
+ G.name="Threshold Graph"
+
+ # add nodes and edges
+ # if type is 'i' just add the node
+ # if type is 'd' connect to all existing nodes
+ while ci:
+ (v,node_type)=ci.pop(0)
+ if node_type=='d': # dominating type, connect to all existing nodes
+ for u in G.nodes():
+ G.add_edge(v,u)
+ G.add_node(v)
+ return G
+
+
+
+def find_alternating_4_cycle(G):
+ """
+ Returns False if there aren't any alternating 4 cycles.
+ Otherwise returns the cycle as [a,b,c,d] where (a,b)
+ and (c,d) are edges and (a,c) and (b,d) are not.
+ """
+ for (u,v) in G.edges():
+ for w in G.nodes():
+ if not G.has_edge(u,w) and u!=w:
+ for x in G.neighbors(w):
+ if not G.has_edge(v,x) and v!=x:
+ return [u,v,w,x]
+ return False
+
+
+
+def find_threshold_graph(G, create_using=None):
+ """
+ Return a threshold subgraph that is close to largest in G.
+ The threshold graph will contain the largest degree node in G.
+
+ """
+ return threshold_graph(find_creation_sequence(G),create_using)
+
+
+def find_creation_sequence(G):
+ """
+ Find a threshold subgraph that is close to largest in G.
+ Returns the labeled creation sequence of that threshold graph.
+ """
+ cs=[]
+ # get a local pointer to the working part of the graph
+ H=G
+ while H.order()>0:
+ # get new degree sequence on subgraph
+ dsdict=H.degree()
+ ds=[ [d,v] for v,d in dsdict.items() ]
+ ds.sort()
+ # Update threshold graph nodes
+ if ds[-1][0]==0: # all are isolated
+ cs.extend( zip( dsdict, ['i']*(len(ds)-1)+['d']) )
+ break # Done!
+ # pull off isolated nodes
+ while ds[0][0]==0:
+ (d,iso)=ds.pop(0)
+ cs.append((iso,'i'))
+ # find new biggest node
+ (d,bigv)=ds.pop()
+ # add edges of star to t_g
+ cs.append((bigv,'d'))
+ # form subgraph of neighbors of big node
+ H=H.subgraph(H.neighbors(bigv))
+ cs.reverse()
+ return cs
+
+
+
+### Properties of Threshold Graphs
+def triangles(creation_sequence):
+ """
+ Compute number of triangles in the threshold graph with the
+ given creation sequence.
+ """
+ # shortcut algorithm that doesn't require computing number
+ # of triangles at each node.
+ cs=creation_sequence # alias
+ dr=cs.count("d") # number of d's in sequence
+ ntri=dr*(dr-1)*(dr-2)/6 # number of triangles in the clique of dr d's
+ # now add dr choose 2 triangles for every 'i' in sequence where
+ # dr is the number of d's to the right of the current i
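+ # hand-checked sketch: for cs='ddid' the three d's form one triangle
+ # and the lone 'i' has a single d to its right, contributing
+ # C(1,2)=0, so triangles('ddid') returns 1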
+ for i,typ in enumerate(cs):
+ if typ=="i":
+ ntri+=dr*(dr-1)/2
+ else:
+ dr-=1
+ return ntri
+
+
+def triangle_sequence(creation_sequence):
+ """
+ Return triangle sequence for the given threshold graph creation sequence.
+
+ """
+ cs=creation_sequence
+ seq=[]
+ dr=cs.count("d") # number of d's to the right of the current pos
+ dcur=(dr-1)*(dr-2) // 2 # number of triangles through a node of clique dr
+ irun=0 # number of i's in the last run
+ drun=0 # number of d's in the last run
+ for i,sym in enumerate(cs):
+ if sym=="d":
+ drun+=1
+ tri=dcur+(dr-1)*irun # new triangles at this d
+ else: # cs[i]="i":
+ if prevsym=="d": # new string of i's
+ dcur+=(dr-1)*irun # accumulate shared shortest paths
+ irun=0 # reset i run counter
+ dr-=drun # reduce number of d's to right
+ drun=0 # reset d run counter
+ irun+=1
+ tri=dr*(dr-1) // 2 # new triangles at this i
+ seq.append(tri)
+ prevsym=sym
+ return seq
+
+def cluster_sequence(creation_sequence):
+ """
+ Return cluster sequence for the given threshold graph creation sequence.
+ """
+ triseq=triangle_sequence(creation_sequence)
+ degseq=degree_sequence(creation_sequence)
+ cseq=[]
+ for i,deg in enumerate(degseq):
+ tri=triseq[i]
+ if deg <= 1: # isolated vertex or single pair gets cc 0
+ cseq.append(0)
+ continue
+ max_size=(deg*(deg-1)) // 2
+ cseq.append(float(tri)/float(max_size))
+ return cseq
+
+
+def degree_sequence(creation_sequence):
+ """
+ Return degree sequence for the threshold graph with the given
+ creation sequence.
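+
+ Hand-checked sketch: in 'ddid' the last d is adjacent to all three
+ earlier nodes, and the i is adjacent only to that d:
+
+ >>> degree_sequence('ddid')
+ [2, 2, 1, 3]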
+ """
+ cs=creation_sequence # alias
+ seq=[]
+ rd=cs.count("d") # number of d to the right
+ for i,sym in enumerate(cs):
+ if sym=="d":
+ rd-=1
+ seq.append(rd+i)
+ else:
+ seq.append(rd)
+ return seq
+
+def density(creation_sequence):
+ """
+ Return the density of the graph with this creation_sequence.
+ The density is the fraction of possible edges present.
+ """
+ N=len(creation_sequence)
+ two_size=sum(degree_sequence(creation_sequence))
+ two_possible=N*(N-1)
+ den=two_size/float(two_possible)
+ return den
+
+def degree_correlation(creation_sequence):
+ """
+ Return the degree-degree correlation over all edges.
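+
+ For instance (value taken from the accompanying test suite):
+
+ >>> degree_correlation('did')
+ -1.0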
+ """
+ cs=creation_sequence
+ s1=0 # deg_i*deg_j
+ s2=0 # deg_i^2+deg_j^2
+ s3=0 # deg_i+deg_j
+ m=0 # number of edges
+ rd=cs.count("d") # number of d nodes to the right
+ rdi=[ i for i,sym in enumerate(cs) if sym=="d"] # index of "d"s
+ ds=degree_sequence(cs)
+ for i,sym in enumerate(cs):
+ if sym=="d":
+ if i!=rdi[0]:
+ print("Logic error in degree_correlation",i,rdi)
+ raise ValueError
+ rdi.pop(0)
+ degi=ds[i]
+ for dj in rdi:
+ degj=ds[dj]
+ s1+=degj*degi
+ s2+=degi**2+degj**2
+ s3+=degi+degj
+ m+=1
+ denom=(2*m*s2-s3*s3)
+ numer=(4*m*s1-s3*s3)
+ if denom==0:
+ if numer==0:
+ return 1
+ raise ValueError("Zero Denominator but Numerator is %s"%numer)
+ return numer/float(denom)
+
+
+def shortest_path(creation_sequence,u,v):
+ """
+ Find the shortest path between u and v in a
+ threshold graph G with the given creation_sequence.
+
+ For an unlabeled creation_sequence, the vertices
+ u and v must be integers in (0,len(sequence)) referring
+ to the position of the desired vertices in the sequence.
+
+ For a labeled creation_sequence, u and v are labels of vertices.
+
+ Use cs=creation_sequence(degree_sequence,with_labels=True)
+ to convert a degree sequence to a creation sequence.
+
+ Returns a list of vertices from u to v.
+ Example: if they are neighbors, it returns [u,v]
+ """
+ # Turn input sequence into a labeled creation sequence
+ first=creation_sequence[0]
+ if isinstance(first,str): # creation sequence
+ cs = [(i,creation_sequence[i]) for i in range(len(creation_sequence))]
+ elif isinstance(first,tuple): # labeled creation sequence
+ cs = creation_sequence[:]
+ elif isinstance(first,int): # compact creation sequence
+ ci = uncompact(creation_sequence)
+ cs = [(i,ci[i]) for i in range(len(ci))]
+ else:
+ raise TypeError("Not a valid creation sequence type")
+
+ verts=[ s[0] for s in cs ]
+ if v not in verts:
+ raise ValueError("Vertex %s not in graph from creation_sequence"%v)
+ if u not in verts:
+ raise ValueError("Vertex %s not in graph from creation_sequence"%u)
+ # Done checking
+ if u==v: return [u]
+
+ uindex=verts.index(u)
+ vindex=verts.index(v)
+ bigind=max(uindex,vindex)
+ if cs[bigind][1]=='d':
+ return [u,v]
+ # must be that cs[bigind][1]=='i'
+ cs=cs[bigind:]
+ while cs:
+ vert=cs.pop()
+ if vert[1]=='d':
+ return [u,vert[0],v]
+    # all nodes after the later of u,v are type 'i', so there is no path
+ return -1
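+
+# Illustrative examples (hand-checked) for ['d','i','i','d']: vertex 3
+# is a dominating 'd', so it is adjacent to every vertex and also
+# relays paths between the two 'i' vertices:
+#
+#   >>> shortest_path(['d','i','i','d'], 0, 3)
+#   [0, 3]
+#   >>> shortest_path(['d','i','i','d'], 1, 2)
+#   [1, 3, 2]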
+
+def shortest_path_length(creation_sequence,i):
+ """
+ Return the shortest path length from indicated node to
+ every other node for the threshold graph with the given
+ creation sequence.
+ Node is indicated by index i in creation_sequence unless
+ creation_sequence is labeled in which case, i is taken to
+ be the label of the node.
+
+    Path lengths in threshold graphs are at most 2.
+ Length to unreachable nodes is set to -1.
+ """
+ # Turn input sequence into a labeled creation sequence
+ first=creation_sequence[0]
+ if isinstance(first,str): # creation sequence
+ if isinstance(creation_sequence,list):
+ cs = creation_sequence[:]
+ else:
+ cs = list(creation_sequence)
+ elif isinstance(first,tuple): # labeled creation sequence
+ cs = [ v[1] for v in creation_sequence]
+ i = [v[0] for v in creation_sequence].index(i)
+ elif isinstance(first,int): # compact creation sequence
+ cs = uncompact(creation_sequence)
+ else:
+ raise TypeError("Not a valid creation sequence type")
+
+ # Compute
+ N=len(cs)
+ spl=[2]*N # length 2 to every node
+ spl[i]=0 # except self which is 0
+ # 1 for all d's to the right
+ for j in range(i+1,N):
+ if cs[j]=="d":
+ spl[j]=1
+ if cs[i]=='d': # 1 for all nodes to the left
+ for j in range(i):
+ spl[j]=1
+ # and -1 for any trailing i to indicate unreachable
+ for j in range(N-1,0,-1):
+ if cs[j]=="d":
+ break
+ spl[j]=-1
+ return spl
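+
+# Illustrative example (hand-checked): in ['d','d','i'] the final 'i'
+# has no 'd' after it and is therefore unreachable:
+#
+#   >>> shortest_path_length(['d','d','i'], 0)
+#   [0, 1, -1]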
+
+
+def betweenness_sequence(creation_sequence,normalized=True):
+ """
+    Return betweenness for the threshold graph with the given creation
+    sequence.  If normalized is False the result is unscaled; to scale
+    the values to the interval [0,1] divide by (n-1)*(n-2).
+ """
+ cs=creation_sequence
+ seq=[] # betweenness
+ lastchar='d' # first node is always a 'd'
+    dr=float(cs.count("d")) # number of d's to the right of current position
+ irun=0 # number of i's in the last run
+ drun=0 # number of d's in the last run
+ dlast=0.0 # betweenness of last d
+ for i,c in enumerate(cs):
+ if c=='d': #cs[i]=="d":
+            # betweenness = amount shared with earlier d's and i's
+ # + new isolated nodes covered
+ # + new paths to all previous nodes
+ b=dlast + (irun-1)*irun/dr + 2*irun*(i-drun-irun)/dr
+ drun+=1 # update counter
+        else: # cs[i]=="i"
+ if lastchar=='d': # if this is a new run of i's
+ dlast=b # accumulate betweenness
+ dr-=drun # update number of d's to the right
+ drun=0 # reset d counter
+ irun=0 # reset i counter
+ b=0 # isolated nodes have zero betweenness
+ irun+=1 # add another i to the run
+ seq.append(float(b))
+ lastchar=c
+
+ # normalize by the number of possible shortest paths
+ if normalized:
+ order=len(cs)
+ scale=1.0/((order-1)*(order-2))
+ seq=[ s*scale for s in seq ]
+
+ return seq
+
+
+def eigenvectors(creation_sequence):
+ """
+ Return a 2-tuple of Laplacian eigenvalues and eigenvectors
+ for the threshold network with creation_sequence.
+ The first value is a list of eigenvalues.
+ The second value is a list of eigenvectors.
+ The lists are in the same order so corresponding eigenvectors
+ and eigenvalues are in the same position in the two lists.
+
+ Notice that the order of the eigenvalues returned by eigenvalues(cs)
+ may not correspond to the order of these eigenvectors.
+ """
+ ccs=make_compact(creation_sequence)
+ N=sum(ccs)
+ vec=[0]*N
+ val=vec[:]
+ # get number of type d nodes to the right (all for first node)
+ dr=sum(ccs[::2])
+
+ nn=ccs[0]
+ vec[0]=[1./sqrt(N)]*N
+ val[0]=0
+ e=dr
+ dr-=nn
+ type_d=True
+ i=1
+ dd=1
+ while dd<nn:
+ scale=1./sqrt(dd*dd+i)
+ vec[i]=i*[-scale]+[dd*scale]+[0]*(N-i-1)
+ val[i]=e
+ i+=1
+ dd+=1
+ if len(ccs)==1: return (val,vec)
+ for nn in ccs[1:]:
+ scale=1./sqrt(nn*i*(i+nn))
+ vec[i]=i*[-nn*scale]+nn*[i*scale]+[0]*(N-i-nn)
+ # find eigenvalue
+ type_d=not type_d
+ if type_d:
+ e=i+dr
+ dr-=nn
+ else:
+ e=dr
+ val[i]=e
+ st=i
+ i+=1
+ dd=1
+ while dd<nn:
+ scale=1./sqrt(i-st+dd*dd)
+ vec[i]=[0]*st+(i-st)*[-scale]+[dd*scale]+[0]*(N-i-1)
+ val[i]=e
+ i+=1
+ dd+=1
+ return (val,vec)
+
+def spectral_projection(u,eigenpairs):
+ """
+ Returns the coefficients of each eigenvector
+ in a projection of the vector u onto the normalized
+ eigenvectors which are contained in eigenpairs.
+
+ eigenpairs should be a list of two objects. The
+ first is a list of eigenvalues and the second a list
+ of eigenvectors. The eigenvectors should be lists.
+
+ There's not a lot of error checking on lengths of
+ arrays, etc. so be careful.
+ """
+ coeff=[]
+ evect=eigenpairs[1]
+ for ev in evect:
+ c=sum([ evv*uv for (evv,uv) in zip(ev,u)])
+ coeff.append(c)
+ return coeff
+
+
+
+def eigenvalues(creation_sequence):
+ """
+ Return sequence of eigenvalues of the Laplacian of the threshold
+ graph for the given creation_sequence.
+
+ Based on the Ferrer's diagram method. The spectrum is integral
+ and is the conjugate of the degree sequence.
+
+ See::
+
+ @Article{degree-merris-1994,
+     author =       {Russell Merris},
+ title = {Degree maximal graphs are Laplacian integral},
+ journal = {Linear Algebra Appl.},
+ year = {1994},
+ volume = {199},
+ pages = {381--389},
+ }
+
+ """
+ degseq=degree_sequence(creation_sequence)
+ degseq.sort()
+ eiglist=[] # zero is always one eigenvalue
+ eig=0
+ row=len(degseq)
+ bigdeg=degseq.pop()
+ while row:
+ if bigdeg<row:
+ eiglist.append(eig)
+ row-=1
+ else:
+ eig+=1
+ if degseq:
+ bigdeg=degseq.pop()
+ else:
+ bigdeg=0
+ return eiglist
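+
+# Worked example (hand-checked sketch): ['d','i','d','d'] has degree
+# sequence [2,2,3,3], whose Ferrers conjugate is (4,4,2,0), so
+#
+#   >>> eigenvalues(['d','i','d','d'])
+#   [0, 2, 4, 4]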
+
+
+### Threshold graph creation routines
+
+def random_threshold_sequence(n,p,seed=None):
+ """
+ Create a random threshold sequence of size n.
+ A creation sequence is built by randomly choosing d's with
+    probability p and i's with probability 1-p.
+
+ s=nx.random_threshold_sequence(10,0.5)
+
+ returns a threshold sequence of length 10 with equal
+    probability of an i or a d at each position.
+
+ A "random" threshold graph can be built with
+
+ G=nx.threshold_graph(s)
+
+ """
+    if seed is not None:
+ random.seed(seed)
+
+ if not (p<=1 and p>=0):
+ raise ValueError("p must be in [0,1]")
+
+ cs=['d'] # threshold sequences always start with a d
+ for i in range(1,n):
+ if random.random() < p:
+ cs.append('d')
+ else:
+ cs.append('i')
+ return cs
+
+
+
+
+
+# maybe *_d_threshold_sequence routines should
+# be (or be called from) a single routine with a more descriptive name
+# and a keyword parameter?
+def right_d_threshold_sequence(n,m):
+ """
+ Create a skewed threshold graph with a given number
+ of vertices (n) and a given number of edges (m).
+
+ The routine returns an unlabeled creation sequence
+ for the threshold graph.
+
+    Algorithm: starting from a sequence of one 'd' and n-1 'i's, 'i's
+    are converted to 'd's from the right end of the sequence until the
+    requested number of edges is reached (one final 'd' is placed to
+    hit m exactly), so the edges concentrate on high-index vertices.
+
+ """
+    cs=['d']+['i']*(n-1)    # start with one 'd' and n-1 isolated 'i' nodes
+
+    # m < n : a single 'd' at position m supplies exactly m edges
+ if m < n:
+ cs[m]='d'
+ return cs
+
+ # too many edges
+ if m > n*(n-1)/2:
+ raise ValueError("Too many edges for this many nodes.")
+
+    # connected case: m > n-1
+ ind=n-1
+ sum=n-1
+ while sum<m:
+ cs[ind]='d'
+ ind -= 1
+ sum += ind
+ ind=m-(sum-ind)
+ cs[ind]='d'
+ return cs
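+
+# Illustrative example (hand-checked): five nodes and five edges; the
+# bulk of the edges land on the rightmost vertex:
+#
+#   >>> right_d_threshold_sequence(5, 5)
+#   ['d', 'd', 'i', 'i', 'd']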
+
+def left_d_threshold_sequence(n,m):
+ """
+ Create a skewed threshold graph with a given number
+ of vertices (n) and a given number of edges (m).
+
+ The routine returns an unlabeled creation sequence
+ for the threshold graph.
+
+    Algorithm: the last vertex is made a 'd' (connecting the graph),
+    then 'i's are converted to 'd's from the left end of the sequence
+    until the requested number of edges is reached; one 'd' is flipped
+    back to 'i' if the count overshoots.
+
+ """
+    cs=['d']+['i']*(n-1)    # start with one 'd' and n-1 isolated 'i' nodes
+
+    # m < n : a single 'd' at position m supplies exactly m edges
+ if m < n:
+ cs[m]='d'
+ return cs
+
+ # too many edges
+ if m > n*(n-1)/2:
+ raise ValueError("Too many edges for this many nodes.")
+
+    # connected case: m > n-1
+ cs[n-1]='d'
+ sum=n-1
+ ind=1
+ while sum<m:
+ cs[ind]='d'
+ sum += ind
+ ind += 1
+    if sum>m: # overshoot: flip one earlier 'd' back to 'i' (never vertex 0)
+ cs[sum-m]='i'
+ return cs
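+
+# Illustrative example (hand-checked): the dominating last vertex
+# supplies four edges and the 'd' at position 2 the other two:
+#
+#   >>> left_d_threshold_sequence(5, 6)
+#   ['d', 'i', 'd', 'i', 'd']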
+
+def swap_d(cs,p_split=1.0,p_combine=1.0,seed=None):
+ """
+ Perform a "swap" operation on a threshold sequence.
+
+ The swap preserves the number of nodes and edges
+ in the graph for the given sequence.
+ The resulting sequence is still a threshold sequence.
+
+ Perform one split and one combine operation on the
+ 'd's of a creation sequence for a threshold graph.
+ This operation maintains the number of nodes and edges
+ in the graph, but shifts the edges from node to node
+ maintaining the threshold quality of the graph.
+ """
+    if seed is not None:
+ random.seed(seed)
+
+ # preprocess the creation sequence
+    # note: enumerate runs over cs[1:-1], so add 1 to recover positions in cs
+    dlist= [ i+1 for (i,node_type) in enumerate(cs[1:-1]) if node_type=='d' ]
+ # split
+ if random.random()<p_split:
+ choice=random.choice(dlist)
+ split_to=random.choice(range(choice))
+ flip_side=choice-split_to
+ if split_to!=flip_side and cs[split_to]=='i' and cs[flip_side]=='i':
+ cs[choice]='i'
+ cs[split_to]='d'
+ cs[flip_side]='d'
+ dlist.remove(choice)
+ # don't add or combine may reverse this action
+ # dlist.extend([split_to,flip_side])
+# print >>sys.stderr,"split at %s to %s and %s"%(choice,split_to,flip_side)
+ # combine
+ if random.random()<p_combine and dlist:
+ first_choice= random.choice(dlist)
+ second_choice=random.choice(dlist)
+ target=first_choice+second_choice
+ if target >= len(cs) or cs[target]=='d' or first_choice==second_choice:
+ return cs
+ # OK to combine
+ cs[first_choice]='i'
+ cs[second_choice]='i'
+ cs[target]='d'
+# print >>sys.stderr,"combine %s and %s to make %s."%(first_choice,second_choice,target)
+
+ return cs
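+
+# Usage sketch (illustrative; the exact output depends on the seed):
+# swap_d rearranges the 'd's but preserves both invariants below.
+#
+#   >>> cs = ['d', 'i', 'd', 'i', 'd']
+#   >>> cs2 = swap_d(list(cs), seed=1)
+#   >>> len(cs2) == len(cs)                     # same number of nodes
+#   True
+#   >>> sum(degree_sequence(cs2)) == sum(degree_sequence(cs))  # same 2|E|
+#   True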
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/linalg/__init__.py b/lib/python2.7/site-packages/setoolsgui/networkx/linalg/__init__.py
new file mode 100644
index 0000000..9f63c89
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/linalg/__init__.py
@@ -0,0 +1,9 @@
+from networkx.linalg.attrmatrix import *
+import networkx.linalg.attrmatrix
+from networkx.linalg.spectrum import *
+import networkx.linalg.spectrum
+from networkx.linalg.graphmatrix import *
+import networkx.linalg.graphmatrix
+from networkx.linalg.laplacianmatrix import *
+import networkx.linalg.laplacianmatrix
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/linalg/attrmatrix.py b/lib/python2.7/site-packages/setoolsgui/networkx/linalg/attrmatrix.py
new file mode 100644
index 0000000..df193bb
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/linalg/attrmatrix.py
@@ -0,0 +1,458 @@
+"""
+ Functions for constructing matrix-like objects from graph attributes.
+"""
+
+__all__ = ['attr_matrix', 'attr_sparse_matrix']
+
+import networkx as nx
+
+def _node_value(G, node_attr):
+ """Returns a function that returns a value from G.node[u].
+
+ We return a function expecting a node as its sole argument. Then, in the
+ simplest scenario, the returned function will return G.node[u][node_attr].
+ However, we also handle the case when `node_attr` is None or when it is a
+ function itself.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph
+
+ node_attr : {None, str, callable}
+ Specification of how the value of the node attribute should be obtained
+ from the node attribute dictionary.
+
+ Returns
+ -------
+ value : function
+ A function expecting a node as its sole argument. The function will
+        return a value from G.node[u] that depends on `node_attr`.
+
+ """
+ if node_attr is None:
+ value = lambda u: u
+ elif not hasattr(node_attr, '__call__'):
+ # assume it is a key for the node attribute dictionary
+ value = lambda u: G.node[u][node_attr]
+ else:
+ # Advanced: Allow users to specify something else.
+ #
+ # For example,
+ # node_attr = lambda u: G.node[u].get('size', .5) * 3
+ #
+ value = node_attr
+
+ return value
+
+def _edge_value(G, edge_attr):
+ """Returns a function that returns a value from G[u][v].
+
+ Suppose there exists an edge between u and v. Then we return a function
+ expecting u and v as arguments. For Graph and DiGraph, G[u][v] is
+ the edge attribute dictionary, and the function (essentially) returns
+ G[u][v][edge_attr]. However, we also handle cases when `edge_attr` is None
+ and when it is a function itself. For MultiGraph and MultiDiGraph, G[u][v]
+ is a dictionary of all edges between u and v. In this case, the returned
+ function sums the value of `edge_attr` for every edge between u and v.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph
+
+ edge_attr : {None, str, callable}
+ Specification of how the value of the edge attribute should be obtained
+ from the edge attribute dictionary, G[u][v]. For multigraphs, G[u][v]
+ is a dictionary of all the edges between u and v. This allows for
+ special treatment of multiedges.
+
+ Returns
+ -------
+ value : function
+ A function expecting two nodes as parameters. The nodes should
+ represent the from- and to- node of an edge. The function will
+ return a value from G[u][v] that depends on `edge_attr`.
+
+ """
+
+ if edge_attr is None:
+ # topological count of edges
+
+ if G.is_multigraph():
+ value = lambda u,v: len(G[u][v])
+ else:
+ value = lambda u,v: 1
+
+ elif not hasattr(edge_attr, '__call__'):
+ # assume it is a key for the edge attribute dictionary
+
+ if edge_attr == 'weight':
+ # provide a default value
+ if G.is_multigraph():
+ value = lambda u,v: sum([d.get(edge_attr, 1) for d in G[u][v].values()])
+ else:
+ value = lambda u,v: G[u][v].get(edge_attr, 1)
+ else:
+ # otherwise, the edge attribute MUST exist for each edge
+ if G.is_multigraph():
+ value = lambda u,v: sum([d[edge_attr] for d in G[u][v].values()])
+ else:
+ value = lambda u,v: G[u][v][edge_attr]
+
+ else:
+ # Advanced: Allow users to specify something else.
+ #
+ # Alternative default value:
+ # edge_attr = lambda u,v: G[u][v].get('thickness', .5)
+ #
+ # Function on an attribute:
+ # edge_attr = lambda u,v: abs(G[u][v]['weight'])
+ #
+ # Handle Multi(Di)Graphs differently:
+ # edge_attr = lambda u,v: numpy.prod([d['size'] for d in G[u][v].values()])
+ #
+ # Ignore multiple edges
+ # edge_attr = lambda u,v: 1 if len(G[u][v]) else 0
+ #
+ value = edge_attr
+
+ return value
+
+def attr_matrix(G, edge_attr=None, node_attr=None, normalized=False,
+ rc_order=None, dtype=None, order=None):
+ """Returns a NumPy matrix using attributes from G.
+
+ If only `G` is passed in, then the adjacency matrix is constructed.
+
+ Let A be a discrete set of values for the node attribute `node_attr`. Then
+ the elements of A represent the rows and columns of the constructed matrix.
+ Now, iterate through every edge e=(u,v) in `G` and consider the value
+ of the edge attribute `edge_attr`. If ua and va are the values of the
+ node attribute `node_attr` for u and v, respectively, then the value of
+ the edge attribute is added to the matrix element at (ua, va).
+
+ Parameters
+ ----------
+ G : graph
+ The NetworkX graph used to construct the NumPy matrix.
+
+ edge_attr : str, optional
+ Each element of the matrix represents a running total of the
+ specified edge attribute for edges whose node attributes correspond
+        to the rows/cols of the matrix. The attribute must be present for
+ all edges in the graph. If no attribute is specified, then we
+ just count the number of edges whose node attributes correspond
+ to the matrix element.
+
+ node_attr : str, optional
+ Each row and column in the matrix represents a particular value
+ of the node attribute. The attribute must be present for all nodes
+ in the graph. Note, the values of this attribute should be reliably
+ hashable. So, float values are not recommended. If no attribute is
+ specified, then the rows and columns will be the nodes of the graph.
+
+ normalized : bool, optional
+ If True, then each row is normalized by the summation of its values.
+
+ rc_order : list, optional
+ A list of the node attribute values. This list specifies the ordering
+        of rows and columns of the array. If no ordering is provided, then
+        the ordering is arbitrary and is also returned.
+
+ Other Parameters
+ ----------------
+ dtype : NumPy data-type, optional
+ A valid NumPy dtype used to initialize the array. Keep in mind certain
+ dtypes can yield unexpected results if the array is to be normalized.
+ The parameter is passed to numpy.zeros(). If unspecified, the NumPy
+ default is used.
+
+ order : {'C', 'F'}, optional
+ Whether to store multidimensional data in C- or Fortran-contiguous
+ (row- or column-wise) order in memory. This parameter is passed to
+ numpy.zeros(). If unspecified, the NumPy default is used.
+
+ Returns
+ -------
+ M : NumPy matrix
+ The attribute matrix.
+
+ ordering : list
+ If `rc_order` was specified, then only the matrix is returned.
+ However, if `rc_order` was None, then the ordering used to construct
+ the matrix is returned as well.
+
+ Examples
+ --------
+ Construct an adjacency matrix:
+
+ >>> G = nx.Graph()
+ >>> G.add_edge(0,1,thickness=1,weight=3)
+ >>> G.add_edge(0,2,thickness=2)
+ >>> G.add_edge(1,2,thickness=3)
+ >>> nx.attr_matrix(G, rc_order=[0,1,2])
+ matrix([[ 0., 1., 1.],
+ [ 1., 0., 1.],
+ [ 1., 1., 0.]])
+
+ Alternatively, we can obtain the matrix describing edge thickness.
+
+ >>> nx.attr_matrix(G, edge_attr='thickness', rc_order=[0,1,2])
+ matrix([[ 0., 1., 2.],
+ [ 1., 0., 3.],
+ [ 2., 3., 0.]])
+
+ We can also color the nodes and ask for the probability distribution over
+ all edges (u,v) describing:
+
+ Pr(v has color Y | u has color X)
+
+ >>> G.node[0]['color'] = 'red'
+ >>> G.node[1]['color'] = 'red'
+ >>> G.node[2]['color'] = 'blue'
+ >>> rc = ['red', 'blue']
+ >>> nx.attr_matrix(G, node_attr='color', normalized=True, rc_order=rc)
+ matrix([[ 0.33333333, 0.66666667],
+ [ 1. , 0. ]])
+
+ For example, the above tells us that for all edges (u,v):
+
+ Pr( v is red | u is red) = 1/3
+ Pr( v is blue | u is red) = 2/3
+
+ Pr( v is red | u is blue) = 1
+ Pr( v is blue | u is blue) = 0
+
+ Finally, we can obtain the total weights listed by the node colors.
+
+ >>> nx.attr_matrix(G, edge_attr='weight', node_attr='color', rc_order=rc)
+ matrix([[ 3., 2.],
+ [ 2., 0.]])
+
+ Thus, the total weight over all edges (u,v) with u and v having colors:
+
+ (red, red) is 3 # the sole contribution is from edge (0,1)
+ (red, blue) is 2 # contributions from edges (0,2) and (1,2)
+ (blue, red) is 2 # same as (red, blue) since graph is undirected
+ (blue, blue) is 0 # there are no edges with blue endpoints
+
+ """
+ try:
+ import numpy as np
+ except ImportError:
+ raise ImportError(
+ "attr_matrix() requires numpy: http://scipy.org/ ")
+
+ edge_value = _edge_value(G, edge_attr)
+ node_value = _node_value(G, node_attr)
+
+ if rc_order is None:
+ ordering = list(set([node_value(n) for n in G]))
+ else:
+ ordering = rc_order
+
+ N = len(ordering)
+ undirected = not G.is_directed()
+ index = dict(zip(ordering, range(N)))
+ M = np.zeros((N,N), dtype=dtype, order=order)
+
+ seen = set([])
+ for u,nbrdict in G.adjacency_iter():
+ for v in nbrdict:
+ # Obtain the node attribute values.
+ i, j = index[node_value(u)], index[node_value(v)]
+ if v not in seen:
+ M[i,j] += edge_value(u,v)
+ if undirected:
+ M[j,i] = M[i,j]
+
+ if undirected:
+ seen.add(u)
+
+ if normalized:
+ M /= M.sum(axis=1).reshape((N,1))
+
+ M = np.asmatrix(M)
+
+ if rc_order is None:
+ return M, ordering
+ else:
+ return M
+
+def attr_sparse_matrix(G, edge_attr=None, node_attr=None,
+ normalized=False, rc_order=None, dtype=None):
+ """Returns a SciPy sparse matrix using attributes from G.
+
+ If only `G` is passed in, then the adjacency matrix is constructed.
+
+ Let A be a discrete set of values for the node attribute `node_attr`. Then
+ the elements of A represent the rows and columns of the constructed matrix.
+ Now, iterate through every edge e=(u,v) in `G` and consider the value
+ of the edge attribute `edge_attr`. If ua and va are the values of the
+ node attribute `node_attr` for u and v, respectively, then the value of
+ the edge attribute is added to the matrix element at (ua, va).
+
+ Parameters
+ ----------
+ G : graph
+ The NetworkX graph used to construct the NumPy matrix.
+
+ edge_attr : str, optional
+ Each element of the matrix represents a running total of the
+ specified edge attribute for edges whose node attributes correspond
+        to the rows/cols of the matrix. The attribute must be present for
+ all edges in the graph. If no attribute is specified, then we
+ just count the number of edges whose node attributes correspond
+ to the matrix element.
+
+ node_attr : str, optional
+ Each row and column in the matrix represents a particular value
+ of the node attribute. The attribute must be present for all nodes
+ in the graph. Note, the values of this attribute should be reliably
+ hashable. So, float values are not recommended. If no attribute is
+ specified, then the rows and columns will be the nodes of the graph.
+
+ normalized : bool, optional
+ If True, then each row is normalized by the summation of its values.
+
+ rc_order : list, optional
+ A list of the node attribute values. This list specifies the ordering
+        of rows and columns of the array. If no ordering is provided, then
+        the ordering is arbitrary and is also returned.
+
+ Other Parameters
+ ----------------
+ dtype : NumPy data-type, optional
+ A valid NumPy dtype used to initialize the array. Keep in mind certain
+ dtypes can yield unexpected results if the array is to be normalized.
+ The parameter is passed to numpy.zeros(). If unspecified, the NumPy
+ default is used.
+
+ Returns
+ -------
+ M : SciPy sparse matrix
+ The attribute matrix.
+
+ ordering : list
+ If `rc_order` was specified, then only the matrix is returned.
+ However, if `rc_order` was None, then the ordering used to construct
+ the matrix is returned as well.
+
+ Examples
+ --------
+ Construct an adjacency matrix:
+
+ >>> G = nx.Graph()
+ >>> G.add_edge(0,1,thickness=1,weight=3)
+ >>> G.add_edge(0,2,thickness=2)
+ >>> G.add_edge(1,2,thickness=3)
+ >>> M = nx.attr_sparse_matrix(G, rc_order=[0,1,2])
+ >>> M.todense()
+ matrix([[ 0., 1., 1.],
+ [ 1., 0., 1.],
+ [ 1., 1., 0.]])
+
+ Alternatively, we can obtain the matrix describing edge thickness.
+
+ >>> M = nx.attr_sparse_matrix(G, edge_attr='thickness', rc_order=[0,1,2])
+ >>> M.todense()
+ matrix([[ 0., 1., 2.],
+ [ 1., 0., 3.],
+ [ 2., 3., 0.]])
+
+ We can also color the nodes and ask for the probability distribution over
+ all edges (u,v) describing:
+
+ Pr(v has color Y | u has color X)
+
+ >>> G.node[0]['color'] = 'red'
+ >>> G.node[1]['color'] = 'red'
+ >>> G.node[2]['color'] = 'blue'
+ >>> rc = ['red', 'blue']
+ >>> M = nx.attr_sparse_matrix(G, node_attr='color', \
+ normalized=True, rc_order=rc)
+ >>> M.todense()
+ matrix([[ 0.33333333, 0.66666667],
+ [ 1. , 0. ]])
+
+ For example, the above tells us that for all edges (u,v):
+
+ Pr( v is red | u is red) = 1/3
+ Pr( v is blue | u is red) = 2/3
+
+ Pr( v is red | u is blue) = 1
+ Pr( v is blue | u is blue) = 0
+
+ Finally, we can obtain the total weights listed by the node colors.
+
+ >>> M = nx.attr_sparse_matrix(G, edge_attr='weight',\
+ node_attr='color', rc_order=rc)
+ >>> M.todense()
+ matrix([[ 3., 2.],
+ [ 2., 0.]])
+
+ Thus, the total weight over all edges (u,v) with u and v having colors:
+
+ (red, red) is 3 # the sole contribution is from edge (0,1)
+ (red, blue) is 2 # contributions from edges (0,2) and (1,2)
+ (blue, red) is 2 # same as (red, blue) since graph is undirected
+ (blue, blue) is 0 # there are no edges with blue endpoints
+
+ """
+ try:
+ import numpy as np
+ from scipy import sparse
+ except ImportError:
+ raise ImportError(
+ "attr_sparse_matrix() requires scipy: http://scipy.org/ ")
+
+ edge_value = _edge_value(G, edge_attr)
+ node_value = _node_value(G, node_attr)
+
+ if rc_order is None:
+ ordering = list(set([node_value(n) for n in G]))
+ else:
+ ordering = rc_order
+
+ N = len(ordering)
+ undirected = not G.is_directed()
+ index = dict(zip(ordering, range(N)))
+ M = sparse.lil_matrix((N,N), dtype=dtype)
+
+ seen = set([])
+ for u,nbrdict in G.adjacency_iter():
+ for v in nbrdict:
+ # Obtain the node attribute values.
+ i, j = index[node_value(u)], index[node_value(v)]
+ if v not in seen:
+ M[i,j] += edge_value(u,v)
+ if undirected:
+ M[j,i] = M[i,j]
+
+ if undirected:
+ seen.add(u)
+
+ if normalized:
+ norms = np.asarray(M.sum(axis=1)).ravel()
+ for i,norm in enumerate(norms):
+ M[i,:] /= norm
+
+ if rc_order is None:
+ return M, ordering
+ else:
+ return M
+
+
+# fixture for nose tests
+def setup_module(module):
+ from nose import SkipTest
+ try:
+ import numpy
+    except ImportError:
+ raise SkipTest("NumPy not available")
+ try:
+ import scipy
+    except ImportError:
+ raise SkipTest("SciPy not available")
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/linalg/graphmatrix.py b/lib/python2.7/site-packages/setoolsgui/networkx/linalg/graphmatrix.py
new file mode 100644
index 0000000..c677619
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/linalg/graphmatrix.py
@@ -0,0 +1,156 @@
+"""
+Adjacency matrix and incidence matrix of graphs.
+"""
+# Copyright (C) 2004-2011 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+import networkx as nx
+__author__ = "\n".join(['Aric Hagberg (hagberg@lanl.gov)',
+ 'Pieter Swart (swart@lanl.gov)',
+ 'Dan Schult(dschult@colgate.edu)'])
+
+__all__ = ['incidence_matrix',
+ 'adj_matrix', 'adjacency_matrix',
+ ]
+
+
+def incidence_matrix(G, nodelist=None, edgelist=None,
+ oriented=False, weight=None):
+ """Return incidence matrix of G.
+
+ The incidence matrix assigns each row to a node and each column to an edge.
+ For a standard incidence matrix a 1 appears wherever a row's node is
+ incident on the column's edge. For an oriented incidence matrix each
+ edge is assigned an orientation (arbitrarily for undirected and aligning to
+ direction for directed). A -1 appears for the tail of an edge and 1
+ for the head of the edge. The elements are zero otherwise.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph
+
+ nodelist : list, optional (default= all nodes in G)
+ The rows are ordered according to the nodes in nodelist.
+ If nodelist is None, then the ordering is produced by G.nodes().
+
+ edgelist : list, optional (default= all edges in G)
+ The columns are ordered according to the edges in edgelist.
+ If edgelist is None, then the ordering is produced by G.edges().
+
+ oriented: bool, optional (default=False)
+ If True, matrix elements are +1 or -1 for the head or tail node
+ respectively of each edge. If False, +1 occurs at both nodes.
+
+ weight : string or None, optional (default=None)
+ The edge data key used to provide each value in the matrix.
+ If None, then each edge has weight 1. Edge weights, if used,
+ should be positive so that the orientation can provide the sign.
+
+ Returns
+ -------
+ A : NumPy matrix
+ The incidence matrix of G.
+
+ Notes
+ -----
+ For MultiGraph/MultiDiGraph, the edges in edgelist should be
+ (u,v,key) 3-tuples.
+
+ "Networks are the best discrete model for so many problems in
+ applied mathematics" [1]_.
+
+ References
+ ----------
+ .. [1] Gil Strang, Network applications: A = incidence matrix,
+ http://academicearth.org/lectures/network-applications-incidence-matrix
+ """
+ try:
+ import numpy as np
+ except ImportError:
+ raise ImportError(
+ "incidence_matrix() requires numpy: http://scipy.org/ ")
+ if nodelist is None:
+ nodelist = G.nodes()
+ if edgelist is None:
+ if G.is_multigraph():
+ edgelist = G.edges(keys=True)
+ else:
+ edgelist = G.edges()
+ A = np.zeros((len(nodelist),len(edgelist)))
+ node_index = dict( (node,i) for i,node in enumerate(nodelist) )
+ for ei,e in enumerate(edgelist):
+ (u,v) = e[:2]
+ if u == v: continue # self loops give zero column
+ try:
+ ui = node_index[u]
+ vi = node_index[v]
+ except KeyError:
+            raise nx.NetworkXError("node %s or %s in edgelist "
+                                   "but not in nodelist" % (u, v))
+ if weight is None:
+ wt = 1
+ else:
+ if G.is_multigraph():
+ ekey = e[2]
+ wt = G[u][v][ekey].get(weight,1)
+ else:
+ wt = G[u][v].get(weight,1)
+ if oriented:
+ A[ui,ei] = -wt
+ A[vi,ei] = wt
+ else:
+ A[ui,ei] = wt
+ A[vi,ei] = wt
+ return np.asmatrix(A)
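+
+# Illustrative example (hand-checked): the oriented incidence matrix of
+# the 3-node path graph, one column per edge (0,1) and (1,2):
+#
+#   >>> nx.incidence_matrix(nx.path_graph(3), oriented=True)
+#   matrix([[-1.,  0.],
+#           [ 1., -1.],
+#           [ 0.,  1.]])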
+
+def adjacency_matrix(G, nodelist=None, weight='weight'):
+ """Return adjacency matrix of G.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph
+
+ nodelist : list, optional
+ The rows and columns are ordered according to the nodes in nodelist.
+ If nodelist is None, then the ordering is produced by G.nodes().
+
+ weight : string or None, optional (default='weight')
+ The edge data key used to provide each value in the matrix.
+ If None, then each edge has weight 1.
+
+ Returns
+ -------
+ A : numpy matrix
+ Adjacency matrix representation of G.
+
+ Notes
+ -----
+ If you want a pure Python adjacency matrix representation try
+ networkx.convert.to_dict_of_dicts which will return a
+ dictionary-of-dictionaries format that can be addressed as a
+ sparse matrix.
+
+ For MultiGraph/MultiDiGraph, the edges weights are summed.
+ See to_numpy_matrix for other options.
+
+ See Also
+ --------
+ to_numpy_matrix
+ to_dict_of_dicts
+ """
+ return nx.to_numpy_matrix(G,nodelist=nodelist,weight=weight)
+
+adj_matrix=adjacency_matrix
+
+# fixture for nose tests
+def setup_module(module):
+ from nose import SkipTest
+ try:
+ import numpy
+    except ImportError:
+ raise SkipTest("NumPy not available")
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/linalg/laplacianmatrix.py b/lib/python2.7/site-packages/setoolsgui/networkx/linalg/laplacianmatrix.py
new file mode 100644
index 0000000..ffb256c
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/linalg/laplacianmatrix.py
@@ -0,0 +1,277 @@
+"""
+Laplacian matrix of graphs.
+"""
+# Copyright (C) 2004-2013 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+import networkx as nx
+from networkx.utils import require, not_implemented_for
+
+__author__ = "\n".join(['Aric Hagberg <aric.hagberg@gmail.com>',
+ 'Pieter Swart (swart@lanl.gov)',
+ 'Dan Schult (dschult@colgate.edu)',
+ 'Alejandro Weinstein <alejandro.weinstein@gmail.com>'])
+
+__all__ = ['laplacian_matrix',
+ 'normalized_laplacian_matrix',
+ 'directed_laplacian_matrix']
+
+@require('numpy')
+@not_implemented_for('directed')
+def laplacian_matrix(G, nodelist=None, weight='weight'):
+ """Return the Laplacian matrix of G.
+
+ The graph Laplacian is the matrix L = D - A, where
+ A is the adjacency matrix and D is the diagonal matrix of node degrees.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph
+
+ nodelist : list, optional
+ The rows and columns are ordered according to the nodes in nodelist.
+ If nodelist is None, then the ordering is produced by G.nodes().
+
+ weight : string or None, optional (default='weight')
+ The edge data key used to compute each value in the matrix.
+ If None, then each edge has weight 1.
+
+ Returns
+ -------
+ L : NumPy matrix
+ The Laplacian matrix of G.
+
+ Notes
+ -----
+ For MultiGraph/MultiDiGraph, the edges weights are summed.
+ See to_numpy_matrix for other options.
+
+ See Also
+ --------
+ to_numpy_matrix
+ normalized_laplacian_matrix
+ """
+ import numpy as np
+ if nodelist is None:
+ nodelist = G.nodes()
+ if G.is_multigraph():
+ # this isn't the fastest way to do this...
+ A = np.asarray(nx.to_numpy_matrix(G,nodelist=nodelist,weight=weight))
+ I = np.identity(A.shape[0])
+ D = I*np.sum(A,axis=1)
+ L = D - A
+ else:
+ # Graph or DiGraph, this is faster than above
+ n = len(nodelist)
+ index = dict( (n,i) for i,n in enumerate(nodelist) )
+ L = np.zeros((n,n))
+ for ui,u in enumerate(nodelist):
+ totalwt = 0.0
+ for v,d in G[u].items():
+ try:
+ vi = index[v]
+ except KeyError:
+ continue
+ wt = d.get(weight,1)
+ L[ui,vi] = -wt
+ totalwt += wt
+ L[ui,ui] = totalwt
+ return np.asmatrix(L)
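+
+# Illustrative example (hand-checked): for the 3-node path graph the
+# degrees are (1, 2, 1), so L = D - A is
+#
+#   >>> nx.laplacian_matrix(nx.path_graph(3))
+#   matrix([[ 1., -1.,  0.],
+#           [-1.,  2., -1.],
+#           [ 0., -1.,  1.]])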
+
+@require('numpy')
+@not_implemented_for('directed')
+def normalized_laplacian_matrix(G, nodelist=None, weight='weight'):
+ r"""Return the normalized Laplacian matrix of G.
+
+ The normalized graph Laplacian is the matrix
+
+ .. math::
+
+ NL = D^{-1/2} L D^{-1/2}
+
+ where `L` is the graph Laplacian and `D` is the diagonal matrix of
+ node degrees.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph
+
+ nodelist : list, optional
+ The rows and columns are ordered according to the nodes in nodelist.
+ If nodelist is None, then the ordering is produced by G.nodes().
+
+ weight : string or None, optional (default='weight')
+ The edge data key used to compute each value in the matrix.
+ If None, then each edge has weight 1.
+
+ Returns
+ -------
+ L : NumPy matrix
+ The normalized Laplacian matrix of G.
+
+ Notes
+ -----
+ For MultiGraph/MultiDiGraph, the edges weights are summed.
+ See to_numpy_matrix for other options.
+
+ If the Graph contains selfloops, D is defined as diag(sum(A,1)), where A is
+    the adjacency matrix [2]_.
+
+ See Also
+ --------
+ laplacian_matrix
+
+ References
+ ----------
+ .. [1] Fan Chung-Graham, Spectral Graph Theory,
+ CBMS Regional Conference Series in Mathematics, Number 92, 1997.
+ .. [2] Steve Butler, Interlacing For Weighted Graphs Using The Normalized
+ Laplacian, Electronic Journal of Linear Algebra, Volume 16, pp. 90-98,
+ March 2007.
+ """
+ import numpy as np
+ if G.is_multigraph():
+ L = laplacian_matrix(G, nodelist=nodelist, weight=weight)
+ D = np.diag(L)
+ elif G.number_of_selfloops() == 0:
+ L = laplacian_matrix(G, nodelist=nodelist, weight=weight)
+ D = np.diag(L)
+ else:
+ A = np.array(nx.adj_matrix(G))
+ D = np.sum(A, 1)
+ L = np.diag(D) - A
+
+ # Handle div by 0. It happens if there are unconnected nodes
+ with np.errstate(divide='ignore'):
+ Disqrt = np.diag(1 / np.sqrt(D))
+ Disqrt[np.isinf(Disqrt)] = 0
+ Ln = np.dot(Disqrt, np.dot(L,Disqrt))
+ return Ln
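+
+# Illustrative example (hand-checked): for the 3-node path graph the
+# degrees are (1, 2, 1), so NL has ones on the diagonal and
+# -1/sqrt(2) (about -0.7071) wherever node 1 couples to nodes 0 and 2.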
+
+###############################################################################
+# Code based on
+# https://bitbucket.org/bedwards/networkx-community/src/370bd69fc02f/networkx/algorithms/community/
+
+@require('numpy')
+@not_implemented_for('undirected')
+@not_implemented_for('multigraph')
+def directed_laplacian_matrix(G, nodelist=None, weight='weight',
+ walk_type=None, alpha=0.95):
+ r"""Return the directed Laplacian matrix of G.
+
+ The graph directed Laplacian is the matrix
+
+ .. math::
+
+ L = I - (\Phi^{1/2} P \Phi^{-1/2} + \Phi^{-1/2} P^T \Phi^{1/2} ) / 2
+
+ where `I` is the identity matrix, `P` is the transition matrix of the
+    graph, and `\Phi` is the diagonal matrix with the Perron vector of `P`
+    on the diagonal and zeros elsewhere.
+
+ Depending on the value of walk_type, `P` can be the transition matrix
+ induced by a random walk, a lazy random walk, or a random walk with
+ teleportation (PageRank).
+
+ Parameters
+ ----------
+ G : DiGraph
+ A NetworkX graph
+
+ nodelist : list, optional
+ The rows and columns are ordered according to the nodes in nodelist.
+ If nodelist is None, then the ordering is produced by G.nodes().
+
+ weight : string or None, optional (default='weight')
+ The edge data key used to compute each value in the matrix.
+ If None, then each edge has weight 1.
+
+ walk_type : string or None, optional (default=None)
+ If None, `P` is selected depending on the properties of the
+ graph. Otherwise is one of 'random', 'lazy', or 'pagerank'
+
+ alpha : real
+ (1 - alpha) is the teleportation probability used with pagerank
+
+ Returns
+ -------
+ L : NumPy array
+ Normalized Laplacian of G.
+
+ Raises
+ ------
+ NetworkXError
+ If NumPy cannot be imported
+
+    NetworkXNotImplemented
+ If G is not a DiGraph
+
+ Notes
+ -----
+ Only implemented for DiGraphs
+
+ See Also
+ --------
+ laplacian_matrix
+
+ References
+ ----------
+ .. [1] Fan Chung (2005).
+ Laplacians and the Cheeger inequality for directed graphs.
+ Annals of Combinatorics, 9(1), 2005
+ """
+ import numpy as np
+ if walk_type is None:
+ if nx.is_strongly_connected(G):
+ if nx.is_aperiodic(G):
+ walk_type = "random"
+ else:
+ walk_type = "lazy"
+ else:
+ walk_type = "pagerank"
+
+ M = nx.to_numpy_matrix(G, nodelist=nodelist, weight=weight)
+ n, m = M.shape
+ if walk_type in ["random", "lazy"]:
+ DI = np.diagflat(1.0 / np.sum(M, axis=1))
+ if walk_type == "random":
+ P = DI * M
+ else:
+ I = np.identity(n)
+ P = (I + DI * M) / 2.0
+ elif walk_type == "pagerank":
+ if not (0 < alpha < 1):
+ raise nx.NetworkXError('alpha must be between 0 and 1')
+ # add constant to dangling nodes' row
+ dangling = np.where(M.sum(axis=1) == 0)
+ for d in dangling[0]:
+ M[d] = 1.0 / n
+ # normalize
+ M = M / M.sum(axis=1)
+ P = alpha * M + (1 - alpha) / n
+ else:
+ raise nx.NetworkXError("walk_type must be random, lazy, or pagerank")
+
+ evals, evecs = np.linalg.eig(P.T)
+ index = evals.argsort()[-1] # index of largest eval,evec
+ # eigenvector of largest eigenvalue at ind[-1]
+ v = np.array(evecs[:,index]).flatten().real
+ p = v / v.sum()
+ sp = np.sqrt(p)
+ Q = np.diag(sp) * P * np.diag(1.0/sp)
+ I = np.identity(len(G))
+
+    return I - (Q + Q.T) / 2.0
+
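+# Illustrative example (hand-checked sketch): for the directed 3-cycle
+# 0->1->2->0 the graph is strongly connected but periodic, so the lazy
+# walk is selected.  P = (I + M)/2 is doubly stochastic, the Perron
+# vector is uniform, and the result reduces to I/2 - (M + M.T)/4:
+#
+#   [[ 0.5 , -0.25, -0.25],
+#    [-0.25,  0.5 , -0.25],
+#    [-0.25, -0.25,  0.5 ]]
+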
+# fixture for nose tests
+def setup_module(module):
+ from nose import SkipTest
+ try:
+ import numpy
+    except ImportError:
+ raise SkipTest("NumPy not available")
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/linalg/spectrum.py b/lib/python2.7/site-packages/setoolsgui/networkx/linalg/spectrum.py
new file mode 100644
index 0000000..bca7288
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/linalg/spectrum.py
@@ -0,0 +1,90 @@
+"""
+Eigenvalue spectrum of graphs.
+"""
+# Copyright (C) 2004-2011 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+import networkx as nx
+__author__ = "\n".join(['Aric Hagberg (hagberg@lanl.gov)',
+ 'Pieter Swart (swart@lanl.gov)',
+ 'Dan Schult(dschult@colgate.edu)'])
+
+__all__ = ['laplacian_spectrum', 'adjacency_spectrum']
+
+
+def laplacian_spectrum(G, weight='weight'):
+ """Return eigenvalues of the Laplacian of G
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph
+
+ weight : string or None, optional (default='weight')
+ The edge data key used to compute each value in the matrix.
+ If None, then each edge has weight 1.
+
+ Returns
+ -------
+ evals : NumPy array
+ Eigenvalues
+
+ Notes
+ -----
+ For MultiGraph/MultiDiGraph, the edges weights are summed.
+ See to_numpy_matrix for other options.
+
+ See Also
+ --------
+ laplacian_matrix
+ """
+ try:
+ import numpy as np
+ except ImportError:
+ raise ImportError(
+ "laplacian_spectrum() requires NumPy: http://scipy.org/ ")
+ return np.linalg.eigvals(nx.laplacian_matrix(G,weight=weight))
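+
+# Illustrative example (hand-checked): the 3-node path graph has
+# Laplacian eigenvalues 0, 1 and 3, so (up to floating-point noise and
+# ordering)
+#
+#   >>> sorted(nx.laplacian_spectrum(nx.path_graph(3)))
+#   [0.0, 1.0, 3.0]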
+
+def adjacency_spectrum(G, weight='weight'):
+ """Return eigenvalues of the adjacency matrix of G.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph
+
+ weight : string or None, optional (default='weight')
+ The edge data key used to compute each value in the matrix.
+ If None, then each edge has weight 1.
+
+ Returns
+ -------
+ evals : NumPy array
+ Eigenvalues
+
+ Notes
+ -----
+ For MultiGraph/MultiDiGraph, the edges weights are summed.
+ See to_numpy_matrix for other options.
+
+ See Also
+ --------
+ adjacency_matrix
+ """
+ try:
+ import numpy as np
+ except ImportError:
+ raise ImportError(
+ "adjacency_spectrum() requires NumPy: http://scipy.org/ ")
+ return np.linalg.eigvals(nx.adjacency_matrix(G,weight=weight))
+
+# fixture for nose tests
+def setup_module(module):
+ from nose import SkipTest
+ try:
+ import numpy
+    except ImportError:
+ raise SkipTest("NumPy not available")
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/linalg/tests/test_graphmatrix.py b/lib/python2.7/site-packages/setoolsgui/networkx/linalg/tests/test_graphmatrix.py
new file mode 100644
index 0000000..bba234e
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/linalg/tests/test_graphmatrix.py
@@ -0,0 +1,89 @@
+from nose import SkipTest
+
+import networkx as nx
+from networkx.generators.degree_seq import havel_hakimi_graph
+
+class TestGraphMatrix(object):
+ numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
+ @classmethod
+ def setupClass(cls):
+ global numpy
+ global assert_equal
+ global assert_almost_equal
+ try:
+ import numpy
+ from numpy.testing import assert_equal,assert_almost_equal
+ except ImportError:
+ raise SkipTest('NumPy not available.')
+
+ def setUp(self):
+ deg=[3,2,2,1,0]
+ self.G=havel_hakimi_graph(deg)
+ self.OI=numpy.array([[-1, -1, -1, 0],
+ [1, 0, 0, -1],
+ [0, 1, 0, 1],
+ [0, 0, 1, 0],
+ [0, 0, 0, 0]])
+ self.A=numpy.array([[0, 1, 1, 1, 0],
+ [1, 0, 1, 0, 0],
+ [1, 1, 0, 0, 0],
+ [1, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0]])
+ self.WG=nx.Graph( (u,v,{'weight':0.5,'other':0.3})
+ for (u,v) in self.G.edges_iter() )
+ self.WG.add_node(4)
+ self.WA=numpy.array([[0 , 0.5, 0.5, 0.5, 0],
+ [0.5, 0 , 0.5, 0 , 0],
+ [0.5, 0.5, 0 , 0 , 0],
+ [0.5, 0 , 0 , 0 , 0],
+ [0 , 0 , 0 , 0 , 0]])
+ self.MG=nx.MultiGraph(self.G)
+ self.MG2=self.MG.copy()
+ self.MG2.add_edge(0,1)
+ self.MG2A=numpy.array([[0, 2, 1, 1, 0],
+ [2, 0, 1, 0, 0],
+ [1, 1, 0, 0, 0],
+ [1, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0]])
+ self.MGOI=numpy.array([[-1, -1, -1, -1, 0],
+ [1, 1, 0, 0, -1],
+ [0, 0, 1, 0, 1],
+ [0, 0, 0, 1, 0],
+ [0, 0, 0, 0, 0]])
+
+ def test_incidence_matrix(self):
+ "Conversion to incidence matrix"
+ assert_equal(nx.incidence_matrix(self.G,oriented=True),self.OI)
+ assert_equal(nx.incidence_matrix(self.G),numpy.abs(self.OI))
+ assert_equal(nx.incidence_matrix(self.MG,oriented=True),self.OI)
+ assert_equal(nx.incidence_matrix(self.MG),numpy.abs(self.OI))
+ assert_equal(nx.incidence_matrix(self.MG2,oriented=True),self.MGOI)
+ assert_equal(nx.incidence_matrix(self.MG2),numpy.abs(self.MGOI))
+ assert_equal(nx.incidence_matrix(self.WG,oriented=True),self.OI)
+ assert_equal(nx.incidence_matrix(self.WG),numpy.abs(self.OI))
+ assert_equal(nx.incidence_matrix(self.WG,oriented=True,
+ weight='weight'),0.5*self.OI)
+ assert_equal(nx.incidence_matrix(self.WG,weight='weight'),
+ numpy.abs(0.5*self.OI))
+ assert_equal(nx.incidence_matrix(self.WG,oriented=True,weight='other'),
+ 0.3*self.OI)
+ WMG=nx.MultiGraph(self.WG)
+ WMG.add_edge(0,1,attr_dict={'weight':0.5,'other':0.3})
+ assert_equal(nx.incidence_matrix(WMG,weight='weight'),
+ numpy.abs(0.5*self.MGOI))
+ assert_equal(nx.incidence_matrix(WMG,weight='weight',oriented=True),
+ 0.5*self.MGOI)
+ assert_equal(nx.incidence_matrix(WMG,weight='other',oriented=True),
+ 0.3*self.MGOI)
+
+ def test_adjacency_matrix(self):
+ "Conversion to adjacency matrix"
+ assert_equal(nx.adj_matrix(self.G),self.A)
+ assert_equal(nx.adj_matrix(self.MG),self.A)
+ assert_equal(nx.adj_matrix(self.MG2),self.MG2A)
+ assert_equal(nx.adj_matrix(self.G,nodelist=[0,1]),self.A[:2,:2])
+ assert_equal(nx.adj_matrix(self.WG),self.WA)
+ assert_equal(nx.adj_matrix(self.WG,weight=None),self.A)
+ assert_equal(nx.adj_matrix(self.MG2,weight=None),self.MG2A)
+ assert_equal(nx.adj_matrix(self.WG,weight='other'),0.6*self.WA)
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/linalg/tests/test_laplacian.py b/lib/python2.7/site-packages/setoolsgui/networkx/linalg/tests/test_laplacian.py
new file mode 100644
index 0000000..87725fe
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/linalg/tests/test_laplacian.py
@@ -0,0 +1,101 @@
+from nose import SkipTest
+
+import networkx as nx
+from networkx.generators.degree_seq import havel_hakimi_graph
+
+class TestLaplacian(object):
+ numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
+ @classmethod
+ def setupClass(cls):
+ global numpy
+ global assert_equal
+ global assert_almost_equal
+ try:
+ import numpy
+ from numpy.testing import assert_equal,assert_almost_equal
+ except ImportError:
+ raise SkipTest('NumPy not available.')
+
+ def setUp(self):
+ deg=[3,2,2,1,0]
+ self.G=havel_hakimi_graph(deg)
+ self.WG=nx.Graph( (u,v,{'weight':0.5,'other':0.3})
+ for (u,v) in self.G.edges_iter() )
+ self.WG.add_node(4)
+ self.MG=nx.MultiGraph(self.G)
+
+ # Graph with selfloops
+ self.Gsl = self.G.copy()
+ for node in self.Gsl.nodes():
+ self.Gsl.add_edge(node, node)
+
+
+ def test_laplacian(self):
+ "Graph Laplacian"
+ NL=numpy.array([[ 3, -1, -1, -1, 0],
+ [-1, 2, -1, 0, 0],
+ [-1, -1, 2, 0, 0],
+ [-1, 0, 0, 1, 0],
+ [ 0, 0, 0, 0, 0]])
+ WL=0.5*NL
+ OL=0.3*NL
+ assert_equal(nx.laplacian_matrix(self.G),NL)
+ assert_equal(nx.laplacian_matrix(self.MG),NL)
+ assert_equal(nx.laplacian_matrix(self.G,nodelist=[0,1]),
+ numpy.array([[ 1, -1],[-1, 1]]))
+ assert_equal(nx.laplacian_matrix(self.WG),WL)
+ assert_equal(nx.laplacian_matrix(self.WG,weight=None),NL)
+ assert_equal(nx.laplacian_matrix(self.WG,weight='other'),OL)
+
+ def test_normalized_laplacian(self):
+        "Normalized Graph Laplacian"
+ GL=numpy.array([[ 1.00, -0.408, -0.408, -0.577, 0.00],
+ [-0.408, 1.00, -0.50, 0.00 , 0.00],
+ [-0.408, -0.50, 1.00, 0.00, 0.00],
+ [-0.577, 0.00, 0.00, 1.00, 0.00],
+ [ 0.00, 0.00, 0.00, 0.00, 0.00]])
+ Lsl = numpy.array([[ 0.75 , -0.2887, -0.2887, -0.3536, 0.],
+ [-0.2887, 0.6667, -0.3333, 0. , 0.],
+ [-0.2887, -0.3333, 0.6667, 0. , 0.],
+ [-0.3536, 0. , 0. , 0.5 , 0.],
+ [ 0. , 0. , 0. , 0. , 0.]])
+
+ assert_almost_equal(nx.normalized_laplacian_matrix(self.G),GL,decimal=3)
+ assert_almost_equal(nx.normalized_laplacian_matrix(self.MG),GL,decimal=3)
+ assert_almost_equal(nx.normalized_laplacian_matrix(self.WG),GL,decimal=3)
+ assert_almost_equal(nx.normalized_laplacian_matrix(self.WG,weight='other'),GL,decimal=3)
+ assert_almost_equal(nx.normalized_laplacian_matrix(self.Gsl), Lsl, decimal=3)
+
+ def test_directed_laplacian(self):
+ "Directed Laplacian"
+ # Graph used as an example in Sec. 4.1 of Langville and Meyer,
+ # "Google's PageRank and Beyond". The graph contains dangling nodes, so
+        # the pagerank random walk is selected by directed_laplacian_matrix
+ G = nx.DiGraph()
+ G.add_edges_from(((1,2), (1,3), (3,1), (3,2), (3,5), (4,5), (4,6),
+ (5,4), (5,6), (6,4)))
+ GL = numpy.array([[ 0.9833, -0.2941, -0.3882, -0.0291, -0.0231, -0.0261],
+ [-0.2941, 0.8333, -0.2339, -0.0536, -0.0589, -0.0554],
+ [-0.3882, -0.2339, 0.9833, -0.0278, -0.0896, -0.0251],
+ [-0.0291, -0.0536, -0.0278, 0.9833, -0.4878, -0.6675],
+ [-0.0231, -0.0589, -0.0896, -0.4878, 0.9833, -0.2078],
+ [-0.0261, -0.0554, -0.0251, -0.6675, -0.2078, 0.9833]])
+ assert_almost_equal(nx.directed_laplacian_matrix(G, alpha=0.9), GL, decimal=3)
+
+ # Make the graph strongly connected, so we can use a random and lazy walk
+ G.add_edges_from((((2,5), (6,1))))
+ GL = numpy.array([[ 1. , -0.3062, -0.4714, 0. , 0. , -0.3227],
+ [-0.3062, 1. , -0.1443, 0. , -0.3162, 0. ],
+ [-0.4714, -0.1443, 1. , 0. , -0.0913, 0. ],
+ [ 0. , 0. , 0. , 1. , -0.5 , -0.5 ],
+ [ 0. , -0.3162, -0.0913, -0.5 , 1. , -0.25 ],
+ [-0.3227, 0. , 0. , -0.5 , -0.25 , 1. ]])
+ assert_almost_equal(nx.directed_laplacian_matrix(G, walk_type='random'), GL, decimal=3)
+
+ GL = numpy.array([[ 0.5 , -0.1531, -0.2357, 0. , 0. , -0.1614],
+ [-0.1531, 0.5 , -0.0722, 0. , -0.1581, 0. ],
+ [-0.2357, -0.0722, 0.5 , 0. , -0.0456, 0. ],
+ [ 0. , 0. , 0. , 0.5 , -0.25 , -0.25 ],
+ [ 0. , -0.1581, -0.0456, -0.25 , 0.5 , -0.125 ],
+ [-0.1614, 0. , 0. , -0.25 , -0.125 , 0.5 ]])
+ assert_almost_equal(nx.directed_laplacian_matrix(G, walk_type='lazy'), GL, decimal=3)
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/linalg/tests/test_spectrum.py b/lib/python2.7/site-packages/setoolsgui/networkx/linalg/tests/test_spectrum.py
new file mode 100644
index 0000000..a2961cb
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/linalg/tests/test_spectrum.py
@@ -0,0 +1,44 @@
+from nose import SkipTest
+
+import networkx as nx
+from networkx.generators.degree_seq import havel_hakimi_graph
+
+class TestSpectrum(object):
+ numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
+ @classmethod
+ def setupClass(cls):
+ global numpy
+ global assert_equal
+ global assert_almost_equal
+ try:
+ import numpy
+ from numpy.testing import assert_equal,assert_almost_equal
+ except ImportError:
+ raise SkipTest('NumPy not available.')
+
+ def setUp(self):
+ deg=[3,2,2,1,0]
+ self.G=havel_hakimi_graph(deg)
+ self.P=nx.path_graph(3)
+ self.WG=nx.Graph( (u,v,{'weight':0.5,'other':0.3})
+ for (u,v) in self.G.edges_iter() )
+ self.WG.add_node(4)
+
+ def test_laplacian_spectrum(self):
+ "Laplacian eigenvalues"
+ evals=numpy.array([0, 0, 1, 3, 4])
+ e=sorted(nx.laplacian_spectrum(self.G))
+ assert_almost_equal(e,evals)
+ e=sorted(nx.laplacian_spectrum(self.WG,weight=None))
+ assert_almost_equal(e,evals)
+ e=sorted(nx.laplacian_spectrum(self.WG))
+ assert_almost_equal(e,0.5*evals)
+ e=sorted(nx.laplacian_spectrum(self.WG,weight='other'))
+ assert_almost_equal(e,0.3*evals)
+
+ def test_adjacency_spectrum(self):
+ "Adjacency eigenvalues"
+ evals=numpy.array([-numpy.sqrt(2), 0, numpy.sqrt(2)])
+ e=sorted(nx.adjacency_spectrum(self.P))
+ assert_almost_equal(e,evals)
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/__init__.py b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/__init__.py
new file mode 100644
index 0000000..c806cd0
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/__init__.py
@@ -0,0 +1,16 @@
+"""
+A package for reading and writing graphs in various formats.
+
+"""
+from networkx.readwrite.adjlist import *
+from networkx.readwrite.multiline_adjlist import *
+from networkx.readwrite.edgelist import *
+from networkx.readwrite.gpickle import *
+from networkx.readwrite.pajek import *
+from networkx.readwrite.leda import *
+from networkx.readwrite.sparsegraph6 import *
+from networkx.readwrite.nx_yaml import *
+from networkx.readwrite.gml import *
+from networkx.readwrite.graphml import *
+from networkx.readwrite.gexf import *
+from networkx.readwrite.nx_shp import *
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/adjlist.py b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/adjlist.py
new file mode 100644
index 0000000..57f1e24
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/adjlist.py
@@ -0,0 +1,314 @@
+# -*- coding: utf-8 -*-
+"""
+**************
+Adjacency List
+**************
+Read and write NetworkX graphs as adjacency lists.
+
+Adjacency list format is useful for graphs without data associated
+with nodes or edges and for nodes that can be meaningfully represented
+as strings.
+
+Format
+------
+The adjacency list format consists of lines with node labels. The
+first label in a line is the source node. Further labels in the line
+are considered target nodes and are added to the graph along with an edge
+between the source node and target node.
+
+The graph with edges a-b, a-c, d-e can be represented as the following
+adjacency list (anything following the # in a line is a comment)::
+
+ a b c # source target target
+ d e
+"""
+__author__ = '\n'.join(['Aric Hagberg <hagberg@lanl.gov>',
+ 'Dan Schult <dschult@colgate.edu>',
+ 'Loïc Séguin-C. <loicseguin@gmail.com>'])
+# Copyright (C) 2004-2013 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+
+__all__ = ['generate_adjlist',
+ 'write_adjlist',
+ 'parse_adjlist',
+ 'read_adjlist']
+
+from networkx.utils import make_str, open_file
+import networkx as nx
+
+
+def generate_adjlist(G, delimiter = ' '):
+    """Generate lines of the graph G in adjacency list format (one per node).
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ delimiter : string, optional
+ Separator for node labels
+
+ Returns
+ -------
+ lines : string
+ Lines of data in adjlist format.
+
+ Examples
+ --------
+ >>> G = nx.lollipop_graph(4, 3)
+ >>> for line in nx.generate_adjlist(G):
+ ... print(line)
+ 0 1 2 3
+ 1 2 3
+ 2 3
+ 3 4
+ 4 5
+ 5 6
+ 6
+
+ See Also
+ --------
+ write_adjlist, read_adjlist
+
+ """
+ directed=G.is_directed()
+ seen=set()
+ for s,nbrs in G.adjacency_iter():
+ line = make_str(s)+delimiter
+ for t,data in nbrs.items():
+ if not directed and t in seen:
+ continue
+ if G.is_multigraph():
+ for d in data.values():
+ line += make_str(t) + delimiter
+ else:
+ line += make_str(t) + delimiter
+ if not directed:
+ seen.add(s)
+ yield line[:-len(delimiter)]
+
+@open_file(1,mode='wb')
+def write_adjlist(G, path, comments="#", delimiter=' ', encoding = 'utf-8'):
+ """Write graph G in single-line adjacency-list format to path.
+
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ path : string or file
+ Filename or file handle for data output.
+ Filenames ending in .gz or .bz2 will be compressed.
+
+ comments : string, optional
+ Marker for comment lines
+
+ delimiter : string, optional
+ Separator for node labels
+
+ encoding : string, optional
+ Text encoding.
+
+ Examples
+ --------
+ >>> G=nx.path_graph(4)
+ >>> nx.write_adjlist(G,"test.adjlist")
+
+ The path can be a filehandle or a string with the name of the file. If a
+ filehandle is provided, it has to be opened in 'wb' mode.
+
+ >>> fh=open("test.adjlist",'wb')
+ >>> nx.write_adjlist(G, fh)
+
+ Notes
+ -----
+ This format does not store graph, node, or edge data.
+
+ See Also
+ --------
+ read_adjlist, generate_adjlist
+ """
+ import sys
+ import time
+ pargs=comments + " ".join(sys.argv) + '\n'
+ header = (pargs
+ + comments + " GMT %s\n" % (time.asctime(time.gmtime()))
+ + comments + " %s\n" % (G.name))
+ path.write(header.encode(encoding))
+
+ for line in generate_adjlist(G, delimiter):
+ line+='\n'
+ path.write(line.encode(encoding))
+
+
+def parse_adjlist(lines, comments = '#', delimiter = None,
+ create_using = None, nodetype = None):
+ """Parse lines of a graph adjacency list representation.
+
+ Parameters
+ ----------
+ lines : list or iterator of strings
+ Input data in adjlist format
+
+ create_using: NetworkX graph container
+ Use given NetworkX graph for holding nodes or edges.
+
+ nodetype : Python type, optional
+ Convert nodes to this type.
+
+ comments : string, optional
+ Marker for comment lines
+
+ delimiter : string, optional
+ Separator for node labels. The default is whitespace.
+
+ Returns
+ -------
+ G: NetworkX graph
+ The graph corresponding to the lines in adjacency list format.
+
+ Examples
+ --------
+ >>> lines = ['1 2 5',
+ ... '2 3 4',
+ ... '3 5',
+ ... '4',
+ ... '5']
+ >>> G = nx.parse_adjlist(lines, nodetype = int)
+ >>> G.nodes()
+ [1, 2, 3, 4, 5]
+ >>> G.edges()
+ [(1, 2), (1, 5), (2, 3), (2, 4), (3, 5)]
+
+ See Also
+ --------
+ read_adjlist
+
+ """
+ if create_using is None:
+ G=nx.Graph()
+ else:
+ try:
+ G=create_using
+ G.clear()
+ except:
+ raise TypeError("Input graph is not a NetworkX graph type")
+
+ for line in lines:
+ p=line.find(comments)
+ if p>=0:
+ line = line[:p]
+ if not len(line):
+ continue
+ vlist=line.strip().split(delimiter)
+ u=vlist.pop(0)
+ # convert types
+ if nodetype is not None:
+ try:
+ u=nodetype(u)
+ except:
+ raise TypeError("Failed to convert node (%s) to type %s"\
+ %(u,nodetype))
+ G.add_node(u)
+ if nodetype is not None:
+ try:
+ vlist=map(nodetype,vlist)
+ except:
+ raise TypeError("Failed to convert nodes (%s) to type %s"\
+ %(','.join(vlist),nodetype))
+ G.add_edges_from([(u, v) for v in vlist])
+ return G
+
+@open_file(0,mode='rb')
+def read_adjlist(path, comments="#", delimiter=None, create_using=None,
+ nodetype=None, encoding = 'utf-8'):
+ """Read graph in adjacency list format from path.
+
+ Parameters
+ ----------
+ path : string or file
+ Filename or file handle to read.
+ Filenames ending in .gz or .bz2 will be uncompressed.
+
+ create_using: NetworkX graph container
+ Use given NetworkX graph for holding nodes or edges.
+
+ nodetype : Python type, optional
+ Convert nodes to this type.
+
+ comments : string, optional
+ Marker for comment lines
+
+ delimiter : string, optional
+ Separator for node labels. The default is whitespace.
+
+ Returns
+ -------
+ G: NetworkX graph
+ The graph corresponding to the lines in adjacency list format.
+
+ Examples
+ --------
+ >>> G=nx.path_graph(4)
+ >>> nx.write_adjlist(G, "test.adjlist")
+ >>> G=nx.read_adjlist("test.adjlist")
+
+ The path can be a filehandle or a string with the name of the file. If a
+ filehandle is provided, it has to be opened in 'rb' mode.
+
+ >>> fh=open("test.adjlist", 'rb')
+ >>> G=nx.read_adjlist(fh)
+
+ Filenames ending in .gz or .bz2 will be compressed.
+
+ >>> nx.write_adjlist(G,"test.adjlist.gz")
+ >>> G=nx.read_adjlist("test.adjlist.gz")
+
+ The optional nodetype is a function to convert node strings to nodetype.
+
+ For example
+
+ >>> G=nx.read_adjlist("test.adjlist", nodetype=int)
+
+ will attempt to convert all nodes to integer type.
+
+ Since nodes must be hashable, the function nodetype must return hashable
+ types (e.g. int, float, str, frozenset - or tuples of those, etc.)
+
+ The optional create_using parameter is a NetworkX graph container.
+ The default is Graph(), an undirected graph. To read the data as
+ a directed graph use
+
+ >>> G=nx.read_adjlist("test.adjlist", create_using=nx.DiGraph())
+
+ Notes
+ -----
+ This format does not store graph or node data.
+
+ See Also
+ --------
+ write_adjlist
+ """
+ lines = (line.decode(encoding) for line in path)
+ return parse_adjlist(lines,
+ comments = comments,
+ delimiter = delimiter,
+ create_using = create_using,
+ nodetype = nodetype)
+
+# fixture for nose tests
+def teardown_module(module):
+ import os
+ os.unlink('test.adjlist')
+ os.unlink('test.adjlist.gz')
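As the read_adjlist notes above say, this format stores no graph, node, or edge data; structure only. A minimal round-trip sketch illustrating that (file name and values are illustrative, assuming networkx is importable as nx):

    import networkx as nx

    G = nx.Graph()
    G.add_edge(1, 2, weight=7)             # edge data will be dropped
    nx.write_adjlist(G, "noattr.adjlist")
    H = nx.read_adjlist("noattr.adjlist", nodetype=int)
    assert sorted(H.edges()) == [(1, 2)]
    assert H[1][2] == {}                   # adjacency only, no attributes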
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/edgelist.py b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/edgelist.py
new file mode 100644
index 0000000..4a1aea9
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/edgelist.py
@@ -0,0 +1,464 @@
+"""
+**********
+Edge Lists
+**********
+Read and write NetworkX graphs as edge lists.
+
+The edge list format is useful for graphs with nodes
+that can be meaningfully represented as strings. With the edgelist
+format simple edge data can be stored, but node or graph data cannot.
+There is no way of representing isolated nodes unless the node has a
+self-loop edge.
+
+Format
+------
+You can read or write three formats of edge lists with these functions.
+
+Node pairs with no data::
+
+ 1 2
+
+Python dictionary as data::
+
+ 1 2 {'weight':7, 'color':'green'}
+
+Arbitrary data::
+
+ 1 2 7 green
+"""
+__author__ = """Aric Hagberg (hagberg@lanl.gov)\nDan Schult (dschult@colgate.edu)"""
+# Copyright (C) 2004-2011 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+
+__all__ = ['generate_edgelist',
+ 'write_edgelist',
+ 'parse_edgelist',
+ 'read_edgelist',
+ 'read_weighted_edgelist',
+ 'write_weighted_edgelist']
+
+from networkx.utils import open_file, make_str
+import networkx as nx
+
+def generate_edgelist(G, delimiter=' ', data=True):
+ """Generate a single line of the graph G in edge list format.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ delimiter : string, optional
+ Separator for node labels
+
+ data : bool or list of keys
+ If False generate no edge data. If True use a dictionary
+ representation of edge data. If a list of keys use a list of data
+ values corresponding to the keys.
+
+ Returns
+ -------
+ lines : string
+       Lines of data in edgelist format.
+
+ Examples
+ --------
+ >>> G = nx.lollipop_graph(4, 3)
+ >>> G[1][2]['weight'] = 3
+ >>> G[3][4]['capacity'] = 12
+ >>> for line in nx.generate_edgelist(G, data=False):
+ ... print(line)
+ 0 1
+ 0 2
+ 0 3
+ 1 2
+ 1 3
+ 2 3
+ 3 4
+ 4 5
+ 5 6
+
+ >>> for line in nx.generate_edgelist(G):
+ ... print(line)
+ 0 1 {}
+ 0 2 {}
+ 0 3 {}
+ 1 2 {'weight': 3}
+ 1 3 {}
+ 2 3 {}
+ 3 4 {'capacity': 12}
+ 4 5 {}
+ 5 6 {}
+
+ >>> for line in nx.generate_edgelist(G,data=['weight']):
+ ... print(line)
+ 0 1
+ 0 2
+ 0 3
+ 1 2 3
+ 1 3
+ 2 3
+ 3 4
+ 4 5
+ 5 6
+
+ See Also
+ --------
+    write_edgelist, read_edgelist
+ """
+ if data is True or data is False:
+ for e in G.edges(data=data):
+ yield delimiter.join(map(make_str,e))
+ else:
+ for u,v,d in G.edges(data=True):
+ e=[u,v]
+ try:
+ e.extend(d[k] for k in data)
+ except KeyError:
+ pass # missing data for this edge, should warn?
+ yield delimiter.join(map(make_str,e))
+
+@open_file(1,mode='wb')
+def write_edgelist(G, path, comments="#", delimiter=' ', data=True,
+ encoding = 'utf-8'):
+ """Write graph as a list of edges.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph
+ path : file or string
+ File or filename to write. If a file is provided, it must be
+ opened in 'wb' mode. Filenames ending in .gz or .bz2 will be compressed.
+ comments : string, optional
+ The character used to indicate the start of a comment
+ delimiter : string, optional
+ The string used to separate values. The default is whitespace.
+ data : bool or list, optional
+ If False write no edge data.
+       If True write a string representation of the edge data dictionary.
+ If a list (or other iterable) is provided, write the keys specified
+ in the list.
+ encoding: string, optional
+ Specify which encoding to use when writing file.
+
+ Examples
+ --------
+ >>> G=nx.path_graph(4)
+ >>> nx.write_edgelist(G, "test.edgelist")
+ >>> G=nx.path_graph(4)
+ >>> fh=open("test.edgelist",'wb')
+ >>> nx.write_edgelist(G, fh)
+ >>> nx.write_edgelist(G, "test.edgelist.gz")
+ >>> nx.write_edgelist(G, "test.edgelist.gz", data=False)
+
+ >>> G=nx.Graph()
+ >>> G.add_edge(1,2,weight=7,color='red')
+ >>> nx.write_edgelist(G,'test.edgelist',data=False)
+ >>> nx.write_edgelist(G,'test.edgelist',data=['color'])
+ >>> nx.write_edgelist(G,'test.edgelist',data=['color','weight'])
+
+ See Also
+ --------
+    read_edgelist()
+ write_weighted_edgelist()
+ """
+
+ for line in generate_edgelist(G, delimiter, data):
+ line+='\n'
+ path.write(line.encode(encoding))
+
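The data parameter above controls what generate_edgelist emits and hence what write_edgelist stores per edge. A hedged sketch of the three modes (file name illustrative; the dict line's key order may vary):

    import networkx as nx

    G = nx.Graph()
    G.add_edge('a', 'b', weight=2, color='blue')
    nx.write_edgelist(G, 'demo.edgelist', data=False)       # a b
    nx.write_edgelist(G, 'demo.edgelist', data=['weight'])  # a b 2
    nx.write_edgelist(G, 'demo.edgelist', data=True)        # a b {'color': 'blue', 'weight': 2}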
+def parse_edgelist(lines, comments='#', delimiter=None,
+ create_using=None, nodetype=None, data=True):
+ """Parse lines of an edge list representation of a graph.
+
+
+ Returns
+ -------
+ G: NetworkX Graph
+ The graph corresponding to lines
+ data : bool or list of (label,type) tuples
+ If False generate no edge data or if True use a dictionary
+ representation of edge data or a list tuples specifying dictionary
+ key names and types for edge data.
+ create_using: NetworkX graph container, optional
+ Use given NetworkX graph for holding nodes or edges.
+ nodetype : Python type, optional
+ Convert nodes to this type.
+ comments : string, optional
+ Marker for comment lines
+ delimiter : string, optional
+ Separator for node labels
+ create_using: NetworkX graph container
+ Use given NetworkX graph for holding nodes or edges.
+
+ Examples
+ --------
+ Edgelist with no data:
+
+ >>> lines = ["1 2",
+ ... "2 3",
+ ... "3 4"]
+ >>> G = nx.parse_edgelist(lines, nodetype = int)
+ >>> G.nodes()
+ [1, 2, 3, 4]
+ >>> G.edges()
+ [(1, 2), (2, 3), (3, 4)]
+
+ Edgelist with data in Python dictionary representation:
+
+ >>> lines = ["1 2 {'weight':3}",
+ ... "2 3 {'weight':27}",
+ ... "3 4 {'weight':3.0}"]
+ >>> G = nx.parse_edgelist(lines, nodetype = int)
+ >>> G.nodes()
+ [1, 2, 3, 4]
+ >>> G.edges(data = True)
+ [(1, 2, {'weight': 3}), (2, 3, {'weight': 27}), (3, 4, {'weight': 3.0})]
+
+ Edgelist with data in a list:
+
+ >>> lines = ["1 2 3",
+ ... "2 3 27",
+ ... "3 4 3.0"]
+ >>> G = nx.parse_edgelist(lines, nodetype = int, data=(('weight',float),))
+ >>> G.nodes()
+ [1, 2, 3, 4]
+ >>> G.edges(data = True)
+ [(1, 2, {'weight': 3.0}), (2, 3, {'weight': 27.0}), (3, 4, {'weight': 3.0})]
+
+ See Also
+ --------
+ read_weighted_edgelist
+
+ """
+ from ast import literal_eval
+ if create_using is None:
+ G=nx.Graph()
+ else:
+ try:
+ G=create_using
+ G.clear()
+ except:
+ raise TypeError("create_using input is not a NetworkX graph type")
+
+ for line in lines:
+ p=line.find(comments)
+ if p>=0:
+ line = line[:p]
+ if not len(line):
+ continue
+ # split line, should have 2 or more
+ s=line.strip().split(delimiter)
+ if len(s)<2:
+ continue
+ u=s.pop(0)
+ v=s.pop(0)
+ d=s
+ if nodetype is not None:
+ try:
+ u=nodetype(u)
+ v=nodetype(v)
+ except:
+ raise TypeError("Failed to convert nodes %s,%s to type %s."
+ %(u,v,nodetype))
+
+ if len(d)==0 or data is False:
+ # no data or data type specified
+ edgedata={}
+ elif data is True:
+ # no edge types specified
+ try: # try to evaluate as dictionary
+ edgedata=dict(literal_eval(' '.join(d)))
+ except:
+ raise TypeError(
+ "Failed to convert edge data (%s) to dictionary."%(d))
+ else:
+ # convert edge data to dictionary with specified keys and type
+ if len(d)!=len(data):
+ raise IndexError(
+ "Edge data %s and data_keys %s are not the same length"%
+ (d, data))
+ edgedata={}
+ for (edge_key,edge_type),edge_value in zip(data,d):
+ try:
+ edge_value=edge_type(edge_value)
+ except:
+ raise TypeError(
+ "Failed to convert %s data %s to type %s."
+ %(edge_key, edge_value, edge_type))
+ edgedata.update({edge_key:edge_value})
+ G.add_edge(u, v, attr_dict=edgedata)
+ return G
+
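When data is a sequence of (label, type) tuples, parse_edgelist zips them against the trailing fields of each line, as the loop above shows. A minimal sketch (values illustrative):

    import networkx as nx

    lines = ['1 2 2.5 red', '2 3 1.0 blue']
    G = nx.parse_edgelist(lines, nodetype=int,
                          data=(('weight', float), ('color', str)))
    assert G[1][2] == {'weight': 2.5, 'color': 'red'}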
+@open_file(0,mode='rb')
+def read_edgelist(path, comments="#", delimiter=None, create_using=None,
+ nodetype=None, data=True, edgetype=None, encoding='utf-8'):
+ """Read a graph from a list of edges.
+
+ Parameters
+ ----------
+ path : file or string
+       File or filename to read. If a file is provided, it must be
+ opened in 'rb' mode.
+ Filenames ending in .gz or .bz2 will be uncompressed.
+ comments : string, optional
+ The character used to indicate the start of a comment.
+ delimiter : string, optional
+ The string used to separate values. The default is whitespace.
+ create_using : Graph container, optional,
+ Use specified container to build graph. The default is networkx.Graph,
+ an undirected graph.
+ nodetype : int, float, str, Python type, optional
+ Convert node data from strings to specified type
+ data : bool or list of (label,type) tuples
+ Tuples specifying dictionary key names and types for edge data
+ edgetype : int, float, str, Python type, optional OBSOLETE
+ Convert edge data from strings to specified type and use as 'weight'
+ encoding: string, optional
+ Specify which encoding to use when reading file.
+
+ Returns
+ -------
+ G : graph
+ A networkx Graph or other type specified with create_using
+
+ Examples
+ --------
+ >>> nx.write_edgelist(nx.path_graph(4), "test.edgelist")
+ >>> G=nx.read_edgelist("test.edgelist")
+
+ >>> fh=open("test.edgelist", 'rb')
+ >>> G=nx.read_edgelist(fh)
+ >>> fh.close()
+
+ >>> G=nx.read_edgelist("test.edgelist", nodetype=int)
+ >>> G=nx.read_edgelist("test.edgelist",create_using=nx.DiGraph())
+
+ Edgelist with data in a list:
+
+ >>> textline = '1 2 3'
+ >>> fh = open('test.edgelist','w')
+ >>> d = fh.write(textline)
+ >>> fh.close()
+ >>> G = nx.read_edgelist('test.edgelist', nodetype=int, data=(('weight',float),))
+ >>> G.nodes()
+ [1, 2]
+ >>> G.edges(data = True)
+ [(1, 2, {'weight': 3.0})]
+
+ See parse_edgelist() for more examples of formatting.
+
+ See Also
+ --------
+ parse_edgelist
+
+ Notes
+ -----
+ Since nodes must be hashable, the function nodetype must return hashable
+ types (e.g. int, float, str, frozenset - or tuples of those, etc.)
+ """
+ lines = (line.decode(encoding) for line in path)
+ return parse_edgelist(lines,comments=comments, delimiter=delimiter,
+ create_using=create_using, nodetype=nodetype,
+ data=data)
+
+
+def write_weighted_edgelist(G, path, comments="#",
+ delimiter=' ', encoding='utf-8'):
+ """Write graph G as a list of edges with numeric weights.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph
+ path : file or string
+ File or filename to write. If a file is provided, it must be
+ opened in 'wb' mode.
+ Filenames ending in .gz or .bz2 will be compressed.
+ comments : string, optional
+ The character used to indicate the start of a comment
+ delimiter : string, optional
+ The string used to separate values. The default is whitespace.
+ encoding: string, optional
+ Specify which encoding to use when writing file.
+
+ Examples
+ --------
+ >>> G=nx.Graph()
+ >>> G.add_edge(1,2,weight=7)
+ >>> nx.write_weighted_edgelist(G, 'test.weighted.edgelist')
+
+ See Also
+ --------
+ read_edgelist()
+ write_edgelist()
+    read_weighted_edgelist()
+
+ """
+ write_edgelist(G,path, comments=comments, delimiter=delimiter,
+ data=('weight',), encoding = encoding)
+
+def read_weighted_edgelist(path, comments="#", delimiter=None,
+ create_using=None, nodetype=None, encoding='utf-8'):
+
+ """Read a graph as list of edges with numeric weights.
+
+ Parameters
+ ----------
+ path : file or string
+       File or filename to read. If a file is provided, it must be
+ opened in 'rb' mode.
+ Filenames ending in .gz or .bz2 will be uncompressed.
+ comments : string, optional
+ The character used to indicate the start of a comment.
+ delimiter : string, optional
+ The string used to separate values. The default is whitespace.
+ create_using : Graph container, optional,
+ Use specified container to build graph. The default is networkx.Graph,
+ an undirected graph.
+ nodetype : int, float, str, Python type, optional
+ Convert node data from strings to specified type
+ encoding: string, optional
+ Specify which encoding to use when reading file.
+
+ Returns
+ -------
+ G : graph
+ A networkx Graph or other type specified with create_using
+
+ Notes
+ -----
+ Since nodes must be hashable, the function nodetype must return hashable
+ types (e.g. int, float, str, frozenset - or tuples of those, etc.)
+
+ Example edgelist file format.
+
+ With numeric edge data::
+
+ # read with
+ # >>> G=nx.read_weighted_edgelist(fh)
+ # source target data
+ a b 1
+ a c 3.14159
+ d e 42
+ """
+ return read_edgelist(path,
+ comments=comments,
+ delimiter=delimiter,
+ create_using=create_using,
+ nodetype=nodetype,
+ data=(('weight',float),),
+ encoding = encoding
+ )
+
+
+# fixture for nose tests
+def teardown_module(module):
+ import os
+ os.unlink('test.edgelist')
+ os.unlink('test.edgelist.gz')
+ os.unlink('test.weighted.edgelist')
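The weighted helpers above are thin wrappers around write_edgelist/read_edgelist with data fixed to the 'weight' key. A round-trip sketch (file name illustrative):

    import networkx as nx

    G = nx.Graph()
    G.add_edge('a', 'b', weight=3.14159)
    nx.write_weighted_edgelist(G, 'w.edgelist')   # emits: a b 3.14159
    H = nx.read_weighted_edgelist('w.edgelist')
    assert H['a']['b']['weight'] == 3.14159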
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/gexf.py b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/gexf.py
new file mode 100644
index 0000000..88503ac
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/gexf.py
@@ -0,0 +1,926 @@
+"""
+****
+GEXF
+****
+Read and write graphs in GEXF format.
+
+GEXF (Graph Exchange XML Format) is a language for describing complex
+network structures, their associated data and dynamics.
+
+This implementation does not support mixed graphs (directed and
+undirected edges together).
+
+Format
+------
+GEXF is an XML format. See http://gexf.net/format/schema.html for the
+specification and http://gexf.net/format/basic.html for examples.
+"""
+# Copyright (C) 2013 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+# Based on GraphML NetworkX GraphML reader
+__author__ = """\n""".join(['Aric Hagberg <aric.hagberg@gmail.com>'])
+__all__ = ['write_gexf', 'read_gexf', 'relabel_gexf_graph', 'generate_gexf']
+import itertools
+import networkx as nx
+from networkx.utils import open_file, make_str
+try:
+ from xml.etree.cElementTree import Element, ElementTree, tostring
+except ImportError:
+ try:
+ from xml.etree.ElementTree import Element, ElementTree, tostring
+ except ImportError:
+ pass
+
+@open_file(1,mode='wb')
+def write_gexf(G, path, encoding='utf-8',prettyprint=True,version='1.1draft'):
+ """Write G in GEXF format to path.
+
+ "GEXF (Graph Exchange XML Format) is a language for describing
+ complex networks structures, their associated data and dynamics" [1]_.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph
+ path : file or string
+ File or file name to write.
+ File names ending in .gz or .bz2 will be compressed.
+ encoding : string (optional)
+ Encoding for text data.
+ prettyprint : bool (optional)
+ If True use line breaks and indenting in output XML.
+
+ Examples
+ --------
+ >>> G=nx.path_graph(4)
+ >>> nx.write_gexf(G, "test.gexf")
+
+ Notes
+ -----
+ This implementation does not support mixed graphs (directed and undirected
+ edges together).
+
+ The node id attribute is set to be the string of the node label.
+    If you want to specify an id, set it as node data, e.g.
+    node['a']['id']=1 to set the id of node 'a' to 1.
+
+ References
+ ----------
+ .. [1] GEXF graph format, http://gexf.net/format/
+ """
+ writer = GEXFWriter(encoding=encoding,prettyprint=prettyprint,
+ version=version)
+ writer.add_graph(G)
+ writer.write(path)
+
+def generate_gexf(G, encoding='utf-8',prettyprint=True,version='1.1draft'):
+ """Generate lines of GEXF format representation of G"
+
+ "GEXF (Graph Exchange XML Format) is a language for describing
+ complex networks structures, their associated data and dynamics" [1]_.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph
+ encoding : string (optional)
+ Encoding for text data.
+ prettyprint : bool (optional)
+ If True use line breaks and indenting in output XML.
+
+ Examples
+ --------
+ >>> G=nx.path_graph(4)
+ >>> linefeed=chr(10) # linefeed=\n
+ >>> s=linefeed.join(nx.generate_gexf(G)) # doctest: +SKIP
+ >>> for line in nx.generate_gexf(G): # doctest: +SKIP
+ ... print line
+
+ Notes
+ -----
+ This implementation does not support mixed graphs (directed and undirected
+ edges together).
+
+ The node id attribute is set to be the string of the node label.
+    If you want to specify an id, set it as node data, e.g.
+    node['a']['id']=1 to set the id of node 'a' to 1.
+
+ References
+ ----------
+ .. [1] GEXF graph format, http://gexf.net/format/
+ """
+ writer = GEXFWriter(encoding=encoding,prettyprint=prettyprint,
+ version=version)
+ writer.add_graph(G)
+ for line in str(writer).splitlines():
+ yield line
+
+@open_file(0,mode='rb')
+def read_gexf(path,node_type=None,relabel=False,version='1.1draft'):
+ """Read graph in GEXF format from path.
+
+ "GEXF (Graph Exchange XML Format) is a language for describing
+ complex networks structures, their associated data and dynamics" [1]_.
+
+ Parameters
+ ----------
+ path : file or string
+       File or file name to read.
+       File names ending in .gz or .bz2 will be decompressed.
+
+ node_type: Python type (default: None)
+ Convert node ids to this type if not None.
+
+ relabel : bool (default: False)
+ If True relabel the nodes to use the GEXF node "label" attribute
+ instead of the node "id" attribute as the NetworkX node label.
+
+ Returns
+ -------
+ graph: NetworkX graph
+ If no parallel edges are found a Graph or DiGraph is returned.
+ Otherwise a MultiGraph or MultiDiGraph is returned.
+
+ Notes
+ -----
+ This implementation does not support mixed graphs (directed and undirected
+ edges together).
+
+ References
+ ----------
+ .. [1] GEXF graph format, http://gexf.net/format/
+ """
+ reader = GEXFReader(node_type=node_type,version=version)
+ if relabel:
+ G=relabel_gexf_graph(reader(path))
+ else:
+ G=reader(path)
+ return G
+
+class GEXF(object):
+# global register_namespace
+
+ versions={}
+ d={'NS_GEXF':"http://www.gexf.net/1.1draft",
+ 'NS_VIZ':"http://www.gexf.net/1.1draft/viz",
+ 'NS_XSI':"http://www.w3.org/2001/XMLSchema-instance",
+ 'SCHEMALOCATION':' '.join(['http://www.gexf.net/1.1draft',
+ 'http://www.gexf.net/1.1draft/gexf.xsd'
+ ]),
+ 'VERSION':'1.1'
+ }
+ versions['1.1draft']=d
+ d={'NS_GEXF':"http://www.gexf.net/1.2draft",
+ 'NS_VIZ':"http://www.gexf.net/1.2draft/viz",
+ 'NS_XSI':"http://www.w3.org/2001/XMLSchema-instance",
+ 'SCHEMALOCATION':' '.join(['http://www.gexf.net/1.2draft',
+ 'http://www.gexf.net/1.2draft/gexf.xsd'
+ ]),
+ 'VERSION':'1.2'
+ }
+ versions['1.2draft']=d
+
+
+ types=[(int,"integer"),
+ (float,"float"),
+ (float,"double"),
+ (bool,"boolean"),
+ (list,"string"),
+ (dict,"string"),
+ ]
+
+ try: # Python 3.x
+ blurb = chr(1245) # just to trigger the exception
+ types.extend([
+ (str,"liststring"),
+ (str,"anyURI"),
+ (str,"string")])
+ except ValueError: # Python 2.6+
+ types.extend([
+ (str,"liststring"),
+ (str,"anyURI"),
+ (str,"string"),
+ (unicode,"liststring"),
+ (unicode,"anyURI"),
+ (unicode,"string")])
+
+ xml_type = dict(types)
+ python_type = dict(reversed(a) for a in types)
+ convert_bool={'true':True,'false':False}
+
+# try:
+# register_namespace = ET.register_namespace
+# except AttributeError:
+# def register_namespace(prefix, uri):
+# ET._namespace_map[uri] = prefix
+
+
+ def set_version(self,version):
+ d=self.versions.get(version)
+ if d is None:
+ raise nx.NetworkXError('Unknown GEXF version %s'%version)
+ self.NS_GEXF = d['NS_GEXF']
+ self.NS_VIZ = d['NS_VIZ']
+ self.NS_XSI = d['NS_XSI']
+        self.SCHEMALOCATION = d['SCHEMALOCATION']
+ self.VERSION=d['VERSION']
+ self.version=version
+
+# register_namespace('viz', d['NS_VIZ'])
+
+
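set_version above resolves a version string through the versions table and copies the namespace constants onto the instance. A quick sketch of the effect (the import path is the upstream one and is an assumption here; printed values follow the 1.2draft entry above):

    from networkx.readwrite.gexf import GEXFWriter

    w = GEXFWriter(version='1.2draft')
    print w.NS_GEXF    # http://www.gexf.net/1.2draft
    print w.VERSION    # 1.2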
+class GEXFWriter(GEXF):
+ # class for writing GEXF format files
+ # use write_gexf() function
+ def __init__(self, graph=None, encoding="utf-8",
+ mode='static',prettyprint=True,
+ version='1.1draft'):
+ try:
+ import xml.etree.ElementTree
+ except ImportError:
+ raise ImportError('GEXF writer requires '
+ 'xml.elementtree.ElementTree')
+ self.prettyprint=prettyprint
+ self.mode=mode
+ self.encoding = encoding
+ self.set_version(version)
+ self.xml = Element("gexf",
+ {'xmlns':self.NS_GEXF,
+ 'xmlns:xsi':self.NS_XSI,
+ 'xmlns:viz':self.NS_VIZ,
+ 'xsi:schemaLocation':self.SCHEMALOCATION,
+ 'version':self.VERSION})
+
+ # counters for edge and attribute identifiers
+ self.edge_id=itertools.count()
+ self.attr_id=itertools.count()
+ # default attributes are stored in dictionaries
+ self.attr={}
+ self.attr['node']={}
+ self.attr['edge']={}
+ self.attr['node']['dynamic']={}
+ self.attr['node']['static']={}
+ self.attr['edge']['dynamic']={}
+ self.attr['edge']['static']={}
+
+ if graph is not None:
+ self.add_graph(graph)
+
+ def __str__(self):
+ if self.prettyprint:
+ self.indent(self.xml)
+ s=tostring(self.xml).decode(self.encoding)
+ return s
+
+ def add_graph(self, G):
+ # Add a graph element to the XML
+ if G.is_directed():
+ default='directed'
+ else:
+ default='undirected'
+ graph_element = Element("graph",defaultedgetype=default,mode=self.mode)
+ self.graph_element=graph_element
+ self.add_nodes(G,graph_element)
+ self.add_edges(G,graph_element)
+ self.xml.append(graph_element)
+
+
+ def add_nodes(self, G, graph_element):
+ nodes_element = Element('nodes')
+ for node,data in G.nodes_iter(data=True):
+ node_data=data.copy()
+ node_id = make_str(node_data.pop('id', node))
+ kw={'id':node_id}
+ label = make_str(node_data.pop('label', node))
+ kw['label']=label
+ try:
+ pid=node_data.pop('pid')
+ kw['pid'] = make_str(pid)
+ except KeyError:
+ pass
+
+ # add node element with attributes
+ node_element = Element("node", **kw)
+
+ # add node element and attr subelements
+ default=G.graph.get('node_default',{})
+ node_data=self.add_parents(node_element, node_data)
+ if self.version=='1.1':
+ node_data=self.add_slices(node_element, node_data)
+ else:
+ node_data=self.add_spells(node_element, node_data)
+ node_data=self.add_viz(node_element,node_data)
+ node_data=self.add_attributes("node", node_element,
+ node_data, default)
+ nodes_element.append(node_element)
+ graph_element.append(nodes_element)
+
+
+ def add_edges(self, G, graph_element):
+ def edge_key_data(G):
+ # helper function to unify multigraph and graph edge iterator
+ if G.is_multigraph():
+ for u,v,key,data in G.edges_iter(data=True,keys=True):
+ edge_data=data.copy()
+ edge_data.update(key=key)
+ edge_id=edge_data.pop('id',None)
+ if edge_id is None:
+ edge_id=next(self.edge_id)
+ yield u,v,edge_id,edge_data
+ else:
+ for u,v,data in G.edges_iter(data=True):
+ edge_data=data.copy()
+ edge_id=edge_data.pop('id',None)
+ if edge_id is None:
+ edge_id=next(self.edge_id)
+ yield u,v,edge_id,edge_data
+
+ edges_element = Element('edges')
+ for u,v,key,edge_data in edge_key_data(G):
+ kw={'id':make_str(key)}
+ try:
+ edge_weight=edge_data.pop('weight')
+ kw['weight']=make_str(edge_weight)
+ except KeyError:
+ pass
+ try:
+ edge_type=edge_data.pop('type')
+ kw['type']=make_str(edge_type)
+ except KeyError:
+ pass
+ source_id = make_str(G.node[u].get('id', u))
+ target_id = make_str(G.node[v].get('id', v))
+ edge_element = Element("edge",
+ source=source_id,target=target_id,
+ **kw)
+ default=G.graph.get('edge_default',{})
+ edge_data=self.add_viz(edge_element,edge_data)
+ edge_data=self.add_attributes("edge", edge_element,
+ edge_data, default)
+ edges_element.append(edge_element)
+ graph_element.append(edges_element)
+
+
+ def add_attributes(self, node_or_edge, xml_obj, data, default):
+ # Add attrvalues to node or edge
+ attvalues=Element('attvalues')
+ if len(data)==0:
+ return data
+ if 'start' in data or 'end' in data:
+ mode='dynamic'
+ else:
+ mode='static'
+ for k,v in data.items():
+ # rename generic multigraph key to avoid any name conflict
+ if k == 'key':
+ k='networkx_key'
+ attr_id = self.get_attr_id(make_str(k), self.xml_type[type(v)],
+ node_or_edge, default, mode)
+ if type(v)==list:
+ # dynamic data
+ for val,start,end in v:
+ e=Element("attvalue")
+ e.attrib['for']=attr_id
+ e.attrib['value']=make_str(val)
+ if start is not None:
+ e.attrib['start']=make_str(start)
+ if end is not None:
+ e.attrib['end']=make_str(end)
+ attvalues.append(e)
+ else:
+ # static data
+ e=Element("attvalue")
+ e.attrib['for']=attr_id
+ e.attrib['value']=make_str(v)
+ attvalues.append(e)
+ xml_obj.append(attvalues)
+ return data
+
+ def get_attr_id(self, title, attr_type, edge_or_node, default, mode):
+ # find the id of the attribute or generate a new id
+ try:
+ return self.attr[edge_or_node][mode][title]
+ except KeyError:
+ # generate new id
+ new_id=str(next(self.attr_id))
+ self.attr[edge_or_node][mode][title] = new_id
+ attr_kwargs = {"id":new_id, "title":title, "type":attr_type}
+ attribute=Element("attribute",**attr_kwargs)
+ # add subelement for data default value if present
+ default_title=default.get(title)
+ if default_title is not None:
+ default_element=Element("default")
+ default_element.text=make_str(default_title)
+ attribute.append(default_element)
+            # now insert it into the XML
+ attributes_element=None
+ for a in self.graph_element.findall("attributes"):
+ # find existing attributes element by class and mode
+ a_class=a.get('class')
+ a_mode=a.get('mode','static') # default mode is static
+ if a_class==edge_or_node and a_mode==mode:
+ attributes_element=a
+ if attributes_element is None:
+ # create new attributes element
+ attr_kwargs = {"mode":mode,"class":edge_or_node}
+ attributes_element=Element('attributes', **attr_kwargs)
+ self.graph_element.insert(0,attributes_element)
+ attributes_element.append(attribute)
+ return new_id
+
+
+ def add_viz(self,element,node_data):
+ viz=node_data.pop('viz',False)
+ if viz:
+ color=viz.get('color')
+ if color is not None:
+ if self.VERSION=='1.1':
+ e=Element("{%s}color"%self.NS_VIZ,
+ r=str(color.get('r')),
+ g=str(color.get('g')),
+ b=str(color.get('b')),
+ )
+ else:
+ e=Element("{%s}color"%self.NS_VIZ,
+ r=str(color.get('r')),
+ g=str(color.get('g')),
+ b=str(color.get('b')),
+ a=str(color.get('a')),
+ )
+ element.append(e)
+
+ size=viz.get('size')
+ if size is not None:
+ e=Element("{%s}size"%self.NS_VIZ,value=str(size))
+ element.append(e)
+
+ thickness=viz.get('thickness')
+ if thickness is not None:
+ e=Element("{%s}thickness"%self.NS_VIZ,value=str(thickness))
+ element.append(e)
+
+ shape=viz.get('shape')
+ if shape is not None:
+ if shape.startswith('http'):
+ e=Element("{%s}shape"%self.NS_VIZ,
+ value='image',uri=str(shape))
+ else:
+ e=Element("{%s}shape"%self.NS_VIZ,value=str(shape))
+ element.append(e)
+
+ position=viz.get('position')
+ if position is not None:
+ e=Element("{%s}position"%self.NS_VIZ,
+ x=str(position.get('x')),
+ y=str(position.get('y')),
+ z=str(position.get('z')),
+ )
+ element.append(e)
+ return node_data
+
+ def add_parents(self,node_element,node_data):
+ parents=node_data.pop('parents',False)
+ if parents:
+ parents_element=Element('parents')
+ for p in parents:
+ e=Element('parent')
+ e.attrib['for']=str(p)
+ parents_element.append(e)
+ node_element.append(parents_element)
+ return node_data
+
+ def add_slices(self,node_element,node_data):
+ slices=node_data.pop('slices',False)
+ if slices:
+ slices_element=Element('slices')
+ for start,end in slices:
+ e=Element('slice',start=str(start),end=str(end))
+ slices_element.append(e)
+ node_element.append(slices_element)
+ return node_data
+
+
+ def add_spells(self,node_element,node_data):
+ spells=node_data.pop('spells',False)
+ if spells:
+ spells_element=Element('spells')
+ for start,end in spells:
+ e=Element('spell')
+ if start is not None:
+ e.attrib['start']=make_str(start)
+ if end is not None:
+ e.attrib['end']=make_str(end)
+ spells_element.append(e)
+ node_element.append(spells_element)
+ return node_data
+
+
+ def write(self, fh):
+ # Serialize graph G in GEXF to the open fh
+ if self.prettyprint:
+ self.indent(self.xml)
+ document = ElementTree(self.xml)
+ header='<?xml version="1.0" encoding="%s"?>'%self.encoding
+ fh.write(header.encode(self.encoding))
+ document.write(fh, encoding=self.encoding)
+
+
+ def indent(self, elem, level=0):
+ # in-place prettyprint formatter
+ i = "\n" + level*" "
+ if len(elem):
+ if not elem.text or not elem.text.strip():
+ elem.text = i + " "
+ if not elem.tail or not elem.tail.strip():
+ elem.tail = i
+ for elem in elem:
+ self.indent(elem, level+1)
+ if not elem.tail or not elem.tail.strip():
+ elem.tail = i
+ else:
+ if level and (not elem.tail or not elem.tail.strip()):
+ elem.tail = i
+
+
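add_viz above serializes an optional 'viz' dict stored as node data into viz-namespace elements. A sketch of the expected dict shape, taken from the keys the method reads (node name and values illustrative):

    import networkx as nx

    G = nx.Graph()
    G.add_node('a', viz={'color': {'r': 255, 'g': 0, 'b': 0},
                         'size': 2.5,
                         'position': {'x': 0.0, 'y': 1.0, 'z': 0.0}})
    nx.write_gexf(G, 'viz.gexf')  # emits <viz:color>, <viz:size>, <viz:position>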
+class GEXFReader(GEXF):
+ # Class to read GEXF format files
+ # use read_gexf() function
+ def __init__(self, node_type=None,version='1.1draft'):
+ try:
+ import xml.etree.ElementTree
+ except ImportError:
+ raise ImportError('GEXF reader requires '
+ 'xml.elementtree.ElementTree')
+ self.node_type=node_type
+ # assume simple graph and test for multigraph on read
+ self.simple_graph=True
+ self.set_version(version)
+
+ def __call__(self, stream):
+ self.xml = ElementTree(file=stream)
+ g=self.xml.find("{%s}graph" % self.NS_GEXF)
+ if g is not None:
+ return self.make_graph(g)
+ # try all the versions
+ for version in self.versions:
+ self.set_version(version)
+ g=self.xml.find("{%s}graph" % self.NS_GEXF)
+ if g is not None:
+ return self.make_graph(g)
+ raise nx.NetworkXError("No <graph> element in GEXF file")
+
+
+ def make_graph(self, graph_xml):
+ # mode is "static" or "dynamic"
+ graph_mode = graph_xml.get("mode", "")
+ self.dynamic=(graph_mode=='dynamic')
+
+ # start with empty DiGraph or MultiDiGraph
+ edgedefault = graph_xml.get("defaultedgetype", None)
+ if edgedefault=='directed':
+ G=nx.MultiDiGraph()
+ else:
+ G=nx.MultiGraph()
+
+ # graph attributes
+ graph_start=graph_xml.get('start')
+ if graph_start is not None:
+ G.graph['start']=graph_start
+ graph_end=graph_xml.get('end')
+ if graph_end is not None:
+ G.graph['end']=graph_end
+
+ # node and edge attributes
+ attributes_elements=graph_xml.findall("{%s}attributes"%self.NS_GEXF)
+ # dictionaries to hold attributes and attribute defaults
+ node_attr={}
+ node_default={}
+ edge_attr={}
+ edge_default={}
+ for a in attributes_elements:
+ attr_class = a.get("class")
+ if attr_class=='node':
+ na,nd = self.find_gexf_attributes(a)
+ node_attr.update(na)
+ node_default.update(nd)
+ G.graph['node_default']=node_default
+ elif attr_class=='edge':
+ ea,ed = self.find_gexf_attributes(a)
+ edge_attr.update(ea)
+ edge_default.update(ed)
+ G.graph['edge_default']=edge_default
+ else:
+                raise nx.NetworkXError("Unknown attribute class: %s" % attr_class)
+
+ # Hack to handle Gephi0.7beta bug
+ # add weight attribute
+ ea={'weight':{'type': 'double', 'mode': 'static', 'title': 'weight'}}
+ ed={}
+ edge_attr.update(ea)
+ edge_default.update(ed)
+ G.graph['edge_default']=edge_default
+
+ # add nodes
+ nodes_element=graph_xml.find("{%s}nodes" % self.NS_GEXF)
+ if nodes_element is not None:
+ for node_xml in nodes_element.findall("{%s}node" % self.NS_GEXF):
+ self.add_node(G, node_xml, node_attr)
+
+ # add edges
+ edges_element=graph_xml.find("{%s}edges" % self.NS_GEXF)
+ if edges_element is not None:
+ for edge_xml in edges_element.findall("{%s}edge" % self.NS_GEXF):
+ self.add_edge(G, edge_xml, edge_attr)
+
+ # switch to Graph or DiGraph if no parallel edges were found.
+ if self.simple_graph:
+ if G.is_directed():
+ G=nx.DiGraph(G)
+ else:
+ G=nx.Graph(G)
+ return G
+
+ def add_node(self, G, node_xml, node_attr, node_pid=None):
+ # add a single node with attributes to the graph
+
+ # get attributes and subattributues for node
+ data = self.decode_attr_elements(node_attr, node_xml)
+ data = self.add_parents(data, node_xml) # add any parents
+ if self.version=='1.1':
+ data = self.add_slices(data, node_xml) # add slices
+ else:
+ data = self.add_spells(data, node_xml) # add spells
+ data = self.add_viz(data, node_xml) # add viz
+ data = self.add_start_end(data, node_xml) # add start/end
+
+ # find the node id and cast it to the appropriate type
+ node_id = node_xml.get("id")
+ if self.node_type is not None:
+ node_id=self.node_type(node_id)
+
+ # every node should have a label
+ node_label = node_xml.get("label")
+ data['label']=node_label
+
+ # parent node id
+ node_pid = node_xml.get("pid", node_pid)
+ if node_pid is not None:
+ data['pid']=node_pid
+
+ # check for subnodes, recursive
+ subnodes=node_xml.find("{%s}nodes" % self.NS_GEXF)
+ if subnodes is not None:
+ for node_xml in subnodes.findall("{%s}node" % self.NS_GEXF):
+ self.add_node(G, node_xml, node_attr, node_pid=node_id)
+
+ G.add_node(node_id, data)
+
+ def add_start_end(self, data, xml):
+ # start and end times
+ node_start = xml.get("start")
+ if node_start is not None:
+ data['start']=node_start
+ node_end = xml.get("end")
+ if node_end is not None:
+ data['end']=node_end
+ return data
+
+
+ def add_viz(self, data, node_xml):
+ # add viz element for node
+ viz={}
+ color=node_xml.find("{%s}color"%self.NS_VIZ)
+ if color is not None:
+ if self.VERSION=='1.1':
+ viz['color']={'r':int(color.get('r')),
+ 'g':int(color.get('g')),
+ 'b':int(color.get('b'))}
+ else:
+ viz['color']={'r':int(color.get('r')),
+ 'g':int(color.get('g')),
+ 'b':int(color.get('b')),
+ 'a':float(color.get('a', 1)),
+ }
+
+ size=node_xml.find("{%s}size"%self.NS_VIZ)
+ if size is not None:
+ viz['size']=float(size.get('value'))
+
+ thickness=node_xml.find("{%s}thickness"%self.NS_VIZ)
+ if thickness is not None:
+ viz['thickness']=float(thickness.get('value'))
+
+ shape=node_xml.find("{%s}shape"%self.NS_VIZ)
+ if shape is not None:
+ viz['shape']=shape.get('shape')
+ if viz['shape']=='image':
+ viz['shape']=shape.get('uri')
+
+ position=node_xml.find("{%s}position"%self.NS_VIZ)
+ if position is not None:
+ viz['position']={'x':float(position.get('x',0)),
+ 'y':float(position.get('y',0)),
+ 'z':float(position.get('z',0))}
+
+ if len(viz)>0:
+ data['viz']=viz
+ return data
+
+ def add_parents(self, data, node_xml):
+ parents_element=node_xml.find("{%s}parents"%self.NS_GEXF)
+ if parents_element is not None:
+ data['parents']=[]
+ for p in parents_element.findall("{%s}parent"%self.NS_GEXF):
+ parent=p.get('for')
+ data['parents'].append(parent)
+ return data
+
+ def add_slices(self, data, node_xml):
+ slices_element=node_xml.find("{%s}slices"%self.NS_GEXF)
+ if slices_element is not None:
+ data['slices']=[]
+ for s in slices_element.findall("{%s}slice"%self.NS_GEXF):
+ start=s.get('start')
+ end=s.get('end')
+ data['slices'].append((start,end))
+ return data
+
+ def add_spells(self, data, node_xml):
+ spells_element=node_xml.find("{%s}spells"%self.NS_GEXF)
+ if spells_element is not None:
+ data['spells']=[]
+ for s in spells_element.findall("{%s}spell"%self.NS_GEXF):
+ start=s.get('start')
+ end=s.get('end')
+ data['spells'].append((start,end))
+ return data
+
+
+ def add_edge(self, G, edge_element, edge_attr):
+ # add an edge to the graph
+
+ # raise error if we find mixed directed and undirected edges
+ edge_direction = edge_element.get("type")
+ if G.is_directed() and edge_direction=='undirected':
+ raise nx.NetworkXError(\
+ "Undirected edge found in directed graph.")
+ if (not G.is_directed()) and edge_direction=='directed':
+ raise nx.NetworkXError(\
+ "Directed edge found in undirected graph.")
+
+ # Get source and target and recast type if required
+ source = edge_element.get("source")
+ target = edge_element.get("target")
+ if self.node_type is not None:
+ source=self.node_type(source)
+ target=self.node_type(target)
+
+ data = self.decode_attr_elements(edge_attr, edge_element)
+ data = self.add_start_end(data,edge_element)
+
+ # GEXF stores edge ids as an attribute
+ # NetworkX uses them as keys in multigraphs
+ # if networkx_key is not specified as an attribute
+ edge_id = edge_element.get("id")
+ if edge_id is not None:
+ data["id"] = edge_id
+
+        # check if there is a 'networkx_key' and use that as edge_id
+ multigraph_key = data.pop('networkx_key',None)
+ if multigraph_key is not None:
+ edge_id=multigraph_key
+
+ weight = edge_element.get('weight')
+ if weight is not None:
+ data['weight']=float(weight)
+
+ edge_label = edge_element.get("label")
+ if edge_label is not None:
+ data['label']=edge_label
+
+
+
+ if G.has_edge(source,target):
+ # seen this edge before - this is a multigraph
+ self.simple_graph=False
+ G.add_edge(source, target, key=edge_id, **data)
+ if edge_direction=='mutual':
+ G.add_edge(target, source, key=edge_id, **data)
+
+ def decode_attr_elements(self, gexf_keys, obj_xml):
+ # Use the key information to decode the attr XML
+ attr = {}
+ # look for outer "<attvalues>" element
+ attr_element=obj_xml.find("{%s}attvalues" % self.NS_GEXF)
+ if attr_element is not None:
+ # loop over <attvalue> elements
+ for a in attr_element.findall("{%s}attvalue" % self.NS_GEXF):
+ key = a.get('for') # for is required
+ try: # should be in our gexf_keys dictionary
+ title=gexf_keys[key]['title']
+ except KeyError:
+ raise nx.NetworkXError("No attribute defined for=%s"%key)
+ atype=gexf_keys[key]['type']
+ value=a.get('value')
+ if atype=='boolean':
+ value=self.convert_bool[value]
+ else:
+ value=self.python_type[atype](value)
+ if gexf_keys[key]['mode']=='dynamic':
+ # for dynamic graphs use list of three-tuples
+ # [(value1,start1,end1), (value2,start2,end2), etc]
+ start=a.get('start')
+ end=a.get('end')
+ if title in attr:
+ attr[title].append((value,start,end))
+ else:
+ attr[title]=[(value,start,end)]
+ else:
+ # for static graphs just assign the value
+ attr[title] = value
+ return attr
+
+ def find_gexf_attributes(self, attributes_element):
+ # Extract all the attributes and defaults
+ attrs = {}
+ defaults = {}
+ mode=attributes_element.get('mode')
+ for k in attributes_element.findall("{%s}attribute" % self.NS_GEXF):
+ attr_id = k.get("id")
+ title=k.get('title')
+ atype=k.get('type')
+ attrs[attr_id]={'title':title,'type':atype,'mode':mode}
+ # check for the "default" subelement of key element and add
+ default=k.find("{%s}default" % self.NS_GEXF)
+ if default is not None:
+ if atype=='boolean':
+ value=self.convert_bool[default.text]
+ else:
+ value=self.python_type[atype](default.text)
+ defaults[title]=value
+ return attrs,defaults
+
+
+def relabel_gexf_graph(G):
+ """Relabel graph using "label" node keyword for node label.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph read from GEXF data
+
+ Returns
+ -------
+ H : graph
+        A NetworkX graph with relabeled nodes
+
+ Notes
+ -----
+ This function relabels the nodes in a NetworkX graph with the
+ "label" attribute. It also handles relabeling the specific GEXF
+ node attributes "parents", and "pid".
+ """
+ # build mapping of node labels, do some error checking
+ try:
+ mapping=[(u,G.node[u]['label']) for u in G]
+ except KeyError:
+ raise nx.NetworkXError('Failed to relabel nodes: '
+ 'missing node labels found. '
+ 'Use relabel=False.')
+ x,y=zip(*mapping)
+ if len(set(y))!=len(G):
+ raise nx.NetworkXError('Failed to relabel nodes: '
+ 'duplicate node labels found. '
+ 'Use relabel=False.')
+ mapping=dict(mapping)
+ H=nx.relabel_nodes(G,mapping)
+ # relabel attributes
+ for n in G:
+ m=mapping[n]
+ H.node[m]['id']=n
+ H.node[m].pop('label')
+ if 'pid' in H.node[m]:
+ H.node[m]['pid']=mapping[G.node[n]['pid']]
+ if 'parents' in H.node[m]:
+ H.node[m]['parents']=[mapping[p] for p in G.node[n]['parents']]
+ return H
+
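A minimal sketch of relabel_gexf_graph on a hand-built graph (node labels illustrative); note how the original id is preserved as node data:

    import networkx as nx

    G = nx.Graph()
    G.add_node(0, label='zero')
    G.add_node(1, label='one')
    G.add_edge(0, 1)
    H = nx.relabel_gexf_graph(G)
    print sorted(H.nodes())   # ['one', 'zero']
    print H.node['zero']      # {'id': 0}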
+# fixture for nose tests
+def setup_module(module):
+ from nose import SkipTest
+ try:
+ import xml.etree.cElementTree
+ except:
+ raise SkipTest("xml.etree.cElementTree not available")
+
+# fixture for nose tests
+def teardown_module(module):
+ import os
+ try:
+ os.unlink('test.gexf')
+ except:
+ pass
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/gml.py b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/gml.py
new file mode 100644
index 0000000..d248eb4
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/gml.py
@@ -0,0 +1,410 @@
+"""
+Read graphs in GML format.
+
+"GML, the G>raph Modelling Language, is our proposal for a portable
+file format for graphs. GML's key features are portability, simple
+syntax, extensibility and flexibility. A GML file consists of a
+hierarchical key-value lists. Graphs can be annotated with arbitrary
+data structures. The idea for a common file format was born at the
+GD'95; this proposal is the outcome of many discussions. GML is the
+standard file format in the Graphlet graph editor system. It has been
+overtaken and adapted by several other systems for drawing graphs."
+
+See http://www.infosun.fim.uni-passau.de/Graphlet/GML/gml-tr.html
+
+Requires pyparsing: http://pyparsing.wikispaces.com/
+
+Format
+------
+See http://www.infosun.fim.uni-passau.de/Graphlet/GML/gml-tr.html
+for format specification.
+
+Example graphs in GML format:
+http://www-personal.umich.edu/~mejn/netdata/
+"""
+__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
+# Copyright (C) 2008-2010 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+
+__all__ = ['read_gml', 'parse_gml', 'generate_gml', 'write_gml']
+
+import networkx as nx
+from networkx.exception import NetworkXError
+from networkx.utils import is_string_like, open_file
+
+
+@open_file(0,mode='rb')
+def read_gml(path,encoding='UTF-8',relabel=False):
+ """Read graph in GML format from path.
+
+ Parameters
+ ----------
+ path : filename or filehandle
+ The filename or filehandle to read from.
+
+ encoding : string, optional
+ Text encoding.
+
+ relabel : bool, optional
+ If True use the GML node label attribute for node names otherwise use
+ the node id.
+
+ Returns
+ -------
+ G : MultiGraph or MultiDiGraph
+
+ Raises
+ ------
+ ImportError
+ If the pyparsing module is not available.
+
+ See Also
+ --------
+ write_gml, parse_gml
+
+ Notes
+ -----
+ Requires pyparsing: http://pyparsing.wikispaces.com/
+
+ References
+ ----------
+ GML specification:
+ http://www.infosun.fim.uni-passau.de/Graphlet/GML/gml-tr.html
+
+ Examples
+ --------
+ >>> G=nx.path_graph(4)
+ >>> nx.write_gml(G,'test.gml')
+ >>> H=nx.read_gml('test.gml')
+ """
+ lines=(line.decode(encoding) for line in path)
+ G=parse_gml(lines,relabel=relabel)
+ return G
+
+def parse_gml(lines, relabel=True):
+ """Parse GML graph from a string or iterable.
+
+ Parameters
+ ----------
+ lines : string or iterable
+ Data in GML format.
+
+ relabel : bool, optional
+ If True use the GML node label attribute for node names otherwise use
+ the node id.
+
+ Returns
+ -------
+ G : MultiGraph or MultiDiGraph
+
+ Raises
+ ------
+ ImportError
+ If the pyparsing module is not available.
+
+ See Also
+ --------
+ write_gml, read_gml
+
+ Notes
+ -----
+ This stores nested GML attributes as dictionaries in the
+ NetworkX graph, node, and edge attribute structures.
+
+ Requires pyparsing: http://pyparsing.wikispaces.com/
+
+ References
+ ----------
+ GML specification:
+ http://www.infosun.fim.uni-passau.de/Graphlet/GML/gml-tr.html
+ """
+ try:
+ from pyparsing import ParseException
+ except ImportError:
+ try:
+ from matplotlib.pyparsing import ParseException
+ except:
+ raise ImportError('Import Error: not able to import pyparsing:',
+ 'http://pyparsing.wikispaces.com/')
+ try:
+ data = "".join(lines)
+ gml = pyparse_gml()
+ tokens =gml.parseString(data)
+ except ParseException as err:
+ print((err.line))
+ print((" "*(err.column-1) + "^"))
+ print(err)
+ raise
+
+ # function to recursively make dicts of key/value pairs
+ def wrap(tok):
+ listtype=type(tok)
+ result={}
+ for k,v in tok:
+ if type(v)==listtype:
+ result[str(k)]=wrap(v)
+ else:
+ result[str(k)]=v
+ return result
+
+ # Set flag
+ multigraph=False
+ # but assume multigraphs to start
+ if tokens.directed==1:
+ G=nx.MultiDiGraph()
+ else:
+ G=nx.MultiGraph()
+
+ for k,v in tokens.asList():
+ if k=="node":
+ vdict=wrap(v)
+ node=vdict['id']
+ G.add_node(node,attr_dict=vdict)
+ elif k=="edge":
+ vdict=wrap(v)
+ source=vdict.pop('source')
+ target=vdict.pop('target')
+ if G.has_edge(source,target):
+ multigraph=True
+ G.add_edge(source,target,attr_dict=vdict)
+ else:
+ G.graph[k]=v
+
+ # switch to Graph or DiGraph if no parallel edges were found.
+ if not multigraph:
+ if G.is_directed():
+ G=nx.DiGraph(G)
+ else:
+ G=nx.Graph(G)
+
+ if relabel:
+ # relabel, but check for duplicate labels first
+ mapping=[(n,d['label']) for n,d in G.node.items()]
+ x,y=zip(*mapping)
+ if len(set(y))!=len(G):
+ raise NetworkXError('Failed to relabel nodes: '
+ 'duplicate node labels found. '
+ 'Use relabel=False.')
+ G=nx.relabel_nodes(G,dict(mapping))
+ return G
+
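A minimal usage sketch for parse_gml (requires pyparsing, per the Notes above; with the default relabel=True the nodes come back under their GML labels):

    import networkx as nx

    gml = '''graph [
       node [ id 1 label "a" ]
       node [ id 2 label "b" ]
       edge [ source 1 target 2 ]
    ]'''
    G = nx.parse_gml(gml)
    print sorted(G.nodes())   # ['a', 'b']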
+
+def pyparse_gml():
+ """A pyparsing tokenizer for GML graph format.
+
+ This is not intended to be called directly.
+
+ See Also
+ --------
+ write_gml, read_gml, parse_gml
+ """
+ try:
+ from pyparsing import \
+ Literal, CaselessLiteral, Word, Forward,\
+ ZeroOrMore, Group, Dict, Optional, Combine,\
+ ParseException, restOfLine, White, alphas, alphanums, nums,\
+ OneOrMore,quotedString,removeQuotes,dblQuotedString, Regex
+ except ImportError:
+ try:
+ from matplotlib.pyparsing import \
+ Literal, CaselessLiteral, Word, Forward,\
+ ZeroOrMore, Group, Dict, Optional, Combine,\
+ ParseException, restOfLine, White, alphas, alphanums, nums,\
+ OneOrMore,quotedString,removeQuotes,dblQuotedString, Regex
+ except:
+ raise ImportError('pyparsing not found',
+ 'http://pyparsing.wikispaces.com/')
+
+ lbrack = Literal("[").suppress()
+ rbrack = Literal("]").suppress()
+ pound = ("#")
+ comment = pound + Optional( restOfLine )
+ integer = Word(nums+'-').setParseAction(lambda s,l,t:[ int(t[0])])
+ real = Regex(r"[+-]?\d+\.\d*([eE][+-]?\d+)?").setParseAction(
+ lambda s,l,t:[ float(t[0]) ])
+ dblQuotedString.setParseAction( removeQuotes )
+ key = Word(alphas,alphanums+'_')
+ value_atom = (real | integer | Word(alphanums) | dblQuotedString)
+ value = Forward() # to be defined later with << operator
+ keyvalue = Group(key+value)
+ value << (value_atom | Group( lbrack + ZeroOrMore(keyvalue) + rbrack ))
+ node = Group(Literal("node") + lbrack + Group(OneOrMore(keyvalue)) + rbrack)
+ edge = Group(Literal("edge") + lbrack + Group(OneOrMore(keyvalue)) + rbrack)
+
+ creator = Group(Literal("Creator")+ Optional( restOfLine ))
+ version = Group(Literal("Version")+ Optional( restOfLine ))
+ graphkey = Literal("graph").suppress()
+
+ graph = Dict (Optional(creator)+Optional(version)+\
+ graphkey + lbrack + ZeroOrMore( (node|edge|keyvalue) ) + rbrack )
+ graph.ignore(comment)
+
+ return graph
+
+def generate_gml(G):
+ """Generate a single entry of the graph G in GML format.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ Returns
+ -------
+ lines: string
+ Lines in GML format.
+
+ Notes
+ -----
+ This implementation does not support all Python data types as GML
+ data. Nodes, node attributes, edge attributes, and graph
+    attributes must be either dictionaries or single strings or
+    numbers. If they are not, an attempt is made to represent them as
+ strings. For example, a list as edge data
+ G[1][2]['somedata']=[1,2,3], will be represented in the GML file
+ as::
+
+ edge [
+ source 1
+ target 2
+ somedata "[1, 2, 3]"
+ ]
+ """
+ # recursively make dicts into gml brackets
+ def listify(d,indent,indentlevel):
+ result='[ \n'
+ for k,v in d.items():
+ if type(v)==dict:
+ v=listify(v,indent,indentlevel+1)
+ result += (indentlevel+1)*indent + \
+ string_item(k,v,indentlevel*indent)+'\n'
+ return result+indentlevel*indent+"]"
+
+ def string_item(k,v,indent):
+ # try to make a string of the data
+ if type(v)==dict:
+ v=listify(v,indent,2)
+ elif is_string_like(v):
+ v='"%s"'%v
+ elif type(v)==bool:
+ v=int(v)
+ return "%s %s"%(k,v)
+
+ # check for attributes or assign empty dict
+ if hasattr(G,'graph_attr'):
+ graph_attr=G.graph_attr
+ else:
+ graph_attr={}
+ if hasattr(G,'node_attr'):
+ node_attr=G.node_attr
+ else:
+ node_attr={}
+
+ indent=2*' '
+ count=iter(range(len(G)))
+ node_id={}
+
+ yield "graph ["
+ if G.is_directed():
+ yield indent+"directed 1"
+ # write graph attributes
+ for k,v in G.graph.items():
+ if k == 'directed':
+ continue
+ yield indent+string_item(k,v,indent)
+ # write nodes
+ for n in G:
+ yield indent+"node ["
+ # get id or assign number
+ nid=G.node[n].get('id',next(count))
+ node_id[n]=nid
+ yield 2*indent+"id %s"%nid
+ label=G.node[n].get('label',n)
+ if is_string_like(label):
+ label='"%s"'%label
+ yield 2*indent+'label %s'%label
+ if n in G:
+ for k,v in G.node[n].items():
+ if k=='id' or k == 'label': continue
+ yield 2*indent+string_item(k,v,indent)
+ yield indent+"]"
+ # write edges
+ for u,v,edgedata in G.edges_iter(data=True):
+ yield indent+"edge ["
+ yield 2*indent+"source %s"%node_id[u]
+ yield 2*indent+"target %s"%node_id[v]
+ for k,v in edgedata.items():
+ if k=='source': continue
+ if k=='target': continue
+ yield 2*indent+string_item(k,v,indent)
+ yield indent+"]"
+ yield "]"
+
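A usage sketch for generate_gml, joining its yielded lines (the output shape follows the listify/string_item helpers above; the comment is approximate):

    import networkx as nx

    G = nx.Graph()
    G.add_edge(1, 2, weight=0.5)
    print '\n'.join(nx.generate_gml(G))
    # graph [
    #   node [
    #     id 0
    #     label 1
    #   ]
    # ...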
+@open_file(1,mode='wb')
+def write_gml(G, path):
+ """
+ Write the graph G in GML format to the file or file handle path.
+
+ Parameters
+ ----------
+ path : filename or filehandle
+ The filename or filehandle to write. Filenames ending in
+       .gz or .bz2 will be compressed.
+
+ See Also
+ --------
+ read_gml, parse_gml
+
+ Notes
+ -----
+    GML specifications indicate that the file should only use
+    7-bit ASCII text encoding; this writer emits iso8859-1 (latin-1).
+
+ This implementation does not support all Python data types as GML
+ data. Nodes, node attributes, edge attributes, and graph
+    attributes must be either dictionaries or single strings or
+    numbers. If they are not, an attempt is made to represent them as
+ strings. For example, a list as edge data
+ G[1][2]['somedata']=[1,2,3], will be represented in the GML file
+ as::
+
+ edge [
+ source 1
+ target 2
+ somedata "[1, 2, 3]"
+ ]
+
+
+ Examples
+    --------
+ >>> G=nx.path_graph(4)
+ >>> nx.write_gml(G,"test.gml")
+
+ Filenames ending in .gz or .bz2 will be compressed.
+
+ >>> nx.write_gml(G,"test.gml.gz")
+ """
+ for line in generate_gml(G):
+ line+='\n'
+ path.write(line.encode('latin-1'))
+
+
+# fixture for nose tests
+def setup_module(module):
+ from nose import SkipTest
+ try:
+ import pyparsing
+ except:
+ try:
+ import matplotlib.pyparsing
+ except:
+ raise SkipTest("pyparsing not available")
+
+# fixture for nose tests
+def teardown_module(module):
+ import os
+ os.unlink('test.gml')
+ os.unlink('test.gml.gz')
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/gpickle.py b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/gpickle.py
new file mode 100644
index 0000000..688a35c
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/gpickle.py
@@ -0,0 +1,100 @@
+"""
+**************
+Pickled Graphs
+**************
+Read and write NetworkX graphs as Python pickles.
+
+"The pickle module implements a fundamental, but powerful algorithm
+for serializing and de-serializing a Python object
+structure. "Pickling" is the process whereby a Python object hierarchy
+is converted into a byte stream, and "unpickling" is the inverse
+operation, whereby a byte stream is converted back into an object
+hierarchy."
+
+Note that NetworkX graphs can contain any hashable Python object as
+node (not just integers and strings). For arbitrary data types it may
+be difficult to represent the data as text. In that case Python
+pickles can be used to store the graph data.
+
+Format
+------
+See http://docs.python.org/library/pickle.html
+"""
+__author__ = """Aric Hagberg (hagberg@lanl.gov)\nDan Schult (dschult@colgate.edu)"""
+# Copyright (C) 2004-2010 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+
+__all__ = ['read_gpickle', 'write_gpickle']
+
+import networkx as nx
+from networkx.utils import open_file
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+@open_file(1,mode='wb')
+def write_gpickle(G, path):
+ """Write graph in Python pickle format.
+
+ Pickles are a serialized byte stream of a Python object [1]_.
+ This format will preserve Python objects used as nodes or edges.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph
+ path : file or string
+ File or filename to write.
+ Filenames ending in .gz or .bz2 will be compressed.
+
+ Examples
+ --------
+ >>> G=nx.path_graph(4)
+ >>> nx.write_gpickle(G,"test.gpickle")
+
+ References
+ ----------
+ .. [1] http://docs.python.org/library/pickle.html
+ """
+ pickle.dump(G, path, pickle.HIGHEST_PROTOCOL)
+
+@open_file(0,mode='rb')
+def read_gpickle(path):
+ """Read graph object in Python pickle format.
+
+ Pickles are a serialized byte stream of a Python object [1]_.
+ This format will preserve Python objects used as nodes or edges.
+
+ Parameters
+ ----------
+ path : file or string
+       File or filename to read.
+ Filenames ending in .gz or .bz2 will be uncompressed.
+
+ Returns
+ -------
+ G : graph
+ A NetworkX graph
+
+ Examples
+ --------
+ >>> G=nx.path_graph(4)
+ >>> nx.write_gpickle(G,"test.gpickle")
+ >>> G=nx.read_gpickle("test.gpickle")
+
+ References
+ ----------
+ .. [1] http://docs.python.org/library/pickle.html
+ """
+ return pickle.load(path)
+
+# fixture for nose tests
+def teardown_module(module):
+ import os
+ os.unlink('test.gpickle')
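Because pickling serializes the full object graph, arbitrary hashable node objects survive a round trip, per the module note above. A minimal sketch (file name illustrative):

    import networkx as nx

    G = nx.Graph()
    G.add_edge(frozenset([1, 2]), 'b', weight=2)
    nx.write_gpickle(G, 'objects.gpickle')
    H = nx.read_gpickle('objects.gpickle')
    assert H.has_edge(frozenset([1, 2]), 'b')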
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/graphml.py b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/graphml.py
new file mode 100644
index 0000000..7f896e0
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/graphml.py
@@ -0,0 +1,579 @@
+"""
+*******
+GraphML
+*******
+Read and write graphs in GraphML format.
+
+This implementation does not support mixed graphs (directed and undirected
+edges together), hyperedges, nested graphs, or ports.
+
+"GraphML is a comprehensive and easy-to-use file format for graphs. It
+consists of a language core to describe the structural properties of a
+graph and a flexible extension mechanism to add application-specific
+data. Its main features include support of
+
+ * directed, undirected, and mixed graphs,
+ * hypergraphs,
+ * hierarchical graphs,
+ * graphical representations,
+ * references to external data,
+ * application-specific attribute data, and
+ * light-weight parsers.
+
+Unlike many other file formats for graphs, GraphML does not use a
+custom syntax. Instead, it is based on XML and hence ideally suited as
+a common denominator for all kinds of services generating, archiving,
+or processing graphs."
+
+http://graphml.graphdrawing.org/
+
+Format
+------
+GraphML is an XML format. See
+http://graphml.graphdrawing.org/specification.html for the specification and
+http://graphml.graphdrawing.org/primer/graphml-primer.html
+for examples.
+"""
+__author__ = """\n""".join(['Salim Fadhley',
+ 'Aric Hagberg (hagberg@lanl.gov)'
+ ])
+
+__all__ = ['write_graphml', 'read_graphml', 'generate_graphml',
+ 'parse_graphml', 'GraphMLWriter', 'GraphMLReader']
+
+import networkx as nx
+from networkx.utils import open_file, make_str
+import warnings
+try:
+ from xml.etree.cElementTree import Element, ElementTree, tostring, fromstring
+except ImportError:
+ try:
+ from xml.etree.ElementTree import Element, ElementTree, tostring, fromstring
+ except ImportError:
+ pass
+
+@open_file(1,mode='wb')
+def write_graphml(G, path, encoding='utf-8',prettyprint=True):
+ """Write G in GraphML XML format to path
+
+ Parameters
+ ----------
+ G : graph
+ A networkx graph
+ path : file or string
+ File or filename to write.
+ Filenames ending in .gz or .bz2 will be compressed.
+ encoding : string (optional)
+ Encoding for text data.
+ prettyprint : bool (optional)
+ If True use line breaks and indenting in output XML.
+
+ Examples
+ --------
+ >>> G=nx.path_graph(4)
+ >>> nx.write_graphml(G, "test.graphml")
+
+ Notes
+ -----
+    This implementation does not support mixed graphs (directed and undirected
+    edges together), hyperedges, nested graphs, or ports.
+ """
+ writer = GraphMLWriter(encoding=encoding,prettyprint=prettyprint)
+ writer.add_graph_element(G)
+ writer.dump(path)
+
+def generate_graphml(G, encoding='utf-8',prettyprint=True):
+ """Generate GraphML lines for G
+
+ Parameters
+ ----------
+ G : graph
+ A networkx graph
+ encoding : string (optional)
+ Encoding for text data.
+ prettyprint : bool (optional)
+ If True use line breaks and indenting in output XML.
+
+ Examples
+ --------
+ >>> G=nx.path_graph(4)
+ >>> linefeed=chr(10) # linefeed=\n
+ >>> s=linefeed.join(nx.generate_graphml(G)) # doctest: +SKIP
+ >>> for line in nx.generate_graphml(G): # doctest: +SKIP
+ ... print(line)
+
+ Notes
+ -----
+    This implementation does not support mixed graphs (directed and undirected
+    edges together), hyperedges, nested graphs, or ports.
+ """
+ writer = GraphMLWriter(encoding=encoding,prettyprint=prettyprint)
+ writer.add_graph_element(G)
+ for line in str(writer).splitlines():
+ yield line
+
+@open_file(0,mode='rb')
+def read_graphml(path,node_type=str):
+ """Read graph in GraphML format from path.
+
+ Parameters
+ ----------
+ path : file or string
+       File or filename to read.
+       Filenames ending in .gz or .bz2 will be uncompressed.
+
+ node_type: Python type (default: str)
+ Convert node ids to this type
+
+ Returns
+ -------
+ graph: NetworkX graph
+ If no parallel edges are found a Graph or DiGraph is returned.
+ Otherwise a MultiGraph or MultiDiGraph is returned.
+
+ Notes
+ -----
+    This implementation does not support mixed graphs (directed and undirected
+    edges together), hyperedges, nested graphs, or ports.
+
+    For multigraphs the GraphML edge "id" will be used as the edge
+    key. If it is not specified, the "key" attribute will be used. If
+    there is no "key" attribute, a default NetworkX multigraph edge key
+    will be provided.
+
+ Files with the yEd "yfiles" extension will can be read but the graphics
+ information is discarded.
+
+ yEd compressed files ("file.graphmlz" extension) can be read by renaming
+ the file to "file.graphml.gz".
+
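+    Examples
+    --------
+    A file round trip (a sketch; assumes the working directory is
+    writable):
+
+    >>> G = nx.path_graph(4)
+    >>> nx.write_graphml(G, "test.graphml")
+    >>> H = nx.read_graphml("test.graphml")
+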
+ """
+ reader = GraphMLReader(node_type=node_type)
+ # need to check for multiple graphs
+ glist=list(reader(path=path))
+ return glist[0]
+
+
+def parse_graphml(graphml_string,node_type=str):
+ """Read graph in GraphML format from string.
+
+ Parameters
+ ----------
+ graphml_string : string
+ String containing graphml information
+ (e.g., contents of a graphml file).
+
+ node_type: Python type (default: str)
+ Convert node ids to this type
+
+ Returns
+ -------
+ graph: NetworkX graph
+ If no parallel edges are found a Graph or DiGraph is returned.
+ Otherwise a MultiGraph or MultiDiGraph is returned.
+
+ Examples
+ --------
+ >>> G=nx.path_graph(4)
+ >>> linefeed=chr(10) # linefeed=\n
+ >>> s=linefeed.join(nx.generate_graphml(G))
+ >>> H=nx.parse_graphml(s)
+
+ Notes
+ -----
+    This implementation does not support mixed graphs (directed and undirected
+    edges together), hyperedges, nested graphs, or ports.
+
+    For multigraphs the GraphML edge "id" will be used as the edge
+    key. If it is not specified, the "key" attribute will be used. If
+    there is no "key" attribute, a default NetworkX multigraph edge key
+    will be provided.
+
+ """
+ reader = GraphMLReader(node_type=node_type)
+ # need to check for multiple graphs
+ glist=list(reader(string=graphml_string))
+ return glist[0]
+
+
+class GraphML(object):
+ NS_GRAPHML = "http://graphml.graphdrawing.org/xmlns"
+ NS_XSI = "http://www.w3.org/2001/XMLSchema-instance"
+ #xmlns:y="http://www.yworks.com/xml/graphml"
+ NS_Y = "http://www.yworks.com/xml/graphml"
+ SCHEMALOCATION = \
+ ' '.join(['http://graphml.graphdrawing.org/xmlns',
+ 'http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd'])
+
+ try:
+        chr(12345) # raises ValueError on Python 2, succeeds on Python 3
+ unicode = str # Py3k's str is our unicode type
+ long = int # Py3K's int is our long type
+ except ValueError:
+ # Python 2.x
+ pass
+
+ types=[(int,"integer"), # for Gephi GraphML bug
+ (str,"yfiles"),(str,"string"), (unicode,"string"),
+ (int,"int"), (long,"long"),
+ (float,"float"), (float,"double"),
+ (bool, "boolean")]
+
+ xml_type = dict(types)
+ python_type = dict(reversed(a) for a in types)
+ convert_bool={'true':True,'false':False,
+ 'True': True, 'False': False}
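+    # Illustrative lookups (not exhaustive): xml_type[bool] == "boolean",
+    # and python_type["double"] == float.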
+
+
+
+class GraphMLWriter(GraphML):
+ def __init__(self, graph=None, encoding="utf-8",prettyprint=True):
+ try:
+ import xml.etree.ElementTree
+ except ImportError:
+            raise ImportError('GraphML writer requires '
+                              'xml.etree.ElementTree')
+ self.prettyprint=prettyprint
+ self.encoding = encoding
+ self.xml = Element("graphml",
+ {'xmlns':self.NS_GRAPHML,
+ 'xmlns:xsi':self.NS_XSI,
+ 'xsi:schemaLocation':self.SCHEMALOCATION}
+ )
+ self.keys={}
+
+ if graph is not None:
+ self.add_graph_element(graph)
+
+
+ def __str__(self):
+ if self.prettyprint:
+ self.indent(self.xml)
+ s=tostring(self.xml).decode(self.encoding)
+ return s
+
+ def get_key(self, name, attr_type, scope, default):
+ keys_key = (name, attr_type, scope)
+ try:
+ return self.keys[keys_key]
+ except KeyError:
+ new_id = "d%i" % len(list(self.keys))
+ self.keys[keys_key] = new_id
+ key_kwargs = {"id":new_id,
+ "for":scope,
+ "attr.name":name,
+ "attr.type":attr_type}
+ key_element=Element("key",**key_kwargs)
+ # add subelement for data default value if present
+ if default is not None:
+ default_element=Element("default")
+ default_element.text=make_str(default)
+ key_element.append(default_element)
+ self.xml.insert(0,key_element)
+ return new_id
+
+
+ def add_data(self, name, element_type, value,
+ scope="all",
+ default=None):
+ """
+ Make a data element for an edge or a node. Keep a log of the
+ type in the keys table.
+ """
+ if element_type not in self.xml_type:
+ raise nx.NetworkXError('GraphML writer does not support '
+ '%s as data values.'%element_type)
+ key_id = self.get_key(name, self.xml_type[element_type], scope, default)
+ data_element = Element("data", key=key_id)
+ data_element.text = make_str(value)
+ return data_element
+
+ def add_attributes(self, scope, xml_obj, data, default):
+ """Appends attributes to edges or nodes.
+ """
+ for k,v in data.items():
+ default_value=default.get(k)
+ obj=self.add_data(make_str(k), type(v), make_str(v),
+ scope=scope, default=default_value)
+ xml_obj.append(obj)
+
+ def add_nodes(self, G, graph_element):
+ for node,data in G.nodes_iter(data=True):
+ node_element = Element("node", id = make_str(node))
+ default=G.graph.get('node_default',{})
+ self.add_attributes("node", node_element, data, default)
+ graph_element.append(node_element)
+
+ def add_edges(self, G, graph_element):
+ if G.is_multigraph():
+ for u,v,key,data in G.edges_iter(data=True,keys=True):
+ edge_element = Element("edge",source=make_str(u),
+ target=make_str(v))
+ default=G.graph.get('edge_default',{})
+ self.add_attributes("edge", edge_element, data, default)
+ self.add_attributes("edge", edge_element,
+ {'key':key}, default)
+ graph_element.append(edge_element)
+ else:
+ for u,v,data in G.edges_iter(data=True):
+ edge_element = Element("edge",source=make_str(u),
+ target=make_str(v))
+ default=G.graph.get('edge_default',{})
+ self.add_attributes("edge", edge_element, data, default)
+ graph_element.append(edge_element)
+
+ def add_graph_element(self, G):
+ """
+ Serialize graph G in GraphML to the stream.
+ """
+ if G.is_directed():
+ default_edge_type='directed'
+ else:
+ default_edge_type='undirected'
+
+ graphid=G.graph.pop('id',None)
+ if graphid is None:
+ graph_element = Element("graph",
+ edgedefault = default_edge_type)
+ else:
+ graph_element = Element("graph",
+ edgedefault = default_edge_type,
+ id=graphid)
+
+ default={}
+ data=dict((k,v) for (k,v) in G.graph.items()
+ if k not in ['node_default','edge_default'])
+ self.add_attributes("graph", graph_element, data, default)
+ self.add_nodes(G,graph_element)
+ self.add_edges(G,graph_element)
+ self.xml.append(graph_element)
+
+ def add_graphs(self, graph_list):
+ """
+ Add many graphs to this GraphML document.
+ """
+ for G in graph_list:
+ self.add_graph_element(G)
+
+ def dump(self, stream):
+ if self.prettyprint:
+ self.indent(self.xml)
+ document = ElementTree(self.xml)
+ header='<?xml version="1.0" encoding="%s"?>'%self.encoding
+ stream.write(header.encode(self.encoding))
+ document.write(stream, encoding=self.encoding)
+
+ def indent(self, elem, level=0):
+ # in-place prettyprint formatter
+ i = "\n" + level*" "
+ if len(elem):
+ if not elem.text or not elem.text.strip():
+ elem.text = i + " "
+ if not elem.tail or not elem.tail.strip():
+ elem.tail = i
+ for elem in elem:
+ self.indent(elem, level+1)
+ if not elem.tail or not elem.tail.strip():
+ elem.tail = i
+ else:
+ if level and (not elem.tail or not elem.tail.strip()):
+ elem.tail = i
+
+
+class GraphMLReader(GraphML):
+ """Read a GraphML document. Produces NetworkX graph objects.
+ """
+ def __init__(self, node_type=str):
+ try:
+ import xml.etree.ElementTree
+ except ImportError:
+            raise ImportError('GraphML reader requires '
+                              'xml.etree.ElementTree')
+ self.node_type=node_type
+        self.multigraph=False # assume not a multigraph until parallel edges are found
+
+ def __call__(self, path=None, string=None):
+ if path is not None:
+ self.xml = ElementTree(file=path)
+ elif string is not None:
+ self.xml = fromstring(string)
+ else:
+ raise ValueError("Must specify either 'path' or 'string' as kwarg.")
+ (keys,defaults) = self.find_graphml_keys(self.xml)
+ for g in self.xml.findall("{%s}graph" % self.NS_GRAPHML):
+ yield self.make_graph(g, keys, defaults)
+
+ def make_graph(self, graph_xml, graphml_keys, defaults):
+ # set default graph type
+ edgedefault = graph_xml.get("edgedefault", None)
+ if edgedefault=='directed':
+ G=nx.MultiDiGraph()
+ else:
+ G=nx.MultiGraph()
+ # set defaults for graph attributes
+ G.graph['node_default']={}
+ G.graph['edge_default']={}
+ for key_id,value in defaults.items():
+ key_for=graphml_keys[key_id]['for']
+ name=graphml_keys[key_id]['name']
+ python_type=graphml_keys[key_id]['type']
+ if key_for=='node':
+ G.graph['node_default'].update({name:python_type(value)})
+ if key_for=='edge':
+ G.graph['edge_default'].update({name:python_type(value)})
+ # hyperedges are not supported
+ hyperedge=graph_xml.find("{%s}hyperedge" % self.NS_GRAPHML)
+ if hyperedge is not None:
+ raise nx.NetworkXError("GraphML reader does not support hyperedges")
+ # add nodes
+ for node_xml in graph_xml.findall("{%s}node" % self.NS_GRAPHML):
+ self.add_node(G, node_xml, graphml_keys)
+ # add edges
+ for edge_xml in graph_xml.findall("{%s}edge" % self.NS_GRAPHML):
+ self.add_edge(G, edge_xml, graphml_keys)
+ # add graph data
+ data = self.decode_data_elements(graphml_keys, graph_xml)
+ G.graph.update(data)
+
+ # switch to Graph or DiGraph if no parallel edges were found.
+ if not self.multigraph:
+ if G.is_directed():
+ return nx.DiGraph(G)
+ else:
+ return nx.Graph(G)
+ else:
+ return G
+
+ def add_node(self, G, node_xml, graphml_keys):
+ """Add a node to the graph.
+ """
+ # warn on finding unsupported ports tag
+ ports=node_xml.find("{%s}port" % self.NS_GRAPHML)
+ if ports is not None:
+ warnings.warn("GraphML port tag not supported.")
+ # find the node by id and cast it to the appropriate type
+ node_id = self.node_type(node_xml.get("id"))
+ # get data/attributes for node
+ data = self.decode_data_elements(graphml_keys, node_xml)
+ G.add_node(node_id, data)
+
+ def add_edge(self, G, edge_element, graphml_keys):
+ """Add an edge to the graph.
+ """
+ # warn on finding unsupported ports tag
+ ports=edge_element.find("{%s}port" % self.NS_GRAPHML)
+ if ports is not None:
+ warnings.warn("GraphML port tag not supported.")
+
+ # raise error if we find mixed directed and undirected edges
+ directed = edge_element.get("directed")
+ if G.is_directed() and directed=='false':
+ raise nx.NetworkXError(\
+ "directed=false edge found in directed graph.")
+ if (not G.is_directed()) and directed=='true':
+ raise nx.NetworkXError(\
+ "directed=true edge found in undirected graph.")
+
+ source = self.node_type(edge_element.get("source"))
+ target = self.node_type(edge_element.get("target"))
+ data = self.decode_data_elements(graphml_keys, edge_element)
+ # GraphML stores edge ids as an attribute
+ # NetworkX uses them as keys in multigraphs too if no key
+ # attribute is specified
+ edge_id = edge_element.get("id")
+ if edge_id:
+ data["id"] = edge_id
+ if G.has_edge(source,target):
+ # mark this as a multigraph
+ self.multigraph=True
+ if edge_id is None:
+ # no id specified, try using 'key' attribute as id
+ edge_id=data.pop('key',None)
+ G.add_edge(source, target, key=edge_id, **data)
+
+ def decode_data_elements(self, graphml_keys, obj_xml):
+ """Use the key information to decode the data XML if present."""
+ data = {}
+ for data_element in obj_xml.findall("{%s}data" % self.NS_GRAPHML):
+ key = data_element.get("key")
+ try:
+ data_name=graphml_keys[key]['name']
+ data_type=graphml_keys[key]['type']
+ except KeyError:
+ raise nx.NetworkXError("Bad GraphML data: no key %s"%key)
+ text=data_element.text
+ # assume anything with subelements is a yfiles extension
+ if text is not None and len(list(data_element))==0:
+ if data_type==bool:
+ data[data_name] = self.convert_bool[text]
+ else:
+ data[data_name] = data_type(text)
+ elif len(list(data_element)) > 0:
+ # Assume yfiles as subelements, try to extract node_label
+ node_label = None
+ for node_type in ['ShapeNode', 'SVGNode', 'ImageNode']:
+ geometry = data_element.find("{%s}%s/{%s}Geometry" %
+ (self.NS_Y, node_type, self.NS_Y))
+ if geometry is not None:
+ data['x'] = geometry.get('x')
+ data['y'] = geometry.get('y')
+ if node_label is None:
+ node_label = data_element.find("{%s}%s/{%s}NodeLabel" %
+ (self.NS_Y, node_type, self.NS_Y))
+ if node_label is not None:
+ data['label'] = node_label.text
+
+                # check all the different types of edges available in yEd.
+ for e in ['PolyLineEdge', 'SplineEdge', 'QuadCurveEdge', 'BezierEdge', 'ArcEdge']:
+ edge_label = data_element.find("{%s}%s/{%s}EdgeLabel"%
+ (self.NS_Y, e, (self.NS_Y)))
+ if edge_label is not None:
+ break
+
+ if edge_label is not None:
+ data['label'] = edge_label.text
+ return data
+
+ def find_graphml_keys(self, graph_element):
+ """Extracts all the keys and key defaults from the xml.
+ """
+ graphml_keys = {}
+ graphml_key_defaults = {}
+ for k in graph_element.findall("{%s}key" % self.NS_GRAPHML):
+ attr_id = k.get("id")
+ attr_type=k.get('attr.type')
+ attr_name=k.get("attr.name")
+ yfiles_type=k.get("yfiles.type")
+ if yfiles_type is not None:
+ attr_name = yfiles_type
+ attr_type = 'yfiles'
+ if attr_type is None:
+ attr_type = "string"
+ warnings.warn("No key type for id %s. Using string"%attr_id)
+ if attr_name is None:
+ raise nx.NetworkXError("Unknown key for id %s in file."%attr_id)
+ graphml_keys[attr_id] = {
+ "name":attr_name,
+ "type":self.python_type[attr_type],
+ "for":k.get("for")}
+ # check for "default" subelement of key element
+ default=k.find("{%s}default" % self.NS_GRAPHML)
+ if default is not None:
+ graphml_key_defaults[attr_id]=default.text
+ return graphml_keys,graphml_key_defaults
+
+# fixture for nose tests
+def setup_module(module):
+ from nose import SkipTest
+ try:
+ import xml.etree.ElementTree
+ except:
+ raise SkipTest("xml.etree.ElementTree not available")
+
+# fixture for nose tests
+def teardown_module(module):
+ import os
+ try:
+ os.unlink('test.graphml')
+ except:
+ pass
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/json_graph/__init__.py b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/json_graph/__init__.py
new file mode 100644
index 0000000..ea3db6e
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/json_graph/__init__.py
@@ -0,0 +1,10 @@
+"""
+*********
+JSON data
+*********
+Generate and parse JSON serializable data for NetworkX graphs.
+"""
+from networkx.readwrite.json_graph.node_link import *
+from networkx.readwrite.json_graph.adjacency import *
+from networkx.readwrite.json_graph.tree import *
+from networkx.readwrite.json_graph.serialize import *
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/json_graph/adjacency.py b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/json_graph/adjacency.py
new file mode 100644
index 0000000..d5b0df2
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/json_graph/adjacency.py
@@ -0,0 +1,123 @@
+# Copyright (C) 2011-2013 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+from copy import deepcopy
+from itertools import count,repeat
+import json
+import networkx as nx
+__author__ = """Aric Hagberg <aric.hagberg@gmail.com>"""
+__all__ = ['adjacency_data', 'adjacency_graph']
+
+def adjacency_data(G):
+ """Return data in adjacency format that is suitable for JSON serialization
+ and use in Javascript documents.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ Returns
+ -------
+ data : dict
+       A dictionary with adjacency formatted data.
+
+ Examples
+ --------
+ >>> from networkx.readwrite import json_graph
+ >>> G = nx.Graph([(1,2)])
+ >>> data = json_graph.adjacency_data(G)
+
+ To serialize with json
+
+ >>> import json
+ >>> s = json.dumps(data)
+
+ Notes
+ -----
+ Graph, node, and link attributes will be written when using this format
+ but attribute keys must be strings if you want to serialize the resulting
+ data with JSON.
+
+ See Also
+ --------
+ adjacency_graph, node_link_data, tree_data
+ """
+ multigraph = G.is_multigraph()
+ data = {}
+ data['directed'] = G.is_directed()
+ data['multigraph'] = multigraph
+ data['graph'] = list(G.graph.items())
+ data['nodes'] = []
+ data['adjacency'] = []
+ for n,nbrdict in G.adjacency_iter():
+ data['nodes'].append(dict(id=n, **G.node[n]))
+ adj = []
+ if multigraph:
+ for nbr,key in nbrdict.items():
+ for k,d in key.items():
+ adj.append(dict(id=nbr, key=k, **d))
+ else:
+ for nbr,d in nbrdict.items():
+ adj.append(dict(id=nbr, **d))
+ data['adjacency'].append(adj)
+ return data
+
+def adjacency_graph(data, directed=False, multigraph=True):
+ """Return graph from adjacency data format.
+
+ Parameters
+ ----------
+ data : dict
+ Adjacency list formatted graph data
+
+    directed : bool
+        If True, and direction not specified in data, return a directed graph.
+
+    multigraph : bool
+        If True, and multigraph not specified in data, return a multigraph.
+
+    Returns
+    -------
+    G : NetworkX graph
+       A NetworkX graph object
+
+ Examples
+ --------
+ >>> from networkx.readwrite import json_graph
+ >>> G = nx.Graph([(1,2)])
+ >>> data = json_graph.adjacency_data(G)
+ >>> H = json_graph.adjacency_graph(data)
+
+ See Also
+ --------
+    adjacency_data, node_link_data, tree_data
+ """
+ multigraph = data.get('multigraph',multigraph)
+ directed = data.get('directed',directed)
+ if multigraph:
+ graph = nx.MultiGraph()
+ else:
+ graph = nx.Graph()
+ if directed:
+ graph = graph.to_directed()
+ graph.graph = dict(data.get('graph',[]))
+ mapping=[]
+ for d in data['nodes']:
+ node_data = d.copy()
+ node = node_data.pop('id')
+ mapping.append(node)
+ graph.add_node(node, attr_dict=node_data)
+ for i,d in enumerate(data['adjacency']):
+ source = mapping[i]
+ for tdata in d:
+ target_data = tdata.copy()
+ target = target_data.pop('id')
+ key = target_data.pop('key', None)
+ if not multigraph or key is None:
+                graph.add_edge(source,target,attr_dict=target_data)
+            else:
+                graph.add_edge(source,target,key=key, attr_dict=target_data)
+ return graph
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/json_graph/node_link.py b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/json_graph/node_link.py
new file mode 100644
index 0000000..03f150f
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/json_graph/node_link.py
@@ -0,0 +1,116 @@
+# Copyright (C) 2011-2013 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+from itertools import count,repeat
+import json
+import networkx as nx
+__author__ = """Aric Hagberg <hagberg@lanl.gov>"""
+__all__ = ['node_link_data', 'node_link_graph']
+
+def node_link_data(G):
+ """Return data in node-link format that is suitable for JSON serialization
+ and use in Javascript documents.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ Returns
+ -------
+ data : dict
+ A dictionary with node-link formatted data.
+
+ Examples
+ --------
+ >>> from networkx.readwrite import json_graph
+ >>> G = nx.Graph([(1,2)])
+ >>> data = json_graph.node_link_data(G)
+
+ To serialize with json
+
+ >>> import json
+ >>> s = json.dumps(data)
+
+ Notes
+ -----
+ Graph, node, and link attributes are stored in this format but keys
+ for attributes must be strings if you want to serialize with JSON.
+
+ See Also
+ --------
+ node_link_graph, adjacency_data, tree_data
+ """
+ multigraph = G.is_multigraph()
+ mapping = dict(zip(G,count()))
+ data = {}
+ data['directed'] = G.is_directed()
+ data['multigraph'] = multigraph
+ data['graph'] = list(G.graph.items())
+ data['nodes'] = [ dict(id=n, **G.node[n]) for n in G ]
+ if multigraph:
+ data['links'] = [ dict(source=mapping[u], target=mapping[v], key=k, **d)
+ for u,v,k,d in G.edges(keys=True, data=True) ]
+ else:
+ data['links'] = [ dict(source=mapping[u], target=mapping[v], **d)
+ for u,v,d in G.edges(data=True) ]
+
+ return data
+
+
+def node_link_graph(data, directed=False, multigraph=True):
+ """Return graph from node-link data format.
+
+ Parameters
+ ----------
+ data : dict
+ node-link formatted graph data
+
+ directed : bool
+ If True, and direction not specified in data, return a directed graph.
+
+ multigraph : bool
+ If True, and multigraph not specified in data, return a multigraph.
+
+ Returns
+ -------
+ G : NetworkX graph
+ A NetworkX graph object
+
+ Examples
+ --------
+ >>> from networkx.readwrite import json_graph
+ >>> G = nx.Graph([(1,2)])
+ >>> data = json_graph.node_link_data(G)
+ >>> H = json_graph.node_link_graph(data)
+
+ See Also
+ --------
+ node_link_data, adjacency_data, tree_data
+ """
+ multigraph = data.get('multigraph',multigraph)
+ directed = data.get('directed',directed)
+ if multigraph:
+ graph = nx.MultiGraph()
+ else:
+ graph = nx.Graph()
+ if directed:
+ graph = graph.to_directed()
+ mapping=[]
+ graph.graph = dict(data.get('graph',[]))
+ c = count()
+ for d in data['nodes']:
+ node = d.get('id',next(c))
+ mapping.append(node)
+ nodedata = dict((str(k),v) for k,v in d.items() if k!='id')
+ graph.add_node(node, **nodedata)
+ for d in data['links']:
+ link_data = d.copy()
+ source = link_data.pop('source')
+ target = link_data.pop('target')
+ edgedata = dict((str(k),v) for k,v in d.items()
+ if k!='source' and k!='target')
+ graph.add_edge(mapping[source],mapping[target],**edgedata)
+ return graph
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/json_graph/serialize.py b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/json_graph/serialize.py
new file mode 100644
index 0000000..5861836
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/json_graph/serialize.py
@@ -0,0 +1,31 @@
+# Copyright (C) 2011 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+from functools import partial,update_wrapper
+import json
+from networkx.readwrite.json_graph import node_link_data,node_link_graph
+__author__ = """Aric Hagberg (hagberg@lanl.gov))"""
+__all__ = ['dumps','loads','dump','load']
+
+class NXJSONEncoder(json.JSONEncoder):
+ def default(self, o):
+ return node_link_data(o)
+
+
+class NXJSONDecoder(json.JSONDecoder):
+ def decode(self, s):
+ d = json.loads(s)
+ return node_link_graph(d)
+
+# modification of json functions to serialize networkx graphs
+dumps = partial(json.dumps, cls=NXJSONEncoder)
+update_wrapper(dumps,json.dumps)
+loads = partial(json.loads, cls=NXJSONDecoder)
+update_wrapper(loads,json.loads)
+dump = partial(json.dump, cls=NXJSONEncoder)
+update_wrapper(dump,json.dump)
+load = partial(json.load, cls=NXJSONDecoder)
+update_wrapper(load,json.load)
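+
+# Example (a sketch): round-trip a graph through the helpers above.
+# >>> import networkx as nx
+# >>> G = nx.Graph([(1, 2)])
+# >>> H = loads(dumps(G))
+# >>> sorted(H.edges())
+# [(1, 2)]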
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/json_graph/tests/test_adjacency.py b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/json_graph/tests/test_adjacency.py
new file mode 100644
index 0000000..f8bf214
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/json_graph/tests/test_adjacency.py
@@ -0,0 +1,52 @@
+import json
+from nose.tools import assert_equal, assert_raises, assert_not_equal,assert_true
+import networkx as nx
+from networkx.readwrite.json_graph import *
+
+class TestAdjacency:
+
+ def test_graph(self):
+ G = nx.path_graph(4)
+ H = adjacency_graph(adjacency_data(G))
+        assert_true(nx.is_isomorphic(G,H))
+
+ def test_graph_attributes(self):
+ G = nx.path_graph(4)
+ G.add_node(1,color='red')
+ G.add_edge(1,2,width=7)
+ G.graph['foo']='bar'
+ G.graph[1]='one'
+
+ H = adjacency_graph(adjacency_data(G))
+ assert_equal(H.graph['foo'],'bar')
+ assert_equal(H.node[1]['color'],'red')
+ assert_equal(H[1][2]['width'],7)
+
+ d = json.dumps(adjacency_data(G))
+ H = adjacency_graph(json.loads(d))
+ assert_equal(H.graph['foo'],'bar')
+ assert_equal(H.graph[1],'one')
+ assert_equal(H.node[1]['color'],'red')
+ assert_equal(H[1][2]['width'],7)
+
+ def test_digraph(self):
+ G = nx.DiGraph()
+ G.add_path([1,2,3])
+ H = adjacency_graph(adjacency_data(G))
+ assert_true(H.is_directed())
+        assert_true(nx.is_isomorphic(G,H))
+
+ def test_multidigraph(self):
+ G = nx.MultiDiGraph()
+ G.add_path([1,2,3])
+ H = adjacency_graph(adjacency_data(G))
+ assert_true(H.is_directed())
+ assert_true(H.is_multigraph())
+
+ def test_multigraph(self):
+ G = nx.MultiGraph()
+ G.add_edge(1,2,key='first')
+ G.add_edge(1,2,key='second',color='blue')
+ H = adjacency_graph(adjacency_data(G))
+        assert_true(nx.is_isomorphic(G,H))
+ assert_equal(H[1][2]['second']['color'],'blue')
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/json_graph/tests/test_node_link.py b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/json_graph/tests/test_node_link.py
new file mode 100644
index 0000000..5430e0d
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/json_graph/tests/test_node_link.py
@@ -0,0 +1,44 @@
+import json
+from nose.tools import assert_equal, assert_raises, assert_not_equal,assert_true
+import networkx as nx
+from networkx.readwrite.json_graph import *
+
+class TestNodeLink:
+
+ def test_graph(self):
+ G = nx.path_graph(4)
+ H = node_link_graph(node_link_data(G))
+        assert_true(nx.is_isomorphic(G,H))
+
+ def test_graph_attributes(self):
+ G = nx.path_graph(4)
+ G.add_node(1,color='red')
+ G.add_edge(1,2,width=7)
+ G.graph[1]='one'
+ G.graph['foo']='bar'
+
+ H = node_link_graph(node_link_data(G))
+ assert_equal(H.graph['foo'],'bar')
+ assert_equal(H.node[1]['color'],'red')
+ assert_equal(H[1][2]['width'],7)
+
+ d=json.dumps(node_link_data(G))
+ H = node_link_graph(json.loads(d))
+ assert_equal(H.graph['foo'],'bar')
+ assert_equal(H.graph[1],'one')
+ assert_equal(H.node[1]['color'],'red')
+ assert_equal(H[1][2]['width'],7)
+
+ def test_digraph(self):
+ G = nx.DiGraph()
+ H = node_link_graph(node_link_data(G))
+ assert_true(H.is_directed())
+
+
+ def test_multigraph(self):
+ G = nx.MultiGraph()
+ G.add_edge(1,2,key='first')
+ G.add_edge(1,2,key='second',color='blue')
+ H = node_link_graph(node_link_data(G))
+        assert_true(nx.is_isomorphic(G,H))
+ assert_equal(H[1][2]['second']['color'],'blue')
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/json_graph/tests/test_serialize.py b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/json_graph/tests/test_serialize.py
new file mode 100644
index 0000000..cc8d7ff
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/json_graph/tests/test_serialize.py
@@ -0,0 +1,49 @@
+import json
+from nose.tools import assert_equal, assert_raises, assert_not_equal,assert_true
+import networkx as nx
+from networkx.readwrite.json_graph import *
+
+class TestSerialize:
+
+ def test_graph(self):
+ G = nx.path_graph(4)
+ H = loads(dumps(G))
+        assert_true(nx.is_isomorphic(G,H))
+
+ def test_graph_attributes(self):
+ G = nx.path_graph(4)
+ G.add_node(1,color='red')
+ G.add_edge(1,2,width=7)
+ G.graph['foo']='bar'
+ G.graph[1]='one'
+
+ H = loads(dumps(G))
+ assert_equal(H.graph['foo'],'bar')
+ assert_equal(H.graph[1],'one')
+ assert_equal(H.node[1]['color'],'red')
+ assert_equal(H[1][2]['width'],7)
+
+ try:
+ from StringIO import StringIO
+ except:
+ from io import StringIO
+ io = StringIO()
+ dump(G,io)
+ io.seek(0)
+ H=load(io)
+ assert_equal(H.graph['foo'],'bar')
+ assert_equal(H.graph[1],'one')
+ assert_equal(H.node[1]['color'],'red')
+ assert_equal(H[1][2]['width'],7)
+
+
+ def test_digraph(self):
+ G = nx.DiGraph()
+ H = loads(dumps(G))
+ assert_true(H.is_directed())
+
+ def test_multidigraph(self):
+ G = nx.MultiDiGraph()
+ H = loads(dumps(G))
+ assert_true(H.is_directed())
+ assert_true(H.is_multigraph())
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/json_graph/tests/test_tree.py b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/json_graph/tests/test_tree.py
new file mode 100644
index 0000000..19ba8f2
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/json_graph/tests/test_tree.py
@@ -0,0 +1,29 @@
+import json
+from nose.tools import assert_equal, assert_raises, assert_not_equal,assert_true
+import networkx as nx
+from networkx.readwrite.json_graph import *
+
+class TestTree:
+
+ def test_graph(self):
+ G=nx.DiGraph()
+ G.add_nodes_from([1,2,3],color='red')
+ G.add_edge(1,2,foo=7)
+ G.add_edge(1,3,foo=10)
+ G.add_edge(3,4,foo=10)
+ H = tree_graph(tree_data(G,1))
+        assert_true(nx.is_isomorphic(G,H))
+
+ def test_graph_attributes(self):
+ G=nx.DiGraph()
+ G.add_nodes_from([1,2,3],color='red')
+ G.add_edge(1,2,foo=7)
+ G.add_edge(1,3,foo=10)
+ G.add_edge(3,4,foo=10)
+ H = tree_graph(tree_data(G,1))
+ assert_equal(H.node[1]['color'],'red')
+
+ d = json.dumps(tree_data(G,1))
+ H = tree_graph(json.loads(d))
+ assert_equal(H.node[1]['color'],'red')
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/json_graph/tree.py b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/json_graph/tree.py
new file mode 100644
index 0000000..d2229e9
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/json_graph/tree.py
@@ -0,0 +1,113 @@
+# Copyright (C) 2011 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+from itertools import count,repeat
+import json
+import networkx as nx
+__author__ = """Aric Hagberg (hagberg@lanl.gov))"""
+__all__ = ['tree_data',
+ 'tree_graph']
+
+def tree_data(G, root):
+ """Return data in tree format that is suitable for JSON serialization
+ and use in Javascript documents.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ G must be an oriented tree
+
+ root : node
+ The root of the tree
+
+ Returns
+ -------
+ data : dict
+ A dictionary with node-link formatted data.
+
+ Examples
+ --------
+ >>> from networkx.readwrite import json_graph
+ >>> G = nx.DiGraph([(1,2)])
+ >>> data = json_graph.tree_data(G,root=1)
+
+ To serialize with json
+
+ >>> import json
+ >>> s = json.dumps(data)
+
+ Notes
+ -----
+ Node attributes are stored in this format but keys
+ for attributes must be strings if you want to serialize with JSON.
+
+ Graph and edge attributes are not stored.
+
+ See Also
+ --------
+    tree_graph, node_link_data, adjacency_data
+ """
+ if not G.number_of_nodes()==G.number_of_edges()+1:
+ raise TypeError("G is not a tree.")
+ if not G.is_directed():
+ raise TypeError("G is not directed")
+ def add_children(n, G):
+ nbrs = G[n]
+ if len(nbrs) == 0:
+ return []
+ children = []
+ for child in nbrs:
+ d = dict(id=child, **G.node[child])
+ c = add_children(child,G)
+ if c:
+ d['children'] = c
+ children.append(d)
+ return children
+ data = dict(id=root, **G.node[root])
+ data['children'] = add_children(root,G)
+ return data
+
+def tree_graph(data):
+ """Return graph from tree data format.
+
+ Parameters
+ ----------
+ data : dict
+ Tree formatted graph data
+
+ Returns
+ -------
+ G : NetworkX DiGraph
+
+ Examples
+ --------
+ >>> from networkx.readwrite import json_graph
+ >>> G = nx.DiGraph([(1,2)])
+ >>> data = json_graph.tree_data(G,root=1)
+ >>> H = json_graph.tree_graph(data)
+
+ See Also
+ --------
+    tree_data, node_link_data, adjacency_data
+ """
+ graph = nx.DiGraph()
+ def add_children(parent, children):
+ for data in children:
+ child = data['id']
+ graph.add_edge(parent, child)
+ grandchildren = data.get('children',[])
+ if grandchildren:
+ add_children(child,grandchildren)
+ nodedata = dict((str(k),v) for k,v in data.items()
+ if k!='id' and k!='children')
+ graph.add_node(child,attr_dict=nodedata)
+ root = data['id']
+ children = data.get('children',[])
+ nodedata = dict((k,v) for k,v in data.items()
+ if k!='id' and k!='children')
+ graph.add_node(root, attr_dict=nodedata)
+ add_children(root, children)
+ return graph
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/leda.py b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/leda.py
new file mode 100644
index 0000000..a4d17db
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/leda.py
@@ -0,0 +1,106 @@
+"""
+Read graphs in LEDA format.
+
+LEDA is a C++ class library for efficient data types and algorithms.
+
+Format
+------
+See http://www.algorithmic-solutions.info/leda_guide/graphs/leda_native_graph_fileformat.html
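+
+A small directed example, sketched from the parser in this module (see
+the URL above for the authoritative format description)::
+
+    LEDA.GRAPH
+    string
+    int
+    -1
+    2
+    |{a}|
+    |{b}|
+    1
+    1 2 0 |{}|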
+
+"""
+# Original author: D. Eppstein, UC Irvine, August 12, 2003.
+# The original code at http://www.ics.uci.edu/~eppstein/PADS/ is public domain.
+__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
+# Copyright (C) 2004-2010 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+
+__all__ = ['read_leda', 'parse_leda']
+
+import networkx as nx
+from networkx.exception import NetworkXError
+from networkx.utils import open_file, is_string_like
+
+@open_file(0,mode='rb')
+def read_leda(path, encoding='UTF-8'):
+ """Read graph in LEDA format from path.
+
+ Parameters
+ ----------
+ path : file or string
+ File or filename to read. Filenames ending in .gz or .bz2 will be
+ uncompressed.
+
+ Returns
+ -------
+ G : NetworkX graph
+
+ Examples
+ --------
+ G=nx.read_leda('file.leda')
+
+ References
+ ----------
+ .. [1] http://www.algorithmic-solutions.info/leda_guide/graphs/leda_native_graph_fileformat.html
+ """
+ lines=(line.decode(encoding) for line in path)
+ G=parse_leda(lines)
+ return G
+
+
+def parse_leda(lines):
+ """Read graph in LEDA format from string or iterable.
+
+ Parameters
+ ----------
+ lines : string or iterable
+ Data in LEDA format.
+
+ Returns
+ -------
+ G : NetworkX graph
+
+ Examples
+ --------
+ G=nx.parse_leda(string)
+
+ References
+ ----------
+ .. [1] http://www.algorithmic-solutions.info/leda_guide/graphs/leda_native_graph_fileformat.html
+ """
+ if is_string_like(lines): lines=iter(lines.split('\n'))
+ lines = iter([line.rstrip('\n') for line in lines \
+ if not (line.startswith('#') or line.startswith('\n') or line=='')])
+ for i in range(3):
+ next(lines)
+ # Graph
+ du = int(next(lines)) # -1=directed, -2=undirected
+ if du==-1:
+ G = nx.DiGraph()
+ else:
+ G = nx.Graph()
+
+ # Nodes
+ n =int(next(lines)) # number of nodes
+ node={}
+ for i in range(1,n+1): # LEDA counts from 1 to n
+ symbol=next(lines).rstrip().strip('|{}| ')
+ if symbol=="": symbol=str(i) # use int if no label - could be trouble
+ node[i]=symbol
+
+ G.add_nodes_from([s for i,s in node.items()])
+
+ # Edges
+ m = int(next(lines)) # number of edges
+ for i in range(m):
+ try:
+ s,t,reversal,label=next(lines).split()
+        except (ValueError, StopIteration):
+ raise NetworkXError('Too few fields in LEDA.GRAPH edge %d'%(i+1))
+ # BEWARE: no handling of reversal edges
+ G.add_edge(node[int(s)],node[int(t)],label=label[2:-2])
+ return G
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/multiline_adjlist.py b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/multiline_adjlist.py
new file mode 100644
index 0000000..30c3234
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/multiline_adjlist.py
@@ -0,0 +1,390 @@
+# -*- coding: utf-8 -*-
+"""
+*************************
+Multi-line Adjacency List
+*************************
+Read and write NetworkX graphs as multi-line adjacency lists.
+
+The multi-line adjacency list format is useful for graphs with
+nodes that can be meaningfully represented as strings. With this format
+simple edge data can be stored but node or graph data is not.
+
+Format
+------
+The first label in a line is the source node label followed by the node degree
+d. The next d lines are target node labels and optional edge data.
+That pattern repeats for all nodes in the graph.
+
+The graph with edges a-b, a-c, d-e can be represented as the following
+adjacency list (anything following the # in a line is a comment)::
+
+ # example.multiline-adjlist
+ a 2
+ b
+ c
+ d 1
+ e
+"""
+__author__ = '\n'.join(['Aric Hagberg <hagberg@lanl.gov>',
+ 'Dan Schult <dschult@colgate.edu>',
+ 'Loïc Séguin-C. <loicseguin@gmail.com>'])
+# Copyright (C) 2004-2010 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+
+__all__ = ['generate_multiline_adjlist',
+ 'write_multiline_adjlist',
+ 'parse_multiline_adjlist',
+ 'read_multiline_adjlist']
+
+from networkx.utils import make_str, open_file
+import networkx as nx
+
+def generate_multiline_adjlist(G, delimiter = ' '):
+ """Generate a single line of the graph G in multiline adjacency list format.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ delimiter : string, optional
+ Separator for node labels
+
+ Returns
+ -------
+ lines : string
+ Lines of data in multiline adjlist format.
+
+ Examples
+ --------
+ >>> G = nx.lollipop_graph(4, 3)
+ >>> for line in nx.generate_multiline_adjlist(G):
+ ... print(line)
+ 0 3
+ 1 {}
+ 2 {}
+ 3 {}
+ 1 2
+ 2 {}
+ 3 {}
+ 2 1
+ 3 {}
+ 3 1
+ 4 {}
+ 4 1
+ 5 {}
+ 5 1
+ 6 {}
+ 6 0
+
+ See Also
+ --------
+ write_multiline_adjlist, read_multiline_adjlist
+ """
+ if G.is_directed():
+ if G.is_multigraph():
+ for s,nbrs in G.adjacency_iter():
+ nbr_edges=[ (u,data)
+ for u,datadict in nbrs.items()
+ for key,data in datadict.items()]
+ deg=len(nbr_edges)
+ yield make_str(s)+delimiter+"%i"%(deg)
+ for u,d in nbr_edges:
+ if d is None:
+ yield make_str(u)
+ else:
+ yield make_str(u)+delimiter+make_str(d)
+ else: # directed single edges
+ for s,nbrs in G.adjacency_iter():
+ deg=len(nbrs)
+ yield make_str(s)+delimiter+"%i"%(deg)
+ for u,d in nbrs.items():
+ if d is None:
+ yield make_str(u)
+ else:
+ yield make_str(u)+delimiter+make_str(d)
+ else: # undirected
+ if G.is_multigraph():
+            seen=set() # helper set used to avoid duplicate edges
+ for s,nbrs in G.adjacency_iter():
+ nbr_edges=[ (u,data)
+ for u,datadict in nbrs.items()
+ if u not in seen
+ for key,data in datadict.items()]
+ deg=len(nbr_edges)
+ yield make_str(s)+delimiter+"%i"%(deg)
+ for u,d in nbr_edges:
+ if d is None:
+ yield make_str(u)
+ else:
+ yield make_str(u)+delimiter+make_str(d)
+ seen.add(s)
+ else: # undirected single edges
+            seen=set() # helper set used to avoid duplicate edges
+ for s,nbrs in G.adjacency_iter():
+ nbr_edges=[ (u,d) for u,d in nbrs.items() if u not in seen]
+ deg=len(nbr_edges)
+ yield make_str(s)+delimiter+"%i"%(deg)
+ for u,d in nbr_edges:
+ if d is None:
+ yield make_str(u)
+ else:
+ yield make_str(u)+delimiter+make_str(d)
+ seen.add(s)
+
+@open_file(1,mode='wb')
+def write_multiline_adjlist(G, path, delimiter=' ',
+ comments='#', encoding = 'utf-8'):
+ """ Write the graph G in multiline adjacency list format to path
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ comments : string, optional
+ Marker for comment lines
+
+ delimiter : string, optional
+ Separator for node labels
+
+ encoding : string, optional
+ Text encoding.
+
+ Examples
+ --------
+ >>> G=nx.path_graph(4)
+ >>> nx.write_multiline_adjlist(G,"test.adjlist")
+
+ The path can be a file handle or a string with the name of the file. If a
+ file handle is provided, it has to be opened in 'wb' mode.
+
+ >>> fh=open("test.adjlist",'wb')
+ >>> nx.write_multiline_adjlist(G,fh)
+
+ Filenames ending in .gz or .bz2 will be compressed.
+
+ >>> nx.write_multiline_adjlist(G,"test.adjlist.gz")
+
+ See Also
+ --------
+ read_multiline_adjlist
+ """
+ import sys
+ import time
+
+ pargs=comments+" ".join(sys.argv)
+ header = ("%s\n" % (pargs)
+ + comments + " GMT %s\n" % (time.asctime(time.gmtime()))
+ + comments + " %s\n" % (G.name))
+ path.write(header.encode(encoding))
+
+ for multiline in generate_multiline_adjlist(G, delimiter):
+ multiline+='\n'
+ path.write(multiline.encode(encoding))
+
+def parse_multiline_adjlist(lines, comments = '#', delimiter = None,
+ create_using = None, nodetype = None,
+ edgetype = None):
+ """Parse lines of a multiline adjacency list representation of a graph.
+
+ Parameters
+ ----------
+ lines : list or iterator of strings
+ Input data in multiline adjlist format
+
+ create_using: NetworkX graph container
+ Use given NetworkX graph for holding nodes or edges.
+
+ nodetype : Python type, optional
+ Convert nodes to this type.
+
+ comments : string, optional
+ Marker for comment lines
+
+ delimiter : string, optional
+ Separator for node labels. The default is whitespace.
+
+ Returns
+ -------
+ G: NetworkX graph
+ The graph corresponding to the lines in multiline adjacency list format.
+
+ Examples
+ --------
+ >>> lines = ['1 2',
+ ... "2 {'weight':3, 'name': 'Frodo'}",
+ ... "3 {}",
+ ... "2 1",
+ ... "5 {'weight':6, 'name': 'Saruman'}"]
+ >>> G = nx.parse_multiline_adjlist(iter(lines), nodetype = int)
+ >>> G.nodes()
+ [1, 2, 3, 5]
+ """
+ from ast import literal_eval
+ if create_using is None:
+ G=nx.Graph()
+ else:
+ try:
+ G=create_using
+ G.clear()
+ except:
+ raise TypeError("Input graph is not a networkx graph type")
+
+ for line in lines:
+ p=line.find(comments)
+ if p>=0:
+ line = line[:p]
+ if not line: continue
+ try:
+ (u,deg)=line.strip().split(delimiter)
+ deg=int(deg)
+ except:
+ raise TypeError("Failed to read node and degree on line (%s)"%line)
+ if nodetype is not None:
+ try:
+ u=nodetype(u)
+ except:
+ raise TypeError("Failed to convert node (%s) to type %s"\
+ %(u,nodetype))
+ G.add_node(u)
+ for i in range(deg):
+ while True:
+ try:
+ line = next(lines)
+ except StopIteration:
+ msg = "Failed to find neighbor for node (%s)" % (u,)
+ raise TypeError(msg)
+ p=line.find(comments)
+ if p>=0:
+ line = line[:p]
+ if line: break
+ vlist=line.strip().split(delimiter)
+ numb=len(vlist)
+ if numb<1:
+ continue # isolated node
+ v=vlist.pop(0)
+ data=''.join(vlist)
+ if nodetype is not None:
+ try:
+ v=nodetype(v)
+ except:
+ raise TypeError(
+ "Failed to convert node (%s) to type %s"\
+ %(v,nodetype))
+ if edgetype is not None:
+ try:
+ edgedata={'weight':edgetype(data)}
+ except:
+ raise TypeError(
+ "Failed to convert edge data (%s) to type %s"\
+ %(data, edgetype))
+ else:
+ try: # try to evaluate
+ edgedata=literal_eval(data)
+ except:
+ edgedata={}
+ G.add_edge(u,v,attr_dict=edgedata)
+
+ return G
+
+@open_file(0,mode='rb')
+def read_multiline_adjlist(path, comments="#", delimiter=None,
+ create_using=None,
+ nodetype=None, edgetype=None,
+ encoding = 'utf-8'):
+ """Read graph in multi-line adjacency list format from path.
+
+ Parameters
+ ----------
+ path : string or file
+ Filename or file handle to read.
+ Filenames ending in .gz or .bz2 will be uncompressed.
+
+ create_using: NetworkX graph container
+ Use given NetworkX graph for holding nodes or edges.
+
+ nodetype : Python type, optional
+ Convert nodes to this type.
+
+ edgetype : Python type, optional
+ Convert edge data to this type.
+
+ comments : string, optional
+ Marker for comment lines
+
+ delimiter : string, optional
+ Separator for node labels. The default is whitespace.
+
+ Returns
+ -------
+ G: NetworkX graph
+
+ Examples
+ --------
+ >>> G=nx.path_graph(4)
+ >>> nx.write_multiline_adjlist(G,"test.adjlist")
+ >>> G=nx.read_multiline_adjlist("test.adjlist")
+
+    The path can be a file or a string with the name of the file. If a
+    file is provided, it has to be opened in 'rb' mode.
+
+ >>> fh=open("test.adjlist", 'rb')
+ >>> G=nx.read_multiline_adjlist(fh)
+
+ Filenames ending in .gz or .bz2 will be compressed.
+
+ >>> nx.write_multiline_adjlist(G,"test.adjlist.gz")
+ >>> G=nx.read_multiline_adjlist("test.adjlist.gz")
+
+ The optional nodetype is a function to convert node strings to nodetype.
+
+ For example
+
+ >>> G=nx.read_multiline_adjlist("test.adjlist", nodetype=int)
+
+ will attempt to convert all nodes to integer type.
+
+ The optional edgetype is a function to convert edge data strings to
+ edgetype.
+
+ >>> G=nx.read_multiline_adjlist("test.adjlist")
+
+ The optional create_using parameter is a NetworkX graph container.
+ The default is Graph(), an undirected graph. To read the data as
+ a directed graph use
+
+ >>> G=nx.read_multiline_adjlist("test.adjlist", create_using=nx.DiGraph())
+
+ Notes
+ -----
+    This format stores simple edge data, but not graph or node data.
+
+ See Also
+ --------
+ write_multiline_adjlist
+ """
+ lines = (line.decode(encoding) for line in path)
+ return parse_multiline_adjlist(lines,
+ comments = comments,
+ delimiter = delimiter,
+ create_using = create_using,
+ nodetype = nodetype,
+ edgetype = edgetype)
+
+
+# fixture for nose tests
+def teardown_module(module):
+ import os
+ os.unlink('test.adjlist')
+ os.unlink('test.adjlist.gz')
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/nx_shp.py b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/nx_shp.py
new file mode 100644
index 0000000..d873ea6
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/nx_shp.py
@@ -0,0 +1,224 @@
+"""
+*********
+Shapefile
+*********
+
+Generates a networkx.DiGraph from point and line shapefiles.
+
+"The Esri Shapefile or simply a shapefile is a popular geospatial vector
+data format for geographic information systems software. It is developed
+and regulated by Esri as a (mostly) open specification for data
+interoperability among Esri and other software products."
+See http://en.wikipedia.org/wiki/Shapefile for additional information.
+"""
+# Copyright (C) 2004-2010 by
+# Ben Reilly <benwreilly@gmail.com>
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+import networkx as nx
+__author__ = """Ben Reilly (benwreilly@gmail.com)"""
+__all__ = ['read_shp', 'write_shp']
+
+
+def read_shp(path):
+ """Generates a networkx.DiGraph from shapefiles. Point geometries are
+ translated into nodes, lines into edges. Coordinate tuples are used as
+ keys. Attributes are preserved, line geometries are simplified into start
+ and end coordinates. Accepts a single shapefile or directory of many
+ shapefiles.
+
+ "The Esri Shapefile or simply a shapefile is a popular geospatial vector
+ data format for geographic information systems software [1]_."
+
+ Parameters
+ ----------
+ path : file or string
+ File, directory, or filename to read.
+
+ Returns
+ -------
+ G : NetworkX graph
+
+ Examples
+ --------
+ >>> G=nx.read_shp('test.shp') # doctest: +SKIP
+
+ References
+ ----------
+ .. [1] http://en.wikipedia.org/wiki/Shapefile
+ """
+ try:
+ from osgeo import ogr
+ except ImportError:
+ raise ImportError("read_shp requires OGR: http://www.gdal.org/")
+
+ net = nx.DiGraph()
+
+ def getfieldinfo(lyr, feature, flds):
+ f = feature
+ return [f.GetField(f.GetFieldIndex(x)) for x in flds]
+
+ def addlyr(lyr, fields):
+ for findex in xrange(lyr.GetFeatureCount()):
+ f = lyr.GetFeature(findex)
+ flddata = getfieldinfo(lyr, f, fields)
+ g = f.geometry()
+ attributes = dict(zip(fields, flddata))
+ attributes["ShpName"] = lyr.GetName()
+ if g.GetGeometryType() == 1: # point
+ net.add_node((g.GetPoint_2D(0)), attributes)
+ if g.GetGeometryType() == 2: # linestring
+ attributes["Wkb"] = g.ExportToWkb()
+ attributes["Wkt"] = g.ExportToWkt()
+ attributes["Json"] = g.ExportToJson()
+ last = g.GetPointCount() - 1
+ net.add_edge(g.GetPoint_2D(0), g.GetPoint_2D(last), attributes)
+
+ if isinstance(path, str):
+ shp = ogr.Open(path)
+ lyrcount = shp.GetLayerCount() # multiple layers indicate a directory
+ for lyrindex in xrange(lyrcount):
+ lyr = shp.GetLayerByIndex(lyrindex)
+ flds = [x.GetName() for x in lyr.schema]
+ addlyr(lyr, flds)
+ return net
+
+
+def write_shp(G, outdir):
+ """Writes a networkx.DiGraph to two shapefiles, edges and nodes.
+ Nodes and edges are expected to have a Well Known Binary (Wkb) or
+ Well Known Text (Wkt) key in order to generate geometries. Also
+ acceptable are nodes with a numeric tuple key (x,y).
+
+ "The Esri Shapefile or simply a shapefile is a popular geospatial vector
+ data format for geographic information systems software [1]_."
+
+ Parameters
+ ----------
+ outdir : directory path
+ Output directory for the two shapefiles.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+    >>> nx.write_shp(digraph, '/shapefiles')  # doctest: +SKIP
+
+ References
+ ----------
+ .. [1] http://en.wikipedia.org/wiki/Shapefile
+ """
+ try:
+ from osgeo import ogr
+ except ImportError:
+ raise ImportError("write_shp requires OGR: http://www.gdal.org/")
+ # easier to debug in python if ogr throws exceptions
+ ogr.UseExceptions()
+
+ def netgeometry(key, data):
+ if 'Wkb' in data:
+ geom = ogr.CreateGeometryFromWkb(data['Wkb'])
+ elif 'Wkt' in data:
+ geom = ogr.CreateGeometryFromWkt(data['Wkt'])
+ elif type(key[0]).__name__ == 'tuple': # edge keys are packed tuples
+ geom = ogr.Geometry(ogr.wkbLineString)
+ _from, _to = key[0], key[1]
+ try:
+ geom.SetPoint(0, *_from)
+ geom.SetPoint(1, *_to)
+ except TypeError:
+ # assume user used tuple of int and choked ogr
+ _ffrom = [float(x) for x in _from]
+ _fto = [float(x) for x in _to]
+ geom.SetPoint(0, *_ffrom)
+ geom.SetPoint(1, *_fto)
+ else:
+ geom = ogr.Geometry(ogr.wkbPoint)
+ try:
+ geom.SetPoint(0, *key)
+ except TypeError:
+ # assume user used tuple of int and choked ogr
+ fkey = [float(x) for x in key]
+ geom.SetPoint(0, *fkey)
+
+ return geom
+
+ # Create_feature with new optional attributes arg (should be dict type)
+ def create_feature(geometry, lyr, attributes=None):
+ feature = ogr.Feature(lyr.GetLayerDefn())
+        feature.SetGeometry(geometry)
+        if attributes is not None:
+            # Loop through attributes, assigning data to each field
+            for field, data in attributes.items():
+ feature.SetField(field, data)
+ lyr.CreateFeature(feature)
+ feature.Destroy()
+
+ drv = ogr.GetDriverByName("ESRI Shapefile")
+ shpdir = drv.CreateDataSource(outdir)
+ # delete pre-existing output first otherwise ogr chokes
+ try:
+ shpdir.DeleteLayer("nodes")
+ except:
+ pass
+ nodes = shpdir.CreateLayer("nodes", None, ogr.wkbPoint)
+ for n in G:
+ data = G.node[n] or {}
+ g = netgeometry(n, data)
+ create_feature(g, nodes)
+ try:
+ shpdir.DeleteLayer("edges")
+ except:
+ pass
+ edges = shpdir.CreateLayer("edges", None, ogr.wkbLineString)
+
+ # New edge attribute write support merged into edge loop
+ fields = {} # storage for field names and their data types
+ attributes = {} # storage for attribute data (indexed by field names)
+
+ # Conversion dict between python and ogr types
+ OGRTypes = {int: ogr.OFTInteger, str: ogr.OFTString, float: ogr.OFTReal}
+
+ # Edge loop
+ for e in G.edges(data=True):
+ data = G.get_edge_data(*e)
+ g = netgeometry(e, data)
+ # Loop through attribute data in edges
+        for key, data in e[2].items():
+ # Reject spatial data not required for attribute table
+ if (key != 'Json' and key != 'Wkt' and key != 'Wkb'
+ and key != 'ShpName'):
+ # For all edges check/add field and data type to fields dict
+ if key not in fields:
+ # Field not in previous edges so add to dict
+ if type(data) in OGRTypes:
+ fields[key] = OGRTypes[type(data)]
+ else:
+ # Data type not supported, default to string (char 80)
+ fields[key] = ogr.OFTString
+ # Create the new field
+ newfield = ogr.FieldDefn(key, fields[key])
+ edges.CreateField(newfield)
+ # Store the data from new field to dict for CreateLayer()
+ attributes[key] = data
+ else:
+ # Field already exists, add data to dict for CreateLayer()
+ attributes[key] = data
+        # Create the feature, passing the new attribute data
+ create_feature(g, edges, attributes)
+
+ nodes, edges = None, None
+
+
+# fixture for nose tests
+def setup_module(module):
+ from nose import SkipTest
+ try:
+ import ogr
+ except:
+ raise SkipTest("OGR not available")
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/nx_yaml.py b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/nx_yaml.py
new file mode 100644
index 0000000..00c916f
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/nx_yaml.py
@@ -0,0 +1,109 @@
+"""
+****
+YAML
+****
+Read and write NetworkX graphs in YAML format.
+
+"YAML is a data serialization format designed for human readability
+and interaction with scripting languages."
+See http://www.yaml.org for documentation.
+
+Format
+------
+http://pyyaml.org/wiki/PyYAML
+
+"""
+__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
+# Copyright (C) 2004-2010 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+
+__all__ = ['read_yaml', 'write_yaml']
+
+import networkx as nx
+from networkx.utils import open_file
+
+@open_file(1,mode='w')
+def write_yaml(G, path, encoding='UTF-8', **kwds):
+ """Write graph G in YAML format to path.
+
+ YAML is a data serialization format designed for human readability
+ and interaction with scripting languages [1]_.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph
+ path : file or string
+ File or filename to write.
+ Filenames ending in .gz or .bz2 will be compressed.
+ encoding: string, optional
+ Specify which encoding to use when writing file.
+
+ Examples
+ --------
+ >>> G=nx.path_graph(4)
+ >>> nx.write_yaml(G,'test.yaml')
+
+ References
+ ----------
+ .. [1] http://www.yaml.org
+ """
+ try:
+ import yaml
+ except ImportError:
+ raise ImportError("write_yaml() requires PyYAML: http://pyyaml.org/")
+ yaml.dump(G, path, **kwds)
+
+@open_file(0,mode='r')
+def read_yaml(path):
+ """Read graph in YAML format from path.
+
+ YAML is a data serialization format designed for human readability
+ and interaction with scripting languages [1]_.
+
+ Parameters
+ ----------
+ path : file or string
+ File or filename to read. Filenames ending in .gz or .bz2
+ will be uncompressed.
+
+ Returns
+ -------
+ G : NetworkX graph
+
+ Examples
+ --------
+ >>> G=nx.path_graph(4)
+ >>> nx.write_yaml(G,'test.yaml')
+ >>> G=nx.read_yaml('test.yaml')
+
+ References
+ ----------
+ .. [1] http://www.yaml.org
+
+ """
+ try:
+ import yaml
+ except ImportError:
+ raise ImportError("read_yaml() requires PyYAML: http://pyyaml.org/")
+
+ G=yaml.load(path)
+ return G
+
+
+# fixture for nose tests
+def setup_module(module):
+ from nose import SkipTest
+ try:
+ import yaml
+ except:
+ raise SkipTest("PyYAML not available")
+
+# fixture for nose tests
+def teardown_module(module):
+ import os
+ os.unlink('test.yaml')
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/p2g.py b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/p2g.py
new file mode 100644
index 0000000..92dca9d
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/p2g.py
@@ -0,0 +1,107 @@
+"""
+This module provides the following: read and write of p2g format
+used in metabolic pathway studies.
+
+See http://www.cs.purdue.edu/homes/koyuturk/pathway/ for a description.
+
+The summary is included here:
+
+A file that describes a uniquely labeled graph (with extension ".gr")
+looks like the following:
+
+
+name
+3 4
+a
+1 2
+b
+
+c
+0 2
+
+"name" is simply a description of what the graph corresponds to. The
+second line displays the number of nodes and number of edges,
+respectively. This sample graph contains three nodes labeled "a", "b",
+and "c". The rest of the graph contains two lines for each node. The
+first line for a node contains the node label. After the declaration
+of the node label, the out-edges of that node in the graph are
+provided. For instance, "a" is linked to nodes 1 and 2, which are
+labeled "b" and "c", while the node labeled "b" has no outgoing
+edges. Observe that node labeled "c" has an outgoing edge to
+itself. Indeed, self-loops are allowed. Node index starts from 0.
+
+"""
+# Copyright (C) 2008-2012 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+import networkx
+from networkx.utils import is_string_like,open_file
+__author__ = '\n'.join(['Willem Ligtenberg (w.p.a.ligtenberg@tue.nl)',
+ 'Aric Hagberg (aric.hagberg@gmail.com)'])
+
+@open_file(1,mode='w')
+def write_p2g(G, path, encoding = 'utf-8'):
+ """Write NetworkX graph in p2g format.
+
+ Notes
+ -----
+ This format is meant to be used with directed graphs with
+ possible self loops.
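+
+    Example (a sketch; assumes a writable working directory):
+
+    >>> G = networkx.MultiDiGraph([('a', 'b')])
+    >>> write_p2g(G, 'test.p2g')  # doctest: +SKIP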
+ """
+ path.write(("%s\n"%G.name).encode(encoding))
+ path.write(("%s %s\n"%(G.order(),G.size())).encode(encoding))
+ nodes = G.nodes()
+ # make dictionary mapping nodes to integers
+ nodenumber=dict(zip(nodes,range(len(nodes))))
+ for n in nodes:
+ path.write(("%s\n"%n).encode(encoding))
+ for nbr in G.neighbors(n):
+ path.write(("%s "%nodenumber[nbr]).encode(encoding))
+ path.write("\n".encode(encoding))
+
+@open_file(0,mode='r')
+def read_p2g(path, encoding='utf-8'):
+ """Read graph in p2g format from path.
+
+ Returns
+ -------
+ MultiDiGraph
+
+ Notes
+ -----
+ If you want a DiGraph (with no self loops allowed and no edge data)
+ use D=networkx.DiGraph(read_p2g(path))
+ """
+ lines = (line.decode(encoding) for line in path)
+ G=parse_p2g(lines)
+ return G
+
+def parse_p2g(lines):
+ """Parse p2g format graph from string or iterable.
+
+ Returns
+ -------
+ MultiDiGraph
+ """
+ description = next(lines).strip()
+ # are multiedges (parallel edges) allowed?
+ G=networkx.MultiDiGraph(name=description,selfloops=True)
+ nnodes,nedges=map(int,next(lines).split())
+ nodelabel={}
+ nbrs={}
+ # loop over the nodes keeping track of node labels and out neighbors
+ # defer adding edges until all node labels are known
+ for i in range(nnodes):
+ n=next(lines).strip()
+ nodelabel[i]=n
+ G.add_node(n)
+ nbrs[n]=map(int,next(lines).split())
+ # now we know all of the node labels so we can add the edges
+ # with the correct labels
+ for n in G:
+ for nbr in nbrs[n]:
+ G.add_edge(n,nodelabel[nbr])
+ return G
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/pajek.py b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/pajek.py
new file mode 100644
index 0000000..ffc3641
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/pajek.py
@@ -0,0 +1,231 @@
+"""
+*****
+Pajek
+*****
+Read graphs in Pajek format.
+
+This implementation handles directed and undirected graphs including
+those with self loops and parallel edges.
+
+Format
+------
+See http://vlado.fmf.uni-lj.si/pub/networks/pajek/doc/draweps.htm
+for format information.
+"""
+# Copyright (C) 2008-2011 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+import networkx as nx
+from networkx.utils import is_string_like, open_file, make_str
+__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
+__all__ = ['read_pajek', 'parse_pajek', 'generate_pajek', 'write_pajek']
+
+def generate_pajek(G):
+ """Generate lines in Pajek graph format.
+
+ Parameters
+ ----------
+ G : graph
+ A Networkx graph
+
+ References
+ ----------
+ See http://vlado.fmf.uni-lj.si/pub/networks/pajek/doc/draweps.htm
+ for format information.
+ """
+ if G.name=='':
+ name='NetworkX'
+ else:
+ name=G.name
+ # Apparently many Pajek format readers can't process this line
+ # So we'll leave it out for now.
+ # yield '*network %s'%name
+
+ # write nodes with attributes
+ yield '*vertices %s'%(G.order())
+ nodes = G.nodes()
+ # make dictionary mapping nodes to integers
+ nodenumber=dict(zip(nodes,range(1,len(nodes)+1)))
+ for n in nodes:
+ na=G.node.get(n,{})
+ x=na.get('x',0.0)
+ y=na.get('y',0.0)
+ id=int(na.get('id',nodenumber[n]))
+ nodenumber[n]=id
+ shape=na.get('shape','ellipse')
+ s=' '.join(map(make_qstr,(id,n,x,y,shape)))
+ for k,v in na.items():
+ s+=' %s %s'%(make_qstr(k),make_qstr(v))
+ yield s
+
+ # write edges with attributes
+ if G.is_directed():
+ yield '*arcs'
+ else:
+ yield '*edges'
+ for u,v,edgedata in G.edges(data=True):
+ d=edgedata.copy()
+ value=d.pop('weight',1.0) # use 1 as default edge value
+ s=' '.join(map(make_qstr,(nodenumber[u],nodenumber[v],value)))
+ for k,v in d.items():
+ s+=' %s %s'%(make_qstr(k),make_qstr(v))
+ yield s
+
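+# A minimal sketch of the lines generate_pajek yields (the exact output
+# shown is an assumption based on the code above, for a two-node path
+# graph with default node attributes):
+#
+#     >>> G = nx.path_graph(2)
+#     >>> for line in generate_pajek(G):
+#     ...     print(line)
+#     *vertices 2
+#     1 0 0.0 0.0 ellipse
+#     2 1 0.0 0.0 ellipse
+#     *edges
+#     1 2 1.0
+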
+@open_file(1,mode='wb')
+def write_pajek(G, path, encoding='UTF-8'):
+ """Write graph in Pajek format to path.
+
+ Parameters
+ ----------
+ G : graph
+ A Networkx graph
+ path : file or string
+ File or filename to write.
+ Filenames ending in .gz or .bz2 will be compressed.
+
+ Examples
+ --------
+ >>> G=nx.path_graph(4)
+ >>> nx.write_pajek(G, "test.net")
+
+ References
+ ----------
+ See http://vlado.fmf.uni-lj.si/pub/networks/pajek/doc/draweps.htm
+ for format information.
+ """
+ for line in generate_pajek(G):
+ line+='\n'
+ path.write(line.encode(encoding))
+
+@open_file(0,mode='rb')
+def read_pajek(path,encoding='UTF-8'):
+ """Read graph in Pajek format from path.
+
+ Parameters
+ ----------
+ path : file or string
+       File or filename to read.
+ Filenames ending in .gz or .bz2 will be uncompressed.
+
+ Returns
+ -------
+ G : NetworkX MultiGraph or MultiDiGraph.
+
+ Examples
+ --------
+ >>> G=nx.path_graph(4)
+ >>> nx.write_pajek(G, "test.net")
+ >>> G=nx.read_pajek("test.net")
+
+ To create a Graph instead of a MultiGraph use
+
+ >>> G1=nx.Graph(G)
+
+ References
+ ----------
+ See http://vlado.fmf.uni-lj.si/pub/networks/pajek/doc/draweps.htm
+ for format information.
+ """
+ lines = (line.decode(encoding) for line in path)
+ return parse_pajek(lines)
+
+def parse_pajek(lines):
+ """Parse Pajek format graph from string or iterable.
+
+ Parameters
+ ----------
+ lines : string or iterable
+ Data in Pajek format.
+
+ Returns
+ -------
+ G : NetworkX graph
+
+ See Also
+ --------
+ read_pajek()
+
+ """
+ import shlex
+ # multigraph=False
+ if is_string_like(lines): lines=iter(lines.split('\n'))
+ lines = iter([line.rstrip('\n') for line in lines])
+ G=nx.MultiDiGraph() # are multiedges allowed in Pajek? assume yes
+ while lines:
+ try:
+ l=next(lines)
+        except StopIteration: # EOF
+ break
+ if l.lower().startswith("*network"):
+ label,name=l.split()
+ G.name=name
+ if l.lower().startswith("*vertices"):
+ nodelabels={}
+ l,nnodes=l.split()
+ for i in range(int(nnodes)):
+ splitline=shlex.split(str(next(lines)))
+ id,label=splitline[0:2]
+ G.add_node(label)
+ nodelabels[id]=label
+ G.node[label]={'id':id}
+ try:
+ x,y,shape=splitline[2:5]
+ G.node[label].update({'x':float(x),
+ 'y':float(y),
+ 'shape':shape})
+                except ValueError:
+ pass
+ extra_attr=zip(splitline[5::2],splitline[6::2])
+ G.node[label].update(extra_attr)
+ if l.lower().startswith("*edges") or l.lower().startswith("*arcs"):
+ if l.lower().startswith("*edge"):
+ # switch from multidigraph to multigraph
+ G=nx.MultiGraph(G)
+ if l.lower().startswith("*arcs"):
+ # switch to directed with multiple arcs for each existing edge
+ G=G.to_directed()
+ for l in lines:
+ splitline=shlex.split(str(l))
+ if len(splitline)<2:
+ continue
+ ui,vi=splitline[0:2]
+ u=nodelabels.get(ui,ui)
+ v=nodelabels.get(vi,vi)
+ # parse the data attached to this edge and put in a dictionary
+ edge_data={}
+                try:
+                    # the third field, if present, should be a single edge weight
+                    w=splitline[2:3]
+                    edge_data.update({'weight':float(w[0])})
+                except (IndexError,ValueError):
+                    pass
+ # if there isn't, just assign a 1
+# edge_data.update({'value':1})
+ extra_attr=zip(splitline[3::2],splitline[4::2])
+ edge_data.update(extra_attr)
+ # if G.has_edge(u,v):
+ # multigraph=True
+ G.add_edge(u,v,**edge_data)
+ return G
+
+
+
+def make_qstr(t):
+ """Return the string representation of t.
+ Add outer double-quotes if the string has a space.
+ """
+ if not is_string_like(t):
+ t = str(t)
+ if " " in t:
+ t=r'"%s"'%t
+ return t
+
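+# For example (a quick sketch): make_qstr("a b") returns '"a b"', while
+# make_qstr(7) returns '7'.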
+
+# fixture for nose tests
+def teardown_module(module):
+ import os
+ os.unlink('test.net')
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/sparsegraph6.py b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/sparsegraph6.py
new file mode 100644
index 0000000..440c92a
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/sparsegraph6.py
@@ -0,0 +1,169 @@
+"""
+**************
+SparseGraph 6
+**************
+Read graphs in graph6 and sparse6 format.
+
+Format
+------
+
+"graph6 and sparse6 are formats for storing undirected graphs in a
+compact manner, using only printable ASCII characters. Files in these
+formats have text type and contain one line per graph."
+http://cs.anu.edu.au/~bdm/data/formats.html
+
+See http://cs.anu.edu.au/~bdm/data/formats.txt for details.
+"""
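+# A minimal usage sketch (the string 'A_' is assumed here to be the
+# graph6 encoding of the complete graph on two vertices):
+#
+#     >>> G = parse_graph6('A_')
+#     >>> sorted(G.edges())
+#     [(0, 1)]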
+# Original author: D. Eppstein, UC Irvine, August 12, 2003.
+# The original code at http://www.ics.uci.edu/~eppstein/PADS/ is public domain.
+__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
+# Copyright (C) 2004-2010 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+
+__all__ = ['read_graph6', 'parse_graph6', 'read_graph6_list',
+ 'read_sparse6', 'parse_sparse6', 'read_sparse6_list']
+
+import networkx as nx
+from networkx.exception import NetworkXError
+from networkx.utils import open_file
+
+# graph6
+
+def read_graph6(path):
+ """Read simple undirected graphs in graph6 format from path.
+
+ Returns a single Graph.
+ """
+ return read_graph6_list(path)[0]
+
+def parse_graph6(str):
+ """Read a simple undirected graph in graph6 format from string.
+
+ Returns a single Graph.
+ """
+ def bits():
+ """Return sequence of individual bits from 6-bit-per-value
+ list of data values."""
+ for d in data:
+ for i in [5,4,3,2,1,0]:
+ yield (d>>i)&1
+
+ if str.startswith('>>graph6<<'):
+ str = str[10:]
+ data = graph6data(str)
+ n, data = graph6n(data)
+ nd = (n*(n-1)//2 + 5) // 6
+ if len(data) != nd:
+        raise NetworkXError(
+            'Expected %d bits but got %d in graph6' % (n*(n-1)//2, len(data)*6))
+
+ G=nx.Graph()
+ G.add_nodes_from(range(n))
+ for (i,j),b in zip([(i,j) for j in range(1,n) for i in range(j)], bits()):
+ if b: G.add_edge(i,j)
+ return G
+
+@open_file(0,mode='rt')
+def read_graph6_list(path):
+ """Read simple undirected graphs in graph6 format from path.
+
+ Returns a list of Graphs, one for each line in file.
+ """
+ glist=[]
+ for line in path:
+ line = line.strip()
+ if not len(line): continue
+ glist.append(parse_graph6(line))
+ return glist
+
+# sparse6
+
+def read_sparse6(path):
+ """Read simple undirected graphs in sparse6 format from path.
+
+ Returns a single MultiGraph."""
+ return read_sparse6_list(path)[0]
+
+@open_file(0,mode='rt')
+def read_sparse6_list(path):
+ """Read undirected graphs in sparse6 format from path.
+
+ Returns a list of MultiGraphs, one for each line in file."""
+ glist=[]
+ for line in path:
+ line = line.strip()
+ if not len(line): continue
+ glist.append(parse_sparse6(line))
+ return glist
+
+def parse_sparse6(string):
+ """Read undirected graph in sparse6 format from string.
+
+ Returns a MultiGraph.
+ """
+ if string.startswith('>>sparse6<<'):
+        string = string[11:]
+ if not string.startswith(':'):
+ raise NetworkXError('Expected colon in sparse6')
+ n, data = graph6n(graph6data(string[1:]))
+ k = 1
+ while 1<<k < n:
+ k += 1
+
+ def parseData():
+ """Return stream of pairs b[i], x[i] for sparse6 format."""
+ chunks = iter(data)
+ d = None # partial data word
+ dLen = 0 # how many unparsed bits are left in d
+
+ while 1:
+ if dLen < 1:
+ d = next(chunks)
+ dLen = 6
+ dLen -= 1
+ b = (d>>dLen) & 1 # grab top remaining bit
+
+ x = d & ((1<<dLen)-1) # partially built up value of x
+ xLen = dLen # how many bits included so far in x
+ while xLen < k: # now grab full chunks until we have enough
+ d = next(chunks)
+ dLen = 6
+ x = (x<<6) + d
+ xLen += 6
+ x = (x >> (xLen - k)) # shift back the extra bits
+ dLen = xLen - k
+ yield b,x
+
+ v = 0
+
+ G=nx.MultiGraph()
+ G.add_nodes_from(range(n))
+
+ for b,x in parseData():
+ if b: v += 1
+ if x >= n: break # padding with ones can cause overlarge number here
+ elif x > v: v = x
+ else:
+ G.add_edge(x,v)
+
+ return G
+
+# helper functions
+
+def graph6data(str):
+ """Convert graph6 character sequence to 6-bit integers."""
+ v = [ord(c)-63 for c in str]
+ if min(v) < 0 or max(v) > 63:
+ return None
+ return v
+
+def graph6n(data):
+ """Read initial one or four-unit value from graph6 sequence.
+    """Read initial one- or four-unit value from graph6 sequence.
+ if data[0] <= 62:
+ return data[0], data[1:]
+ return (data[1]<<12) + (data[2]<<6) + data[3], data[4:]
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_adjlist.py b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_adjlist.py
new file mode 100644
index 0000000..9b3c0f2
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_adjlist.py
@@ -0,0 +1,283 @@
+# -*- coding: utf-8 -*-
+"""
+ Unit tests for adjlist.
+"""
+import io
+from nose.tools import assert_equal, assert_raises, assert_not_equal
+import os
+import tempfile
+import networkx as nx
+from networkx.testing import *
+
+
+class TestAdjlist():
+
+ def setUp(self):
+ self.G=nx.Graph(name="test")
+ e=[('a','b'),('b','c'),('c','d'),('d','e'),('e','f'),('a','f')]
+ self.G.add_edges_from(e)
+ self.G.add_node('g')
+ self.DG=nx.DiGraph(self.G)
+ self.XG=nx.MultiGraph()
+ self.XG.add_weighted_edges_from([(1,2,5),(1,2,5),(1,2,1),(3,3,42)])
+        self.XDG=nx.MultiDiGraph(self.XG)
+
+ def test_read_multiline_adjlist_1(self):
+ # Unit test for https://networkx.lanl.gov/trac/ticket/252
+ s = b"""# comment line
+1 2
+# comment line
+2
+3
+"""
+ bytesIO = io.BytesIO(s)
+ G = nx.read_multiline_adjlist(bytesIO)
+ adj = {'1': {'3': {}, '2': {}}, '3': {'1': {}}, '2': {'1': {}}}
+ assert_equal(G.adj, adj)
+
+ def test_unicode(self):
+ G = nx.Graph()
+ try: # Python 3.x
+ name1 = chr(2344) + chr(123) + chr(6543)
+ name2 = chr(5543) + chr(1543) + chr(324)
+ except ValueError: # Python 2.6+
+ name1 = unichr(2344) + unichr(123) + unichr(6543)
+ name2 = unichr(5543) + unichr(1543) + unichr(324)
+ G.add_edge(name1, 'Radiohead', {name2: 3})
+ fd, fname = tempfile.mkstemp()
+ nx.write_multiline_adjlist(G, fname)
+ H = nx.read_multiline_adjlist(fname)
+ assert_equal(G.adj, H.adj)
+ os.close(fd)
+ os.unlink(fname)
+
+ def test_latin1_error(self):
+ G = nx.Graph()
+ try: # Python 3.x
+ name1 = chr(2344) + chr(123) + chr(6543)
+ name2 = chr(5543) + chr(1543) + chr(324)
+ except ValueError: # Python 2.6+
+ name1 = unichr(2344) + unichr(123) + unichr(6543)
+ name2 = unichr(5543) + unichr(1543) + unichr(324)
+ G.add_edge(name1, 'Radiohead', {name2: 3})
+ fd, fname = tempfile.mkstemp()
+ assert_raises(UnicodeEncodeError,
+ nx.write_multiline_adjlist,
+ G, fname, encoding = 'latin-1')
+ os.close(fd)
+ os.unlink(fname)
+
+ def test_latin1(self):
+ G = nx.Graph()
+ try: # Python 3.x
+ blurb = chr(1245) # just to trigger the exception
+ name1 = 'Bj' + chr(246) + 'rk'
+ name2 = chr(220) + 'ber'
+ except ValueError: # Python 2.6+
+ name1 = 'Bj' + unichr(246) + 'rk'
+ name2 = unichr(220) + 'ber'
+ G.add_edge(name1, 'Radiohead', {name2: 3})
+ fd, fname = tempfile.mkstemp()
+ nx.write_multiline_adjlist(G, fname, encoding = 'latin-1')
+ H = nx.read_multiline_adjlist(fname, encoding = 'latin-1')
+ assert_equal(G.adj, H.adj)
+ os.close(fd)
+ os.unlink(fname)
+
+
+
+ def test_adjlist_graph(self):
+ G=self.G
+ (fd,fname)=tempfile.mkstemp()
+ nx.write_adjlist(G,fname)
+ H=nx.read_adjlist(fname)
+ H2=nx.read_adjlist(fname)
+ assert_not_equal(H,H2) # they should be different graphs
+ assert_equal(sorted(H.nodes()),sorted(G.nodes()))
+ assert_equal(sorted(H.edges()),sorted(G.edges()))
+ os.close(fd)
+ os.unlink(fname)
+
+ def test_adjlist_digraph(self):
+ G=self.DG
+ (fd,fname)=tempfile.mkstemp()
+ nx.write_adjlist(G,fname)
+ H=nx.read_adjlist(fname,create_using=nx.DiGraph())
+ H2=nx.read_adjlist(fname,create_using=nx.DiGraph())
+ assert_not_equal(H,H2) # they should be different graphs
+ assert_equal(sorted(H.nodes()),sorted(G.nodes()))
+ assert_equal(sorted(H.edges()),sorted(G.edges()))
+ os.close(fd)
+ os.unlink(fname)
+
+
+ def test_adjlist_integers(self):
+ (fd,fname)=tempfile.mkstemp()
+ G=nx.convert_node_labels_to_integers(self.G)
+ nx.write_adjlist(G,fname)
+ H=nx.read_adjlist(fname,nodetype=int)
+ H2=nx.read_adjlist(fname,nodetype=int)
+ assert_equal(sorted(H.nodes()),sorted(G.nodes()))
+ assert_equal(sorted(H.edges()),sorted(G.edges()))
+ os.close(fd)
+ os.unlink(fname)
+
+
+ def test_adjlist_multigraph(self):
+ G=self.XG
+ (fd,fname)=tempfile.mkstemp()
+ nx.write_adjlist(G,fname)
+ H=nx.read_adjlist(fname,nodetype=int,
+ create_using=nx.MultiGraph())
+ H2=nx.read_adjlist(fname,nodetype=int,
+ create_using=nx.MultiGraph())
+ assert_not_equal(H,H2) # they should be different graphs
+ assert_equal(sorted(H.nodes()),sorted(G.nodes()))
+ assert_equal(sorted(H.edges()),sorted(G.edges()))
+ os.close(fd)
+ os.unlink(fname)
+
+ def test_adjlist_multidigraph(self):
+ G=self.XDG
+ (fd,fname)=tempfile.mkstemp()
+ nx.write_adjlist(G,fname)
+ H=nx.read_adjlist(fname,nodetype=int,
+ create_using=nx.MultiDiGraph())
+ H2=nx.read_adjlist(fname,nodetype=int,
+ create_using=nx.MultiDiGraph())
+ assert_not_equal(H,H2) # they should be different graphs
+ assert_equal(sorted(H.nodes()),sorted(G.nodes()))
+ assert_equal(sorted(H.edges()),sorted(G.edges()))
+ os.close(fd)
+ os.unlink(fname)
+
+
+ def test_adjlist_delimiter(self):
+ fh=io.BytesIO()
+ G = nx.path_graph(3)
+ nx.write_adjlist(G, fh, delimiter=':')
+ fh.seek(0)
+ H = nx.read_adjlist(fh, nodetype=int, delimiter=':')
+ assert_equal(sorted(H.nodes()),sorted(G.nodes()))
+ assert_equal(sorted(H.edges()),sorted(G.edges()))
+
+
+
+
+
+
+class TestMultilineAdjlist():
+
+ def setUp(self):
+ self.G=nx.Graph(name="test")
+ e=[('a','b'),('b','c'),('c','d'),('d','e'),('e','f'),('a','f')]
+ self.G.add_edges_from(e)
+ self.G.add_node('g')
+ self.DG=nx.DiGraph(self.G)
+ self.DG.remove_edge('b','a')
+ self.DG.remove_edge('b','c')
+ self.XG=nx.MultiGraph()
+ self.XG.add_weighted_edges_from([(1,2,5),(1,2,5),(1,2,1),(3,3,42)])
+        self.XDG=nx.MultiDiGraph(self.XG)
+
+
+ def test_multiline_adjlist_graph(self):
+ G=self.G
+ (fd,fname)=tempfile.mkstemp()
+ nx.write_multiline_adjlist(G,fname)
+ H=nx.read_multiline_adjlist(fname)
+ H2=nx.read_multiline_adjlist(fname)
+ assert_not_equal(H,H2) # they should be different graphs
+ assert_equal(sorted(H.nodes()),sorted(G.nodes()))
+ assert_equal(sorted(H.edges()),sorted(G.edges()))
+ os.close(fd)
+ os.unlink(fname)
+
+
+ def test_multiline_adjlist_digraph(self):
+ G=self.DG
+ (fd,fname)=tempfile.mkstemp()
+ nx.write_multiline_adjlist(G,fname)
+ H=nx.read_multiline_adjlist(fname,create_using=nx.DiGraph())
+ H2=nx.read_multiline_adjlist(fname,create_using=nx.DiGraph())
+ assert_not_equal(H,H2) # they should be different graphs
+ assert_equal(sorted(H.nodes()),sorted(G.nodes()))
+ assert_equal(sorted(H.edges()),sorted(G.edges()))
+ os.close(fd)
+ os.unlink(fname)
+
+
+ def test_multiline_adjlist_integers(self):
+ (fd,fname)=tempfile.mkstemp()
+ G=nx.convert_node_labels_to_integers(self.G)
+ nx.write_multiline_adjlist(G,fname)
+ H=nx.read_multiline_adjlist(fname,nodetype=int)
+ H2=nx.read_multiline_adjlist(fname,nodetype=int)
+ assert_equal(sorted(H.nodes()),sorted(G.nodes()))
+ assert_equal(sorted(H.edges()),sorted(G.edges()))
+ os.close(fd)
+ os.unlink(fname)
+
+
+    def test_multiline_adjlist_digraph2(self):
+ G=self.DG
+ (fd,fname)=tempfile.mkstemp()
+ nx.write_multiline_adjlist(G,fname)
+ H=nx.read_multiline_adjlist(fname,create_using=nx.DiGraph())
+ H2=nx.read_multiline_adjlist(fname,create_using=nx.DiGraph())
+ assert_not_equal(H,H2) # they should be different graphs
+ assert_equal(sorted(H.nodes()),sorted(G.nodes()))
+ assert_edges_equal(H.edges(),G.edges())
+ os.close(fd)
+ os.unlink(fname)
+
+
+ def test_multiline_adjlist_multigraph(self):
+ G=self.XG
+ (fd,fname)=tempfile.mkstemp()
+ nx.write_multiline_adjlist(G,fname)
+ H=nx.read_multiline_adjlist(fname,nodetype=int,
+ create_using=nx.MultiGraph())
+ H2=nx.read_multiline_adjlist(fname,nodetype=int,
+ create_using=nx.MultiGraph())
+ assert_not_equal(H,H2) # they should be different graphs
+ assert_equal(sorted(H.nodes()),sorted(G.nodes()))
+ assert_equal(sorted(H.edges()),sorted(G.edges()))
+ os.close(fd)
+ os.unlink(fname)
+
+ def test_multiline_adjlist_multidigraph(self):
+ G=self.XDG
+ (fd,fname)=tempfile.mkstemp()
+ nx.write_multiline_adjlist(G,fname)
+ H=nx.read_multiline_adjlist(fname,nodetype=int,
+ create_using=nx.MultiDiGraph())
+ H2=nx.read_multiline_adjlist(fname,nodetype=int,
+ create_using=nx.MultiDiGraph())
+ assert_not_equal(H,H2) # they should be different graphs
+ assert_equal(sorted(H.nodes()),sorted(G.nodes()))
+ assert_equal(sorted(H.edges()),sorted(G.edges()))
+ os.close(fd)
+ os.unlink(fname)
+
+ def test_multiline_adjlist_delimiter(self):
+ fh=io.BytesIO()
+ G = nx.path_graph(3)
+ nx.write_multiline_adjlist(G, fh, delimiter=':')
+ fh.seek(0)
+ H = nx.read_multiline_adjlist(fh, nodetype=int, delimiter=':')
+ assert_equal(sorted(H.nodes()),sorted(G.nodes()))
+ assert_equal(sorted(H.edges()),sorted(G.edges()))
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_edgelist.py b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_edgelist.py
new file mode 100644
index 0000000..be70bba
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_edgelist.py
@@ -0,0 +1,234 @@
+"""
+ Unit tests for edgelists.
+"""
+from nose.tools import assert_equal, assert_raises, assert_not_equal
+import networkx as nx
+import io
+import tempfile
+import os
+
+def assert_equal_edges(elist1,elist2):
+ if len(elist1[0]) == 2:
+ return assert_equal(sorted(sorted(e) for e in elist1),
+ sorted(sorted(e) for e in elist2))
+ else:
+ return assert_equal(sorted((sorted((u, v)), d) for u, v, d in elist1),
+ sorted((sorted((u, v)), d) for u, v, d in elist2))
+
+class TestEdgelist:
+
+ def setUp(self):
+ self.G=nx.Graph(name="test")
+ e=[('a','b'),('b','c'),('c','d'),('d','e'),('e','f'),('a','f')]
+ self.G.add_edges_from(e)
+ self.G.add_node('g')
+ self.DG=nx.DiGraph(self.G)
+ self.XG=nx.MultiGraph()
+ self.XG.add_weighted_edges_from([(1,2,5),(1,2,5),(1,2,1),(3,3,42)])
+        self.XDG=nx.MultiDiGraph(self.XG)
+
+
+ def test_read_edgelist_1(self):
+ s = b"""\
+# comment line
+1 2
+# comment line
+2 3
+"""
+ bytesIO = io.BytesIO(s)
+ G = nx.read_edgelist(bytesIO,nodetype=int)
+ assert_equal_edges(G.edges(),[(1,2),(2,3)])
+
+ def test_read_edgelist_2(self):
+ s = b"""\
+# comment line
+1 2 2.0
+# comment line
+2 3 3.0
+"""
+ bytesIO = io.BytesIO(s)
+ G = nx.read_edgelist(bytesIO,nodetype=int,data=False)
+ assert_equal_edges(G.edges(),[(1,2),(2,3)])
+
+ bytesIO = io.BytesIO(s)
+ G = nx.read_weighted_edgelist(bytesIO,nodetype=int)
+ assert_equal_edges(G.edges(data=True),[(1,2,{'weight':2.0}),(2,3,{'weight':3.0})])
+
+ def test_read_edgelist_3(self):
+ s = b"""\
+# comment line
+1 2 {'weight':2.0}
+# comment line
+2 3 {'weight':3.0}
+"""
+ bytesIO = io.BytesIO(s)
+ G = nx.read_edgelist(bytesIO,nodetype=int,data=False)
+ assert_equal_edges(G.edges(),[(1,2),(2,3)])
+
+ bytesIO = io.BytesIO(s)
+ G = nx.read_edgelist(bytesIO,nodetype=int,data=True)
+ assert_equal_edges(G.edges(data=True),[(1,2,{'weight':2.0}),(2,3,{'weight':3.0})])
+
+ def test_write_edgelist_1(self):
+ fh=io.BytesIO()
+ G=nx.Graph()
+ G.add_edges_from([(1,2),(2,3)])
+ nx.write_edgelist(G,fh,data=False)
+ fh.seek(0)
+ assert_equal(fh.read(),b"1 2\n2 3\n")
+
+ def test_write_edgelist_2(self):
+ fh=io.BytesIO()
+ G=nx.Graph()
+ G.add_edges_from([(1,2),(2,3)])
+ nx.write_edgelist(G,fh,data=True)
+ fh.seek(0)
+ assert_equal(fh.read(),b"1 2 {}\n2 3 {}\n")
+
+ def test_write_edgelist_3(self):
+ fh=io.BytesIO()
+ G=nx.Graph()
+ G.add_edge(1,2,weight=2.0)
+ G.add_edge(2,3,weight=3.0)
+ nx.write_edgelist(G,fh,data=True)
+ fh.seek(0)
+ assert_equal(fh.read(),b"1 2 {'weight': 2.0}\n2 3 {'weight': 3.0}\n")
+
+ def test_write_edgelist_4(self):
+ fh=io.BytesIO()
+ G=nx.Graph()
+ G.add_edge(1,2,weight=2.0)
+ G.add_edge(2,3,weight=3.0)
+        nx.write_edgelist(G,fh,data=['weight'])
+ fh.seek(0)
+ assert_equal(fh.read(),b"1 2 2.0\n2 3 3.0\n")
+
+ def test_unicode(self):
+ G = nx.Graph()
+ try: # Python 3.x
+ name1 = chr(2344) + chr(123) + chr(6543)
+ name2 = chr(5543) + chr(1543) + chr(324)
+ except ValueError: # Python 2.6+
+ name1 = unichr(2344) + unichr(123) + unichr(6543)
+ name2 = unichr(5543) + unichr(1543) + unichr(324)
+ G.add_edge(name1, 'Radiohead', attr_dict={name2: 3})
+ fd, fname = tempfile.mkstemp()
+ nx.write_edgelist(G, fname)
+ H = nx.read_edgelist(fname)
+ assert_equal(G.adj, H.adj)
+ os.close(fd)
+ os.unlink(fname)
+
+ def test_latin1_error(self):
+ G = nx.Graph()
+ try: # Python 3.x
+ name1 = chr(2344) + chr(123) + chr(6543)
+ name2 = chr(5543) + chr(1543) + chr(324)
+ except ValueError: # Python 2.6+
+ name1 = unichr(2344) + unichr(123) + unichr(6543)
+ name2 = unichr(5543) + unichr(1543) + unichr(324)
+ G.add_edge(name1, 'Radiohead', attr_dict={name2: 3})
+ fd, fname = tempfile.mkstemp()
+ assert_raises(UnicodeEncodeError,
+ nx.write_edgelist,
+ G, fname, encoding = 'latin-1')
+ os.close(fd)
+ os.unlink(fname)
+
+ def test_latin1(self):
+ G = nx.Graph()
+ try: # Python 3.x
+ blurb = chr(1245) # just to trigger the exception
+ name1 = 'Bj' + chr(246) + 'rk'
+ name2 = chr(220) + 'ber'
+ except ValueError: # Python 2.6+
+ name1 = 'Bj' + unichr(246) + 'rk'
+ name2 = unichr(220) + 'ber'
+ G.add_edge(name1, 'Radiohead', attr_dict={name2: 3})
+ fd, fname = tempfile.mkstemp()
+ nx.write_edgelist(G, fname, encoding = 'latin-1')
+ H = nx.read_edgelist(fname, encoding = 'latin-1')
+ assert_equal(G.adj, H.adj)
+ os.close(fd)
+ os.unlink(fname)
+
+
+ def test_edgelist_graph(self):
+ G=self.G
+ (fd,fname)=tempfile.mkstemp()
+ nx.write_edgelist(G,fname)
+ H=nx.read_edgelist(fname)
+ H2=nx.read_edgelist(fname)
+ assert_not_equal(H,H2) # they should be different graphs
+ G.remove_node('g') # isolated nodes are not written in edgelist
+ assert_equal(sorted(H.nodes()),sorted(G.nodes()))
+ assert_equal(sorted(H.edges()),sorted(G.edges()))
+ os.close(fd)
+ os.unlink(fname)
+
+ def test_edgelist_digraph(self):
+ G=self.DG
+ (fd,fname)=tempfile.mkstemp()
+ nx.write_edgelist(G,fname)
+ H=nx.read_edgelist(fname,create_using=nx.DiGraph())
+ H2=nx.read_edgelist(fname,create_using=nx.DiGraph())
+ assert_not_equal(H,H2) # they should be different graphs
+ G.remove_node('g') # isolated nodes are not written in edgelist
+ assert_equal(sorted(H.nodes()),sorted(G.nodes()))
+ assert_equal(sorted(H.edges()),sorted(G.edges()))
+ os.close(fd)
+ os.unlink(fname)
+
+
+ def test_edgelist_integers(self):
+ G=nx.convert_node_labels_to_integers(self.G)
+ (fd,fname)=tempfile.mkstemp()
+ nx.write_edgelist(G,fname)
+ H=nx.read_edgelist(fname,nodetype=int)
+ # isolated nodes are not written in edgelist
+ G.remove_nodes_from(nx.isolates(G))
+ assert_equal(sorted(H.nodes()),sorted(G.nodes()))
+ assert_equal(sorted(H.edges()),sorted(G.edges()))
+ os.close(fd)
+ os.unlink(fname)
+
+
+    def test_edgelist_digraph2(self):
+ G=self.DG
+ (fd,fname)=tempfile.mkstemp()
+ nx.write_edgelist(G,fname)
+ H=nx.read_edgelist(fname,create_using=nx.DiGraph())
+ G.remove_node('g') # isolated nodes are not written in edgelist
+ H2=nx.read_edgelist(fname,create_using=nx.DiGraph())
+ assert_not_equal(H,H2) # they should be different graphs
+ assert_equal(sorted(H.nodes()),sorted(G.nodes()))
+ assert_equal(sorted(H.edges()),sorted(G.edges()))
+ os.close(fd)
+ os.unlink(fname)
+
+
+ def test_edgelist_multigraph(self):
+ G=self.XG
+ (fd,fname)=tempfile.mkstemp()
+ nx.write_edgelist(G,fname)
+ H=nx.read_edgelist(fname,nodetype=int,create_using=nx.MultiGraph())
+ H2=nx.read_edgelist(fname,nodetype=int,create_using=nx.MultiGraph())
+ assert_not_equal(H,H2) # they should be different graphs
+ assert_equal(sorted(H.nodes()),sorted(G.nodes()))
+ assert_equal(sorted(H.edges()),sorted(G.edges()))
+ os.close(fd)
+ os.unlink(fname)
+
+ def test_edgelist_multidigraph(self):
+ G=self.XDG
+ (fd,fname)=tempfile.mkstemp()
+ nx.write_edgelist(G,fname)
+ H=nx.read_edgelist(fname,nodetype=int,create_using=nx.MultiDiGraph())
+ H2=nx.read_edgelist(fname,nodetype=int,create_using=nx.MultiDiGraph())
+ assert_not_equal(H,H2) # they should be different graphs
+ assert_equal(sorted(H.nodes()),sorted(G.nodes()))
+ assert_equal(sorted(H.edges()),sorted(G.edges()))
+ os.close(fd)
+ os.unlink(fname)
+
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_gexf.py b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_gexf.py
new file mode 100644
index 0000000..bbd86ba
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_gexf.py
@@ -0,0 +1,306 @@
+#!/usr/bin/env python
+from nose.tools import *
+from nose import SkipTest
+import networkx as nx
+import io
+
+class TestGEXF(object):
+ @classmethod
+ def setupClass(cls):
+ try:
+ import xml.etree.ElementTree
+ except ImportError:
+ raise SkipTest('xml.etree.ElementTree not available.')
+
+ def setUp(self):
+ self.simple_directed_data="""<?xml version="1.0" encoding="UTF-8"?>
+<gexf xmlns="http://www.gexf.net/1.1draft" version="1.1">
+ <graph mode="static" defaultedgetype="directed">
+ <nodes>
+ <node id="0" label="Hello" />
+      <node id="1" label="World" />
+ </nodes>
+ <edges>
+ <edge id="0" source="0" target="1" />
+ </edges>
+ </graph>
+</gexf>
+"""
+ self.simple_directed_graph=nx.DiGraph()
+ self.simple_directed_graph.add_node('0',label='Hello')
+ self.simple_directed_graph.add_node('1',label='World')
+ self.simple_directed_graph.add_edge('0','1',id='0')
+
+ self.simple_directed_fh = \
+ io.BytesIO(self.simple_directed_data.encode('UTF-8'))
+
+
+ self.attribute_data="""<?xml version="1.0" encoding="UTF-8"?>
+<gexf xmlns="http://www.gexf.net/1.1draft" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.gexf.net/1.1draft http://www.gexf.net/1.1draft/gexf.xsd" version="1.1">
+ <meta lastmodifieddate="2009-03-20">
+ <creator>Gephi.org</creator>
+ <description>A Web network</description>
+ </meta>
+ <graph defaultedgetype="directed">
+ <attributes class="node">
+ <attribute id="0" title="url" type="string"/>
+ <attribute id="1" title="indegree" type="integer"/>
+ <attribute id="2" title="frog" type="boolean">
+ <default>true</default>
+ </attribute>
+ </attributes>
+ <nodes>
+ <node id="0" label="Gephi">
+ <attvalues>
+ <attvalue for="0" value="http://gephi.org"/>
+ <attvalue for="1" value="1"/>
+ </attvalues>
+ </node>
+ <node id="1" label="Webatlas">
+ <attvalues>
+ <attvalue for="0" value="http://webatlas.fr"/>
+ <attvalue for="1" value="2"/>
+ </attvalues>
+ </node>
+ <node id="2" label="RTGI">
+ <attvalues>
+ <attvalue for="0" value="http://rtgi.fr"/>
+ <attvalue for="1" value="1"/>
+ </attvalues>
+ </node>
+ <node id="3" label="BarabasiLab">
+ <attvalues>
+ <attvalue for="0" value="http://barabasilab.com"/>
+ <attvalue for="1" value="1"/>
+ <attvalue for="2" value="false"/>
+ </attvalues>
+ </node>
+ </nodes>
+ <edges>
+ <edge id="0" source="0" target="1"/>
+ <edge id="1" source="0" target="2"/>
+ <edge id="2" source="1" target="0"/>
+ <edge id="3" source="2" target="1"/>
+ <edge id="4" source="0" target="3"/>
+ </edges>
+ </graph>
+</gexf>
+"""
+ self.attribute_graph=nx.DiGraph()
+ self.attribute_graph.graph['node_default']={'frog':True}
+ self.attribute_graph.add_node('0',
+ label='Gephi',
+ url='http://gephi.org',
+ indegree=1)
+ self.attribute_graph.add_node('1',
+ label='Webatlas',
+ url='http://webatlas.fr',
+ indegree=2)
+
+ self.attribute_graph.add_node('2',
+ label='RTGI',
+ url='http://rtgi.fr',
+ indegree=1)
+
+ self.attribute_graph.add_node('3',
+ label='BarabasiLab',
+ url='http://barabasilab.com',
+ indegree=1,
+ frog=False)
+ self.attribute_graph.add_edge('0','1',id='0')
+ self.attribute_graph.add_edge('0','2',id='1')
+ self.attribute_graph.add_edge('1','0',id='2')
+ self.attribute_graph.add_edge('2','1',id='3')
+ self.attribute_graph.add_edge('0','3',id='4')
+ self.attribute_fh = io.BytesIO(self.attribute_data.encode('UTF-8'))
+
+ self.simple_undirected_data="""<?xml version="1.0" encoding="UTF-8"?>
+<gexf xmlns="http://www.gexf.net/1.1draft" version="1.1">
+ <graph mode="static" defaultedgetype="undirected">
+ <nodes>
+ <node id="0" label="Hello" />
+      <node id="1" label="World" />
+ </nodes>
+ <edges>
+ <edge id="0" source="0" target="1" />
+ </edges>
+ </graph>
+</gexf>
+"""
+ self.simple_undirected_graph=nx.Graph()
+ self.simple_undirected_graph.add_node('0',label='Hello')
+ self.simple_undirected_graph.add_node('1',label='World')
+ self.simple_undirected_graph.add_edge('0','1',id='0')
+
+ self.simple_undirected_fh = io.BytesIO(self.simple_undirected_data.encode('UTF-8'))
+
+
+    def test_read_simple_directed_gexf(self):
+ G=self.simple_directed_graph
+ H=nx.read_gexf(self.simple_directed_fh)
+ assert_equal(sorted(G.nodes()),sorted(H.nodes()))
+ assert_equal(sorted(G.edges()),sorted(H.edges()))
+ assert_equal(sorted(G.edges(data=True)),
+ sorted(H.edges(data=True)))
+ self.simple_directed_fh.seek(0)
+
+    def test_write_read_simple_directed_gexf(self):
+ G=self.simple_directed_graph
+ fh=io.BytesIO()
+ nx.write_gexf(G,fh)
+ fh.seek(0)
+ H=nx.read_gexf(fh)
+ assert_equal(sorted(G.nodes()),sorted(H.nodes()))
+ assert_equal(sorted(G.edges()),sorted(H.edges()))
+ assert_equal(sorted(G.edges(data=True)),
+ sorted(H.edges(data=True)))
+ self.simple_directed_fh.seek(0)
+
+    def test_read_simple_undirected_gexf(self):
+ G=self.simple_undirected_graph
+ H=nx.read_gexf(self.simple_undirected_fh)
+ assert_equal(sorted(G.nodes()),sorted(H.nodes()))
+ assert_equal(
+ sorted(sorted(e) for e in G.edges()),
+ sorted(sorted(e) for e in H.edges()))
+ self.simple_undirected_fh.seek(0)
+
+    def test_read_attribute_gexf(self):
+ G=self.attribute_graph
+ H=nx.read_gexf(self.attribute_fh)
+ assert_equal(sorted(G.nodes(True)),sorted(H.nodes(data=True)))
+ ge=sorted(G.edges(data=True))
+ he=sorted(H.edges(data=True))
+ for a,b in zip(ge,he):
+ assert_equal(a,b)
+ self.attribute_fh.seek(0)
+
+ def test_directed_edge_in_undirected(self):
+ s="""<?xml version="1.0" encoding="UTF-8"?>
+<gexf xmlns="http://www.gexf.net/1.1draft" version="1.1">
+ <graph mode="static" defaultedgetype="undirected">
+ <nodes>
+ <node id="0" label="Hello" />
+      <node id="1" label="World" />
+ </nodes>
+ <edges>
+ <edge id="0" source="0" target="1" type="directed"/>
+ </edges>
+ </graph>
+</gexf>
+"""
+ fh = io.BytesIO(s.encode('UTF-8'))
+ assert_raises(nx.NetworkXError,nx.read_gexf,fh)
+
+ def test_undirected_edge_in_directed(self):
+ s="""<?xml version="1.0" encoding="UTF-8"?>
+<gexf xmlns="http://www.gexf.net/1.1draft" version="1.1">
+ <graph mode="static" defaultedgetype="directed">
+ <nodes>
+ <node id="0" label="Hello" />
+      <node id="1" label="World" />
+ </nodes>
+ <edges>
+ <edge id="0" source="0" target="1" type="undirected"/>
+ </edges>
+ </graph>
+</gexf>
+"""
+ fh = io.BytesIO(s.encode('UTF-8'))
+ assert_raises(nx.NetworkXError,nx.read_gexf,fh)
+
+
+ def test_key_error(self):
+ s="""<?xml version="1.0" encoding="UTF-8"?>
+<gexf xmlns="http://www.gexf.net/1.1draft" version="1.1">
+ <graph mode="static" defaultedgetype="directed">
+ <nodes>
+ <node id="0" label="Hello">
+ <attvalues>
+ <attvalue for='0' value='1'/>
+ </attvalues>
+ </node>
+      <node id="1" label="World" />
+ </nodes>
+ <edges>
+ <edge id="0" source="0" target="1" type="undirected"/>
+ </edges>
+ </graph>
+</gexf>
+"""
+ fh = io.BytesIO(s.encode('UTF-8'))
+ assert_raises(nx.NetworkXError,nx.read_gexf,fh)
+
+ def test_relabel(self):
+ s="""<?xml version="1.0" encoding="UTF-8"?>
+<gexf xmlns="http://www.gexf.net/1.1draft" version="1.1">
+ <graph mode="static" defaultedgetype="directed">
+ <nodes>
+ <node id="0" label="Hello" />
+      <node id="1" label="World" />
+ </nodes>
+ <edges>
+ <edge id="0" source="0" target="1"/>
+ </edges>
+ </graph>
+</gexf>
+"""
+ fh = io.BytesIO(s.encode('UTF-8'))
+ G=nx.read_gexf(fh,relabel=True)
+        assert_equal(sorted(G.nodes()),["Hello","World"])
+
+
+ def test_default_attribute(self):
+ G=nx.Graph()
+ G.add_node(1,label='1',color='green')
+ G.add_path([0,1,2,3])
+ G.add_edge(1,2,foo=3)
+ G.graph['node_default']={'color':'yellow'}
+ G.graph['edge_default']={'foo':7}
+ fh = io.BytesIO()
+ nx.write_gexf(G,fh)
+ fh.seek(0)
+ H=nx.read_gexf(fh,node_type=int)
+ assert_equal(sorted(G.nodes()),sorted(H.nodes()))
+ assert_equal(
+ sorted(sorted(e) for e in G.edges()),
+ sorted(sorted(e) for e in H.edges()))
+ assert_equal(G.graph,H.graph)
+
+ def test_serialize_ints_to_strings(self):
+ G=nx.Graph()
+ G.add_node(1,id=7,label=77)
+ fh = io.BytesIO()
+ nx.write_gexf(G,fh)
+ fh.seek(0)
+ H=nx.read_gexf(fh,node_type=int)
+ assert_equal(H.nodes(),[7])
+ assert_equal(H.node[7]['label'],'77')
+
+ def test_write_with_node_attributes(self):
+ # Addresses #673.
+ G = nx.path_graph(4)
+ for i in range(4):
+ G.node[i]['id'] = i
+ G.node[i]['label'] = i
+ G.node[i]['pid'] = i
+
+ expected = """<gexf version="1.1" xmlns="http://www.gexf.net/1.1draft" xmlns:viz="http://www.gexf.net/1.1draft/viz" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.w3.org/2001/XMLSchema-instance">
+ <graph defaultedgetype="undirected" mode="static">
+ <nodes>
+ <node id="0" label="0" pid="0" />
+ <node id="1" label="1" pid="1" />
+ <node id="2" label="2" pid="2" />
+ <node id="3" label="3" pid="3" />
+ </nodes>
+ <edges>
+ <edge id="0" source="0" target="1" />
+ <edge id="1" source="1" target="2" />
+ <edge id="2" source="2" target="3" />
+ </edges>
+ </graph>
+</gexf>"""
+ obtained = '\n'.join(nx.generate_gexf(G))
+ assert_equal( expected, obtained )
+
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_gml.py b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_gml.py
new file mode 100644
index 0000000..ed63bd2
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_gml.py
@@ -0,0 +1,135 @@
+#!/usr/bin/env python
+import io
+from nose.tools import *
+from nose import SkipTest
+import networkx
+
+class TestGraph(object):
+ @classmethod
+ def setupClass(cls):
+ global pyparsing
+ try:
+ import pyparsing
+ except ImportError:
+ try:
+ import matplotlib.pyparsing as pyparsing
+            except ImportError:
+ raise SkipTest('gml test: pyparsing not available.')
+
+ def setUp(self):
+ self.simple_data="""Creator me
+graph [
+ comment "This is a sample graph"
+ directed 1
+ IsPlanar 1
+ pos [ x 0 y 1 ]
+ node [
+ id 1
+ label "Node 1"
+ pos [ x 1 y 1 ]
+ ]
+ node [
+ id 2
+ pos [ x 1 y 2 ]
+ label "Node 2"
+ ]
+ node [
+ id 3
+ label "Node 3"
+ pos [ x 1 y 3 ]
+ ]
+ edge [
+ source 1
+ target 2
+ label "Edge from node 1 to node 2"
+ color [line "blue" thickness 3]
+
+ ]
+ edge [
+ source 2
+ target 3
+ label "Edge from node 2 to node 3"
+ ]
+ edge [
+ source 3
+ target 1 label
+ "Edge from node 3 to node 1"
+ ]
+]
+"""
+ def test_parse_gml(self):
+ G=networkx.parse_gml(self.simple_data,relabel=True)
+ assert_equals(sorted(G.nodes()),\
+ ['Node 1', 'Node 2', 'Node 3'])
+ assert_equals( [e for e in sorted(G.edges())],\
+ [('Node 1', 'Node 2'),
+ ('Node 2', 'Node 3'),
+ ('Node 3', 'Node 1')])
+
+ assert_equals( [e for e in sorted(G.edges(data=True))],\
+ [('Node 1', 'Node 2',
+ {'color': {'line': 'blue', 'thickness': 3},
+ 'label': 'Edge from node 1 to node 2'}),
+ ('Node 2', 'Node 3',
+ {'label': 'Edge from node 2 to node 3'}),
+ ('Node 3', 'Node 1',
+ {'label': 'Edge from node 3 to node 1'})])
+
+
+ def test_read_gml(self):
+ import os,tempfile
+ (fd,fname)=tempfile.mkstemp()
+ fh=open(fname,'w')
+ fh.write(self.simple_data)
+ fh.close()
+ Gin=networkx.read_gml(fname,relabel=True)
+ G=networkx.parse_gml(self.simple_data,relabel=True)
+ assert_equals( sorted(G.nodes(data=True)), sorted(Gin.nodes(data=True)))
+ assert_equals( sorted(G.edges(data=True)), sorted(Gin.edges(data=True)))
+ os.close(fd)
+ os.unlink(fname)
+
+ def test_relabel_duplicate(self):
+ data="""
+graph
+[
+ label ""
+ directed 1
+ node
+ [
+ id 0
+ label "same"
+ ]
+ node
+ [
+ id 1
+ label "same"
+ ]
+]
+"""
+ fh = io.BytesIO(data.encode('UTF-8'))
+ fh.seek(0)
+ assert_raises(networkx.NetworkXError,networkx.read_gml,fh,relabel=True)
+
+ def test_bool(self):
+ G=networkx.Graph()
+ G.add_node(1,on=True)
+ G.add_edge(1,2,on=False)
+ data = '\n'.join(list(networkx.generate_gml(G)))
+ answer ="""graph [
+ node [
+ id 0
+ label 1
+ on 1
+ ]
+ node [
+ id 1
+ label 2
+ ]
+ edge [
+ source 0
+ target 1
+ on 0
+ ]
+]"""
+ assert_equal(data,answer)
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_gpickle.py b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_gpickle.py
new file mode 100644
index 0000000..429f753
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_gpickle.py
@@ -0,0 +1,27 @@
+#!/usr/bin/env python
+from nose.tools import assert_equal
+import networkx as nx
+import os,tempfile
+
+class TestGpickle(object):
+ def setUp(self):
+ G=nx.Graph(name="test")
+ e=[('a','b'),('b','c'),('c','d'),('d','e'),('e','f'),('a','f')]
+ G.add_edges_from(e,width=10)
+ G.add_node('g',color='green')
+ G.graph['number']=1
+ self.G=G
+
+ def test_gpickle(self):
+ G=self.G
+ (fd,fname)=tempfile.mkstemp()
+        nx.write_gpickle(G,fname)
+        Gin=nx.read_gpickle(fname)
+ assert_equal(sorted(G.nodes(data=True)),
+ sorted(Gin.nodes(data=True)))
+ assert_equal(sorted(G.edges(data=True)),
+ sorted(Gin.edges(data=True)))
+ assert_equal(G.graph,Gin.graph)
+ os.close(fd)
+ os.unlink(fname)
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_graphml.py b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_graphml.py
new file mode 100644
index 0000000..c21c89b
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_graphml.py
@@ -0,0 +1,445 @@
+#!/usr/bin/env python
+from nose.tools import *
+from nose import SkipTest
+import networkx as nx
+import io
+import tempfile
+import os
+
+class TestGraph(object):
+ @classmethod
+ def setupClass(cls):
+ try:
+ import xml.etree.ElementTree
+ except ImportError:
+ raise SkipTest('xml.etree.ElementTree not available.')
+
+ def setUp(self):
+ self.simple_directed_data="""<?xml version="1.0" encoding="UTF-8"?>
+<!-- This file was written by the JAVA GraphML Library.-->
+<graphml xmlns="http://graphml.graphdrawing.org/xmlns" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd">
+ <graph id="G" edgedefault="directed">
+ <node id="n0"/>
+ <node id="n1"/>
+ <node id="n2"/>
+ <node id="n3"/>
+ <node id="n4"/>
+ <node id="n5"/>
+ <node id="n6"/>
+ <node id="n7"/>
+ <node id="n8"/>
+ <node id="n9"/>
+ <node id="n10"/>
+ <edge id="foo" source="n0" target="n2"/>
+ <edge source="n1" target="n2"/>
+ <edge source="n2" target="n3"/>
+ <edge source="n3" target="n5"/>
+ <edge source="n3" target="n4"/>
+ <edge source="n4" target="n6"/>
+ <edge source="n6" target="n5"/>
+ <edge source="n5" target="n7"/>
+ <edge source="n6" target="n8"/>
+ <edge source="n8" target="n7"/>
+ <edge source="n8" target="n9"/>
+ </graph>
+</graphml>"""
+ self.simple_directed_graph=nx.DiGraph()
+ self.simple_directed_graph.add_node('n10')
+ self.simple_directed_graph.add_edge('n0','n2',id='foo')
+ self.simple_directed_graph.add_edges_from([('n1','n2'),
+ ('n2','n3'),
+ ('n3','n5'),
+ ('n3','n4'),
+ ('n4','n6'),
+ ('n6','n5'),
+ ('n5','n7'),
+ ('n6','n8'),
+ ('n8','n7'),
+ ('n8','n9'),
+ ])
+
+ self.simple_directed_fh = \
+ io.BytesIO(self.simple_directed_data.encode('UTF-8'))
+
+
+ self.attribute_data="""<?xml version="1.0" encoding="UTF-8"?>
+<graphml xmlns="http://graphml.graphdrawing.org/xmlns"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns
+ http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd">
+ <key id="d0" for="node" attr.name="color" attr.type="string">
+ <default>yellow</default>
+ </key>
+ <key id="d1" for="edge" attr.name="weight" attr.type="double"/>
+ <graph id="G" edgedefault="directed">
+ <node id="n0">
+ <data key="d0">green</data>
+ </node>
+ <node id="n1"/>
+ <node id="n2">
+ <data key="d0">blue</data>
+ </node>
+ <node id="n3">
+ <data key="d0">red</data>
+ </node>
+ <node id="n4"/>
+ <node id="n5">
+ <data key="d0">turquoise</data>
+ </node>
+ <edge id="e0" source="n0" target="n2">
+ <data key="d1">1.0</data>
+ </edge>
+ <edge id="e1" source="n0" target="n1">
+ <data key="d1">1.0</data>
+ </edge>
+ <edge id="e2" source="n1" target="n3">
+ <data key="d1">2.0</data>
+ </edge>
+ <edge id="e3" source="n3" target="n2"/>
+ <edge id="e4" source="n2" target="n4"/>
+ <edge id="e5" source="n3" target="n5"/>
+ <edge id="e6" source="n5" target="n4">
+ <data key="d1">1.1</data>
+ </edge>
+ </graph>
+</graphml>
+"""
+ self.attribute_graph=nx.DiGraph(id='G')
+ self.attribute_graph.graph['node_default']={'color':'yellow'}
+ self.attribute_graph.add_node('n0',color='green')
+ self.attribute_graph.add_node('n2',color='blue')
+ self.attribute_graph.add_node('n3',color='red')
+ self.attribute_graph.add_node('n4')
+ self.attribute_graph.add_node('n5',color='turquoise')
+ self.attribute_graph.add_edge('n0','n2',id='e0',weight=1.0)
+ self.attribute_graph.add_edge('n0','n1',id='e1',weight=1.0)
+ self.attribute_graph.add_edge('n1','n3',id='e2',weight=2.0)
+ self.attribute_graph.add_edge('n3','n2',id='e3')
+ self.attribute_graph.add_edge('n2','n4',id='e4')
+ self.attribute_graph.add_edge('n3','n5',id='e5')
+ self.attribute_graph.add_edge('n5','n4',id='e6',weight=1.1)
+ self.attribute_fh = io.BytesIO(self.attribute_data.encode('UTF-8'))
+
+ self.simple_undirected_data="""<?xml version="1.0" encoding="UTF-8"?>
+<graphml xmlns="http://graphml.graphdrawing.org/xmlns" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd">
+ <graph id="G">
+ <node id="n0"/>
+ <node id="n1"/>
+ <node id="n2"/>
+ <node id="n10"/>
+ <edge id="foo" source="n0" target="n2"/>
+ <edge source="n1" target="n2"/>
+ <edge source="n2" target="n3"/>
+ </graph>
+</graphml>"""
+# <edge source="n8" target="n10" directed="false"/>
+ self.simple_undirected_graph=nx.Graph()
+ self.simple_undirected_graph.add_node('n10')
+ self.simple_undirected_graph.add_edge('n0','n2',id='foo')
+ self.simple_undirected_graph.add_edges_from([('n1','n2'),
+ ('n2','n3'),
+ ])
+
+ self.simple_undirected_fh = io.BytesIO(self.simple_undirected_data.encode('UTF-8'))
+
+
+ def test_read_simple_directed_graphml(self):
+ G=self.simple_directed_graph
+ H=nx.read_graphml(self.simple_directed_fh)
+ assert_equal(sorted(G.nodes()),sorted(H.nodes()))
+ assert_equal(sorted(G.edges()),sorted(H.edges()))
+ assert_equal(sorted(G.edges(data=True)),
+ sorted(H.edges(data=True)))
+ self.simple_directed_fh.seek(0)
+
+ I=nx.parse_graphml(self.simple_directed_data)
+ assert_equal(sorted(G.nodes()),sorted(I.nodes()))
+ assert_equal(sorted(G.edges()),sorted(I.edges()))
+ assert_equal(sorted(G.edges(data=True)),
+ sorted(I.edges(data=True)))
+
+ def test_write_read_simple_directed_graphml(self):
+ G=self.simple_directed_graph
+ fh=io.BytesIO()
+ nx.write_graphml(G,fh)
+ fh.seek(0)
+ H=nx.read_graphml(fh)
+ assert_equal(sorted(G.nodes()),sorted(H.nodes()))
+ assert_equal(sorted(G.edges()),sorted(H.edges()))
+ assert_equal(sorted(G.edges(data=True)),
+ sorted(H.edges(data=True)))
+ self.simple_directed_fh.seek(0)
+
+ def test_read_simple_undirected_graphml(self):
+ G=self.simple_undirected_graph
+ H=nx.read_graphml(self.simple_undirected_fh)
+ assert_equal(sorted(G.nodes()),sorted(H.nodes()))
+ assert_equal(
+ sorted(sorted(e) for e in G.edges()),
+ sorted(sorted(e) for e in H.edges()))
+ self.simple_undirected_fh.seek(0)
+
+ I=nx.parse_graphml(self.simple_undirected_data)
+ assert_equal(sorted(G.nodes()),sorted(I.nodes()))
+ assert_equal(
+ sorted(sorted(e) for e in G.edges()),
+ sorted(sorted(e) for e in I.edges()))
+
+ def test_read_attribute_graphml(self):
+ G=self.attribute_graph
+ H=nx.read_graphml(self.attribute_fh)
+ assert_equal(sorted(G.nodes(True)),sorted(H.nodes(data=True)))
+ ge=sorted(G.edges(data=True))
+ he=sorted(H.edges(data=True))
+ for a,b in zip(ge,he):
+ assert_equal(a,b)
+ self.attribute_fh.seek(0)
+
+ I=nx.parse_graphml(self.attribute_data)
+ assert_equal(sorted(G.nodes(True)),sorted(I.nodes(data=True)))
+ ge=sorted(G.edges(data=True))
+ he=sorted(I.edges(data=True))
+ for a,b in zip(ge,he):
+ assert_equal(a,b)
+
+ def test_directed_edge_in_undirected(self):
+ s="""<?xml version="1.0" encoding="UTF-8"?>
+<graphml xmlns="http://graphml.graphdrawing.org/xmlns" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd">
+ <graph id="G">
+ <node id="n0"/>
+ <node id="n1"/>
+ <node id="n2"/>
+ <edge source="n0" target="n1"/>
+ <edge source="n1" target="n2" directed='true'/>
+ </graph>
+</graphml>"""
+ fh = io.BytesIO(s.encode('UTF-8'))
+ assert_raises(nx.NetworkXError,nx.read_graphml,fh)
+ assert_raises(nx.NetworkXError,nx.parse_graphml,s)
+
+ def test_undirected_edge_in_directed(self):
+ s="""<?xml version="1.0" encoding="UTF-8"?>
+<graphml xmlns="http://graphml.graphdrawing.org/xmlns" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd">
+ <graph id="G" edgedefault='directed'>
+ <node id="n0"/>
+ <node id="n1"/>
+ <node id="n2"/>
+ <edge source="n0" target="n1"/>
+ <edge source="n1" target="n2" directed='false'/>
+ </graph>
+</graphml>"""
+ fh = io.BytesIO(s.encode('UTF-8'))
+ assert_raises(nx.NetworkXError,nx.read_graphml,fh)
+ assert_raises(nx.NetworkXError,nx.parse_graphml,s)
+
+ def test_key_error(self):
+ s="""<?xml version="1.0" encoding="UTF-8"?>
+<graphml xmlns="http://graphml.graphdrawing.org/xmlns"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns
+ http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd">
+ <key id="d0" for="node" attr.name="color" attr.type="string">
+ <default>yellow</default>
+ </key>
+ <key id="d1" for="edge" attr.name="weight" attr.type="double"/>
+ <graph id="G" edgedefault="directed">
+ <node id="n0">
+ <data key="d0">green</data>
+ </node>
+ <node id="n1"/>
+ <node id="n2">
+ <data key="d0">blue</data>
+ </node>
+ <edge id="e0" source="n0" target="n2">
+ <data key="d2">1.0</data>
+ </edge>
+ </graph>
+</graphml>
+"""
+ fh = io.BytesIO(s.encode('UTF-8'))
+ assert_raises(nx.NetworkXError,nx.read_graphml,fh)
+ assert_raises(nx.NetworkXError,nx.parse_graphml,s)
+
+ def test_hyperedge_error(self):
+ s="""<?xml version="1.0" encoding="UTF-8"?>
+<graphml xmlns="http://graphml.graphdrawing.org/xmlns"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns
+ http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd">
+ <key id="d0" for="node" attr.name="color" attr.type="string">
+ <default>yellow</default>
+ </key>
+ <key id="d1" for="edge" attr.name="weight" attr.type="double"/>
+ <graph id="G" edgedefault="directed">
+ <node id="n0">
+ <data key="d0">green</data>
+ </node>
+ <node id="n1"/>
+ <node id="n2">
+ <data key="d0">blue</data>
+ </node>
+ <hyperedge id="e0" source="n0" target="n2">
+ <endpoint node="n0"/>
+ <endpoint node="n1"/>
+ <endpoint node="n2"/>
+ </hyperedge>
+ </graph>
+</graphml>
+"""
+ fh = io.BytesIO(s.encode('UTF-8'))
+ assert_raises(nx.NetworkXError,nx.read_graphml,fh)
+ assert_raises(nx.NetworkXError,nx.parse_graphml,s)
+
+ # remove test until we get the "name" issue sorted
+ # https://networkx.lanl.gov/trac/ticket/544
+ def test_default_attribute(self):
+ G=nx.Graph()
+ G.add_node(1,label=1,color='green')
+ G.add_path([0,1,2,3])
+ G.add_edge(1,2,weight=3)
+ G.graph['node_default']={'color':'yellow'}
+ G.graph['edge_default']={'weight':7}
+ fh = io.BytesIO()
+ nx.write_graphml(G,fh)
+ fh.seek(0)
+ H=nx.read_graphml(fh,node_type=int)
+ assert_equal(sorted(G.nodes()),sorted(H.nodes()))
+ assert_equal(
+ sorted(sorted(e) for e in G.edges()),
+ sorted(sorted(e) for e in H.edges()))
+ assert_equal(G.graph,H.graph)
+
+ def test_multigraph_keys(self):
+ # test that multigraphs use edge id attributes as key
+ pass
+
+ def test_multigraph_to_graph(self):
+ # test converting multigraph to graph if no parallel edges are found
+ pass
+
+ def test_yfiles_extension(self):
+ data="""<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<graphml xmlns="http://graphml.graphdrawing.org/xmlns" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:y="http://www.yworks.com/xml/graphml" xmlns:yed="http://www.yworks.com/xml/yed/3" xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns http://www.yworks.com/xml/schema/graphml/1.1/ygraphml.xsd">
+ <!--Created by yFiles for Java 2.7-->
+ <key for="graphml" id="d0" yfiles.type="resources"/>
+ <key attr.name="url" attr.type="string" for="node" id="d1"/>
+ <key attr.name="description" attr.type="string" for="node" id="d2"/>
+ <key for="node" id="d3" yfiles.type="nodegraphics"/>
+ <key attr.name="Description" attr.type="string" for="graph" id="d4">
+ <default/>
+ </key>
+ <key attr.name="url" attr.type="string" for="edge" id="d5"/>
+ <key attr.name="description" attr.type="string" for="edge" id="d6"/>
+ <key for="edge" id="d7" yfiles.type="edgegraphics"/>
+ <graph edgedefault="directed" id="G">
+ <node id="n0">
+ <data key="d3">
+ <y:ShapeNode>
+ <y:Geometry height="30.0" width="30.0" x="125.0" y="100.0"/>
+ <y:Fill color="#FFCC00" transparent="false"/>
+ <y:BorderStyle color="#000000" type="line" width="1.0"/>
+ <y:NodeLabel alignment="center" autoSizePolicy="content" borderDistance="0.0" fontFamily="Dialog" fontSize="13" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="19.1328125" modelName="internal" modelPosition="c" textColor="#000000" visible="true" width="12.27099609375" x="8.864501953125" y="5.43359375">1</y:NodeLabel>
+ <y:Shape type="rectangle"/>
+ </y:ShapeNode>
+ </data>
+ </node>
+ <node id="n1">
+ <data key="d3">
+ <y:ShapeNode>
+ <y:Geometry height="30.0" width="30.0" x="183.0" y="205.0"/>
+ <y:Fill color="#FFCC00" transparent="false"/>
+ <y:BorderStyle color="#000000" type="line" width="1.0"/>
+ <y:NodeLabel alignment="center" autoSizePolicy="content" borderDistance="0.0" fontFamily="Dialog" fontSize="13" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="19.1328125" modelName="internal" modelPosition="c" textColor="#000000" visible="true" width="12.27099609375" x="8.864501953125" y="5.43359375">2</y:NodeLabel>
+ <y:Shape type="rectangle"/>
+ </y:ShapeNode>
+ </data>
+ </node>
+ <edge id="e0" source="n0" target="n1">
+ <data key="d7">
+ <y:PolyLineEdge>
+ <y:Path sx="0.0" sy="0.0" tx="0.0" ty="0.0"/>
+ <y:LineStyle color="#000000" type="line" width="1.0"/>
+ <y:Arrows source="none" target="standard"/>
+ <y:BendStyle smoothed="false"/>
+ </y:PolyLineEdge>
+ </data>
+ </edge>
+ </graph>
+ <data key="d0">
+ <y:Resources/>
+ </data>
+</graphml>
+"""
+ fh = io.BytesIO(data.encode('UTF-8'))
+ G=nx.read_graphml(fh)
+ assert_equal(G.edges(),[('n0','n1')])
+ assert_equal(G['n0']['n1']['id'],'e0')
+ assert_equal(G.node['n0']['label'],'1')
+ assert_equal(G.node['n1']['label'],'2')
+
+ H=nx.parse_graphml(data)
+ assert_equal(H.edges(),[('n0','n1')])
+ assert_equal(H['n0']['n1']['id'],'e0')
+ assert_equal(H.node['n0']['label'],'1')
+ assert_equal(H.node['n1']['label'],'2')
+
+ def test_unicode(self):
+ G = nx.Graph()
+ try: # Python 3.x
+ name1 = chr(2344) + chr(123) + chr(6543)
+ name2 = chr(5543) + chr(1543) + chr(324)
+ node_type=str
+ except ValueError: # Python 2.6+
+ name1 = unichr(2344) + unichr(123) + unichr(6543)
+ name2 = unichr(5543) + unichr(1543) + unichr(324)
+ node_type=unicode
+ G.add_edge(name1, 'Radiohead', attr_dict={'foo': name2})
+ fd, fname = tempfile.mkstemp()
+ nx.write_graphml(G, fname)
+ H = nx.read_graphml(fname,node_type=node_type)
+ assert_equal(G.adj, H.adj)
+ os.close(fd)
+ os.unlink(fname)
+
+
+ def test_bool(self):
+ s="""<?xml version="1.0" encoding="UTF-8"?>
+<graphml xmlns="http://graphml.graphdrawing.org/xmlns"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns
+ http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd">
+ <key id="d0" for="node" attr.name="test" attr.type="boolean">
+ <default>false</default>
+ </key>
+ <graph id="G" edgedefault="directed">
+ <node id="n0">
+ <data key="d0">True</data>
+ </node>
+ <node id="n1"/>
+ <node id="n2">
+ <data key="d0">False</data>
+ </node>
+ <node id="n3">
+ <data key="d0">true</data>
+ </node>
+ <node id="n4">
+ <data key="d0">false</data>
+ </node>
+
+
+ </graph>
+</graphml>
+"""
+ fh = io.BytesIO(s.encode('UTF-8'))
+ G=nx.read_graphml(fh)
+ assert_equal(G.node['n0']['test'],True)
+ assert_equal(G.node['n2']['test'],False)
+
+ H=nx.parse_graphml(s)
+ assert_equal(H.node['n0']['test'],True)
+ assert_equal(H.node['n2']['test'],False)
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_leda.py b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_leda.py
new file mode 100644
index 0000000..1c3614e
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_leda.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx as nx
+import os,tempfile
+
+class TestLEDA(object):
+
+ def test_parse_leda(self):
+ data="""#header section \nLEDA.GRAPH \nstring\nint\n-1\n#nodes section\n5 \n|{v1}| \n|{v2}| \n|{v3}| \n|{v4}| \n|{v5}| \n\n#edges section\n7 \n1 2 0 |{4}| \n1 3 0 |{3}| \n2 3 0 |{2}| \n3 4 0 |{3}| \n3 5 0 |{7}| \n4 5 0 |{6}| \n5 1 0 |{foo}|"""
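+        # parse_leda accepts either a single string or an iterable of lines: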
+ G=nx.parse_leda(data)
+ G=nx.parse_leda(data.split('\n'))
+ assert_equal(sorted(G.nodes()),
+ ['v1', 'v2', 'v3', 'v4', 'v5'])
+ assert_equal([e for e in sorted(G.edges(data=True))],
+ [('v1', 'v2', {'label': '4'}),
+ ('v1', 'v3', {'label': '3'}),
+ ('v2', 'v3', {'label': '2'}),
+ ('v3', 'v4', {'label': '3'}),
+ ('v3', 'v5', {'label': '7'}),
+ ('v4', 'v5', {'label': '6'}),
+ ('v5', 'v1', {'label': 'foo'})])
+
+
+ def test_read_LEDA(self):
+ data="""#header section \nLEDA.GRAPH \nstring\nint\n-1\n#nodes section\n5 \n|{v1}| \n|{v2}| \n|{v3}| \n|{v4}| \n|{v5}| \n\n#edges section\n7 \n1 2 0 |{4}| \n1 3 0 |{3}| \n2 3 0 |{2}| \n3 4 0 |{3}| \n3 5 0 |{7}| \n4 5 0 |{6}| \n5 1 0 |{foo}|"""
+ G=nx.parse_leda(data)
+ (fd,fname)=tempfile.mkstemp()
+ fh=open(fname,'w')
+ b=fh.write(data)
+ fh.close()
+ Gin=nx.read_leda(fname)
+ assert_equal(sorted(G.nodes()),sorted(Gin.nodes()))
+ assert_equal(sorted(G.edges()),sorted(Gin.edges()))
+ os.close(fd)
+ os.unlink(fname)
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_p2g.py b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_p2g.py
new file mode 100644
index 0000000..6bd7d4e
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_p2g.py
@@ -0,0 +1,64 @@
+from nose.tools import assert_equal, assert_raises, assert_not_equal
+import networkx as nx
+import io
+import tempfile
+import os
+from networkx.readwrite.p2g import *
+from networkx.testing import *
+
+
+class TestP2G:
+
+ def setUp(self):
+ self.G=nx.Graph(name="test")
+ e=[('a','b'),('b','c'),('c','d'),('d','e'),('e','f'),('a','f')]
+ self.G.add_edges_from(e)
+ self.G.add_node('g')
+ self.DG=nx.DiGraph(self.G)
+
+ def test_read_p2g(self):
+ s = b"""\
+name
+3 4
+a
+1 2
+b
+
+c
+0 2
+"""
+ bytesIO = io.BytesIO(s)
+ G = read_p2g(bytesIO)
+ assert_equal(G.name,'name')
+ assert_equal(sorted(G),['a','b','c'])
+        edges = [(str(u),str(v)) for u,v in G.edges()]
+        assert_edges_equal(edges,[('a','c'),('a','b'),('c','a'),('c','c')])
+
+ def test_write_p2g(self):
+ s=b"""foo
+3 2
+1
+1
+2
+2
+3
+
+"""
+ fh=io.BytesIO()
+ G=nx.DiGraph()
+ G.name='foo'
+ G.add_edges_from([(1,2),(2,3)])
+ write_p2g(G,fh)
+ fh.seek(0)
+ r=fh.read()
+ assert_equal(r,s)
+
+ def test_write_read_p2g(self):
+ fh=io.BytesIO()
+ G=nx.DiGraph()
+ G.name='foo'
+ G.add_edges_from([('a','b'),('b','c')])
+ write_p2g(G,fh)
+ fh.seek(0)
+ H=read_p2g(fh)
+ assert_edges_equal(G.edges(),H.edges())
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_pajek.py b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_pajek.py
new file mode 100644
index 0000000..a2e43d0
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_pajek.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+"""
+Pajek tests
+"""
+from nose.tools import assert_equal
+from networkx import *
+import os,tempfile
+from io import open
+from networkx.testing import *
+
+class TestPajek(object):
+ def setUp(self):
+ self.data="""*network Tralala\n*vertices 4\n 1 "A1" 0.0938 0.0896 ellipse x_fact 1 y_fact 1\n 2 "Bb" 0.8188 0.2458 ellipse x_fact 1 y_fact 1\n 3 "C" 0.3688 0.7792 ellipse x_fact 1\n 4 "D2" 0.9583 0.8563 ellipse x_fact 1\n*arcs\n1 1 1 h2 0 w 3 c Blue s 3 a1 -130 k1 0.6 a2 -130 k2 0.6 ap 0.5 l "Bezier loop" lc BlueViolet fos 20 lr 58 lp 0.3 la 360\n2 1 1 h2 0 a1 120 k1 1.3 a2 -120 k2 0.3 ap 25 l "Bezier arc" lphi 270 la 180 lr 19 lp 0.5\n1 2 1 h2 0 a1 40 k1 2.8 a2 30 k2 0.8 ap 25 l "Bezier arc" lphi 90 la 0 lp 0.65\n4 2 -1 h2 0 w 1 k1 -2 k2 250 ap 25 l "Circular arc" c Red lc OrangeRed\n3 4 1 p Dashed h2 0 w 2 c OliveGreen ap 25 l "Straight arc" lc PineGreen\n1 3 1 p Dashed h2 0 w 5 k1 -1 k2 -20 ap 25 l "Oval arc" c Brown lc Black\n3 3 -1 h1 6 w 1 h2 12 k1 -2 k2 -15 ap 0.5 l "Circular loop" c Red lc OrangeRed lphi 270 la 180"""
+ self.G=nx.MultiDiGraph()
+ self.G.add_nodes_from(['A1', 'Bb', 'C', 'D2'])
+ self.G.add_edges_from([('A1', 'A1'), ('A1', 'Bb'), ('A1', 'C'),
+ ('Bb', 'A1'),('C', 'C'), ('C', 'D2'),
+ ('D2', 'Bb')])
+
+ self.G.graph['name']='Tralala'
+ (self.fd,self.fname)=tempfile.mkstemp()
+ fh=open(self.fname,'wb')
+ fh.write(self.data.encode('UTF-8'))
+ fh.close()
+
+ def tearDown(self):
+ os.close(self.fd)
+ os.unlink(self.fname)
+
+ def test_parse_pajek_simple(self):
+ # Example without node positions or shape
+ data="""*Vertices 2\n1 "1"\n2 "2"\n*Edges\n1 2\n2 1"""
+ G=parse_pajek(data)
+ assert_equal(sorted(G.nodes()), ['1', '2'])
+ assert_edges_equal(G.edges(), [('1', '2'), ('1', '2')])
+
+ def test_parse_pajek(self):
+ G=parse_pajek(self.data)
+ assert_equal(sorted(G.nodes()), ['A1', 'Bb', 'C', 'D2'])
+ assert_edges_equal(G.edges(), [('A1', 'A1'), ('A1', 'Bb'),
+ ('A1', 'C'), ('Bb', 'A1'),
+ ('C', 'C'), ('C', 'D2'), ('D2', 'Bb')])
+
+ def test_read_pajek(self):
+ G=parse_pajek(self.data)
+ Gin=read_pajek(self.fname)
+ assert_equal(sorted(G.nodes()), sorted(Gin.nodes()))
+ assert_edges_equal(G.edges(), Gin.edges())
+ assert_equal(self.G.graph,Gin.graph)
+ for n in G.node:
+ assert_equal(G.node[n],Gin.node[n])
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_shp.py b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_shp.py
new file mode 100644
index 0000000..91c392c
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_shp.py
@@ -0,0 +1,140 @@
+"""Unit tests for shp.
+"""
+
+import os
+import tempfile
+from nose import SkipTest
+from nose.tools import assert_equal
+
+import networkx as nx
+
+
+class TestShp(object):
+ @classmethod
+ def setupClass(cls):
+ global ogr
+ try:
+ from osgeo import ogr
+ except ImportError:
+ raise SkipTest('ogr not available.')
+
+ def deletetmp(self, drv, *paths):
+ for p in paths:
+ if os.path.exists(p):
+ drv.DeleteDataSource(p)
+
+ def setUp(self):
+
+        def createlayer(ds):
+            lyr = ds.CreateLayer("edges", None, ogr.wkbLineString)
+ namedef = ogr.FieldDefn("Name", ogr.OFTString)
+ namedef.SetWidth(32)
+ lyr.CreateField(namedef)
+ return lyr
+
+ drv = ogr.GetDriverByName("ESRI Shapefile")
+
+ testdir = os.path.join(tempfile.gettempdir(), 'shpdir')
+ shppath = os.path.join(tempfile.gettempdir(), 'tmpshp.shp')
+
+ self.deletetmp(drv, testdir, shppath)
+ os.mkdir(testdir)
+
+ shp = drv.CreateDataSource(shppath)
+ lyr = createlayer(shp)
+ self.names = ['a', 'b', 'c'] # edgenames
+ self.paths = ( [(1.0, 1.0), (2.0, 2.0)],
+ [(2.0, 2.0), (3.0, 3.0)],
+ [(0.9, 0.9), (4.0, 2.0)]
+ )
+ for path, name in zip(self.paths, self.names):
+ feat = ogr.Feature(lyr.GetLayerDefn())
+ g = ogr.Geometry(ogr.wkbLineString)
+ map(lambda xy: g.AddPoint_2D(*xy), path)
+ feat.SetGeometry(g)
+ feat.SetField("Name", name)
+ lyr.CreateFeature(feat)
+ self.shppath = shppath
+ self.testdir = testdir
+ self.drv = drv
+
+ def testload(self):
+ expected = nx.DiGraph()
+ map(expected.add_path, self.paths)
+ G = nx.read_shp(self.shppath)
+ assert_equal(sorted(expected.node), sorted(G.node))
+ assert_equal(sorted(expected.edges()), sorted(G.edges()))
+ names = [G.get_edge_data(s, e)['Name'] for s, e in G.edges()]
+ assert_equal(self.names, sorted(names))
+
+ def checkgeom(self, lyr, expected):
+ feature = lyr.GetNextFeature()
+ actualwkt = []
+ while feature:
+ actualwkt.append(feature.GetGeometryRef().ExportToWkt())
+ feature = lyr.GetNextFeature()
+ assert_equal(sorted(expected), sorted(actualwkt))
+
+ def test_geometryexport(self):
+ expectedpoints = (
+ "POINT (1 1)",
+ "POINT (2 2)",
+ "POINT (3 3)",
+ "POINT (0.9 0.9)",
+ "POINT (4 2)"
+ )
+ expectedlines = (
+ "LINESTRING (1 1,2 2)",
+ "LINESTRING (2 2,3 3)",
+ "LINESTRING (0.9 0.9,4 2)"
+ )
+ tpath = os.path.join(tempfile.gettempdir(), 'shpdir')
+ G = nx.read_shp(self.shppath)
+ nx.write_shp(G, tpath)
+ shpdir = ogr.Open(tpath)
+ self.checkgeom(shpdir.GetLayerByName("nodes"), expectedpoints)
+ self.checkgeom(shpdir.GetLayerByName("edges"), expectedlines)
+
+ def test_attributeexport(self):
+ def testattributes(lyr, graph):
+ feature = lyr.GetNextFeature()
+ while feature:
+ coords = []
+ ref = feature.GetGeometryRef()
+ for i in xrange(ref.GetPointCount()):
+ coords.append(ref.GetPoint_2D(i))
+ name = feature.GetFieldAsString('Name')
+ assert_equal(graph.get_edge_data(*coords)['Name'], name)
+ feature = lyr.GetNextFeature()
+
+ tpath = os.path.join(tempfile.gettempdir(), 'shpdir')
+
+ G = nx.read_shp(self.shppath)
+ nx.write_shp(G, tpath)
+ shpdir = ogr.Open(tpath)
+ edges = shpdir.GetLayerByName("edges")
+ testattributes(edges, G)
+
+ def test_wkt_export(self):
+ G = nx.DiGraph()
+ tpath = os.path.join(tempfile.gettempdir(), 'shpdir')
+ points = (
+ "POINT (0.9 0.9)",
+ "POINT (4 2)"
+ )
+ line = (
+ "LINESTRING (0.9 0.9,4 2)",
+ )
+ G.add_node(1, Wkt=points[0])
+ G.add_node(2, Wkt=points[1])
+ G.add_edge(1, 2, Wkt=line[0])
+ try:
+ nx.write_shp(G, tpath)
+ except Exception as e:
+ assert False, e
+ shpdir = ogr.Open(tpath)
+ self.checkgeom(shpdir.GetLayerByName("nodes"), points)
+ self.checkgeom(shpdir.GetLayerByName("edges"), line)
+
+ def tearDown(self):
+ self.deletetmp(self.drv, self.testdir, self.shppath)
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_sparsegraph6.py b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_sparsegraph6.py
new file mode 100644
index 0000000..fea9161
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_sparsegraph6.py
@@ -0,0 +1,87 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx as nx
+import os,tempfile
+
+class TestGraph6(object):
+
+ def test_parse_graph6(self):
+ data="""DF{"""
+ G=nx.parse_graph6(data)
+ assert_equal(sorted(G.nodes()),[0, 1, 2, 3, 4])
+ assert_equal([e for e in sorted(G.edges())],
+ [(0, 3), (0, 4), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)])
+
+ def test_read_graph6(self):
+ data="""DF{"""
+ G=nx.parse_graph6(data)
+ (fd,fname)=tempfile.mkstemp()
+ fh=open(fname,'w')
+ b=fh.write(data)
+ fh.close()
+ Gin=nx.read_graph6(fname)
+ assert_equal(sorted(G.nodes()),sorted(Gin.nodes()))
+ assert_equal(sorted(G.edges()),sorted(Gin.edges()))
+ os.close(fd)
+ os.unlink(fname)
+
+ def test_read_many_graph6(self):
+ # Read many graphs into list
+ data="""DF{\nD`{\nDqK\nD~{\n"""
+ (fd,fname)=tempfile.mkstemp()
+ fh=open(fname,'w')
+ b=fh.write(data)
+ fh.close()
+ glist=nx.read_graph6_list(fname)
+ assert_equal(len(glist),4)
+ for G in glist:
+ assert_equal(sorted(G.nodes()),[0, 1, 2, 3, 4])
+ os.close(fd)
+ os.unlink(fname)
+
+
+class TestSparseGraph6(object):
+
+ def test_parse_sparse6(self):
+ data=""":Q___eDcdFcDeFcE`GaJ`IaHbKNbLM"""
+ G=nx.parse_sparse6(data)
+ assert_equal(sorted(G.nodes()),
+ [0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17])
+ assert_equal([e for e in sorted(G.edges())],
+ [(0, 1), (0, 2), (0, 3), (1, 12), (1, 14), (2, 13),
+ (2, 15), (3, 16), (3, 17), (4, 7), (4, 9), (4, 11),
+ (5, 6), (5, 8), (5, 9), (6, 10), (6, 11), (7, 8),
+ (7, 10), (8, 12), (9, 15), (10, 14), (11, 13),
+ (12, 16), (13, 17), (14, 17), (15, 16)])
+
+
+ def test_read_sparse6(self):
+ data=""":Q___eDcdFcDeFcE`GaJ`IaHbKNbLM"""
+ G=nx.parse_sparse6(data)
+ (fd,fname)=tempfile.mkstemp()
+ fh=open(fname,'w')
+ b=fh.write(data)
+ fh.close()
+ Gin=nx.read_sparse6(fname)
+ assert_equal(sorted(G.nodes()),sorted(Gin.nodes()))
+ assert_equal(sorted(G.edges()),sorted(Gin.edges()))
+ os.close(fd)
+ os.unlink(fname)
+
+    def test_read_many_sparse6(self):
+ # Read many graphs into list
+ data=""":Q___eDcdFcDeFcE`GaJ`IaHbKNbLM\n:Q___dCfDEdcEgcbEGbFIaJ`JaHN`IM"""
+ (fd,fname)=tempfile.mkstemp()
+ fh=open(fname,'w')
+ b=fh.write(data)
+ fh.close()
+ glist=nx.read_sparse6_list(fname)
+ assert_equal(len(glist),2)
+ for G in glist:
+ assert_equal(sorted(G.nodes()),
+ [0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17])
+ os.close(fd)
+ os.unlink(fname)
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_yaml.py b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_yaml.py
new file mode 100644
index 0000000..ca0ff56
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/readwrite/tests/test_yaml.py
@@ -0,0 +1,53 @@
+"""
+ Unit tests for yaml.
+"""
+
+import os,tempfile
+from nose import SkipTest
+from nose.tools import assert_equal
+
+import networkx as nx
+
+class TestYaml(object):
+ @classmethod
+ def setupClass(cls):
+ global yaml
+ try:
+ import yaml
+ except ImportError:
+ raise SkipTest('yaml not available.')
+
+ def setUp(self):
+ self.build_graphs()
+
+ def build_graphs(self):
+ self.G = nx.Graph(name="test")
+ e = [('a','b'),('b','c'),('c','d'),('d','e'),('e','f'),('a','f')]
+ self.G.add_edges_from(e)
+ self.G.add_node('g')
+
+ self.DG = nx.DiGraph(self.G)
+
+ self.MG = nx.MultiGraph()
+ self.MG.add_weighted_edges_from([(1,2,5),(1,2,5),(1,2,1),(3,3,42)])
+
+ def assert_equal(self, G, data=False):
+ (fd, fname) = tempfile.mkstemp()
+ nx.write_yaml(G, fname)
+        Gin = nx.read_yaml(fname)
+
+ assert_equal(sorted(G.nodes()),sorted(Gin.nodes()))
+ assert_equal(G.edges(data=data),Gin.edges(data=data))
+
+ os.close(fd)
+ os.unlink(fname)
+
+ def testUndirected(self):
+ self.assert_equal(self.G, False)
+
+ def testDirected(self):
+ self.assert_equal(self.DG, False)
+
+ def testMultiGraph(self):
+ self.assert_equal(self.MG, True)
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/relabel.py b/lib/python2.7/site-packages/setoolsgui/networkx/relabel.py
new file mode 100644
index 0000000..4ff1196
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/relabel.py
@@ -0,0 +1,205 @@
+# Copyright (C) 2006-2013 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+import networkx as nx
+__author__ = """\n""".join(['Aric Hagberg <aric.hagberg@gmail.com>',
+ 'Pieter Swart (swart@lanl.gov)',
+ 'Dan Schult (dschult@colgate.edu)'])
+__all__ = ['convert_node_labels_to_integers', 'relabel_nodes']
+
+def relabel_nodes(G, mapping, copy=True):
+ """Relabel the nodes of the graph G.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph
+
+ mapping : dictionary
+ A dictionary with the old labels as keys and new labels as values.
+ A partial mapping is allowed.
+
+ copy : bool (optional, default=True)
+ If True return a copy, or if False relabel the nodes in place.
+
+ Examples
+ --------
+ >>> G=nx.path_graph(3) # nodes 0-1-2
+ >>> mapping={0:'a',1:'b',2:'c'}
+ >>> H=nx.relabel_nodes(G,mapping)
+ >>> print(sorted(H.nodes()))
+ ['a', 'b', 'c']
+
+ >>> G=nx.path_graph(26) # nodes 0..25
+ >>> mapping=dict(zip(G.nodes(),"abcdefghijklmnopqrstuvwxyz"))
+ >>> H=nx.relabel_nodes(G,mapping) # nodes a..z
+ >>> mapping=dict(zip(G.nodes(),range(1,27)))
+ >>> G1=nx.relabel_nodes(G,mapping) # nodes 1..26
+
+ Partial in-place mapping:
+
+ >>> G=nx.path_graph(3) # nodes 0-1-2
+ >>> mapping={0:'a',1:'b'} # 0->'a' and 1->'b'
+ >>> G=nx.relabel_nodes(G,mapping, copy=False)
+
+ print(G.nodes())
+ [2, 'b', 'a']
+
+ Mapping as function:
+
+ >>> G=nx.path_graph(3)
+ >>> def mapping(x):
+ ... return x**2
+ >>> H=nx.relabel_nodes(G,mapping)
+ >>> print(H.nodes())
+ [0, 1, 4]
+
+ Notes
+ -----
+ Only the nodes specified in the mapping will be relabeled.
+
+ The keyword setting copy=False modifies the graph in place.
+ This is not always possible if the mapping is circular.
+ In that case use copy=True.
+
+ See Also
+ --------
+ convert_node_labels_to_integers
+ """
+ # you can pass a function f(old_label)->new_label
+ # but we'll just make a dictionary here regardless
+ if not hasattr(mapping,"__getitem__"):
+ m = dict((n,mapping(n)) for n in G)
+ else:
+ m=mapping
+ if copy:
+ return _relabel_copy(G,m)
+ else:
+ return _relabel_inplace(G,m)
+
+
+def _relabel_inplace(G, mapping):
+ old_labels=set(mapping.keys())
+ new_labels=set(mapping.values())
+ if len(old_labels & new_labels) > 0:
+ # labels sets overlap
+ # can we topological sort and still do the relabeling?
+ D=nx.DiGraph(list(mapping.items()))
+ D.remove_edges_from(D.selfloop_edges())
+ try:
+ nodes=nx.topological_sort(D)
+ except nx.NetworkXUnfeasible:
+ raise nx.NetworkXUnfeasible('The node label sets are overlapping '
+ 'and no ordering can resolve the '
+ 'mapping. Use copy=True.')
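+        # For example, with mapping {0: 1, 1: 2} on nodes {0, 1, 2}, node 1
+        # must be renamed to 2 before node 0 is renamed to 1, i.e. the
+        # reverse of the topological order 0 -> 1 -> 2 in D. A circular
+        # mapping such as {0: 1, 1: 0} admits no such order and raises
+        # NetworkXUnfeasible above.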
+ nodes.reverse() # reverse topological order
+ else:
+ # non-overlapping label sets
+ nodes=old_labels
+
+ multigraph = G.is_multigraph()
+ directed = G.is_directed()
+
+ for old in nodes:
+ try:
+ new=mapping[old]
+ except KeyError:
+ continue
+ try:
+ G.add_node(new,attr_dict=G.node[old])
+ except KeyError:
+ raise KeyError("Node %s is not in the graph"%old)
+ if multigraph:
+ new_edges=[(new,old == target and new or target,key,data)
+ for (_,target,key,data)
+ in G.edges(old,data=True,keys=True)]
+ if directed:
+ new_edges+=[(old == source and new or source,new,key,data)
+ for (source,_,key,data)
+ in G.in_edges(old,data=True,keys=True)]
+ else:
+ new_edges=[(new,old == target and new or target,data)
+ for (_,target,data) in G.edges(old,data=True)]
+ if directed:
+ new_edges+=[(old == source and new or source,new,data)
+ for (source,_,data) in G.in_edges(old,data=True)]
+ G.remove_node(old)
+ G.add_edges_from(new_edges)
+ return G
+
+def _relabel_copy(G, mapping):
+ H=G.__class__()
+ H.name="(%s)" % G.name
+ if G.is_multigraph():
+ H.add_edges_from( (mapping.get(n1,n1),mapping.get(n2,n2),k,d.copy())
+ for (n1,n2,k,d) in G.edges_iter(keys=True,data=True))
+ else:
+ H.add_edges_from( (mapping.get(n1,n1),mapping.get(n2,n2),d.copy())
+ for (n1,n2,d) in G.edges_iter(data=True))
+
+ H.add_nodes_from(mapping.get(n,n) for n in G)
+ H.node.update(dict((mapping.get(n,n),d.copy()) for n,d in G.node.items()))
+ H.graph.update(G.graph.copy())
+
+ return H
+
+
+def convert_node_labels_to_integers(G, first_label=0, ordering="default",
+ label_attribute=None):
+ """Return a copy of the graph G with the nodes relabeled with integers.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph
+
+ first_label : int, optional (default=0)
+ An integer specifying the offset in numbering nodes.
+ The n new integer labels are numbered first_label, ..., n-1+first_label.
+
+ ordering : string
+ "default" : inherit node ordering from G.nodes()
+ "sorted" : inherit node ordering from sorted(G.nodes())
+ "increasing degree" : nodes are sorted by increasing degree
+ "decreasing degree" : nodes are sorted by decreasing degree
+
+ label_attribute : string, optional (default=None)
+ Name of node attribute to store old label. If None no attribute
+ is created.
+
+ Notes
+ -----
+ Node and edge attribute data are copied to the new (relabeled) graph.
+
+ See Also
+ --------
+ relabel_nodes
+ """
+ N = G.number_of_nodes()+first_label
+ if ordering == "default":
+ mapping = dict(zip(G.nodes(),range(first_label,N)))
+ elif ordering == "sorted":
+ nlist = G.nodes()
+ nlist.sort()
+ mapping=dict(zip(nlist,range(first_label,N)))
+ elif ordering == "increasing degree":
+ dv_pairs=[(d,n) for (n,d) in G.degree_iter()]
+ dv_pairs.sort() # in-place sort from lowest to highest degree
+ mapping = dict(zip([n for d,n in dv_pairs],range(first_label,N)))
+ elif ordering == "decreasing degree":
+ dv_pairs = [(d,n) for (n,d) in G.degree_iter()]
+ dv_pairs.sort() # in-place sort from lowest to highest degree
+ dv_pairs.reverse()
+ mapping = dict(zip([n for d,n in dv_pairs],range(first_label,N)))
+ else:
+ raise nx.NetworkXError('Unknown node ordering: %s'%ordering)
+ H = relabel_nodes(G,mapping)
+ H.name="("+G.name+")_with_int_labels"
+ # create node attribute with the old label
+ if label_attribute is not None:
+ nx.set_node_attributes(H, label_attribute,
+ dict((v,k) for k,v in mapping.items()))
+ return H
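+
+# A small usage sketch (nx 1.8 API):
+#   G = nx.path_graph(3)                      # nodes 0, 1, 2
+#   H = convert_node_labels_to_integers(G, first_label=10)
+#   sorted(H.nodes())                         # -> [10, 11, 12]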
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/release.py b/lib/python2.7/site-packages/setoolsgui/networkx/release.py
new file mode 100644
index 0000000..285db5f
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/release.py
@@ -0,0 +1,254 @@
+"""Release data for NetworkX.
+
+When NetworkX is imported a number of steps are followed to determine
+the version information.
+
+ 1) If the release is not a development release (dev=False), then version
+ information is read from version.py, a file containing statically
+ defined version information. This file should exist on every
+ downloadable release of NetworkX since setup.py creates it during
+ packaging/installation. However, version.py might not exist if one
+     is running NetworkX from the mercurial repository. If version.py
+     does not exist, no vcs information will be available.
+
+ 2) If the release is a development release, then version information
+ is read dynamically, when possible. If no dynamic information can be
+     read, an attempt is made to read the information from version.py.
+     If version.py does not exist, no vcs information will be available.
+
+Clarification:
+ version.py is created only by setup.py
+
+When setup.py creates version.py, it does so before packaging/installation.
+So the created file is included in the source distribution. When a user
+downloads a tar.gz file and extracts the files, the files will not be in a
+live version control repository. So when the user runs setup.py to install
+NetworkX, we must make sure write_versionfile() does not overwrite the
+revision information contained in the version.py that was included in the
+tar.gz file. This is why write_versionfile() includes an early escape.
+
+"""
+
+# Copyright (C) 2004-2011 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+
+from __future__ import absolute_import
+
+import os
+import sys
+import time
+import datetime
+import subprocess
+
+basedir = os.path.abspath(os.path.split(__file__)[0])
+
+def write_versionfile():
+ """Creates a static file containing version information."""
+ versionfile = os.path.join(basedir, 'version.py')
+
+ text = '''"""
+Version information for NetworkX, created during installation.
+
+Do not add this file to the repository.
+
+"""
+
+import datetime
+
+version = %(version)r
+date = %(date)r
+
+# Was NetworkX built from a development version? If so, remember that the major
+# and minor versions reference the "target" (rather than "current") release.
+dev = %(dev)r
+
+# Format: (name, major, minor, revision)
+version_info = %(version_info)r
+
+# Format: a 'datetime.datetime' instance
+date_info = %(date_info)r
+
+# Format: (vcs, vcs_tuple)
+vcs_info = %(vcs_info)r
+
+'''
+
+ # Try to update all information
+ date, date_info, version, version_info, vcs_info = get_info(dynamic=True)
+
+ def writefile():
+ fh = open(versionfile, 'w')
+ subs = {
+ 'dev' : dev,
+ 'version': version,
+ 'version_info': version_info,
+ 'date': date,
+ 'date_info': date_info,
+ 'vcs_info': vcs_info
+ }
+ fh.write(text % subs)
+ fh.close()
+
+ if vcs_info[0] == 'mercurial':
+ # Then, we want to update version.py.
+ writefile()
+ else:
+ if os.path.isfile(versionfile):
+ # This is *good*, and the most likely place users will be when
+ # running setup.py. We do not want to overwrite version.py.
+ # Grab the version so that setup can use it.
+ sys.path.insert(0, basedir)
+ from version import version
+ del sys.path[0]
+ else:
+ # This is *bad*. It means the user might have a tarball that
+ # does not include version.py. Let this error raise so we can
+ # fix the tarball.
+ ##raise Exception('version.py not found!')
+
+            # We no longer require that prepared tarballs include a version.py,
+            # so we use the possibly truncated value from get_info()
+            # and then write a new file.
+ writefile()
+
+ return version
+
+def get_revision():
+ """Returns revision and vcs information, dynamically obtained."""
+ vcs, revision, tag = None, None, None
+
+ hgdir = os.path.join(basedir, '..', '.hg')
+ gitdir = os.path.join(basedir, '..', '.git')
+
+ if os.path.isdir(hgdir):
+ vcs = 'mercurial'
+ try:
+ p = subprocess.Popen(['hg', 'id'],
+ cwd=basedir,
+ stdout=subprocess.PIPE)
+ except OSError:
+ # Could not run hg, even though this is a mercurial repository.
+ pass
+ else:
+ stdout = p.communicate()[0]
+ # Force strings instead of unicode.
+ x = list(map(str, stdout.decode().strip().split()))
+
+ if len(x) == 0:
+ # Somehow stdout was empty. This can happen, for example,
+ # if you're running in a terminal which has redirected stdout.
+ # In this case, we do not use any revision/tag info.
+ pass
+ elif len(x) == 1:
+ # We don't have 'tip' or anything similar...so no tag.
+ revision = str(x[0])
+ else:
+ revision = str(x[0])
+ tag = str(x[1])
+
+ elif os.path.isdir(gitdir):
+ vcs = 'git'
+ # For now, we are not bothering with revision and tag.
+
+ vcs_info = (vcs, (revision, tag))
+
+ return revision, vcs_info
+
+def get_info(dynamic=True):
+ ## Date information
+ date_info = datetime.datetime.now()
+ date = time.asctime(date_info.timetuple())
+
+ revision, version, version_info, vcs_info = None, None, None, None
+
+ import_failed = False
+ dynamic_failed = False
+
+ if dynamic:
+ revision, vcs_info = get_revision()
+ if revision is None:
+ dynamic_failed = True
+
+ if dynamic_failed or not dynamic:
+ # This is where most final releases of NetworkX will be.
+ # All info should come from version.py. If it does not exist, then
+ # no vcs information will be provided.
+ sys.path.insert(0, basedir)
+ try:
+ from version import date, date_info, version, version_info, vcs_info
+ except ImportError:
+ import_failed = True
+ vcs_info = (None, (None, None))
+ else:
+ revision = vcs_info[1][0]
+ del sys.path[0]
+
+ if import_failed or (dynamic and not dynamic_failed):
+ # We are here if:
+ # we failed to determine static versioning info, or
+ # we successfully obtained dynamic revision info
+ version = ''.join([str(major), '.', str(minor)])
+ if dev:
+ version += '.dev_' + date_info.strftime("%Y%m%d%H%M%S")
+ version_info = (name, major, minor, revision)
+
+ return date, date_info, version, version_info, vcs_info
+
+## Version information
+name = 'networkx'
+major = "1"
+minor = "8.1"
+
+
+## Declare current release as a development release.
+## Change to False before tagging a release; then change back.
+dev = False
+
+
+description = "Python package for creating and manipulating graphs and networks"
+
+long_description = \
+"""
+NetworkX is a Python package for the creation, manipulation, and
+study of the structure, dynamics, and functions of complex networks.
+
+"""
+license = 'BSD'
+authors = {'Hagberg' : ('Aric Hagberg','hagberg@lanl.gov'),
+ 'Schult' : ('Dan Schult','dschult@colgate.edu'),
+ 'Swart' : ('Pieter Swart','swart@lanl.gov')
+ }
+maintainer = "NetworkX Developers"
+maintainer_email = "networkx-discuss@googlegroups.com"
+url = 'http://networkx.lanl.gov/'
+download_url="http://networkx.lanl.gov/download/networkx"
+platforms = ['Linux','Mac OSX','Windows','Unix']
+keywords = ['Networks', 'Graph Theory', 'Mathematics', 'network', 'graph', 'discrete mathematics', 'math']
+classifiers = [
+ 'Development Status :: 4 - Beta',
+ 'Intended Audience :: Developers',
+ 'Intended Audience :: Science/Research',
+ 'License :: OSI Approved :: BSD License',
+ 'Operating System :: OS Independent',
+ 'Programming Language :: Python :: 2',
+ 'Programming Language :: Python :: 2.6',
+ 'Programming Language :: Python :: 2.7',
+ 'Programming Language :: Python :: 3',
+ 'Programming Language :: Python :: 3.1',
+ 'Programming Language :: Python :: 3.2',
+ 'Topic :: Software Development :: Libraries :: Python Modules',
+ 'Topic :: Scientific/Engineering :: Bio-Informatics',
+ 'Topic :: Scientific/Engineering :: Information Analysis',
+ 'Topic :: Scientific/Engineering :: Mathematics',
+ 'Topic :: Scientific/Engineering :: Physics']
+
+date, date_info, version, version_info, vcs_info = get_info()
+
+if __name__ == '__main__':
+ # Write versionfile for nightly snapshots.
+ write_versionfile()
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/testing/__init__.py b/lib/python2.7/site-packages/setoolsgui/networkx/testing/__init__.py
new file mode 100644
index 0000000..db57076
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/testing/__init__.py
@@ -0,0 +1 @@
+from networkx.testing.utils import *
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/testing/tests/test_utils.py b/lib/python2.7/site-packages/setoolsgui/networkx/testing/tests/test_utils.py
new file mode 100644
index 0000000..9c57649
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/testing/tests/test_utils.py
@@ -0,0 +1,108 @@
+from nose.tools import *
+import networkx as nx
+from networkx.testing import *
+
+# thanks to numpy for this GenericTest class (numpy/testing/test_utils.py)
+class _GenericTest(object):
+ def _test_equal(self, a, b):
+ self._assert_func(a, b)
+
+    def _test_not_equal(self, a, b):
+        try:
+            self._assert_func(a, b)
+        except AssertionError:
+            pass
+        else:
+            raise AssertionError("a and b compared equal, but were expected to differ")
+
+
+class TestNodesEqual(_GenericTest):
+ def setUp(self):
+ self._assert_func = assert_nodes_equal
+
+ def test_nodes_equal(self):
+ a = [1,2,5,4]
+ b = [4,5,1,2]
+ self._test_equal(a,b)
+
+ def test_nodes_not_equal(self):
+ a = [1,2,5,4]
+ b = [4,5,1,3]
+ self._test_not_equal(a,b)
+
+ def test_nodes_with_data_equal(self):
+ G = nx.Graph()
+ G.add_nodes_from([1,2,3],color='red')
+ H = nx.Graph()
+ H.add_nodes_from([1,2,3],color='red')
+ self._test_equal(G.nodes(data=True), H.nodes(data=True))
+
+    def test_nodes_with_data_not_equal(self):
+ G = nx.Graph()
+ G.add_nodes_from([1,2,3],color='red')
+ H = nx.Graph()
+ H.add_nodes_from([1,2,3],color='blue')
+ self._test_not_equal(G.nodes(data=True), H.nodes(data=True))
+
+
+class TestEdgesEqual(_GenericTest):
+ def setUp(self):
+ self._assert_func = assert_edges_equal
+
+ def test_edges_equal(self):
+ a = [(1,2),(5,4)]
+ b = [(4,5),(1,2)]
+ self._test_equal(a,b)
+
+ def test_edges_not_equal(self):
+ a = [(1,2),(5,4)]
+ b = [(4,5),(1,3)]
+ self._test_not_equal(a,b)
+
+ def test_edges_with_data_equal(self):
+ G = nx.MultiGraph()
+ G.add_path([0,1,2],weight=1)
+ H = nx.MultiGraph()
+ H.add_path([0,1,2],weight=1)
+ self._test_equal(G.edges(data=True, keys=True),
+ H.edges(data=True, keys=True))
+
+ def test_edges_with_data_not_equal(self):
+ G = nx.MultiGraph()
+ G.add_path([0,1,2],weight=1)
+ H = nx.MultiGraph()
+ H.add_path([0,1,2],weight=2)
+ self._test_not_equal(G.edges(data=True, keys=True),
+ H.edges(data=True, keys=True))
+
+class TestGraphsEqual(_GenericTest):
+ def setUp(self):
+ self._assert_func = assert_graphs_equal
+
+ def test_graphs_equal(self):
+ G = nx.path_graph(4)
+ H = nx.Graph()
+ H.add_path(range(4))
+ H.name='path_graph(4)'
+ self._test_equal(G,H)
+
+ def test_graphs_not_equal(self):
+ G = nx.path_graph(4)
+ H = nx.Graph()
+ H.add_cycle(range(4))
+ self._test_not_equal(G,H)
+
+ def test_graphs_not_equal2(self):
+ G = nx.path_graph(4)
+ H = nx.Graph()
+ H.add_path(range(3))
+ H.name='path_graph(4)'
+ self._test_not_equal(G,H)
+
+ def test_graphs_not_equal3(self):
+ G = nx.path_graph(4)
+ H = nx.Graph()
+ H.add_path(range(4))
+ H.name='path_graph(foo)'
+ self._test_not_equal(G,H)
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/testing/utils.py b/lib/python2.7/site-packages/setoolsgui/networkx/testing/utils.py
new file mode 100644
index 0000000..5cca7f8
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/testing/utils.py
@@ -0,0 +1,57 @@
+import operator
+from nose.tools import *
+__all__ = ['assert_nodes_equal', 'assert_edges_equal','assert_graphs_equal']
+
+def assert_nodes_equal(nlist1, nlist2):
+ # Assumes lists are either nodes, or (node,datadict) tuples,
+ # and also that nodes are orderable/sortable.
+ try:
+ l = len(nlist1[0])
+ n1 = sorted(nlist1,key=operator.itemgetter(0))
+ n2 = sorted(nlist2,key=operator.itemgetter(0))
+ assert_equal(len(n1),len(n2))
+ for a,b in zip(n1,n2):
+ assert_equal(a,b)
+ except TypeError:
+ assert_equal(set(nlist1),set(nlist2))
+ return
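+
+# Usage sketch: node order is ignored, and (node, datadict) tuples are
+# compared by node key first, e.g.
+#   assert_nodes_equal([1, 2, 3], [3, 1, 2])   # passes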
+
+def assert_edges_equal(elist1, elist2):
+ # Assumes lists with u,v nodes either as
+ # edge tuples (u,v)
+ # edge tuples with data dicts (u,v,d)
+ # edge tuples with keys and data dicts (u,v,k, d)
+ # and also that nodes are orderable/sortable.
+ e1 = sorted(elist1,key=lambda x: sorted(x[0:2]))
+ e2 = sorted(elist2,key=lambda x: sorted(x[0:2]))
+ assert_equal(len(e1),len(e2))
+ if len(e1) == 0:
+ return True
+ if len(e1[0]) == 2:
+ for a,b in zip(e1,e2):
+ assert_equal(set(a[0:2]),set(b[0:2]))
+ elif len(e1[0]) == 3:
+ for a,b in zip(e1,e2):
+ assert_equal(set(a[0:2]),set(b[0:2]))
+ assert_equal(a[2],b[2])
+ elif len(e1[0]) == 4:
+ for a,b in zip(e1,e2):
+ assert_equal(set(a[0:2]),set(b[0:2]))
+ assert_equal(a[2],b[2])
+ assert_equal(a[3],b[3])
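+
+# Usage sketch: edge order and the orientation of each node pair are
+# ignored, e.g.
+#   assert_edges_equal([(1, 2), (2, 3)], [(3, 2), (2, 1)])   # passes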
+
+
+def assert_graphs_equal(graph1, graph2):
+ if graph1.is_multigraph():
+ edges1 = graph1.edges(data=True,keys=True)
+ else:
+ edges1 = graph1.edges(data=True)
+ if graph2.is_multigraph():
+ edges2 = graph2.edges(data=True,keys=True)
+ else:
+ edges2 = graph2.edges(data=True)
+ assert_nodes_equal(graph1.nodes(data=True),
+ graph2.nodes(data=True))
+ assert_edges_equal(edges1, edges2)
+ assert_equal(graph1.graph,graph2.graph)
+ return
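+
+# Usage sketch (nx 1.8 API): graphs built differently compare equal when
+# their nodes, edges, and graph attributes all match, e.g.
+#   G = nx.Graph([(0, 1), (1, 2)])
+#   H = nx.Graph(); H.add_path([0, 1, 2])
+#   assert_graphs_equal(G, H)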
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/tests/__init__.py b/lib/python2.7/site-packages/setoolsgui/networkx/tests/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/tests/__init__.py
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/tests/benchmark.py b/lib/python2.7/site-packages/setoolsgui/networkx/tests/benchmark.py
new file mode 100644
index 0000000..5eb68d0
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/tests/benchmark.py
@@ -0,0 +1,248 @@
+from timeit import Timer
+
+# This is gratefully modeled after the benchmarks found in
+# the numpy svn repository. http://svn.scipy.org/svn/numpy/trunk
+
+class Benchmark(object):
+ """
+ Benchmark a method or simple bit of code using different Graph classes.
+ If the test code is the same for each graph class, then you can set it
+ during instantiation through the argument test_string.
+ The argument test_string can also be a tuple of test code and setup code.
+ The code is entered as a string valid for use with the timeit module.
+
+ Example:
+ >>> b=Benchmark(['Graph','XGraph'])
+ >>> b['Graph']=('G.add_nodes_from(nlist)','nlist=range(100)')
+ >>> b.run()
+ """
+ def __init__(self,graph_classes,title='',test_string=None,runs=3,reps=1000):
+ self.runs = runs
+ self.reps = reps
+ self.title = title
+ self.class_tests = dict((gc,'') for gc in graph_classes)
+ # set up the test string if it is the same for all classes.
+ if test_string is not None:
+ if isinstance(test_string,tuple):
+ self['all']=test_string
+ else:
+ self['all']=(test_string,'')
+
+ def __setitem__(self,graph_class,some_strs):
+ """
+ Set a simple bit of code and setup string for the test.
+ Use this for cases where the code differs from one class to another.
+ """
+ test_str, setup_str = some_strs
+ if graph_class == 'all':
+ graph_class = self.class_tests.keys()
+ elif not isinstance(graph_class,list):
+ graph_class = [graph_class]
+
+ for GC in graph_class:
+ setup_string='import networkx as NX\nG=NX.%s.%s()\n'%\
+ (GC.lower(),GC) + setup_str
+ self.class_tests[GC] = Timer(test_str, setup_string)
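+            # e.g. for GC == 'Graph' the generated setup string begins with:
+            #   import networkx as NX
+            #   G=NX.graph.Graph()
+            # followed by any caller-supplied setup code.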
+
+
+ def run(self):
+ """Run the benchmark for each class and print results."""
+ column_len = max(len(G) for G in self.class_tests)
+
+ print('='*72)
+ if self.title:
+ print("%s: %s runs, %s reps"% (self.title,self.runs,self.reps))
+ print('='*72)
+
+ times=[]
+ for GC,timer in self.class_tests.items():
+ name = GC.ljust(column_len)
+ try:
+ t=sum(timer.repeat(self.runs,self.reps))/self.runs
+# print "%s: %s" % (name, timer.repeat(self.runs,self.reps))
+ times.append((t,name))
+ except Exception as e:
+ print("%s: Failed to benchmark (%s)." % (name,e))
+
+
+ times.sort()
+ tmin=times[0][0]
+ for t,name in times:
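+            # report each time as a percentage of the fastest class
+            # (fastest = 100), followed by the raw time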
+ print("%s: %5.2f %s" % (name, t/tmin*100.,t))
+ print('-'*72)
+ print()
+
+if __name__ == "__main__":
+ # set up for all routines:
+ classes=['Graph','MultiGraph','DiGraph','MultiDiGraph']
+ all_tests=['add_nodes','add_edges','remove_nodes','remove_edges',\
+ 'neighbors','edges','degree','dijkstra','shortest path',\
+ 'subgraph','edgedata_subgraph','laplacian']
+ # Choose which tests to run
+ tests=all_tests
+ tests=['subgraph','edgedata_subgraph']
+ #tests=all_tests[-1:]
+ N=100
+
+ if 'add_nodes' in tests:
+ title='Benchmark: Adding nodes'
+ test_string=('G.add_nodes_from(nlist)','nlist=range(%i)'%N)
+ b=Benchmark(classes,title,test_string,runs=3,reps=1000)
+ b.run()
+
+ if 'add_edges' in tests:
+ title='Benchmark: Adding edges'
+ setup='elist=[(i,i+3) for i in range(%s-3)]\nG.add_nodes_from(range(%i))'%(N,N)
+ test_string=('G.add_edges_from(elist)',setup)
+ b=Benchmark(classes,title,test_string,runs=3,reps=1000)
+ b.run()
+
+ if 'remove_nodes' in tests:
+ title='Benchmark: Adding and Deleting nodes'
+ setup='nlist=range(%i)'%N
+ test_string=('G.add_nodes_from(nlist)\nG.remove_nodes_from(nlist)',setup)
+ b=Benchmark(classes,title,test_string,runs=3,reps=1000)
+ b.run()
+
+ if 'remove_edges' in tests:
+ title='Benchmark: Adding and Deleting edges'
+ setup='elist=[(i,i+3) for i in range(%s-3)]'%N
+ test_string=('G.add_edges_from(elist)\nG.remove_edges_from(elist)',setup)
+ b=Benchmark(classes,title,test_string,runs=3,reps=1000)
+ b.run()
+
+ if 'neighbors' in tests:
+ N=500
+ p=0.3
+ title='Benchmark: reporting neighbors'
+ b=Benchmark(classes,title,runs=3,reps=1)
+ test_string='for n in G:\n for nbr in G.neighbors(n):\n pass'
+ all_setup='H=NX.binomial_graph(%s,%s)\nfor (u,v) in H.edges_iter():\n '%(N,p)
+ setup=all_setup+'G.add_edge(u,v)\n'
+ if 'Graph' in classes: b['Graph']=(test_string,setup)
+ setup=all_setup+'G.add_edges_from([(u,v),(v,u)])'
+ if 'DiGraph' in classes: b['DiGraph']=(test_string,setup)
+ setup=all_setup+'G.add_edge(u,v)'
+ if 'MultiGraph' in classes: b['MultiGraph']=(test_string,setup)
+ setup=all_setup+'G.add_edges_from([(u,v),(v,u)])'
+ if 'MultiDiGraph' in classes: b['MultiDiGraph']=(test_string,setup)
+ b.run()
+
+ if 'edges' in tests:
+ N=500
+ p=0.3
+ title='Benchmark: reporting edges'
+ b=Benchmark(classes,title,runs=3,reps=1)
+ test_string='for n in G:\n for e in G.edges(n):\n pass'
+ all_setup='H=NX.binomial_graph(%s,%s)\nfor (u,v) in H.edges_iter():\n '%(N,p)
+ setup=all_setup+'G.add_edge(u,v)\n'
+ if 'Graph' in classes: b['Graph']=(test_string,setup)
+ setup=all_setup+'G.add_edges_from([(u,v),(v,u)])'
+ if 'DiGraph' in classes: b['DiGraph']=(test_string,setup)
+ setup=all_setup+'G.add_edge(u,v)'
+ if 'MultiGraph' in classes: b['MultiGraph']=(test_string,setup)
+ setup=all_setup+'G.add_edges_from([(u,v),(v,u)])'
+ if 'MultiDiGraph' in classes: b['MultiDiGraph']=(test_string,setup)
+ b.run()
+
+ if 'degree' in tests:
+ N=500
+ p=0.3
+ title='Benchmark: reporting degree'
+ b=Benchmark(classes,title,runs=3,reps=1)
+ test_string='for d in G.degree():\n pass'
+ all_setup='H=NX.binomial_graph(%s,%s)\nfor (u,v) in H.edges_iter():\n '%(N,p)
+ setup=all_setup+'G.add_edge(u,v)\n'
+ if 'Graph' in classes: b['Graph']=(test_string,setup)
+ setup=all_setup+'G.add_edges_from([(u,v),(v,u)])'
+ if 'DiGraph' in classes: b['DiGraph']=(test_string,setup)
+ setup=all_setup+'G.add_edge(u,v)'
+ if 'MultiGraph' in classes: b['MultiGraph']=(test_string,setup)
+ setup=all_setup+'G.add_edges_from([(u,v),(v,u)])'
+ if 'MultiDiGraph' in classes: b['MultiDiGraph']=(test_string,setup)
+ b.run()
+
+ if 'dijkstra' in tests:
+ N=500
+ p=0.3
+ title='dijkstra single source shortest path'
+ b=Benchmark(classes,title,runs=3,reps=1)
+ test_string='p=NX.single_source_dijkstra(G,i)'
+ all_setup='i=6\nH=NX.binomial_graph(%s,%s)\nfor (u,v) in H.edges_iter():\n '%(N,p)
+ setup=all_setup+'G.add_edge(u,v)'
+ if 'Graph' in classes: b['Graph']=(test_string,setup)
+ setup=all_setup+'G.add_edges_from([(u,v),(v,u)])'
+ if 'DiGraph' in classes: b['DiGraph']=(test_string,setup)
+ setup=all_setup+'G.add_edge(u,v)'
+ if 'MultiGraph' in classes: b['MultiGraph']=(test_string,setup)
+ setup=all_setup+'G.add_edges_from([(u,v),(v,u)])'
+ if 'MultiDiGraph' in classes: b['MultiDiGraph']=(test_string,setup)
+ b.run()
+
+ if 'shortest path' in tests:
+ N=500
+ p=0.3
+ title='single source shortest path'
+ b=Benchmark(classes,title,runs=3,reps=1)
+ test_string='p=NX.single_source_shortest_path(G,i)'
+ all_setup='i=6\nH=NX.binomial_graph(%s,%s)\nfor (u,v) in H.edges_iter():\n '%(N,p)
+ setup=all_setup+'G.add_edge(u,v)'
+ if 'Graph' in classes: b['Graph']=(test_string,setup)
+ setup=all_setup+'G.add_edges_from([(u,v),(v,u)])'
+ if 'DiGraph' in classes: b['DiGraph']=(test_string,setup)
+ setup=all_setup+'G.add_edge(u,v)'
+ if 'MultiGraph' in classes: b['MultiGraph']=(test_string,setup)
+ setup=all_setup+'G.add_edges_from([(u,v),(v,u)])'
+ if 'MultiDiGraph' in classes: b['MultiDiGraph']=(test_string,setup)
+ b.run()
+
+ if 'subgraph' in tests:
+ N=500
+ p=0.3
+ title='subgraph method'
+ b=Benchmark(classes,title,runs=3,reps=1)
+ test_string='G.subgraph(nlist)'
+ all_setup='nlist=range(100,150)\nH=NX.binomial_graph(%s,%s)\nfor (u,v) in H.edges_iter():\n '%(N,p)
+ setup=all_setup+'G.add_edge(u,v)'
+ if 'Graph' in classes: b['Graph']=(test_string,setup)
+ setup=all_setup+'G.add_edges_from([(u,v),(v,u)])'
+ if 'DiGraph' in classes: b['DiGraph']=(test_string,setup)
+ setup=all_setup+'G.add_edge(u,v)'
+ if 'MultiGraph' in classes: b['MultiGraph']=(test_string,setup)
+ setup=all_setup+'G.add_edges_from([(u,v),(v,u)])'
+ if 'MultiDiGraph' in classes: b['MultiDiGraph']=(test_string,setup)
+ b.run()
+
+ if 'edgedata_subgraph' in tests:
+ N=500
+ p=0.3
+ title='subgraph method with edge data present'
+ b=Benchmark(classes,title,runs=3,reps=1)
+ test_string='G.subgraph(nlist)'
+ all_setup='nlist=range(100,150)\nH=NX.binomial_graph(%s,%s)\nfor (u,v) in H.edges_iter():\n '%(N,p)
+ setup=all_setup+'G.add_edge(u,v,hi=3)'
+ if 'Graph' in classes: b['Graph']=(test_string,setup)
+ setup=all_setup+'G.add_edges_from([(u,v),(v,u)],hi=2)'
+ if 'DiGraph' in classes: b['DiGraph']=(test_string,setup)
+ setup=all_setup+'G.add_edge(u,v,hi=1)'
+ if 'MultiGraph' in classes: b['MultiGraph']=(test_string,setup)
+ setup=all_setup+'G.add_edges_from([(u,v),(v,u)],hi=2)'
+ if 'MultiDiGraph' in classes: b['MultiDiGraph']=(test_string,setup)
+ b.run()
+
+ if 'laplacian' in tests:
+ N=500
+ p=0.3
+ title='creation of laplacian matrix'
+ b=Benchmark(classes,title,runs=3,reps=1)
+ test_string='NX.laplacian(G)'
+ all_setup='H=NX.binomial_graph(%s,%s)\nfor (u,v) in H.edges_iter():\n '%(N,p)
+ setup=all_setup+'G.add_edge(u,v)'
+ if 'Graph' in classes: b['Graph']=(test_string,setup)
+ setup=all_setup+'G.add_edges_from([(u,v),(v,u)])'
+ if 'DiGraph' in classes: b['DiGraph']=(test_string,setup)
+ setup=all_setup+'G.add_edge(u,v)'
+ if 'MultiGraph' in classes: b['MultiGraph']=(test_string,setup)
+ setup=all_setup+'G.add_edges_from([(u,v),(v,u)])'
+ if 'MultiDiGraph' in classes: b['MultiDiGraph']=(test_string,setup)
+ b.run()
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/tests/test.py b/lib/python2.7/site-packages/setoolsgui/networkx/tests/test.py
new file mode 100755
index 0000000..e776d32
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/tests/test.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+import sys
+from os import path,getcwd
+
+def run(verbosity=1,doctest=False,numpy=True):
+ """Run NetworkX tests.
+
+ Parameters
+ ----------
+ verbosity: integer, optional
+ Level of detail in test reports. Higher numbers provide more detail.
+
+ doctest: bool, optional
+ True to run doctests in code modules
+
+ numpy: bool, optional
+ True to test modules dependent on numpy
+ """
+ try:
+ import nose
+ except ImportError:
+        raise ImportError(
+            "The nose package is needed to run the NetworkX tests.")
+
+ sys.stderr.write("Running NetworkX tests:")
+ nx_install_dir=path.join(path.dirname(__file__), path.pardir)
+ # stop if running from source directory
+ if getcwd() == path.abspath(path.join(nx_install_dir,path.pardir)):
+ raise RuntimeError("Can't run tests from source directory.\n"
+ "Run 'nosetests' from the command line.")
+
+ argv=[' ','--verbosity=%d'%verbosity,
+ '-w',nx_install_dir,
+ '-exe']
+ if doctest:
+ argv.extend(['--with-doctest','--doctest-extension=txt'])
+ if not numpy:
+ argv.extend(['-A not numpy'])
+
+
+ nose.run(argv=argv)
+
+if __name__=="__main__":
+ run()
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/tests/test_convert.py b/lib/python2.7/site-packages/setoolsgui/networkx/tests/test_convert.py
new file mode 100644
index 0000000..38a66e2
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/tests/test_convert.py
@@ -0,0 +1,224 @@
+#!/usr/bin/env python
+
+"""Convert
+=======
+"""
+
+from nose.tools import *
+import networkx as nx
+from networkx import *
+from networkx.convert import *
+from networkx.algorithms.operators import *
+from networkx.generators.classic import barbell_graph,cycle_graph
+
+class TestConvert():
+ def edgelists_equal(self,e1,e2):
+ return sorted(sorted(e) for e in e1)==sorted(sorted(e) for e in e2)
+
+
+
+ def test_simple_graphs(self):
+ for dest, source in [(to_dict_of_dicts, from_dict_of_dicts),
+ (to_dict_of_lists, from_dict_of_lists)]:
+ G=barbell_graph(10,3)
+ dod=dest(G)
+
+ # Dict of [dicts, lists]
+ GG=source(dod)
+ assert_equal(sorted(G.nodes()), sorted(GG.nodes()))
+ assert_equal(sorted(G.edges()), sorted(GG.edges()))
+ GW=to_networkx_graph(dod)
+ assert_equal(sorted(G.nodes()), sorted(GW.nodes()))
+ assert_equal(sorted(G.edges()), sorted(GW.edges()))
+ GI=Graph(dod)
+ assert_equal(sorted(G.nodes()), sorted(GI.nodes()))
+ assert_equal(sorted(G.edges()), sorted(GI.edges()))
+
+ # With nodelist keyword
+ P4=path_graph(4)
+ P3=path_graph(3)
+ dod=dest(P4,nodelist=[0,1,2])
+ Gdod=Graph(dod)
+ assert_equal(sorted(Gdod.nodes()), sorted(P3.nodes()))
+ assert_equal(sorted(Gdod.edges()), sorted(P3.edges()))
+
+ def test_digraphs(self):
+ for dest, source in [(to_dict_of_dicts, from_dict_of_dicts),
+ (to_dict_of_lists, from_dict_of_lists)]:
+ G=cycle_graph(10)
+
+ # Dict of [dicts, lists]
+ dod=dest(G)
+ GG=source(dod)
+ assert_equal(sorted(G.nodes()), sorted(GG.nodes()))
+ assert_equal(sorted(G.edges()), sorted(GG.edges()))
+ GW=to_networkx_graph(dod)
+ assert_equal(sorted(G.nodes()), sorted(GW.nodes()))
+ assert_equal(sorted(G.edges()), sorted(GW.edges()))
+ GI=Graph(dod)
+ assert_equal(sorted(G.nodes()), sorted(GI.nodes()))
+ assert_equal(sorted(G.edges()), sorted(GI.edges()))
+
+ G=cycle_graph(10,create_using=DiGraph())
+ dod=dest(G)
+ GG=source(dod, create_using=DiGraph())
+ assert_equal(sorted(G.nodes()), sorted(GG.nodes()))
+ assert_equal(sorted(G.edges()), sorted(GG.edges()))
+ GW=to_networkx_graph(dod, create_using=DiGraph())
+ assert_equal(sorted(G.nodes()), sorted(GW.nodes()))
+ assert_equal(sorted(G.edges()), sorted(GW.edges()))
+ GI=DiGraph(dod)
+ assert_equal(sorted(G.nodes()), sorted(GI.nodes()))
+ assert_equal(sorted(G.edges()), sorted(GI.edges()))
+
+ def test_graph(self):
+ G=cycle_graph(10)
+ e=G.edges()
+ source=[u for u,v in e]
+ dest=[v for u,v in e]
+ ex=zip(source,dest,source)
+ G=Graph()
+ G.add_weighted_edges_from(ex)
+
+ # Dict of dicts
+ dod=to_dict_of_dicts(G)
+ GG=from_dict_of_dicts(dod,create_using=Graph())
+ assert_equal(sorted(G.nodes()), sorted(GG.nodes()))
+ assert_equal(sorted(G.edges()), sorted(GG.edges()))
+ GW=to_networkx_graph(dod,create_using=Graph())
+ assert_equal(sorted(G.nodes()), sorted(GW.nodes()))
+ assert_equal(sorted(G.edges()), sorted(GW.edges()))
+ GI=Graph(dod)
+ assert_equal(sorted(G.nodes()), sorted(GI.nodes()))
+ assert_equal(sorted(G.edges()), sorted(GI.edges()))
+
+ # Dict of lists
+ dol=to_dict_of_lists(G)
+ GG=from_dict_of_lists(dol,create_using=Graph())
+        # a dict of lists discards edge data, so compare against empty data dicts
+ enone=[(u,v,{}) for (u,v,d) in G.edges(data=True)]
+ assert_equal(sorted(G.nodes()), sorted(GG.nodes()))
+ assert_equal(enone, sorted(GG.edges(data=True)))
+ GW=to_networkx_graph(dol,create_using=Graph())
+ assert_equal(sorted(G.nodes()), sorted(GW.nodes()))
+ assert_equal(enone, sorted(GW.edges(data=True)))
+ GI=Graph(dol)
+ assert_equal(sorted(G.nodes()), sorted(GI.nodes()))
+ assert_equal(enone, sorted(GI.edges(data=True)))
+
+
+ def test_with_multiedges_self_loops(self):
+ G=cycle_graph(10)
+ e=G.edges()
+ source,dest = list(zip(*e))
+ ex=list(zip(source,dest,source))
+ XG=Graph()
+ XG.add_weighted_edges_from(ex)
+ XGM=MultiGraph()
+ XGM.add_weighted_edges_from(ex)
+ XGM.add_edge(0,1,weight=2) # multiedge
+ XGS=Graph()
+ XGS.add_weighted_edges_from(ex)
+ XGS.add_edge(0,0,weight=100) # self loop
+
+ # Dict of dicts
+ # with self loops, OK
+ dod=to_dict_of_dicts(XGS)
+ GG=from_dict_of_dicts(dod,create_using=Graph())
+ assert_equal(sorted(XGS.nodes()), sorted(GG.nodes()))
+ assert_equal(sorted(XGS.edges()), sorted(GG.edges()))
+ GW=to_networkx_graph(dod,create_using=Graph())
+ assert_equal(sorted(XGS.nodes()), sorted(GW.nodes()))
+ assert_equal(sorted(XGS.edges()), sorted(GW.edges()))
+ GI=Graph(dod)
+ assert_equal(sorted(XGS.nodes()), sorted(GI.nodes()))
+ assert_equal(sorted(XGS.edges()), sorted(GI.edges()))
+
+ # Dict of lists
+ # with self loops, OK
+ dol=to_dict_of_lists(XGS)
+ GG=from_dict_of_lists(dol,create_using=Graph())
+        # a dict of lists discards edge data, so compare against empty data dicts
+ enone=[(u,v,{}) for (u,v,d) in XGS.edges(data=True)]
+ assert_equal(sorted(XGS.nodes()), sorted(GG.nodes()))
+ assert_equal(enone, sorted(GG.edges(data=True)))
+ GW=to_networkx_graph(dol,create_using=Graph())
+ assert_equal(sorted(XGS.nodes()), sorted(GW.nodes()))
+ assert_equal(enone, sorted(GW.edges(data=True)))
+ GI=Graph(dol)
+ assert_equal(sorted(XGS.nodes()), sorted(GI.nodes()))
+ assert_equal(enone, sorted(GI.edges(data=True)))
+
+ # Dict of dicts
+ # with multiedges, OK
+ dod=to_dict_of_dicts(XGM)
+ GG=from_dict_of_dicts(dod,create_using=MultiGraph(),
+ multigraph_input=True)
+ assert_equal(sorted(XGM.nodes()), sorted(GG.nodes()))
+ assert_equal(sorted(XGM.edges()), sorted(GG.edges()))
+ GW=to_networkx_graph(dod,create_using=MultiGraph(),multigraph_input=True)
+ assert_equal(sorted(XGM.nodes()), sorted(GW.nodes()))
+ assert_equal(sorted(XGM.edges()), sorted(GW.edges()))
+ GI=MultiGraph(dod) # convert can't tell whether to duplicate edges!
+ assert_equal(sorted(XGM.nodes()), sorted(GI.nodes()))
+ #assert_not_equal(sorted(XGM.edges()), sorted(GI.edges()))
+ assert_false(sorted(XGM.edges()) == sorted(GI.edges()))
+ GE=from_dict_of_dicts(dod,create_using=MultiGraph(),
+ multigraph_input=False)
+ assert_equal(sorted(XGM.nodes()), sorted(GE.nodes()))
+ assert_not_equal(sorted(XGM.edges()), sorted(GE.edges()))
+ GI=MultiGraph(XGM)
+ assert_equal(sorted(XGM.nodes()), sorted(GI.nodes()))
+ assert_equal(sorted(XGM.edges()), sorted(GI.edges()))
+ GM=MultiGraph(G)
+ assert_equal(sorted(GM.nodes()), sorted(G.nodes()))
+ assert_equal(sorted(GM.edges()), sorted(G.edges()))
+
+ # Dict of lists
+        # with multiedges: OK, but better to write as a DiGraph; otherwise
+        # you'll get double edges
+ dol=to_dict_of_lists(G)
+ GG=from_dict_of_lists(dol,create_using=MultiGraph())
+ assert_equal(sorted(G.nodes()), sorted(GG.nodes()))
+ assert_equal(sorted(G.edges()), sorted(GG.edges()))
+ GW=to_networkx_graph(dol,create_using=MultiGraph())
+ assert_equal(sorted(G.nodes()), sorted(GW.nodes()))
+ assert_equal(sorted(G.edges()), sorted(GW.edges()))
+ GI=MultiGraph(dol)
+ assert_equal(sorted(G.nodes()), sorted(GI.nodes()))
+ assert_equal(sorted(G.edges()), sorted(GI.edges()))
+
+ def test_edgelists(self):
+ P=path_graph(4)
+ e=[(0,1),(1,2),(2,3)]
+ G=Graph(e)
+ assert_equal(sorted(G.nodes()), sorted(P.nodes()))
+ assert_equal(sorted(G.edges()), sorted(P.edges()))
+ assert_equal(sorted(G.edges(data=True)), sorted(P.edges(data=True)))
+
+ e=[(0,1,{}),(1,2,{}),(2,3,{})]
+ G=Graph(e)
+ assert_equal(sorted(G.nodes()), sorted(P.nodes()))
+ assert_equal(sorted(G.edges()), sorted(P.edges()))
+ assert_equal(sorted(G.edges(data=True)), sorted(P.edges(data=True)))
+
+ e=((n,n+1) for n in range(3))
+ G=Graph(e)
+ assert_equal(sorted(G.nodes()), sorted(P.nodes()))
+ assert_equal(sorted(G.edges()), sorted(P.edges()))
+ assert_equal(sorted(G.edges(data=True)), sorted(P.edges(data=True)))
+
+ def test_directed_to_undirected(self):
+ edges1 = [(0, 1), (1, 2), (2, 0)]
+ edges2 = [(0, 1), (1, 2), (0, 2)]
+ assert_true(self.edgelists_equal(nx.Graph(nx.DiGraph(edges1)).edges(),edges1))
+ assert_true(self.edgelists_equal(nx.Graph(nx.DiGraph(edges2)).edges(),edges1))
+ assert_true(self.edgelists_equal(nx.MultiGraph(nx.DiGraph(edges1)).edges(),edges1))
+ assert_true(self.edgelists_equal(nx.MultiGraph(nx.DiGraph(edges2)).edges(),edges1))
+
+ assert_true(self.edgelists_equal(nx.MultiGraph(nx.MultiDiGraph(edges1)).edges(),
+ edges1))
+ assert_true(self.edgelists_equal(nx.MultiGraph(nx.MultiDiGraph(edges2)).edges(),
+ edges1))
+
+ assert_true(self.edgelists_equal(nx.Graph(nx.MultiDiGraph(edges1)).edges(),edges1))
+ assert_true(self.edgelists_equal(nx.Graph(nx.MultiDiGraph(edges2)).edges(),edges1))
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/tests/test_convert_numpy.py b/lib/python2.7/site-packages/setoolsgui/networkx/tests/test_convert_numpy.py
new file mode 100644
index 0000000..2a4ef96
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/tests/test_convert_numpy.py
@@ -0,0 +1,172 @@
+from nose import SkipTest
+from nose.tools import assert_raises, assert_true, assert_equal
+
+import networkx as nx
+from networkx.generators.classic import barbell_graph,cycle_graph,path_graph
+
+class TestConvertNumpy(object):
+ numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
+ @classmethod
+ def setupClass(cls):
+ global np
+ global np_assert_equal
+ try:
+ import numpy as np
+ np_assert_equal=np.testing.assert_equal
+ except ImportError:
+ raise SkipTest('NumPy not available.')
+
+ def __init__(self):
+ self.G1 = barbell_graph(10, 3)
+ self.G2 = cycle_graph(10, create_using=nx.DiGraph())
+
+ self.G3 = self.create_weighted(nx.Graph())
+ self.G4 = self.create_weighted(nx.DiGraph())
+
+ def create_weighted(self, G):
+ g = cycle_graph(4)
+ e = g.edges()
+ source = [u for u,v in e]
+ dest = [v for u,v in e]
+ weight = [s+10 for s in source]
+ ex = zip(source, dest, weight)
+ G.add_weighted_edges_from(ex)
+ return G
+
+ def assert_equal(self, G1, G2):
+ assert_true( sorted(G1.nodes())==sorted(G2.nodes()) )
+ assert_true( sorted(G1.edges())==sorted(G2.edges()) )
+
+ def identity_conversion(self, G, A, create_using):
+ GG = nx.from_numpy_matrix(A, create_using=create_using)
+ self.assert_equal(G, GG)
+ GW = nx.to_networkx_graph(A, create_using=create_using)
+ self.assert_equal(G, GW)
+ GI = create_using.__class__(A)
+ self.assert_equal(G, GI)
+
+ def test_shape(self):
+ "Conversion from non-square array."
+ A=np.array([[1,2,3],[4,5,6]])
+ assert_raises(nx.NetworkXError, nx.from_numpy_matrix, A)
+
+ def test_identity_graph_matrix(self):
+ "Conversion from graph to matrix to graph."
+ A = nx.to_numpy_matrix(self.G1)
+ self.identity_conversion(self.G1, A, nx.Graph())
+
+ def test_identity_graph_array(self):
+ "Conversion from graph to array to graph."
+ A = nx.to_numpy_matrix(self.G1)
+ A = np.asarray(A)
+ self.identity_conversion(self.G1, A, nx.Graph())
+
+ def test_identity_digraph_matrix(self):
+ """Conversion from digraph to matrix to digraph."""
+ A = nx.to_numpy_matrix(self.G2)
+ self.identity_conversion(self.G2, A, nx.DiGraph())
+
+ def test_identity_digraph_array(self):
+ """Conversion from digraph to array to digraph."""
+ A = nx.to_numpy_matrix(self.G2)
+ A = np.asarray(A)
+ self.identity_conversion(self.G2, A, nx.DiGraph())
+
+ def test_identity_weighted_graph_matrix(self):
+ """Conversion from weighted graph to matrix to weighted graph."""
+ A = nx.to_numpy_matrix(self.G3)
+ self.identity_conversion(self.G3, A, nx.Graph())
+
+ def test_identity_weighted_graph_array(self):
+ """Conversion from weighted graph to array to weighted graph."""
+ A = nx.to_numpy_matrix(self.G3)
+ A = np.asarray(A)
+ self.identity_conversion(self.G3, A, nx.Graph())
+
+ def test_identity_weighted_digraph_matrix(self):
+ """Conversion from weighted digraph to matrix to weighted digraph."""
+ A = nx.to_numpy_matrix(self.G4)
+ self.identity_conversion(self.G4, A, nx.DiGraph())
+
+ def test_identity_weighted_digraph_array(self):
+ """Conversion from weighted digraph to array to weighted digraph."""
+ A = nx.to_numpy_matrix(self.G4)
+ A = np.asarray(A)
+ self.identity_conversion(self.G4, A, nx.DiGraph())
+
+ def test_nodelist(self):
+ """Conversion from graph to matrix to graph with nodelist."""
+ P4 = path_graph(4)
+ P3 = path_graph(3)
+ nodelist = P3.nodes()
+ A = nx.to_numpy_matrix(P4, nodelist=nodelist)
+ GA = nx.Graph(A)
+ self.assert_equal(GA, P3)
+
+ # Make nodelist ambiguous by containing duplicates.
+ nodelist += [nodelist[0]]
+ assert_raises(nx.NetworkXError, nx.to_numpy_matrix, P3, nodelist=nodelist)
+
+ def test_weight_keyword(self):
+ WP4 = nx.Graph()
+ WP4.add_edges_from( (n,n+1,dict(weight=0.5,other=0.3)) for n in range(3) )
+ P4 = path_graph(4)
+ A = nx.to_numpy_matrix(P4)
+ np_assert_equal(A, nx.to_numpy_matrix(WP4,weight=None))
+ np_assert_equal(0.5*A, nx.to_numpy_matrix(WP4))
+ np_assert_equal(0.3*A, nx.to_numpy_matrix(WP4,weight='other'))
+
+ def test_from_numpy_matrix_type(self):
+ A=np.matrix([[1]])
+ G=nx.from_numpy_matrix(A)
+ assert_equal(type(G[0][0]['weight']),int)
+
+ A=np.matrix([[1]]).astype(np.float)
+ G=nx.from_numpy_matrix(A)
+ assert_equal(type(G[0][0]['weight']),float)
+
+ A=np.matrix([[1]]).astype(np.str)
+ G=nx.from_numpy_matrix(A)
+ assert_equal(type(G[0][0]['weight']),str)
+
+ A=np.matrix([[1]]).astype(np.bool)
+ G=nx.from_numpy_matrix(A)
+ assert_equal(type(G[0][0]['weight']),bool)
+
+ A=np.matrix([[1]]).astype(np.complex)
+ G=nx.from_numpy_matrix(A)
+ assert_equal(type(G[0][0]['weight']),complex)
+
+ A=np.matrix([[1]]).astype(np.object)
+ assert_raises(TypeError,nx.from_numpy_matrix,A)
+
+ def test_from_numpy_matrix_dtype(self):
+ dt=[('weight',float),('cost',int)]
+ A=np.matrix([[(1.0,2)]],dtype=dt)
+ G=nx.from_numpy_matrix(A)
+ assert_equal(type(G[0][0]['weight']),float)
+ assert_equal(type(G[0][0]['cost']),int)
+ assert_equal(G[0][0]['cost'],2)
+ assert_equal(G[0][0]['weight'],1.0)
+
+ def test_to_numpy_recarray(self):
+ G=nx.Graph()
+ G.add_edge(1,2,weight=7.0,cost=5)
+ A=nx.to_numpy_recarray(G,dtype=[('weight',float),('cost',int)])
+ assert_equal(sorted(A.dtype.names),['cost','weight'])
+ assert_equal(A.weight[0,1],7.0)
+ assert_equal(A.weight[0,0],0.0)
+ assert_equal(A.cost[0,1],5)
+ assert_equal(A.cost[0,0],0)
+
+ def test_numpy_multigraph(self):
+ G=nx.MultiGraph()
+ G.add_edge(1,2,weight=7)
+ G.add_edge(1,2,weight=70)
+ A=nx.to_numpy_matrix(G)
+ assert_equal(A[1,0],77)
+ A=nx.to_numpy_matrix(G,multigraph_weight=min)
+ assert_equal(A[1,0],7)
+ A=nx.to_numpy_matrix(G,multigraph_weight=max)
+ assert_equal(A[1,0],70)
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/tests/test_convert_scipy.py b/lib/python2.7/site-packages/setoolsgui/networkx/tests/test_convert_scipy.py
new file mode 100644
index 0000000..f90dee7
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/tests/test_convert_scipy.py
@@ -0,0 +1,179 @@
+from nose import SkipTest
+from nose.tools import assert_raises, assert_true, assert_equal, raises
+
+import networkx as nx
+from networkx.generators.classic import barbell_graph,cycle_graph,path_graph
+
+class TestConvertScipy(object):
+ @classmethod
+ def setupClass(cls):
+ global np, sp, sparse, np_assert_equal
+ try:
+ import numpy as np
+ import scipy as sp
+ import scipy.sparse as sparse
+ np_assert_equal=np.testing.assert_equal
+ except ImportError:
+ raise SkipTest('SciPy sparse library not available.')
+
+ def __init__(self):
+ self.G1 = barbell_graph(10, 3)
+ self.G2 = cycle_graph(10, create_using=nx.DiGraph())
+
+ self.G3 = self.create_weighted(nx.Graph())
+ self.G4 = self.create_weighted(nx.DiGraph())
+
+ def create_weighted(self, G):
+ g = cycle_graph(4)
+ e = g.edges()
+ source = [u for u,v in e]
+ dest = [v for u,v in e]
+ weight = [s+10 for s in source]
+ ex = zip(source, dest, weight)
+ G.add_weighted_edges_from(ex)
+ return G
+
+ def assert_equal(self, G1, G2):
+ assert_true( sorted(G1.nodes())==sorted(G2.nodes()) )
+ assert_true( sorted(G1.edges())==sorted(G2.edges()) )
+
+ def identity_conversion(self, G, A, create_using):
+ GG = nx.from_scipy_sparse_matrix(A, create_using=create_using)
+ self.assert_equal(G, GG)
+
+ GW = nx.to_networkx_graph(A, create_using=create_using)
+ self.assert_equal(G, GW)
+
+ GI = create_using.__class__(A)
+ self.assert_equal(G, GI)
+
+ ACSR = A.tocsr()
+ GI = create_using.__class__(ACSR)
+ self.assert_equal(G, GI)
+
+ ACOO = A.tocoo()
+ GI = create_using.__class__(ACOO)
+ self.assert_equal(G, GI)
+
+ ACSC = A.tocsc()
+ GI = create_using.__class__(ACSC)
+ self.assert_equal(G, GI)
+
+ AD = A.todense()
+ GI = create_using.__class__(AD)
+ self.assert_equal(G, GI)
+
+ AA = A.toarray()
+ GI = create_using.__class__(AA)
+ self.assert_equal(G, GI)
+
+ def test_shape(self):
+ "Conversion from non-square sparse array."
+ A = sp.sparse.lil_matrix([[1,2,3],[4,5,6]])
+ assert_raises(nx.NetworkXError, nx.from_scipy_sparse_matrix, A)
+
+ def test_identity_graph_matrix(self):
+ "Conversion from graph to sparse matrix to graph."
+ A = nx.to_scipy_sparse_matrix(self.G1)
+ self.identity_conversion(self.G1, A, nx.Graph())
+
+ def test_identity_digraph_matrix(self):
+ "Conversion from digraph to sparse matrix to digraph."
+ A = nx.to_scipy_sparse_matrix(self.G2)
+ self.identity_conversion(self.G2, A, nx.DiGraph())
+
+ def test_identity_weighted_graph_matrix(self):
+ """Conversion from weighted graph to sparse matrix to weighted graph."""
+ A = nx.to_scipy_sparse_matrix(self.G3)
+ self.identity_conversion(self.G3, A, nx.Graph())
+
+ def test_identity_weighted_digraph_matrix(self):
+ """Conversion from weighted digraph to sparse matrix to weighted digraph."""
+ A = nx.to_scipy_sparse_matrix(self.G4)
+ self.identity_conversion(self.G4, A, nx.DiGraph())
+
+ def test_nodelist(self):
+ """Conversion from graph to sparse matrix to graph with nodelist."""
+ P4 = path_graph(4)
+ P3 = path_graph(3)
+ nodelist = P3.nodes()
+ A = nx.to_scipy_sparse_matrix(P4, nodelist=nodelist)
+ GA = nx.Graph(A)
+ self.assert_equal(GA, P3)
+
+ # Make nodelist ambiguous by containing duplicates.
+ nodelist += [nodelist[0]]
+        assert_raises(nx.NetworkXError, nx.to_scipy_sparse_matrix, P3,
+                      nodelist=nodelist)
+
+ def test_weight_keyword(self):
+ WP4 = nx.Graph()
+ WP4.add_edges_from( (n,n+1,dict(weight=0.5,other=0.3))
+ for n in range(3) )
+ P4 = path_graph(4)
+ A = nx.to_scipy_sparse_matrix(P4)
+ np_assert_equal(A.todense(),
+ nx.to_scipy_sparse_matrix(WP4,weight=None).todense())
+ np_assert_equal(0.5*A.todense(),
+ nx.to_scipy_sparse_matrix(WP4).todense())
+ np_assert_equal(0.3*A.todense(),
+ nx.to_scipy_sparse_matrix(WP4,weight='other').todense())
+
+ def test_format_keyword(self):
+ WP4 = nx.Graph()
+ WP4.add_edges_from( (n,n+1,dict(weight=0.5,other=0.3))
+ for n in range(3) )
+ P4 = path_graph(4)
+ A = nx.to_scipy_sparse_matrix(P4, format='csr')
+ np_assert_equal(A.todense(),
+ nx.to_scipy_sparse_matrix(WP4,weight=None).todense())
+
+ A = nx.to_scipy_sparse_matrix(P4, format='csc')
+ np_assert_equal(A.todense(),
+ nx.to_scipy_sparse_matrix(WP4,weight=None).todense())
+
+ A = nx.to_scipy_sparse_matrix(P4, format='coo')
+ np_assert_equal(A.todense(),
+ nx.to_scipy_sparse_matrix(WP4,weight=None).todense())
+
+ A = nx.to_scipy_sparse_matrix(P4, format='bsr')
+ np_assert_equal(A.todense(),
+ nx.to_scipy_sparse_matrix(WP4,weight=None).todense())
+
+ A = nx.to_scipy_sparse_matrix(P4, format='lil')
+ np_assert_equal(A.todense(),
+ nx.to_scipy_sparse_matrix(WP4,weight=None).todense())
+
+ A = nx.to_scipy_sparse_matrix(P4, format='dia')
+ np_assert_equal(A.todense(),
+ nx.to_scipy_sparse_matrix(WP4,weight=None).todense())
+
+ A = nx.to_scipy_sparse_matrix(P4, format='dok')
+ np_assert_equal(A.todense(),
+ nx.to_scipy_sparse_matrix(WP4,weight=None).todense())
+
+ @raises(nx.NetworkXError)
+ def test_format_keyword_fail(self):
+ WP4 = nx.Graph()
+ WP4.add_edges_from( (n,n+1,dict(weight=0.5,other=0.3))
+ for n in range(3) )
+ P4 = path_graph(4)
+ nx.to_scipy_sparse_matrix(P4, format='any_other')
+
+ @raises(nx.NetworkXError)
+ def test_null_fail(self):
+ nx.to_scipy_sparse_matrix(nx.Graph())
+
+ def test_empty(self):
+ G = nx.Graph()
+ G.add_node(1)
+ M = nx.to_scipy_sparse_matrix(G)
+ np_assert_equal(M.todense(), np.matrix([[0]]))
+
+ def test_ordering(self):
+ G = nx.DiGraph()
+ G.add_edge(1,2)
+ G.add_edge(2,3)
+ G.add_edge(3,1)
+ M = nx.to_scipy_sparse_matrix(G,nodelist=[3,2,1])
+ np_assert_equal(M.todense(), np.matrix([[0,0,1],[1,0,0],[0,1,0]]))
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/tests/test_exceptions.py b/lib/python2.7/site-packages/setoolsgui/networkx/tests/test_exceptions.py
new file mode 100644
index 0000000..796fdd2
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/tests/test_exceptions.py
@@ -0,0 +1,33 @@
+from nose.tools import raises
+import networkx as nx
+
+# smoke tests for exceptions
+
+@raises(nx.NetworkXException)
+def test_raises_networkx_exception():
+ raise nx.NetworkXException
+
+@raises(nx.NetworkXError)
+def test_raises_networkx_error():
+ raise nx.NetworkXError
+
+@raises(nx.NetworkXPointlessConcept)
+def test_raises_networkx_pointless_concept():
+ raise nx.NetworkXPointlessConcept
+
+@raises(nx.NetworkXAlgorithmError)
+def test_raises_networkx_algorithm_error():
+ raise nx.NetworkXAlgorithmError
+
+@raises(nx.NetworkXUnfeasible)
+def test_raises_networkx_unfeasible():
+ raise nx.NetworkXUnfeasible
+
+@raises(nx.NetworkXNoPath)
+def test_raises_networkx_no_path():
+ raise nx.NetworkXNoPath
+
+@raises(nx.NetworkXUnbounded)
+def test_raises_networkx_unbounded():
+ raise nx.NetworkXUnbounded
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/tests/test_relabel.py b/lib/python2.7/site-packages/setoolsgui/networkx/tests/test_relabel.py
new file mode 100644
index 0000000..e9dd2af
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/tests/test_relabel.py
@@ -0,0 +1,163 @@
+#!/usr/bin/env python
+from nose.tools import *
+import networkx as nx
+from networkx import *
+from networkx.convert import *
+from networkx.algorithms.operators import *
+from networkx.generators.classic import barbell_graph,cycle_graph
+from networkx.testing import *
+
+class TestRelabel():
+ def test_convert_node_labels_to_integers(self):
+ # test that empty graph converts fine for all options
+ G=empty_graph()
+ H=convert_node_labels_to_integers(G,100)
+ assert_equal(H.name, '(empty_graph(0))_with_int_labels')
+ assert_equal(H.nodes(), [])
+ assert_equal(H.edges(), [])
+
+ for opt in ["default", "sorted", "increasing degree",
+ "decreasing degree"]:
+ G=empty_graph()
+ H=convert_node_labels_to_integers(G,100, ordering=opt)
+ assert_equal(H.name, '(empty_graph(0))_with_int_labels')
+ assert_equal(H.nodes(), [])
+ assert_equal(H.edges(), [])
+
+ G=empty_graph()
+ G.add_edges_from([('A','B'),('A','C'),('B','C'),('C','D')])
+ G.name="paw"
+ H=convert_node_labels_to_integers(G)
+ degH=H.degree().values()
+ degG=G.degree().values()
+ assert_equal(sorted(degH), sorted(degG))
+
+ H=convert_node_labels_to_integers(G,1000)
+ degH=H.degree().values()
+ degG=G.degree().values()
+ assert_equal(sorted(degH), sorted(degG))
+ assert_equal(H.nodes(), [1000, 1001, 1002, 1003])
+
+ H=convert_node_labels_to_integers(G,ordering="increasing degree")
+ degH=H.degree().values()
+ degG=G.degree().values()
+ assert_equal(sorted(degH), sorted(degG))
+ assert_equal(degree(H,0), 1)
+ assert_equal(degree(H,1), 2)
+ assert_equal(degree(H,2), 2)
+ assert_equal(degree(H,3), 3)
+
+ H=convert_node_labels_to_integers(G,ordering="decreasing degree")
+ degH=H.degree().values()
+ degG=G.degree().values()
+ assert_equal(sorted(degH), sorted(degG))
+ assert_equal(degree(H,0), 3)
+ assert_equal(degree(H,1), 2)
+ assert_equal(degree(H,2), 2)
+ assert_equal(degree(H,3), 1)
+
+ H=convert_node_labels_to_integers(G,ordering="increasing degree",
+ label_attribute='label')
+ degH=H.degree().values()
+ degG=G.degree().values()
+ assert_equal(sorted(degH), sorted(degG))
+ assert_equal(degree(H,0), 1)
+ assert_equal(degree(H,1), 2)
+ assert_equal(degree(H,2), 2)
+ assert_equal(degree(H,3), 3)
+
+ # check mapping
+ assert_equal(H.node[3]['label'],'C')
+ assert_equal(H.node[0]['label'],'D')
+ assert_true(H.node[1]['label']=='A' or H.node[2]['label']=='A')
+ assert_true(H.node[1]['label']=='B' or H.node[2]['label']=='B')
+
+ def test_convert_to_integers2(self):
+ G=empty_graph()
+ G.add_edges_from([('C','D'),('A','B'),('A','C'),('B','C')])
+ G.name="paw"
+ H=convert_node_labels_to_integers(G,ordering="sorted")
+ degH=H.degree().values()
+ degG=G.degree().values()
+ assert_equal(sorted(degH), sorted(degG))
+
+ H=convert_node_labels_to_integers(G,ordering="sorted",
+ label_attribute='label')
+ assert_equal(H.node[0]['label'],'A')
+ assert_equal(H.node[1]['label'],'B')
+ assert_equal(H.node[2]['label'],'C')
+ assert_equal(H.node[3]['label'],'D')
+
+ @raises(nx.NetworkXError)
+ def test_convert_to_integers_raise(self):
+ G = nx.Graph()
+ H=convert_node_labels_to_integers(G,ordering="increasing age")
+
+
+ def test_relabel_nodes_copy(self):
+ G=empty_graph()
+ G.add_edges_from([('A','B'),('A','C'),('B','C'),('C','D')])
+ mapping={'A':'aardvark','B':'bear','C':'cat','D':'dog'}
+ H=relabel_nodes(G,mapping)
+ assert_equal(sorted(H.nodes()), ['aardvark', 'bear', 'cat', 'dog'])
+
+ def test_relabel_nodes_function(self):
+ G=empty_graph()
+ G.add_edges_from([('A','B'),('A','C'),('B','C'),('C','D')])
+ # function mapping no longer encouraged but works
+ def mapping(n):
+ return ord(n)
+ H=relabel_nodes(G,mapping)
+ assert_equal(sorted(H.nodes()), [65, 66, 67, 68])
+
+ def test_relabel_nodes_graph(self):
+ G=Graph([('A','B'),('A','C'),('B','C'),('C','D')])
+ mapping={'A':'aardvark','B':'bear','C':'cat','D':'dog'}
+ H=relabel_nodes(G,mapping)
+ assert_equal(sorted(H.nodes()), ['aardvark', 'bear', 'cat', 'dog'])
+
+ def test_relabel_nodes_digraph(self):
+ G=DiGraph([('A','B'),('A','C'),('B','C'),('C','D')])
+ mapping={'A':'aardvark','B':'bear','C':'cat','D':'dog'}
+ H=relabel_nodes(G,mapping,copy=False)
+ assert_equal(sorted(H.nodes()), ['aardvark', 'bear', 'cat', 'dog'])
+
+ def test_relabel_nodes_multigraph(self):
+ G=MultiGraph([('a','b'),('a','b')])
+ mapping={'a':'aardvark','b':'bear'}
+ G=relabel_nodes(G,mapping,copy=False)
+ assert_equal(sorted(G.nodes()), ['aardvark', 'bear'])
+ assert_edges_equal(sorted(G.edges()),
+ [('aardvark', 'bear'), ('aardvark', 'bear')])
+
+ def test_relabel_nodes_multidigraph(self):
+ G=MultiDiGraph([('a','b'),('a','b')])
+ mapping={'a':'aardvark','b':'bear'}
+ G=relabel_nodes(G,mapping,copy=False)
+ assert_equal(sorted(G.nodes()), ['aardvark', 'bear'])
+ assert_equal(sorted(G.edges()),
+ [('aardvark', 'bear'), ('aardvark', 'bear')])
+
+ @raises(KeyError)
+ def test_relabel_nodes_missing(self):
+ G=Graph([('A','B'),('A','C'),('B','C'),('C','D')])
+ mapping={0:'aardvark'}
+ G=relabel_nodes(G,mapping,copy=False)
+
+
+ def test_relabel_toposort(self):
+ K5=nx.complete_graph(4)
+ G=nx.complete_graph(4)
+ G=nx.relabel_nodes(G,dict( [(i,i+1) for i in range(4)]),copy=False)
+ nx.is_isomorphic(K5,G)
+ G=nx.complete_graph(4)
+ G=nx.relabel_nodes(G,dict( [(i,i-1) for i in range(4)]),copy=False)
+ nx.is_isomorphic(K5,G)
+
+
+ def test_relabel_selfloop(self):
+ G = nx.DiGraph([(1, 1), (1, 2), (2, 3)])
+ G = nx.relabel_nodes(G, {1: 'One', 2: 'Two', 3: 'Three'}, copy=False)
+ assert_equal(sorted(G.nodes()),['One','Three','Two'])
+ G = nx.MultiDiGraph([(1, 1), (1, 2), (2, 3)])
+ G = nx.relabel_nodes(G, {1: 'One', 2: 'Two', 3: 'Three'}, copy=False)
+ assert_equal(sorted(G.nodes()),['One','Three','Two'])
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/utils/__init__.py b/lib/python2.7/site-packages/setoolsgui/networkx/utils/__init__.py
new file mode 100644
index 0000000..d443064
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/utils/__init__.py
@@ -0,0 +1,5 @@
+from networkx.utils.misc import *
+from networkx.utils.decorators import *
+from networkx.utils.random_sequence import *
+from networkx.utils.union_find import *
+from networkx.utils.rcm import *
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/utils/decorators.py b/lib/python2.7/site-packages/setoolsgui/networkx/utils/decorators.py
new file mode 100644
index 0000000..def1548
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/utils/decorators.py
@@ -0,0 +1,270 @@
+import sys
+
+from collections import defaultdict
+from os.path import splitext
+
+import networkx as nx
+from networkx.external.decorator import decorator
+from networkx.utils import is_string_like
+
+def not_implemented_for(*graph_types):
+ """Decorator to mark algorithms as not implemented
+
+ Parameters
+ ----------
+ graph_types : container of strings
+ Entries must be one of 'directed','undirected', 'multigraph', 'graph'.
+
+ Returns
+ -------
+ _require : function
+ The decorated function.
+
+ Raises
+ ------
+    NetworkXNotImplemented
+        If the graph matches all of the specified graph types.
+
+ Notes
+ -----
+ Multiple types are joined logically with "and".
+ For "or" use multiple @not_implemented_for() lines.
+
+ Examples
+ --------
+ Decorate functions like this::
+
+       @not_implemented_for('directed')
+ def sp_function():
+ pass
+
+       @not_implemented_for('directed','multigraph')
+ def sp_np_function():
+ pass
+ """
+ @decorator
+ def _not_implemented_for(f,*args,**kwargs):
+ graph = args[0]
+ terms= {'directed':graph.is_directed(),
+ 'undirected':not graph.is_directed(),
+ 'multigraph':graph.is_multigraph(),
+ 'graph':not graph.is_multigraph()}
+ match = True
+ try:
+ for t in graph_types:
+ match = match and terms[t]
+        except KeyError:
+            raise KeyError('use one or more of: '
+                           'directed, undirected, multigraph, graph')
+ if match:
+ raise nx.NetworkXNotImplemented('not implemented for %s type'%
+ ' '.join(graph_types))
+ else:
+ return f(*args,**kwargs)
+ return _not_implemented_for
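+
+# Usage sketch (illustrative, not part of the original module):
+#
+#     @not_implemented_for('directed')
+#     def undirected_only(G):
+#         return G.number_of_nodes()
+#
+#     undirected_only(nx.Graph())    # runs normally
+#     undirected_only(nx.DiGraph())  # raises nx.NetworkXNotImplemented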
+
+
+def require(*packages):
+ """Decorator to check whether specific packages can be imported.
+
+ If a package cannot be imported, then NetworkXError is raised.
+ If all packages can be imported, then the original function is called.
+
+ Parameters
+ ----------
+ packages : container of strings
+ Container of module names that will be imported.
+
+ Returns
+ -------
+ _require : function
+ The decorated function.
+
+ Raises
+ ------
+ NetworkXError
+ If any of the packages cannot be imported
+
+ Examples
+ --------
+ Decorate functions like this::
+
+ @require('scipy')
+ def sp_function():
+ import scipy
+ pass
+
+ @require('numpy','scipy')
+ def sp_np_function():
+ import numpy
+ import scipy
+ pass
+ """
+ @decorator
+ def _require(f,*args,**kwargs):
+ for package in reversed(packages):
+ try:
+ __import__(package)
+ except:
+ msg = "{0} requires {1}"
+ raise nx.NetworkXError( msg.format(f.__name__, package) )
+ return f(*args,**kwargs)
+ return _require
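+
+# Usage sketch (illustrative, not part of the original module): the import
+# check happens at call time, so decorating is free even when the
+# dependency is missing.
+#
+#     @require('numpy')
+#     def spectrum(G):
+#         import numpy
+#         return numpy.linalg.eigvals(nx.adjacency_matrix(G))
+#
+# Calling spectrum() raises nx.NetworkXError when numpy cannot be imported.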
+
+
+def _open_gz(path, mode):
+ import gzip
+ return gzip.open(path,mode=mode)
+
+def _open_bz2(path, mode):
+ import bz2
+ return bz2.BZ2File(path,mode=mode)
+
+# To handle new extensions, define a function accepting a `path` and `mode`.
+# Then add the extension to _dispatch_dict.
+_dispatch_dict = defaultdict(lambda : open)
+_dispatch_dict['.gz'] = _open_gz
+_dispatch_dict['.bz2'] = _open_bz2
+_dispatch_dict['.gzip'] = _open_gz
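+
+# For instance, '.xz' support could be registered as follows (a sketch, not
+# part of the original module; the lzma module is not in the 2.7 stdlib, so
+# this assumes a backport is installed):
+#
+#     def _open_xz(path, mode):
+#         import lzma
+#         return lzma.LZMAFile(path, mode=mode)
+#
+#     _dispatch_dict['.xz'] = _open_xz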
+
+
+def open_file(path_arg, mode='r'):
+ """Decorator to ensure clean opening and closing of files.
+
+ Parameters
+ ----------
+ path_arg : int
+ Location of the path argument in args. Even if the argument is a
+ named positional argument (with a default value), you must specify its
+ index as a positional argument.
+ mode : str
+ String for opening mode.
+
+ Returns
+ -------
+ _open_file : function
+ Function which cleanly executes the io.
+
+ Examples
+ --------
+ Decorate functions like this::
+
+ @open_file(0,'r')
+ def read_function(pathname):
+ pass
+
+ @open_file(1,'w')
+ def write_function(G,pathname):
+ pass
+
+ @open_file(1,'w')
+       def write_function(G, pathname='graph.dot'):
+ pass
+
+ @open_file('path', 'w+')
+ def another_function(arg, **kwargs):
+ path = kwargs['path']
+ pass
+ """
+ # Note that this decorator solves the problem when a path argument is
+ # specified as a string, but it does not handle the situation when the
+ # function wants to accept a default of None (and then handle it).
+ # Here is an example:
+ #
+ # @open_file('path')
+ # def some_function(arg1, arg2, path=None):
+ # if path is None:
+ # fobj = tempfile.NamedTemporaryFile(delete=False)
+ # close_fobj = True
+ # else:
+ # # `path` could have been a string or file object or something
+ # # similar. In any event, the decorator has given us a file object
+ # # and it will close it for us, if it should.
+ # fobj = path
+ # close_fobj = False
+ #
+ # try:
+ # fobj.write('blah')
+ # finally:
+ # if close_fobj:
+ # fobj.close()
+ #
+ # Normally, we'd want to use "with" to ensure that fobj gets closed.
+ # However, recall that the decorator will make `path` a file object for
+ # us, and using "with" would undesirably close that file object. Instead,
+ # you use a try block, as shown above. When we exit the function, fobj will
+ # be closed, if it should be, by the decorator.
+
+ @decorator
+ def _open_file(func, *args, **kwargs):
+
+ # Note that since we have used @decorator, *args, and **kwargs have
+ # already been resolved to match the function signature of func. This
+ # means default values have been propagated. For example, the function
+ # func(x, y, a=1, b=2, **kwargs) if called as func(0,1,b=5,c=10) would
+ # have args=(0,1,1,5) and kwargs={'c':10}.
+
+        # First we parse the arguments of the decorator. The path_arg could
+        # be a positional argument or a keyword argument.
+ try:
+ # path_arg is a required positional argument
+ # This works precisely because we are using @decorator
+ path = args[path_arg]
+ except TypeError:
+ # path_arg is a keyword argument. It is "required" in the sense
+            # that it must exist, according to the decorator specification.
+            # It can exist in `kwargs` via a developer-specified default value
+ # or it could have been explicitly set by the user.
+ try:
+ path = kwargs[path_arg]
+ except KeyError:
+ # Could not find the keyword. Thus, no default was specified
+ # in the function signature and the user did not provide it.
+ msg = 'Missing required keyword argument: {0}'
+ raise nx.NetworkXError(msg.format(path_arg))
+ else:
+ is_kwarg = True
+ except IndexError:
+ # A "required" argument was missing. This can only happen if
+ # the decorator of the function was incorrectly specified.
+ # So this probably is not a user error, but a developer error.
+ msg = "path_arg of open_file decorator is incorrect"
+ raise nx.NetworkXError(msg)
+ else:
+ is_kwarg = False
+
+ # Now we have the path_arg. There are two types of input to consider:
+ # 1) string representing a path that should be opened
+ # 2) an already opened file object
+ if is_string_like(path):
+ ext = splitext(path)[1]
+ fobj = _dispatch_dict[ext](path, mode=mode)
+ close_fobj = True
+ elif hasattr(path, 'read'):
+ # path is already a file-like object
+ fobj = path
+ close_fobj = False
+ else:
+ # could be None, in which case the algorithm will deal with it
+ fobj = path
+ close_fobj = False
+
+ # Insert file object into args or kwargs.
+ if is_kwarg:
+ new_args = args
+ kwargs[path_arg] = fobj
+ else:
+ # args is a tuple, so we must convert to list before modifying it.
+ new_args = list(args)
+ new_args[path_arg] = fobj
+
+ # Finally, we call the original function, making sure to close the fobj.
+ try:
+ result = func(*new_args, **kwargs)
+ finally:
+ if close_fobj:
+ fobj.close()
+
+ return result
+
+ return _open_file
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/utils/misc.py b/lib/python2.7/site-packages/setoolsgui/networkx/utils/misc.py
new file mode 100644
index 0000000..a942753
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/utils/misc.py
@@ -0,0 +1,151 @@
+"""
+Miscellaneous Helpers for NetworkX.
+
+These are not imported into the base networkx namespace but
+can be accessed, for example, as
+
+>>> import networkx
+>>> networkx.utils.is_string_like('spam')
+True
+"""
+# Copyright (C) 2004-2011 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+import sys
+import subprocess
+import uuid
+
+import networkx as nx
+from networkx.external.decorator import decorator
+
+__author__ = '\n'.join(['Aric Hagberg (hagberg@lanl.gov)',
+ 'Dan Schult(dschult@colgate.edu)',
+ 'Ben Edwards(bedwards@cs.unm.edu)'])
+### some cookbook stuff
+# used in deciding whether something is a bunch of nodes, edges, etc.
+# see G.add_nodes and others in Graph Class in networkx/base.py
+
+def is_string_like(obj): # from John Hunter, types-free version
+    """Check if obj is string-like."""
+ try:
+ obj + ''
+ except (TypeError, ValueError):
+ return False
+ return True
+
+def iterable(obj):
+    """Return True if obj is iterable (has __iter__ or supports len())."""
+ if hasattr(obj,"__iter__"): return True
+ try:
+ len(obj)
+ except:
+ return False
+ return True
+
+def flatten(obj, result=None):
+ """ Return flattened version of (possibly nested) iterable object. """
+ if not iterable(obj) or is_string_like(obj):
+ return obj
+ if result is None:
+ result = []
+ for item in obj:
+ if not iterable(item) or is_string_like(item):
+ result.append(item)
+ else:
+ flatten(item, result)
+ return obj.__class__(result)
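+
+# Worked example (illustrative, not in the original source): nesting is
+# flattened while strings are kept whole, and the outer container type is
+# preserved.
+#
+#     >>> flatten((1, (2, (3,)), 'ab'))
+#     (1, 2, 3, 'ab')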
+
+def is_list_of_ints( intlist ):
+ """ Return True if list is a list of ints. """
+ if not isinstance(intlist,list): return False
+ for i in intlist:
+ if not isinstance(i,int): return False
+ return True
+
+def make_str(t):
+ """Return the string representation of t."""
+ if is_string_like(t): return t
+ return str(t)
+
+def cumulative_sum(numbers):
+ """Yield cumulative sum of numbers.
+
+ >>> import networkx.utils as utils
+ >>> list(utils.cumulative_sum([1,2,3,4]))
+ [1, 3, 6, 10]
+ """
+ csum = 0
+ for n in numbers:
+ csum += n
+ yield csum
+
+def generate_unique_node():
+ """ Generate a unique node label."""
+ return str(uuid.uuid1())
+
+def default_opener(filename):
+    """Opens `filename` using the system's default program.
+
+ Parameters
+ ----------
+ filename : str
+ The path of the file to be opened.
+
+ """
+ cmds = {'darwin': ['open'],
+ 'linux2': ['xdg-open'],
+ 'win32': ['cmd.exe', '/C', 'start', '']}
+ cmd = cmds[sys.platform] + [filename]
+ subprocess.call(cmd)
+
+
+def dict_to_numpy_array(d,mapping=None):
+ """Convert a dictionary of dictionaries to a numpy array
+ with optional mapping."""
+ try:
+ return dict_to_numpy_array2(d, mapping)
+ except AttributeError:
+ return dict_to_numpy_array1(d,mapping)
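+
+# Dispatch sketch (illustrative, not in the original source): a dict of
+# dicts takes the 2d path; a flat dict of numbers raises AttributeError on
+# .keys() of its values and falls back to the 1d path.
+#
+#     >>> dict_to_numpy_array({'a': {'a': 1, 'b': 2}, 'b': {'a': 3, 'b': 4}}).shape
+#     (2, 2)
+#     >>> dict_to_numpy_array({'a': 1, 'b': 2}).shape
+#     (2,)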
+
+def dict_to_numpy_array2(d,mapping=None):
+ """Convert a dictionary of dictionaries to a 2d numpy array
+ with optional mapping."""
+ try:
+ import numpy
+ except ImportError:
+ raise ImportError(
+ "dict_to_numpy_array requires numpy : http://scipy.org/ ")
+ if mapping is None:
+ s=set(d.keys())
+ for k,v in d.items():
+ s.update(v.keys())
+ mapping=dict(zip(s,range(len(s))))
+ n=len(mapping)
+ a = numpy.zeros((n, n))
+ for k1, row in d.items():
+ for k2, value in row.items():
+ i=mapping[k1]
+ j=mapping[k2]
+ a[i,j] = value
+ return a
+
+def dict_to_numpy_array1(d,mapping=None):
+ """Convert a dictionary of numbers to a 1d numpy array
+ with optional mapping."""
+ try:
+ import numpy
+ except ImportError:
+ raise ImportError(
+ "dict_to_numpy_array requires numpy : http://scipy.org/ ")
+ if mapping is None:
+ s = set(d.keys())
+ mapping = dict(zip(s,range(len(s))))
+ n = len(mapping)
+ a = numpy.zeros(n)
+ for k1, value in d.items():
+ i = mapping[k1]
+ a[i] = value
+ return a
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/utils/random_sequence.py b/lib/python2.7/site-packages/setoolsgui/networkx/utils/random_sequence.py
new file mode 100644
index 0000000..a2f947d
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/utils/random_sequence.py
@@ -0,0 +1,222 @@
+"""
+Utilities for generating random numbers, random sequences, and
+random selections.
+"""
+# Copyright (C) 2004-2011 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+import random
+import sys
+import networkx as nx
+__author__ = '\n'.join(['Aric Hagberg (hagberg@lanl.gov)',
+ 'Dan Schult(dschult@colgate.edu)',
+ 'Ben Edwards(bedwards@cs.unm.edu)'])
+
+def create_degree_sequence(n, sfunction=None, max_tries=50, **kwds):
+ """ Attempt to create a valid degree sequence of length n using
+ specified function sfunction(n,**kwds).
+
+ Parameters
+ ----------
+ n : int
+ Length of degree sequence = number of nodes
+ sfunction: function
+ Function which returns a list of n real or integer values.
+ Called as "sfunction(n,**kwds)".
+ max_tries: int
+ Max number of attempts at creating valid degree sequence.
+
+ Notes
+ -----
+ Repeatedly create a degree sequence by calling sfunction(n,**kwds)
+ until achieving a valid degree sequence. If unsuccessful after
+ max_tries attempts, raise an exception.
+
+ For examples of sfunctions that return sequences of random numbers,
+    see networkx.utils.
+
+ Examples
+ --------
+ >>> from networkx.utils import uniform_sequence, create_degree_sequence
+ >>> seq=create_degree_sequence(10,uniform_sequence)
+ """
+ tries=0
+ max_deg=n
+ while tries < max_tries:
+ trialseq=sfunction(n,**kwds)
+ # round to integer values in the range [0,max_deg]
+ seq=[min(max_deg, max( int(round(s)),0 )) for s in trialseq]
+ # if graphical return, else throw away and try again
+ if nx.is_valid_degree_sequence(seq):
+ return seq
+ tries+=1
+    raise nx.NetworkXError(
+        "Exceeded max (%d) attempts at a valid sequence." % max_tries)
+
+
+# The same helpers for choosing random sequences from distributions
+# uses Python's random module
+# http://www.python.org/doc/current/lib/module-random.html
+
+def pareto_sequence(n,exponent=1.0):
+ """
+ Return sample sequence of length n from a Pareto distribution.
+ """
+ return [random.paretovariate(exponent) for i in range(n)]
+
+
+def powerlaw_sequence(n,exponent=2.0):
+ """
+ Return sample sequence of length n from a power law distribution.
+ """
+ return [random.paretovariate(exponent-1) for i in range(n)]
+
+def zipf_rv(alpha, xmin=1, seed=None):
+ r"""Return a random value chosen from the Zipf distribution.
+
+ The return value is an integer drawn from the probability distribution
+    .. math::
+
+        p(x)=\frac{x^{-\alpha}}{\zeta(\alpha,x_{min})},
+
+    where :math:`\zeta(\alpha,x_{min})` is the Hurwitz zeta function.
+
+ Parameters
+ ----------
+ alpha : float
+ Exponent value of the distribution
+ xmin : int
+ Minimum value
+ seed : int
+ Seed value for random number generator
+
+ Returns
+ -------
+ x : int
+ Random value from Zipf distribution
+
+ Raises
+ ------
+ ValueError:
+ If xmin < 1 or
+ If alpha <= 1
+
+ Notes
+ -----
+    The rejection algorithm generates random values for the power-law
+    distribution in uniformly bounded expected time dependent on the
+    parameters. See [1]_ for details on its operation.
+
+ Examples
+ --------
+ >>> nx.zipf_rv(alpha=2, xmin=3, seed=42) # doctest: +SKIP
+
+ References
+ ----------
+    .. [1] Luc Devroye, Non-Uniform Random Variate Generation,
+       Springer-Verlag, New York, 1986.
+ """
+ if xmin < 1:
+ raise ValueError("xmin < 1")
+    if alpha <= 1:
+        raise ValueError("alpha <= 1.0")
+    if seed is not None:
+ random.seed(seed)
+ a1 = alpha - 1.0
+ b = 2**a1
+ while True:
+ u = 1.0 - random.random() # u in (0,1]
+ v = random.random() # v in [0,1)
+ x = int(xmin*u**-(1.0/a1))
+ t = (1.0+(1.0/x))**a1
+ if v*x*(t-1.0)/(b-1.0) <= t/b:
+ break
+ return x
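+
+# Sanity-check sketch (illustrative, not in the original source): every draw
+# respects the xmin bound, since x = int(xmin * u**(-1/(alpha-1))) with
+# u in (0, 1].
+#
+#     >>> all(zipf_rv(2.5, xmin=2) >= 2 for _ in range(100))
+#     True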
+
+def zipf_sequence(n, alpha=2.0, xmin=1):
+ """Return a sample sequence of length n from a Zipf distribution with
+ exponent parameter alpha and minimum value xmin.
+
+ See Also
+ --------
+ zipf_rv
+ """
+ return [ zipf_rv(alpha,xmin) for _ in range(n)]
+
+def uniform_sequence(n):
+ """
+ Return sample sequence of length n from a uniform distribution.
+ """
+ return [ random.uniform(0,n) for i in range(n)]
+
+
+def cumulative_distribution(distribution):
+ """Return normalized cumulative distribution from discrete distribution."""
+
+ cdf=[]
+ cdf.append(0.0)
+ psum=float(sum(distribution))
+ for i in range(0,len(distribution)):
+ cdf.append(cdf[i]+distribution[i]/psum)
+ return cdf
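+
+# Worked example (illustrative, not in the original source): the histogram
+# [1, 1, 2] has total mass 4, so the normalized cumulative values are
+#
+#     >>> cumulative_distribution([1, 1, 2])
+#     [0.0, 0.25, 0.5, 1.0]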
+
+
+def discrete_sequence(n, distribution=None, cdistribution=None):
+ """
+ Return sample sequence of length n from a given discrete distribution
+ or discrete cumulative distribution.
+
+ One of the following must be specified.
+
+ distribution = histogram of values, will be normalized
+
+ cdistribution = normalized discrete cumulative distribution
+
+ """
+ import bisect
+
+ if cdistribution is not None:
+ cdf=cdistribution
+ elif distribution is not None:
+ cdf=cumulative_distribution(distribution)
+ else:
+ raise nx.NetworkXError(
+ "discrete_sequence: distribution or cdistribution missing")
+
+
+ # get a uniform random number
+ inputseq=[random.random() for i in range(n)]
+
+ # choose from CDF
+ seq=[bisect.bisect_left(cdf,s)-1 for s in inputseq]
+ return seq
+
+
+def random_weighted_sample(mapping, k):
+ """Return k items without replacement from a weighted sample.
+
+ The input is a dictionary of items with weights as values.
+ """
+ if k > len(mapping):
+ raise ValueError("sample larger than population")
+ sample = set()
+ while len(sample) < k:
+ sample.add(weighted_choice(mapping))
+ return list(sample)
+
+def weighted_choice(mapping):
+ """Return a single element from a weighted sample.
+
+ The input is a dictionary of items with weights as values.
+ """
+ # use roulette method
+ rnd = random.random() * sum(mapping.values())
+ for k, w in mapping.items():
+ rnd -= w
+ if rnd < 0:
+ return k
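+
+# Usage sketch (illustrative, not in the original source): items come back
+# in proportion to their weights, so 'a' should win most draws here.
+#
+#     >>> counts = {'a': 0, 'b': 0}
+#     >>> for _ in range(1000):
+#     ...     counts[weighted_choice({'a': 9, 'b': 1})] += 1
+#     >>> counts['a'] > counts['b']
+#     True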
+
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/utils/rcm.py b/lib/python2.7/site-packages/setoolsgui/networkx/utils/rcm.py
new file mode 100644
index 0000000..b21bd9b
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/utils/rcm.py
@@ -0,0 +1,150 @@
+"""
+Cuthill-McKee ordering of graph nodes to produce sparse matrices
+"""
+# Copyright (C) 2011 by
+# Aric Hagberg <hagberg@lanl.gov>
+# All rights reserved.
+# BSD license.
+from operator import itemgetter
+import networkx as nx
+__author__ = """\n""".join(['Aric Hagberg <aric.hagberg@gmail.com>'])
+__all__ = ['cuthill_mckee_ordering',
+ 'reverse_cuthill_mckee_ordering']
+
+def cuthill_mckee_ordering(G, start=None):
+ """Generate an ordering (permutation) of the graph nodes to make
+ a sparse matrix.
+
+ Uses the Cuthill-McKee heuristic (based on breadth-first search) [1]_.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph
+
+ start : node, optional
+        Start the algorithm at the specified node. The node should be on
+        the periphery of the graph for best results.
+
+ Returns
+ -------
+ nodes : generator
+ Generator of nodes in Cuthill-McKee ordering.
+
+ Examples
+ --------
+ >>> from networkx.utils import cuthill_mckee_ordering
+ >>> G = nx.path_graph(4)
+ >>> rcm = list(cuthill_mckee_ordering(G))
+ >>> A = nx.adjacency_matrix(G, nodelist=rcm) # doctest: +SKIP
+
+ See Also
+ --------
+ reverse_cuthill_mckee_ordering
+
+ Notes
+ -----
+    Finding an ordering with optimal bandwidth reduction is NP-complete [2]_.
+
+ References
+ ----------
+ .. [1] E. Cuthill and J. McKee.
+ Reducing the bandwidth of sparse symmetric matrices,
+ In Proc. 24th Nat. Conf. ACM, pages 157-172, 1969.
+ http://doi.acm.org/10.1145/800195.805928
+ .. [2] Steven S. Skiena. 1997. The Algorithm Design Manual.
+ Springer-Verlag New York, Inc., New York, NY, USA.
+ """
+ for c in nx.connected_components(G):
+ for n in connected_cuthill_mckee_ordering(G.subgraph(c), start):
+ yield n
+
+def reverse_cuthill_mckee_ordering(G, start=None):
+ """Generate an ordering (permutation) of the graph nodes to make
+ a sparse matrix.
+
+ Uses the reverse Cuthill-McKee heuristic (based on breadth-first search)
+ [1]_.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph
+
+ start : node, optional
+        Start the algorithm at the specified node. The node should be on
+        the periphery of the graph for best results.
+
+ Returns
+ -------
+ nodes : generator
+ Generator of nodes in reverse Cuthill-McKee ordering.
+
+ Examples
+ --------
+ >>> from networkx.utils import reverse_cuthill_mckee_ordering
+ >>> G = nx.path_graph(4)
+ >>> rcm = list(reverse_cuthill_mckee_ordering(G))
+ >>> A = nx.adjacency_matrix(G, nodelist=rcm) # doctest: +SKIP
+
+ See Also
+ --------
+ cuthill_mckee_ordering
+
+ Notes
+ -----
+    Finding an ordering with optimal bandwidth reduction is NP-complete [2]_.
+
+ References
+ ----------
+ .. [1] E. Cuthill and J. McKee.
+ Reducing the bandwidth of sparse symmetric matrices,
+       In Proc. 24th Nat. Conf. ACM, pages 157-172, 1969.
+ http://doi.acm.org/10.1145/800195.805928
+ .. [2] Steven S. Skiena. 1997. The Algorithm Design Manual.
+ Springer-Verlag New York, Inc., New York, NY, USA.
+ """
+ return reversed(list(cuthill_mckee_ordering(G, start=start)))
+
+def connected_cuthill_mckee_ordering(G, start=None):
+ # the cuthill mckee algorithm for connected graphs
+ if start is None:
+ (_, start) = find_pseudo_peripheral_node_pair(G)
+ yield start
+ visited = set([start])
+ stack = [(start, iter(G[start]))]
+ while stack:
+ parent,children = stack[0]
+ if parent not in visited:
+ yield parent
+ try:
+ child = next(children)
+ if child not in visited:
+ yield child
+ visited.add(child)
+ # add children to stack, sorted by degree (lowest first)
+ nd = sorted(G.degree(G[child]).items(), key=itemgetter(1))
+ children = (n for n,d in nd)
+ stack.append((child,children))
+ except StopIteration:
+ stack.pop(0)
+
+def find_pseudo_peripheral_node_pair(G, start=None):
+ # helper for cuthill-mckee to find a "pseudo peripheral pair"
+    # to use as a good starting node
+ if start is None:
+ u = next(G.nodes_iter())
+ else:
+ u = start
+ lp = 0
+ v = u
+ while True:
+ spl = nx.shortest_path_length(G, v)
+ l = max(spl.values())
+ if l <= lp:
+ break
+ lp = l
+ farthest = [n for n,dist in spl.items() if dist==l]
+ v, deg = sorted(G.degree(farthest).items(), key=itemgetter(1))[0]
+ return u, v
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/utils/tests/test_decorators.py b/lib/python2.7/site-packages/setoolsgui/networkx/utils/tests/test_decorators.py
new file mode 100644
index 0000000..964b2c9
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/utils/tests/test_decorators.py
@@ -0,0 +1,160 @@
+import tempfile
+import os
+
+from nose.tools import *
+
+import networkx as nx
+from networkx.utils.decorators import open_file,require,not_implemented_for
+
+def test_not_implemented_decorator():
+ @not_implemented_for('directed')
+ def test1(G):
+ pass
+ test1(nx.Graph())
+
+@raises(KeyError)
+def test_not_implemented_decorator_key():
+ @not_implemented_for('foo')
+ def test1(G):
+ pass
+ test1(nx.Graph())
+
+@raises(nx.NetworkXNotImplemented)
+def test_not_implemented_decorator_raise():
+ @not_implemented_for('graph')
+ def test1(G):
+ pass
+ test1(nx.Graph())
+
+
+def test_require_decorator1():
+ @require('os','sys')
+ def test1():
+ import os
+ import sys
+ test1()
+
+def test_require_decorator2():
+ @require('blahhh')
+ def test2():
+ import blahhh
+ assert_raises(nx.NetworkXError, test2)
+
+class TestOpenFileDecorator(object):
+ def setUp(self):
+ self.text = ['Blah... ', 'BLAH ', 'BLAH!!!!']
+ self.fobj = tempfile.NamedTemporaryFile('wb+', delete=False)
+ self.name = self.fobj.name
+
+ def write(self, path):
+ for text in self.text:
+ path.write(text.encode('ascii'))
+
+ @open_file(1, 'r')
+ def read(self, path):
+ return path.readlines()[0]
+
+ @staticmethod
+ @open_file(0, 'wb')
+ def writer_arg0(path):
+ path.write('demo'.encode('ascii'))
+
+ @open_file(1, 'wb+')
+ def writer_arg1(self, path):
+ self.write(path)
+
+ @open_file(2, 'wb')
+ def writer_arg2default(self, x, path=None):
+ if path is None:
+ fh = tempfile.NamedTemporaryFile('wb+', delete=False)
+ close_fh = True
+ else:
+ fh = path
+ close_fh = False
+
+ try:
+ self.write(fh)
+ finally:
+ if close_fh:
+ fh.close()
+
+ @open_file(4, 'wb')
+ def writer_arg4default(self, x, y, other='hello', path=None, **kwargs):
+ if path is None:
+ fh = tempfile.NamedTemporaryFile('wb+', delete=False)
+ close_fh = True
+ else:
+ fh = path
+ close_fh = False
+
+ try:
+ self.write(fh)
+ finally:
+ if close_fh:
+ fh.close()
+
+ @open_file('path', 'wb')
+ def writer_kwarg(self, **kwargs):
+ path = kwargs.get('path', None)
+ if path is None:
+ fh = tempfile.NamedTemporaryFile('wb+', delete=False)
+ close_fh = True
+ else:
+ fh = path
+ close_fh = False
+
+ try:
+ self.write(fh)
+ finally:
+ if close_fh:
+ fh.close()
+
+ def test_writer_arg0_str(self):
+ self.writer_arg0(self.name)
+
+ def test_writer_arg0_fobj(self):
+ self.writer_arg0(self.fobj)
+
+ def test_writer_arg1_str(self):
+ self.writer_arg1(self.name)
+ assert_equal( self.read(self.name), ''.join(self.text) )
+
+ def test_writer_arg1_fobj(self):
+ self.writer_arg1(self.fobj)
+ assert_false(self.fobj.closed)
+ self.fobj.close()
+ assert_equal( self.read(self.name), ''.join(self.text) )
+
+ def test_writer_arg2default_str(self):
+ self.writer_arg2default(0, path=None)
+ self.writer_arg2default(0, path=self.name)
+ assert_equal( self.read(self.name), ''.join(self.text) )
+
+ def test_writer_arg2default_fobj(self):
+ self.writer_arg2default(0, path=self.fobj)
+ assert_false(self.fobj.closed)
+ self.fobj.close()
+ assert_equal( self.read(self.name), ''.join(self.text) )
+
+    def test_writer_arg2default_path_none(self):
+ self.writer_arg2default(0, path=None)
+
+ def test_writer_arg4default_fobj(self):
+ self.writer_arg4default(0, 1, dog='dog', other='other2')
+ self.writer_arg4default(0, 1, dog='dog', other='other2', path=self.name)
+ assert_equal( self.read(self.name), ''.join(self.text) )
+
+ def test_writer_kwarg_str(self):
+ self.writer_kwarg(path=self.name)
+ assert_equal( self.read(self.name), ''.join(self.text) )
+
+ def test_writer_kwarg_fobj(self):
+ self.writer_kwarg(path=self.fobj)
+ self.fobj.close()
+ assert_equal( self.read(self.name), ''.join(self.text) )
+
+    def test_writer_kwarg_path_none(self):
+ self.writer_kwarg(path=None)
+
+ def tearDown(self):
+ self.fobj.close()
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/utils/tests/test_misc.py b/lib/python2.7/site-packages/setoolsgui/networkx/utils/tests/test_misc.py
new file mode 100644
index 0000000..77b8196
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/utils/tests/test_misc.py
@@ -0,0 +1,72 @@
+from nose.tools import *
+from nose import SkipTest
+import networkx as nx
+from networkx.utils import *
+
+def test_is_string_like():
+ assert_true(is_string_like("aaaa"))
+ assert_false(is_string_like(None))
+ assert_false(is_string_like(123))
+
+def test_iterable():
+ assert_false(iterable(None))
+ assert_false(iterable(10))
+ assert_true(iterable([1,2,3]))
+ assert_true(iterable((1,2,3)))
+ assert_true(iterable({1:"A",2:"X"}))
+ assert_true(iterable("ABC"))
+
+def test_graph_iterable():
+ K=nx.complete_graph(10)
+ assert_true(iterable(K))
+ assert_true(iterable(K.nodes_iter()))
+ assert_true(iterable(K.edges_iter()))
+
+def test_is_list_of_ints():
+ assert_true(is_list_of_ints([1,2,3,42]))
+ assert_false(is_list_of_ints([1,2,3,"kermit"]))
+
+def test_random_number_distribution():
+ # smoke test only
+ z=uniform_sequence(20)
+ z=powerlaw_sequence(20,exponent=2.5)
+ z=pareto_sequence(20,exponent=1.5)
+ z=discrete_sequence(20,distribution=[0,0,0,0,1,1,1,1,2,2,3])
+
+class TestNumpyArray(object):
+ numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
+ @classmethod
+ def setupClass(cls):
+ global numpy
+ global assert_equal
+ global assert_almost_equal
+ try:
+ import numpy
+ from numpy.testing import assert_equal,assert_almost_equal
+ except ImportError:
+ raise SkipTest('NumPy not available.')
+
+ def test_dict_to_numpy_array1(self):
+ d = {'a':1,'b':2}
+ a = dict_to_numpy_array1(d)
+ assert_equal(a, numpy.array([1,2]))
+ a = dict_to_numpy_array1(d, mapping = {'b':0,'a':1})
+ assert_equal(a, numpy.array([2,1]))
+
+ def test_dict_to_numpy_array2(self):
+ d = {'a': {'a':1,'b':2},
+ 'b': {'a':10,'b':20}}
+ a = dict_to_numpy_array(d)
+ assert_equal(a, numpy.array([[1,2],[10,20]]))
+ a = dict_to_numpy_array2(d, mapping = {'b':0,'a':1})
+ assert_equal(a, numpy.array([[20,10],[2,1]]))
+
+
+ def test_dict_to_numpy_array(self):
+ d = {'a': {'a':1,'b':2},
+ 'b': {'a':10,'b':20}}
+ a = dict_to_numpy_array(d)
+ assert_equal(a, numpy.array([[1,2],[10,20]]))
+ d = {'a':1,'b':2}
+ a = dict_to_numpy_array1(d)
+ assert_equal(a, numpy.array([1,2]))
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/utils/tests/test_random_sequence.py b/lib/python2.7/site-packages/setoolsgui/networkx/utils/tests/test_random_sequence.py
new file mode 100644
index 0000000..0c3634a
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/utils/tests/test_random_sequence.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+from nose.tools import *
+from networkx.utils import uniform_sequence,powerlaw_sequence,\
+ create_degree_sequence,zipf_rv,zipf_sequence,random_weighted_sample,\
+ weighted_choice
+import networkx.utils
+
+def test_degree_sequences():
+ seq=create_degree_sequence(10,uniform_sequence)
+ assert_equal(len(seq), 10)
+ seq=create_degree_sequence(10,powerlaw_sequence)
+ assert_equal(len(seq), 10)
+
+def test_zipf_rv():
+ r = zipf_rv(2.3)
+ assert_true(type(r),int)
+ assert_raises(ValueError,zipf_rv,0.5)
+ assert_raises(ValueError,zipf_rv,2,xmin=0)
+
+def test_zipf_sequence():
+ s = zipf_sequence(10)
+ assert_equal(len(s),10)
+
+def test_random_weighted_sample():
+ mapping={'a':10,'b':20}
+ s = random_weighted_sample(mapping,2)
+ assert_equal(sorted(s),sorted(mapping.keys()))
+ assert_raises(ValueError,random_weighted_sample,mapping,3)
+
+def test_random_weighted_choice():
+ mapping={'a':10,'b':0}
+ c = weighted_choice(mapping)
+ assert_equal(c,'a')
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/utils/tests/test_rcm.py b/lib/python2.7/site-packages/setoolsgui/networkx/utils/tests/test_rcm.py
new file mode 100644
index 0000000..f267586
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/utils/tests/test_rcm.py
@@ -0,0 +1,13 @@
+from nose.tools import *
+from networkx.utils import reverse_cuthill_mckee_ordering
+import networkx as nx
+
+def test_reverse_cuthill_mckee():
+ # example graph from
+ # http://www.boost.org/doc/libs/1_37_0/libs/graph/example/cuthill_mckee_ordering.cpp
+ G = nx.Graph([(0,3),(0,5),(1,2),(1,4),(1,6),(1,9),(2,3),
+ (2,4),(3,5),(3,8),(4,6),(5,6),(5,7),(6,7)])
+ rcm = list(reverse_cuthill_mckee_ordering(G,start=0))
+ assert_equal(rcm,[9, 1, 4, 6, 7, 2, 8, 5, 3, 0])
+ rcm = list(reverse_cuthill_mckee_ordering(G))
+ assert_equal(rcm,[0, 8, 5, 7, 3, 6, 4, 2, 1, 9])
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/utils/union_find.py b/lib/python2.7/site-packages/setoolsgui/networkx/utils/union_find.py
new file mode 100644
index 0000000..d05dd92
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/utils/union_find.py
@@ -0,0 +1,75 @@
+"""
+Union-find data structure.
+"""
+# Copyright (C) 2004-2011 by
+# Aric Hagberg <hagberg@lanl.gov>
+# Dan Schult <dschult@colgate.edu>
+# Pieter Swart <swart@lanl.gov>
+# All rights reserved.
+# BSD license.
+import networkx as nx
+
+class UnionFind:
+ """Union-find data structure.
+
+    Each UnionFind instance X maintains a family of disjoint sets of
+ hashable objects, supporting the following two methods:
+
+ - X[item] returns a name for the set containing the given item.
+ Each set is named by an arbitrarily-chosen one of its members; as
+ long as the set remains unchanged it will keep the same name. If
+ the item is not yet part of a set in X, a new singleton set is
+ created for it.
+
+ - X.union(item1, item2, ...) merges the sets containing each item
+ into a single larger set. If any item is not yet part of a set
+ in X, it is added to X as one of the members of the merged set.
+
+ Union-find data structure. Based on Josiah Carlson's code,
+ http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/215912
+ with significant additional changes by D. Eppstein.
+ http://www.ics.uci.edu/~eppstein/PADS/UnionFind.py
+
+ """
+
+ def __init__(self):
+ """Create a new empty union-find structure."""
+ self.weights = {}
+ self.parents = {}
+
+ def __getitem__(self, object):
+ """Find and return the name of the set containing the object."""
+
+ # check for previously unknown object
+ if object not in self.parents:
+ self.parents[object] = object
+ self.weights[object] = 1
+ return object
+
+ # find path of objects leading to the root
+ path = [object]
+ root = self.parents[object]
+ while root != path[-1]:
+ path.append(root)
+ root = self.parents[root]
+
+ # compress the path and return
+ for ancestor in path:
+ self.parents[ancestor] = root
+ return root
+
+ def __iter__(self):
+ """Iterate through all items ever found or unioned by this structure."""
+ return iter(self.parents)
+
+ def union(self, *objects):
+ """Find the sets containing the objects and merge them all."""
+ roots = [self[x] for x in objects]
+ heaviest = max([(self.weights[r],r) for r in roots])[1]
+ for r in roots:
+ if r != heaviest:
+ self.weights[heaviest] += self.weights[r]
+ self.parents[r] = heaviest
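+
+# Usage sketch (illustrative, not in the original source): after a union,
+# both items report the same representative, and lookups of unseen items
+# create fresh singleton sets.
+#
+#     >>> uf = UnionFind()
+#     >>> uf.union(1, 2)
+#     >>> uf[1] == uf[2]
+#     True
+#     >>> uf[3] == uf[1]
+#     False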
+
+
+
diff --git a/lib/python2.7/site-packages/setoolsgui/networkx/version.py b/lib/python2.7/site-packages/setoolsgui/networkx/version.py
new file mode 100644
index 0000000..7369f8b
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/networkx/version.py
@@ -0,0 +1,25 @@
+"""
+Version information for NetworkX, created during installation.
+
+Do not add this file to the repository.
+
+"""
+
+import datetime
+
+version = '1.8.1'
+date = 'Sun Aug 4 07:56:54 2013'
+
+# Was NetworkX built from a development version? If so, remember that the major
+# and minor versions reference the "target" (rather than "current") release.
+dev = False
+
+# Format: (name, major, min, revision)
+version_info = ('networkx', '1', '8.1', None)
+
+# Format: a 'datetime.datetime' instance
+date_info = datetime.datetime(2013, 8, 4, 7, 56, 54, 416491)
+
+# Format: (vcs, vcs_tuple)
+vcs_info = (None, (None, None))
+
diff --git a/lib/python2.7/site-packages/setoolsgui/selinux/__init__.py b/lib/python2.7/site-packages/setoolsgui/selinux/__init__.py
new file mode 100644
index 0000000..b81b031
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/selinux/__init__.py
@@ -0,0 +1,2445 @@
+# This file was automatically generated by SWIG (http://www.swig.org).
+# Version 2.0.11
+#
+# Do not make changes to this file unless you know what you are doing--modify
+# the SWIG interface file instead.
+
+
+
+
+
+from sys import version_info
+if version_info >= (2,6,0):
+ def swig_import_helper():
+ from os.path import dirname
+ import imp
+ fp = None
+ try:
+ fp, pathname, description = imp.find_module('_selinux', [dirname(__file__)])
+ except ImportError:
+ import _selinux
+ return _selinux
+ if fp is not None:
+ try:
+ _mod = imp.load_module('_selinux', fp, pathname, description)
+ finally:
+ fp.close()
+ return _mod
+ _selinux = swig_import_helper()
+ del swig_import_helper
+else:
+ import _selinux
+del version_info
+try:
+ _swig_property = property
+except NameError:
+ pass # Python < 2.2 doesn't have 'property'.
+def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
+ if (name == "thisown"): return self.this.own(value)
+ if (name == "this"):
+ if type(value).__name__ == 'SwigPyObject':
+ self.__dict__[name] = value
+ return
+ method = class_type.__swig_setmethods__.get(name,None)
+ if method: return method(self,value)
+ if (not static):
+ self.__dict__[name] = value
+ else:
+ raise AttributeError("You cannot add attributes to %s" % self)
+
+def _swig_setattr(self,class_type,name,value):
+ return _swig_setattr_nondynamic(self,class_type,name,value,0)
+
+def _swig_getattr(self,class_type,name):
+ if (name == "thisown"): return self.this.own()
+ method = class_type.__swig_getmethods__.get(name,None)
+ if method: return method(self)
+ raise AttributeError(name)
+
+def _swig_repr(self):
+ try: strthis = "proxy of " + self.this.__repr__()
+ except: strthis = ""
+ return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
+
+try:
+ _object = object
+ _newclass = 1
+except AttributeError:
+ class _object : pass
+ _newclass = 0
+
+
+import shutil, os, stat
+
+DISABLED = -1
+PERMISSIVE = 0
+ENFORCING = 1
+
+def restorecon(path, recursive=False):
+ """ Restore SELinux context on a given path """
+
+ try:
+ mode = os.lstat(path)[stat.ST_MODE]
+ status, context = matchpathcon(path, mode)
+ except OSError:
+ path = os.path.realpath(os.path.expanduser(path))
+ mode = os.lstat(path)[stat.ST_MODE]
+ status, context = matchpathcon(path, mode)
+
+ if status == 0:
+ status, oldcontext = lgetfilecon(path)
+ if context != oldcontext:
+ lsetfilecon(path, context)
+
+ if recursive:
+ for root, dirs, files in os.walk(path):
+ for name in files + dirs:
+ restorecon(os.path.join(root, name))
+
+def chcon(path, context, recursive=False):
+ """ Set the SELinux context on a given path """
+ lsetfilecon(path, context)
+ if recursive:
+ for root, dirs, files in os.walk(path):
+ for name in files + dirs:
+ lsetfilecon(os.path.join(root,name), context)
+
+def copytree(src, dest):
+ """ An SELinux-friendly shutil.copytree method """
+ shutil.copytree(src, dest)
+ restorecon(dest, recursive=True)
+
+def install(src, dest):
+ """ An SELinux-friendly shutil.move method """
+ shutil.move(src, dest)
+ restorecon(dest, recursive=True)
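+
+# Usage sketch (illustrative only; requires an SELinux-enabled system and
+# sufficient privileges; the paths and context shown are hypothetical):
+#
+#     >>> import selinux
+#     >>> selinux.restorecon('/var/www', recursive=True)  # doctest: +SKIP
+#     >>> selinux.chcon('/srv/web',
+#     ...               'system_u:object_r:httpd_sys_content_t:s0')  # doctest: +SKIP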
+
+class security_id(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, security_id, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, security_id, name)
+ __repr__ = _swig_repr
+ __swig_setmethods__["ctx"] = _selinux.security_id_ctx_set
+ __swig_getmethods__["ctx"] = _selinux.security_id_ctx_get
+ if _newclass:ctx = _swig_property(_selinux.security_id_ctx_get, _selinux.security_id_ctx_set)
+ __swig_setmethods__["refcnt"] = _selinux.security_id_refcnt_set
+ __swig_getmethods__["refcnt"] = _selinux.security_id_refcnt_get
+ if _newclass:refcnt = _swig_property(_selinux.security_id_refcnt_get, _selinux.security_id_refcnt_set)
+ def __init__(self):
+ this = _selinux.new_security_id()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _selinux.delete_security_id
+ __del__ = lambda self : None;
+security_id_swigregister = _selinux.security_id_swigregister
+security_id_swigregister(security_id)
+
+
+def avc_sid_to_context(*args):
+ return _selinux.avc_sid_to_context(*args)
+avc_sid_to_context = _selinux.avc_sid_to_context
+
+def avc_sid_to_context_raw(*args):
+ return _selinux.avc_sid_to_context_raw(*args)
+avc_sid_to_context_raw = _selinux.avc_sid_to_context_raw
+
+def avc_context_to_sid(*args):
+ return _selinux.avc_context_to_sid(*args)
+avc_context_to_sid = _selinux.avc_context_to_sid
+
+def avc_context_to_sid_raw(*args):
+ return _selinux.avc_context_to_sid_raw(*args)
+avc_context_to_sid_raw = _selinux.avc_context_to_sid_raw
+
+def sidget(*args):
+ return _selinux.sidget(*args)
+sidget = _selinux.sidget
+
+def sidput(*args):
+ return _selinux.sidput(*args)
+sidput = _selinux.sidput
+
+def avc_get_initial_sid(*args):
+ return _selinux.avc_get_initial_sid(*args)
+avc_get_initial_sid = _selinux.avc_get_initial_sid
+class avc_entry_ref(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, avc_entry_ref, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, avc_entry_ref, name)
+ __repr__ = _swig_repr
+ __swig_setmethods__["ae"] = _selinux.avc_entry_ref_ae_set
+ __swig_getmethods__["ae"] = _selinux.avc_entry_ref_ae_get
+ if _newclass:ae = _swig_property(_selinux.avc_entry_ref_ae_get, _selinux.avc_entry_ref_ae_set)
+ def __init__(self):
+ this = _selinux.new_avc_entry_ref()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _selinux.delete_avc_entry_ref
+ __del__ = lambda self : None;
+avc_entry_ref_swigregister = _selinux.avc_entry_ref_swigregister
+avc_entry_ref_swigregister(avc_entry_ref)
+
+class avc_memory_callback(_object):
+    __swig_setmethods__ = {}
+    __setattr__ = lambda self, name, value: _swig_setattr(self, avc_memory_callback, name, value)
+    __swig_getmethods__ = {}
+    __getattr__ = lambda self, name: _swig_getattr(self, avc_memory_callback, name)
+    __repr__ = _swig_repr
+    __swig_setmethods__["func_malloc"] = _selinux.avc_memory_callback_func_malloc_set
+    __swig_getmethods__["func_malloc"] = _selinux.avc_memory_callback_func_malloc_get
+    if _newclass:func_malloc = _swig_property(_selinux.avc_memory_callback_func_malloc_get, _selinux.avc_memory_callback_func_malloc_set)
+    __swig_setmethods__["func_free"] = _selinux.avc_memory_callback_func_free_set
+    __swig_getmethods__["func_free"] = _selinux.avc_memory_callback_func_free_get
+    if _newclass:func_free = _swig_property(_selinux.avc_memory_callback_func_free_get, _selinux.avc_memory_callback_func_free_set)
+    def __init__(self):
+        this = _selinux.new_avc_memory_callback()
+        try: self.this.append(this)
+        except: self.this = this
+    __swig_destroy__ = _selinux.delete_avc_memory_callback
+    __del__ = lambda self : None;
+avc_memory_callback_swigregister = _selinux.avc_memory_callback_swigregister
+avc_memory_callback_swigregister(avc_memory_callback)
+
+class avc_log_callback(_object):
+    __swig_setmethods__ = {}
+    __setattr__ = lambda self, name, value: _swig_setattr(self, avc_log_callback, name, value)
+    __swig_getmethods__ = {}
+    __getattr__ = lambda self, name: _swig_getattr(self, avc_log_callback, name)
+    __repr__ = _swig_repr
+    __swig_setmethods__["func_log"] = _selinux.avc_log_callback_func_log_set
+    __swig_getmethods__["func_log"] = _selinux.avc_log_callback_func_log_get
+    if _newclass:func_log = _swig_property(_selinux.avc_log_callback_func_log_get, _selinux.avc_log_callback_func_log_set)
+    __swig_setmethods__["func_audit"] = _selinux.avc_log_callback_func_audit_set
+    __swig_getmethods__["func_audit"] = _selinux.avc_log_callback_func_audit_get
+    if _newclass:func_audit = _swig_property(_selinux.avc_log_callback_func_audit_get, _selinux.avc_log_callback_func_audit_set)
+    def __init__(self):
+        this = _selinux.new_avc_log_callback()
+        try: self.this.append(this)
+        except: self.this = this
+    __swig_destroy__ = _selinux.delete_avc_log_callback
+    __del__ = lambda self : None;
+avc_log_callback_swigregister = _selinux.avc_log_callback_swigregister
+avc_log_callback_swigregister(avc_log_callback)
+
+class avc_thread_callback(_object):
+    __swig_setmethods__ = {}
+    __setattr__ = lambda self, name, value: _swig_setattr(self, avc_thread_callback, name, value)
+    __swig_getmethods__ = {}
+    __getattr__ = lambda self, name: _swig_getattr(self, avc_thread_callback, name)
+    __repr__ = _swig_repr
+    __swig_setmethods__["func_create_thread"] = _selinux.avc_thread_callback_func_create_thread_set
+    __swig_getmethods__["func_create_thread"] = _selinux.avc_thread_callback_func_create_thread_get
+    if _newclass:func_create_thread = _swig_property(_selinux.avc_thread_callback_func_create_thread_get, _selinux.avc_thread_callback_func_create_thread_set)
+    __swig_setmethods__["func_stop_thread"] = _selinux.avc_thread_callback_func_stop_thread_set
+    __swig_getmethods__["func_stop_thread"] = _selinux.avc_thread_callback_func_stop_thread_get
+    if _newclass:func_stop_thread = _swig_property(_selinux.avc_thread_callback_func_stop_thread_get, _selinux.avc_thread_callback_func_stop_thread_set)
+    def __init__(self):
+        this = _selinux.new_avc_thread_callback()
+        try: self.this.append(this)
+        except: self.this = this
+    __swig_destroy__ = _selinux.delete_avc_thread_callback
+    __del__ = lambda self : None;
+avc_thread_callback_swigregister = _selinux.avc_thread_callback_swigregister
+avc_thread_callback_swigregister(avc_thread_callback)
+
+class avc_lock_callback(_object):
+    __swig_setmethods__ = {}
+    __setattr__ = lambda self, name, value: _swig_setattr(self, avc_lock_callback, name, value)
+    __swig_getmethods__ = {}
+    __getattr__ = lambda self, name: _swig_getattr(self, avc_lock_callback, name)
+    __repr__ = _swig_repr
+    __swig_setmethods__["func_alloc_lock"] = _selinux.avc_lock_callback_func_alloc_lock_set
+    __swig_getmethods__["func_alloc_lock"] = _selinux.avc_lock_callback_func_alloc_lock_get
+    if _newclass:func_alloc_lock = _swig_property(_selinux.avc_lock_callback_func_alloc_lock_get, _selinux.avc_lock_callback_func_alloc_lock_set)
+    __swig_setmethods__["func_get_lock"] = _selinux.avc_lock_callback_func_get_lock_set
+    __swig_getmethods__["func_get_lock"] = _selinux.avc_lock_callback_func_get_lock_get
+    if _newclass:func_get_lock = _swig_property(_selinux.avc_lock_callback_func_get_lock_get, _selinux.avc_lock_callback_func_get_lock_set)
+    __swig_setmethods__["func_release_lock"] = _selinux.avc_lock_callback_func_release_lock_set
+    __swig_getmethods__["func_release_lock"] = _selinux.avc_lock_callback_func_release_lock_get
+    if _newclass:func_release_lock = _swig_property(_selinux.avc_lock_callback_func_release_lock_get, _selinux.avc_lock_callback_func_release_lock_set)
+    __swig_setmethods__["func_free_lock"] = _selinux.avc_lock_callback_func_free_lock_set
+    __swig_getmethods__["func_free_lock"] = _selinux.avc_lock_callback_func_free_lock_get
+    if _newclass:func_free_lock = _swig_property(_selinux.avc_lock_callback_func_free_lock_get, _selinux.avc_lock_callback_func_free_lock_set)
+    def __init__(self):
+        this = _selinux.new_avc_lock_callback()
+        try: self.this.append(this)
+        except: self.this = this
+    __swig_destroy__ = _selinux.delete_avc_lock_callback
+    __del__ = lambda self : None;
+avc_lock_callback_swigregister = _selinux.avc_lock_callback_swigregister
+avc_lock_callback_swigregister(avc_lock_callback)
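The four callback structs above mirror the C-side hooks that avc_init accepts for memory management, logging, threading, and locking. Their fields are C function pointers, so they are not practically settable from Python; the usual route, sketched here as an assumption rather than a recipe, is to pass None and let libselinux install its defaults (malloc/free, stderr logging, single-threaded, no locking):

    import selinux

    # "myapp" is a hypothetical message prefix; None selects built-in callbacks.
    selinux.avc_init("myapp", None, None, None, None)
    selinux.avc_destroy()
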
+
+AVC_OPT_UNUSED = _selinux.AVC_OPT_UNUSED
+AVC_OPT_SETENFORCE = _selinux.AVC_OPT_SETENFORCE
+
+def avc_init(*args):
+    return _selinux.avc_init(*args)
+avc_init = _selinux.avc_init
+
+def avc_open(*args):
+    return _selinux.avc_open(*args)
+avc_open = _selinux.avc_open
+
+def avc_cleanup():
+    return _selinux.avc_cleanup()
+avc_cleanup = _selinux.avc_cleanup
+
+def avc_reset():
+    return _selinux.avc_reset()
+avc_reset = _selinux.avc_reset
+
+def avc_destroy():
+    return _selinux.avc_destroy()
+avc_destroy = _selinux.avc_destroy
+
+def avc_has_perm_noaudit(*args):
+    return _selinux.avc_has_perm_noaudit(*args)
+avc_has_perm_noaudit = _selinux.avc_has_perm_noaudit
+
+def avc_has_perm(*args):
+    return _selinux.avc_has_perm(*args)
+avc_has_perm = _selinux.avc_has_perm
+
+def avc_audit(*args):
+    return _selinux.avc_audit(*args)
+avc_audit = _selinux.avc_audit
+
+def avc_compute_create(*args):
+    return _selinux.avc_compute_create(*args)
+avc_compute_create = _selinux.avc_compute_create
+
+def avc_compute_member(*args):
+    return _selinux.avc_compute_member(*args)
+avc_compute_member = _selinux.avc_compute_member
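Taken together, the functions above support the classic userspace object-manager flow: open the AVC, translate source and target contexts to SIDs, then ask avc_has_perm whether an access vector is granted. A minimal sketch, assuming None is accepted for the optional audit data, that string_to_security_class (wrapped elsewhere in this module) resolves class names, and that a return of 0 means the access is granted:

    import selinux

    selinux.avc_open(None, 0)
    rc, ssid = selinux.avc_context_to_sid("system_u:system_r:init_t:s0")
    rc, tsid = selinux.avc_context_to_sid("system_u:object_r:etc_t:s0")

    tclass = selinux.string_to_security_class("file")
    aeref = selinux.avc_entry_ref()     # caches the AVC entry across calls
    rc = selinux.avc_has_perm(ssid, tsid, tclass, selinux.FILE__READ,
                              aeref, None)
    print("granted" if rc == 0 else "denied")
    selinux.avc_destroy()
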
+AVC_CALLBACK_GRANT = _selinux.AVC_CALLBACK_GRANT
+AVC_CALLBACK_TRY_REVOKE = _selinux.AVC_CALLBACK_TRY_REVOKE
+AVC_CALLBACK_REVOKE = _selinux.AVC_CALLBACK_REVOKE
+AVC_CALLBACK_RESET = _selinux.AVC_CALLBACK_RESET
+AVC_CALLBACK_AUDITALLOW_ENABLE = _selinux.AVC_CALLBACK_AUDITALLOW_ENABLE
+AVC_CALLBACK_AUDITALLOW_DISABLE = _selinux.AVC_CALLBACK_AUDITALLOW_DISABLE
+AVC_CALLBACK_AUDITDENY_ENABLE = _selinux.AVC_CALLBACK_AUDITDENY_ENABLE
+AVC_CALLBACK_AUDITDENY_DISABLE = _selinux.AVC_CALLBACK_AUDITDENY_DISABLE
+AVC_CACHE_STATS = _selinux.AVC_CACHE_STATS
+class avc_cache_stats(_object):
+    __swig_setmethods__ = {}
+    __setattr__ = lambda self, name, value: _swig_setattr(self, avc_cache_stats, name, value)
+    __swig_getmethods__ = {}
+    __getattr__ = lambda self, name: _swig_getattr(self, avc_cache_stats, name)
+    __repr__ = _swig_repr
+    __swig_setmethods__["entry_lookups"] = _selinux.avc_cache_stats_entry_lookups_set
+    __swig_getmethods__["entry_lookups"] = _selinux.avc_cache_stats_entry_lookups_get
+    if _newclass:entry_lookups = _swig_property(_selinux.avc_cache_stats_entry_lookups_get, _selinux.avc_cache_stats_entry_lookups_set)
+    __swig_setmethods__["entry_hits"] = _selinux.avc_cache_stats_entry_hits_set
+    __swig_getmethods__["entry_hits"] = _selinux.avc_cache_stats_entry_hits_get
+    if _newclass:entry_hits = _swig_property(_selinux.avc_cache_stats_entry_hits_get, _selinux.avc_cache_stats_entry_hits_set)
+    __swig_setmethods__["entry_misses"] = _selinux.avc_cache_stats_entry_misses_set
+    __swig_getmethods__["entry_misses"] = _selinux.avc_cache_stats_entry_misses_get
+    if _newclass:entry_misses = _swig_property(_selinux.avc_cache_stats_entry_misses_get, _selinux.avc_cache_stats_entry_misses_set)
+    __swig_setmethods__["entry_discards"] = _selinux.avc_cache_stats_entry_discards_set
+    __swig_getmethods__["entry_discards"] = _selinux.avc_cache_stats_entry_discards_get
+    if _newclass:entry_discards = _swig_property(_selinux.avc_cache_stats_entry_discards_get, _selinux.avc_cache_stats_entry_discards_set)
+    __swig_setmethods__["cav_lookups"] = _selinux.avc_cache_stats_cav_lookups_set
+    __swig_getmethods__["cav_lookups"] = _selinux.avc_cache_stats_cav_lookups_get
+    if _newclass:cav_lookups = _swig_property(_selinux.avc_cache_stats_cav_lookups_get, _selinux.avc_cache_stats_cav_lookups_set)
+    __swig_setmethods__["cav_hits"] = _selinux.avc_cache_stats_cav_hits_set
+    __swig_getmethods__["cav_hits"] = _selinux.avc_cache_stats_cav_hits_get
+    if _newclass:cav_hits = _swig_property(_selinux.avc_cache_stats_cav_hits_get, _selinux.avc_cache_stats_cav_hits_set)
+    __swig_setmethods__["cav_probes"] = _selinux.avc_cache_stats_cav_probes_set
+    __swig_getmethods__["cav_probes"] = _selinux.avc_cache_stats_cav_probes_get
+    if _newclass:cav_probes = _swig_property(_selinux.avc_cache_stats_cav_probes_get, _selinux.avc_cache_stats_cav_probes_set)
+    __swig_setmethods__["cav_misses"] = _selinux.avc_cache_stats_cav_misses_set
+    __swig_getmethods__["cav_misses"] = _selinux.avc_cache_stats_cav_misses_get
+    if _newclass:cav_misses = _swig_property(_selinux.avc_cache_stats_cav_misses_get, _selinux.avc_cache_stats_cav_misses_set)
+    def __init__(self):
+        this = _selinux.new_avc_cache_stats()
+        try: self.this.append(this)
+        except: self.this = this
+    __swig_destroy__ = _selinux.delete_avc_cache_stats
+    __del__ = lambda self : None;
+avc_cache_stats_swigregister = _selinux.avc_cache_stats_swigregister
+avc_cache_stats_swigregister(avc_cache_stats)
+
+
+def avc_av_stats():
+    return _selinux.avc_av_stats()
+avc_av_stats = _selinux.avc_av_stats
+
+def avc_sid_stats():
+    return _selinux.avc_sid_stats()
+avc_sid_stats = _selinux.avc_sid_stats
+
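avc_av_stats and avc_sid_stats emit cache-utilization summaries (the counters collected in avc_cache_stats above) through the AVC log callback rather than returning them to the caller. A minimal sketch, assuming the AVC is already open:

    import selinux

    selinux.avc_open(None, 0)
    selinux.avc_av_stats()    # access-vector cache summary, via the log callback
    selinux.avc_sid_stats()   # SID table summary
    selinux.avc_destroy()
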
+def avc_netlink_open(*args):
+    return _selinux.avc_netlink_open(*args)
+avc_netlink_open = _selinux.avc_netlink_open
+
+def avc_netlink_loop():
+    return _selinux.avc_netlink_loop()
+avc_netlink_loop = _selinux.avc_netlink_loop
+
+def avc_netlink_close():
+    return _selinux.avc_netlink_close()
+avc_netlink_close = _selinux.avc_netlink_close
+
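The netlink trio gives direct access to the kernel's SELinux notification socket: open it, pump events (setenforce toggles and policy reloads) with the blocking loop, and close it. A sketch, assuming the integer argument to avc_netlink_open selects blocking mode:

    import selinux

    selinux.avc_netlink_open(1)    # 1 = blocking socket (assumed meaning)
    # selinux.avc_netlink_loop()   # would block here, dispatching events
    selinux.avc_netlink_close()
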
+def selinux_status_open(*args):
+    return _selinux.selinux_status_open(*args)
+selinux_status_open = _selinux.selinux_status_open
+
+def selinux_status_close():
+    return _selinux.selinux_status_close()
+selinux_status_close = _selinux.selinux_status_close
+
+def selinux_status_updated():
+    return _selinux.selinux_status_updated()
+selinux_status_updated = _selinux.selinux_status_updated
+
+def selinux_status_getenforce():
+    return _selinux.selinux_status_getenforce()
+selinux_status_getenforce = _selinux.selinux_status_getenforce
+
+def selinux_status_policyload():
+    return _selinux.selinux_status_policyload()
+selinux_status_policyload = _selinux.selinux_status_policyload
+
+def selinux_status_deny_unknown():
+    return _selinux.selinux_status_deny_unknown()
+selinux_status_deny_unknown = _selinux.selinux_status_deny_unknown
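selinux_status_open maps the kernel status page so the remaining calls can poll enforcing state and policy-reload counts without a system call per query. A sketch, assuming a nonzero argument requests netlink fallback when the status page is unavailable and a non-negative return means success:

    import selinux

    if selinux.selinux_status_open(1) >= 0:
        print(selinux.selinux_status_getenforce())      # 1 enforcing, 0 permissive
        if selinux.selinux_status_updated():            # anything changed?
            print(selinux.selinux_status_policyload())  # policy reload counter
        selinux.selinux_status_close()
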
+COMMON_FILE__IOCTL = _selinux.COMMON_FILE__IOCTL
+COMMON_FILE__READ = _selinux.COMMON_FILE__READ
+COMMON_FILE__WRITE = _selinux.COMMON_FILE__WRITE
+COMMON_FILE__CREATE = _selinux.COMMON_FILE__CREATE
+COMMON_FILE__GETATTR = _selinux.COMMON_FILE__GETATTR
+COMMON_FILE__SETATTR = _selinux.COMMON_FILE__SETATTR
+COMMON_FILE__LOCK = _selinux.COMMON_FILE__LOCK
+COMMON_FILE__RELABELFROM = _selinux.COMMON_FILE__RELABELFROM
+COMMON_FILE__RELABELTO = _selinux.COMMON_FILE__RELABELTO
+COMMON_FILE__APPEND = _selinux.COMMON_FILE__APPEND
+COMMON_FILE__UNLINK = _selinux.COMMON_FILE__UNLINK
+COMMON_FILE__LINK = _selinux.COMMON_FILE__LINK
+COMMON_FILE__RENAME = _selinux.COMMON_FILE__RENAME
+COMMON_FILE__EXECUTE = _selinux.COMMON_FILE__EXECUTE
+COMMON_FILE__SWAPON = _selinux.COMMON_FILE__SWAPON
+COMMON_FILE__QUOTAON = _selinux.COMMON_FILE__QUOTAON
+COMMON_FILE__MOUNTON = _selinux.COMMON_FILE__MOUNTON
+COMMON_SOCKET__IOCTL = _selinux.COMMON_SOCKET__IOCTL
+COMMON_SOCKET__READ = _selinux.COMMON_SOCKET__READ
+COMMON_SOCKET__WRITE = _selinux.COMMON_SOCKET__WRITE
+COMMON_SOCKET__CREATE = _selinux.COMMON_SOCKET__CREATE
+COMMON_SOCKET__GETATTR = _selinux.COMMON_SOCKET__GETATTR
+COMMON_SOCKET__SETATTR = _selinux.COMMON_SOCKET__SETATTR
+COMMON_SOCKET__LOCK = _selinux.COMMON_SOCKET__LOCK
+COMMON_SOCKET__RELABELFROM = _selinux.COMMON_SOCKET__RELABELFROM
+COMMON_SOCKET__RELABELTO = _selinux.COMMON_SOCKET__RELABELTO
+COMMON_SOCKET__APPEND = _selinux.COMMON_SOCKET__APPEND
+COMMON_SOCKET__BIND = _selinux.COMMON_SOCKET__BIND
+COMMON_SOCKET__CONNECT = _selinux.COMMON_SOCKET__CONNECT
+COMMON_SOCKET__LISTEN = _selinux.COMMON_SOCKET__LISTEN
+COMMON_SOCKET__ACCEPT = _selinux.COMMON_SOCKET__ACCEPT
+COMMON_SOCKET__GETOPT = _selinux.COMMON_SOCKET__GETOPT
+COMMON_SOCKET__SETOPT = _selinux.COMMON_SOCKET__SETOPT
+COMMON_SOCKET__SHUTDOWN = _selinux.COMMON_SOCKET__SHUTDOWN
+COMMON_SOCKET__RECVFROM = _selinux.COMMON_SOCKET__RECVFROM
+COMMON_SOCKET__SENDTO = _selinux.COMMON_SOCKET__SENDTO
+COMMON_SOCKET__RECV_MSG = _selinux.COMMON_SOCKET__RECV_MSG
+COMMON_SOCKET__SEND_MSG = _selinux.COMMON_SOCKET__SEND_MSG
+COMMON_SOCKET__NAME_BIND = _selinux.COMMON_SOCKET__NAME_BIND
+COMMON_IPC__CREATE = _selinux.COMMON_IPC__CREATE
+COMMON_IPC__DESTROY = _selinux.COMMON_IPC__DESTROY
+COMMON_IPC__GETATTR = _selinux.COMMON_IPC__GETATTR
+COMMON_IPC__SETATTR = _selinux.COMMON_IPC__SETATTR
+COMMON_IPC__READ = _selinux.COMMON_IPC__READ
+COMMON_IPC__WRITE = _selinux.COMMON_IPC__WRITE
+COMMON_IPC__ASSOCIATE = _selinux.COMMON_IPC__ASSOCIATE
+COMMON_IPC__UNIX_READ = _selinux.COMMON_IPC__UNIX_READ
+COMMON_IPC__UNIX_WRITE = _selinux.COMMON_IPC__UNIX_WRITE
+COMMON_DATABASE__CREATE = _selinux.COMMON_DATABASE__CREATE
+COMMON_DATABASE__DROP = _selinux.COMMON_DATABASE__DROP
+COMMON_DATABASE__GETATTR = _selinux.COMMON_DATABASE__GETATTR
+COMMON_DATABASE__SETATTR = _selinux.COMMON_DATABASE__SETATTR
+COMMON_DATABASE__RELABELFROM = _selinux.COMMON_DATABASE__RELABELFROM
+COMMON_DATABASE__RELABELTO = _selinux.COMMON_DATABASE__RELABELTO
+FILESYSTEM__MOUNT = _selinux.FILESYSTEM__MOUNT
+FILESYSTEM__REMOUNT = _selinux.FILESYSTEM__REMOUNT
+FILESYSTEM__UNMOUNT = _selinux.FILESYSTEM__UNMOUNT
+FILESYSTEM__GETATTR = _selinux.FILESYSTEM__GETATTR
+FILESYSTEM__RELABELFROM = _selinux.FILESYSTEM__RELABELFROM
+FILESYSTEM__RELABELTO = _selinux.FILESYSTEM__RELABELTO
+FILESYSTEM__TRANSITION = _selinux.FILESYSTEM__TRANSITION
+FILESYSTEM__ASSOCIATE = _selinux.FILESYSTEM__ASSOCIATE
+FILESYSTEM__QUOTAMOD = _selinux.FILESYSTEM__QUOTAMOD
+FILESYSTEM__QUOTAGET = _selinux.FILESYSTEM__QUOTAGET
+DIR__IOCTL = _selinux.DIR__IOCTL
+DIR__READ = _selinux.DIR__READ
+DIR__WRITE = _selinux.DIR__WRITE
+DIR__CREATE = _selinux.DIR__CREATE
+DIR__GETATTR = _selinux.DIR__GETATTR
+DIR__SETATTR = _selinux.DIR__SETATTR
+DIR__LOCK = _selinux.DIR__LOCK
+DIR__RELABELFROM = _selinux.DIR__RELABELFROM
+DIR__RELABELTO = _selinux.DIR__RELABELTO
+DIR__APPEND = _selinux.DIR__APPEND
+DIR__UNLINK = _selinux.DIR__UNLINK
+DIR__LINK = _selinux.DIR__LINK
+DIR__RENAME = _selinux.DIR__RENAME
+DIR__EXECUTE = _selinux.DIR__EXECUTE
+DIR__SWAPON = _selinux.DIR__SWAPON
+DIR__QUOTAON = _selinux.DIR__QUOTAON
+DIR__MOUNTON = _selinux.DIR__MOUNTON
+DIR__ADD_NAME = _selinux.DIR__ADD_NAME
+DIR__REMOVE_NAME = _selinux.DIR__REMOVE_NAME
+DIR__REPARENT = _selinux.DIR__REPARENT
+DIR__SEARCH = _selinux.DIR__SEARCH
+DIR__RMDIR = _selinux.DIR__RMDIR
+DIR__OPEN = _selinux.DIR__OPEN
+FILE__IOCTL = _selinux.FILE__IOCTL
+FILE__READ = _selinux.FILE__READ
+FILE__WRITE = _selinux.FILE__WRITE
+FILE__CREATE = _selinux.FILE__CREATE
+FILE__GETATTR = _selinux.FILE__GETATTR
+FILE__SETATTR = _selinux.FILE__SETATTR
+FILE__LOCK = _selinux.FILE__LOCK
+FILE__RELABELFROM = _selinux.FILE__RELABELFROM
+FILE__RELABELTO = _selinux.FILE__RELABELTO
+FILE__APPEND = _selinux.FILE__APPEND
+FILE__UNLINK = _selinux.FILE__UNLINK
+FILE__LINK = _selinux.FILE__LINK
+FILE__RENAME = _selinux.FILE__RENAME
+FILE__EXECUTE = _selinux.FILE__EXECUTE
+FILE__SWAPON = _selinux.FILE__SWAPON
+FILE__QUOTAON = _selinux.FILE__QUOTAON
+FILE__MOUNTON = _selinux.FILE__MOUNTON
+FILE__EXECUTE_NO_TRANS = _selinux.FILE__EXECUTE_NO_TRANS
+FILE__ENTRYPOINT = _selinux.FILE__ENTRYPOINT
+FILE__EXECMOD = _selinux.FILE__EXECMOD
+FILE__OPEN = _selinux.FILE__OPEN
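The CLASS__PERMISSION constants that fill the rest of this file are per-class access-vector bits, so several permissions can be OR-ed into a single request. For example, hedged the same way as the avc_has_perm sketch above:

    import selinux

    # Ask for read, getattr and open on a file object in one AVC query.
    requested = selinux.FILE__READ | selinux.FILE__GETATTR | selinux.FILE__OPEN
    # rc = selinux.avc_has_perm(ssid, tsid, tclass, requested, aeref, None)
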
+LNK_FILE__IOCTL = _selinux.LNK_FILE__IOCTL
+LNK_FILE__READ = _selinux.LNK_FILE__READ
+LNK_FILE__WRITE = _selinux.LNK_FILE__WRITE
+LNK_FILE__CREATE = _selinux.LNK_FILE__CREATE
+LNK_FILE__GETATTR = _selinux.LNK_FILE__GETATTR
+LNK_FILE__SETATTR = _selinux.LNK_FILE__SETATTR
+LNK_FILE__LOCK = _selinux.LNK_FILE__LOCK
+LNK_FILE__RELABELFROM = _selinux.LNK_FILE__RELABELFROM
+LNK_FILE__RELABELTO = _selinux.LNK_FILE__RELABELTO
+LNK_FILE__APPEND = _selinux.LNK_FILE__APPEND
+LNK_FILE__UNLINK = _selinux.LNK_FILE__UNLINK
+LNK_FILE__LINK = _selinux.LNK_FILE__LINK
+LNK_FILE__RENAME = _selinux.LNK_FILE__RENAME
+LNK_FILE__EXECUTE = _selinux.LNK_FILE__EXECUTE
+LNK_FILE__SWAPON = _selinux.LNK_FILE__SWAPON
+LNK_FILE__QUOTAON = _selinux.LNK_FILE__QUOTAON
+LNK_FILE__MOUNTON = _selinux.LNK_FILE__MOUNTON
+CHR_FILE__IOCTL = _selinux.CHR_FILE__IOCTL
+CHR_FILE__READ = _selinux.CHR_FILE__READ
+CHR_FILE__WRITE = _selinux.CHR_FILE__WRITE
+CHR_FILE__CREATE = _selinux.CHR_FILE__CREATE
+CHR_FILE__GETATTR = _selinux.CHR_FILE__GETATTR
+CHR_FILE__SETATTR = _selinux.CHR_FILE__SETATTR
+CHR_FILE__LOCK = _selinux.CHR_FILE__LOCK
+CHR_FILE__RELABELFROM = _selinux.CHR_FILE__RELABELFROM
+CHR_FILE__RELABELTO = _selinux.CHR_FILE__RELABELTO
+CHR_FILE__APPEND = _selinux.CHR_FILE__APPEND
+CHR_FILE__UNLINK = _selinux.CHR_FILE__UNLINK
+CHR_FILE__LINK = _selinux.CHR_FILE__LINK
+CHR_FILE__RENAME = _selinux.CHR_FILE__RENAME
+CHR_FILE__EXECUTE = _selinux.CHR_FILE__EXECUTE
+CHR_FILE__SWAPON = _selinux.CHR_FILE__SWAPON
+CHR_FILE__QUOTAON = _selinux.CHR_FILE__QUOTAON
+CHR_FILE__MOUNTON = _selinux.CHR_FILE__MOUNTON
+CHR_FILE__EXECUTE_NO_TRANS = _selinux.CHR_FILE__EXECUTE_NO_TRANS
+CHR_FILE__ENTRYPOINT = _selinux.CHR_FILE__ENTRYPOINT
+CHR_FILE__EXECMOD = _selinux.CHR_FILE__EXECMOD
+CHR_FILE__OPEN = _selinux.CHR_FILE__OPEN
+BLK_FILE__IOCTL = _selinux.BLK_FILE__IOCTL
+BLK_FILE__READ = _selinux.BLK_FILE__READ
+BLK_FILE__WRITE = _selinux.BLK_FILE__WRITE
+BLK_FILE__CREATE = _selinux.BLK_FILE__CREATE
+BLK_FILE__GETATTR = _selinux.BLK_FILE__GETATTR
+BLK_FILE__SETATTR = _selinux.BLK_FILE__SETATTR
+BLK_FILE__LOCK = _selinux.BLK_FILE__LOCK
+BLK_FILE__RELABELFROM = _selinux.BLK_FILE__RELABELFROM
+BLK_FILE__RELABELTO = _selinux.BLK_FILE__RELABELTO
+BLK_FILE__APPEND = _selinux.BLK_FILE__APPEND
+BLK_FILE__UNLINK = _selinux.BLK_FILE__UNLINK
+BLK_FILE__LINK = _selinux.BLK_FILE__LINK
+BLK_FILE__RENAME = _selinux.BLK_FILE__RENAME
+BLK_FILE__EXECUTE = _selinux.BLK_FILE__EXECUTE
+BLK_FILE__SWAPON = _selinux.BLK_FILE__SWAPON
+BLK_FILE__QUOTAON = _selinux.BLK_FILE__QUOTAON
+BLK_FILE__MOUNTON = _selinux.BLK_FILE__MOUNTON
+BLK_FILE__OPEN = _selinux.BLK_FILE__OPEN
+SOCK_FILE__IOCTL = _selinux.SOCK_FILE__IOCTL
+SOCK_FILE__READ = _selinux.SOCK_FILE__READ
+SOCK_FILE__WRITE = _selinux.SOCK_FILE__WRITE
+SOCK_FILE__CREATE = _selinux.SOCK_FILE__CREATE
+SOCK_FILE__GETATTR = _selinux.SOCK_FILE__GETATTR
+SOCK_FILE__SETATTR = _selinux.SOCK_FILE__SETATTR
+SOCK_FILE__LOCK = _selinux.SOCK_FILE__LOCK
+SOCK_FILE__RELABELFROM = _selinux.SOCK_FILE__RELABELFROM
+SOCK_FILE__RELABELTO = _selinux.SOCK_FILE__RELABELTO
+SOCK_FILE__APPEND = _selinux.SOCK_FILE__APPEND
+SOCK_FILE__UNLINK = _selinux.SOCK_FILE__UNLINK
+SOCK_FILE__LINK = _selinux.SOCK_FILE__LINK
+SOCK_FILE__RENAME = _selinux.SOCK_FILE__RENAME
+SOCK_FILE__EXECUTE = _selinux.SOCK_FILE__EXECUTE
+SOCK_FILE__SWAPON = _selinux.SOCK_FILE__SWAPON
+SOCK_FILE__QUOTAON = _selinux.SOCK_FILE__QUOTAON
+SOCK_FILE__MOUNTON = _selinux.SOCK_FILE__MOUNTON
+FIFO_FILE__IOCTL = _selinux.FIFO_FILE__IOCTL
+FIFO_FILE__READ = _selinux.FIFO_FILE__READ
+FIFO_FILE__WRITE = _selinux.FIFO_FILE__WRITE
+FIFO_FILE__CREATE = _selinux.FIFO_FILE__CREATE
+FIFO_FILE__GETATTR = _selinux.FIFO_FILE__GETATTR
+FIFO_FILE__SETATTR = _selinux.FIFO_FILE__SETATTR
+FIFO_FILE__LOCK = _selinux.FIFO_FILE__LOCK
+FIFO_FILE__RELABELFROM = _selinux.FIFO_FILE__RELABELFROM
+FIFO_FILE__RELABELTO = _selinux.FIFO_FILE__RELABELTO
+FIFO_FILE__APPEND = _selinux.FIFO_FILE__APPEND
+FIFO_FILE__UNLINK = _selinux.FIFO_FILE__UNLINK
+FIFO_FILE__LINK = _selinux.FIFO_FILE__LINK
+FIFO_FILE__RENAME = _selinux.FIFO_FILE__RENAME
+FIFO_FILE__EXECUTE = _selinux.FIFO_FILE__EXECUTE
+FIFO_FILE__SWAPON = _selinux.FIFO_FILE__SWAPON
+FIFO_FILE__QUOTAON = _selinux.FIFO_FILE__QUOTAON
+FIFO_FILE__MOUNTON = _selinux.FIFO_FILE__MOUNTON
+FIFO_FILE__OPEN = _selinux.FIFO_FILE__OPEN
+FD__USE = _selinux.FD__USE
+SOCKET__IOCTL = _selinux.SOCKET__IOCTL
+SOCKET__READ = _selinux.SOCKET__READ
+SOCKET__WRITE = _selinux.SOCKET__WRITE
+SOCKET__CREATE = _selinux.SOCKET__CREATE
+SOCKET__GETATTR = _selinux.SOCKET__GETATTR
+SOCKET__SETATTR = _selinux.SOCKET__SETATTR
+SOCKET__LOCK = _selinux.SOCKET__LOCK
+SOCKET__RELABELFROM = _selinux.SOCKET__RELABELFROM
+SOCKET__RELABELTO = _selinux.SOCKET__RELABELTO
+SOCKET__APPEND = _selinux.SOCKET__APPEND
+SOCKET__BIND = _selinux.SOCKET__BIND
+SOCKET__CONNECT = _selinux.SOCKET__CONNECT
+SOCKET__LISTEN = _selinux.SOCKET__LISTEN
+SOCKET__ACCEPT = _selinux.SOCKET__ACCEPT
+SOCKET__GETOPT = _selinux.SOCKET__GETOPT
+SOCKET__SETOPT = _selinux.SOCKET__SETOPT
+SOCKET__SHUTDOWN = _selinux.SOCKET__SHUTDOWN
+SOCKET__RECVFROM = _selinux.SOCKET__RECVFROM
+SOCKET__SENDTO = _selinux.SOCKET__SENDTO
+SOCKET__RECV_MSG = _selinux.SOCKET__RECV_MSG
+SOCKET__SEND_MSG = _selinux.SOCKET__SEND_MSG
+SOCKET__NAME_BIND = _selinux.SOCKET__NAME_BIND
+TCP_SOCKET__IOCTL = _selinux.TCP_SOCKET__IOCTL
+TCP_SOCKET__READ = _selinux.TCP_SOCKET__READ
+TCP_SOCKET__WRITE = _selinux.TCP_SOCKET__WRITE
+TCP_SOCKET__CREATE = _selinux.TCP_SOCKET__CREATE
+TCP_SOCKET__GETATTR = _selinux.TCP_SOCKET__GETATTR
+TCP_SOCKET__SETATTR = _selinux.TCP_SOCKET__SETATTR
+TCP_SOCKET__LOCK = _selinux.TCP_SOCKET__LOCK
+TCP_SOCKET__RELABELFROM = _selinux.TCP_SOCKET__RELABELFROM
+TCP_SOCKET__RELABELTO = _selinux.TCP_SOCKET__RELABELTO
+TCP_SOCKET__APPEND = _selinux.TCP_SOCKET__APPEND
+TCP_SOCKET__BIND = _selinux.TCP_SOCKET__BIND
+TCP_SOCKET__CONNECT = _selinux.TCP_SOCKET__CONNECT
+TCP_SOCKET__LISTEN = _selinux.TCP_SOCKET__LISTEN
+TCP_SOCKET__ACCEPT = _selinux.TCP_SOCKET__ACCEPT
+TCP_SOCKET__GETOPT = _selinux.TCP_SOCKET__GETOPT
+TCP_SOCKET__SETOPT = _selinux.TCP_SOCKET__SETOPT
+TCP_SOCKET__SHUTDOWN = _selinux.TCP_SOCKET__SHUTDOWN
+TCP_SOCKET__RECVFROM = _selinux.TCP_SOCKET__RECVFROM
+TCP_SOCKET__SENDTO = _selinux.TCP_SOCKET__SENDTO
+TCP_SOCKET__RECV_MSG = _selinux.TCP_SOCKET__RECV_MSG
+TCP_SOCKET__SEND_MSG = _selinux.TCP_SOCKET__SEND_MSG
+TCP_SOCKET__NAME_BIND = _selinux.TCP_SOCKET__NAME_BIND
+TCP_SOCKET__CONNECTTO = _selinux.TCP_SOCKET__CONNECTTO
+TCP_SOCKET__NEWCONN = _selinux.TCP_SOCKET__NEWCONN
+TCP_SOCKET__ACCEPTFROM = _selinux.TCP_SOCKET__ACCEPTFROM
+TCP_SOCKET__NODE_BIND = _selinux.TCP_SOCKET__NODE_BIND
+TCP_SOCKET__NAME_CONNECT = _selinux.TCP_SOCKET__NAME_CONNECT
+UDP_SOCKET__IOCTL = _selinux.UDP_SOCKET__IOCTL
+UDP_SOCKET__READ = _selinux.UDP_SOCKET__READ
+UDP_SOCKET__WRITE = _selinux.UDP_SOCKET__WRITE
+UDP_SOCKET__CREATE = _selinux.UDP_SOCKET__CREATE
+UDP_SOCKET__GETATTR = _selinux.UDP_SOCKET__GETATTR
+UDP_SOCKET__SETATTR = _selinux.UDP_SOCKET__SETATTR
+UDP_SOCKET__LOCK = _selinux.UDP_SOCKET__LOCK
+UDP_SOCKET__RELABELFROM = _selinux.UDP_SOCKET__RELABELFROM
+UDP_SOCKET__RELABELTO = _selinux.UDP_SOCKET__RELABELTO
+UDP_SOCKET__APPEND = _selinux.UDP_SOCKET__APPEND
+UDP_SOCKET__BIND = _selinux.UDP_SOCKET__BIND
+UDP_SOCKET__CONNECT = _selinux.UDP_SOCKET__CONNECT
+UDP_SOCKET__LISTEN = _selinux.UDP_SOCKET__LISTEN
+UDP_SOCKET__ACCEPT = _selinux.UDP_SOCKET__ACCEPT
+UDP_SOCKET__GETOPT = _selinux.UDP_SOCKET__GETOPT
+UDP_SOCKET__SETOPT = _selinux.UDP_SOCKET__SETOPT
+UDP_SOCKET__SHUTDOWN = _selinux.UDP_SOCKET__SHUTDOWN
+UDP_SOCKET__RECVFROM = _selinux.UDP_SOCKET__RECVFROM
+UDP_SOCKET__SENDTO = _selinux.UDP_SOCKET__SENDTO
+UDP_SOCKET__RECV_MSG = _selinux.UDP_SOCKET__RECV_MSG
+UDP_SOCKET__SEND_MSG = _selinux.UDP_SOCKET__SEND_MSG
+UDP_SOCKET__NAME_BIND = _selinux.UDP_SOCKET__NAME_BIND
+UDP_SOCKET__NODE_BIND = _selinux.UDP_SOCKET__NODE_BIND
+RAWIP_SOCKET__IOCTL = _selinux.RAWIP_SOCKET__IOCTL
+RAWIP_SOCKET__READ = _selinux.RAWIP_SOCKET__READ
+RAWIP_SOCKET__WRITE = _selinux.RAWIP_SOCKET__WRITE
+RAWIP_SOCKET__CREATE = _selinux.RAWIP_SOCKET__CREATE
+RAWIP_SOCKET__GETATTR = _selinux.RAWIP_SOCKET__GETATTR
+RAWIP_SOCKET__SETATTR = _selinux.RAWIP_SOCKET__SETATTR
+RAWIP_SOCKET__LOCK = _selinux.RAWIP_SOCKET__LOCK
+RAWIP_SOCKET__RELABELFROM = _selinux.RAWIP_SOCKET__RELABELFROM
+RAWIP_SOCKET__RELABELTO = _selinux.RAWIP_SOCKET__RELABELTO
+RAWIP_SOCKET__APPEND = _selinux.RAWIP_SOCKET__APPEND
+RAWIP_SOCKET__BIND = _selinux.RAWIP_SOCKET__BIND
+RAWIP_SOCKET__CONNECT = _selinux.RAWIP_SOCKET__CONNECT
+RAWIP_SOCKET__LISTEN = _selinux.RAWIP_SOCKET__LISTEN
+RAWIP_SOCKET__ACCEPT = _selinux.RAWIP_SOCKET__ACCEPT
+RAWIP_SOCKET__GETOPT = _selinux.RAWIP_SOCKET__GETOPT
+RAWIP_SOCKET__SETOPT = _selinux.RAWIP_SOCKET__SETOPT
+RAWIP_SOCKET__SHUTDOWN = _selinux.RAWIP_SOCKET__SHUTDOWN
+RAWIP_SOCKET__RECVFROM = _selinux.RAWIP_SOCKET__RECVFROM
+RAWIP_SOCKET__SENDTO = _selinux.RAWIP_SOCKET__SENDTO
+RAWIP_SOCKET__RECV_MSG = _selinux.RAWIP_SOCKET__RECV_MSG
+RAWIP_SOCKET__SEND_MSG = _selinux.RAWIP_SOCKET__SEND_MSG
+RAWIP_SOCKET__NAME_BIND = _selinux.RAWIP_SOCKET__NAME_BIND
+RAWIP_SOCKET__NODE_BIND = _selinux.RAWIP_SOCKET__NODE_BIND
+NODE__TCP_RECV = _selinux.NODE__TCP_RECV
+NODE__TCP_SEND = _selinux.NODE__TCP_SEND
+NODE__UDP_RECV = _selinux.NODE__UDP_RECV
+NODE__UDP_SEND = _selinux.NODE__UDP_SEND
+NODE__RAWIP_RECV = _selinux.NODE__RAWIP_RECV
+NODE__RAWIP_SEND = _selinux.NODE__RAWIP_SEND
+NODE__ENFORCE_DEST = _selinux.NODE__ENFORCE_DEST
+NODE__DCCP_RECV = _selinux.NODE__DCCP_RECV
+NODE__DCCP_SEND = _selinux.NODE__DCCP_SEND
+NODE__RECVFROM = _selinux.NODE__RECVFROM
+NODE__SENDTO = _selinux.NODE__SENDTO
+NETIF__TCP_RECV = _selinux.NETIF__TCP_RECV
+NETIF__TCP_SEND = _selinux.NETIF__TCP_SEND
+NETIF__UDP_RECV = _selinux.NETIF__UDP_RECV
+NETIF__UDP_SEND = _selinux.NETIF__UDP_SEND
+NETIF__RAWIP_RECV = _selinux.NETIF__RAWIP_RECV
+NETIF__RAWIP_SEND = _selinux.NETIF__RAWIP_SEND
+NETIF__DCCP_RECV = _selinux.NETIF__DCCP_RECV
+NETIF__DCCP_SEND = _selinux.NETIF__DCCP_SEND
+NETIF__INGRESS = _selinux.NETIF__INGRESS
+NETIF__EGRESS = _selinux.NETIF__EGRESS
+NETLINK_SOCKET__IOCTL = _selinux.NETLINK_SOCKET__IOCTL
+NETLINK_SOCKET__READ = _selinux.NETLINK_SOCKET__READ
+NETLINK_SOCKET__WRITE = _selinux.NETLINK_SOCKET__WRITE
+NETLINK_SOCKET__CREATE = _selinux.NETLINK_SOCKET__CREATE
+NETLINK_SOCKET__GETATTR = _selinux.NETLINK_SOCKET__GETATTR
+NETLINK_SOCKET__SETATTR = _selinux.NETLINK_SOCKET__SETATTR
+NETLINK_SOCKET__LOCK = _selinux.NETLINK_SOCKET__LOCK
+NETLINK_SOCKET__RELABELFROM = _selinux.NETLINK_SOCKET__RELABELFROM
+NETLINK_SOCKET__RELABELTO = _selinux.NETLINK_SOCKET__RELABELTO
+NETLINK_SOCKET__APPEND = _selinux.NETLINK_SOCKET__APPEND
+NETLINK_SOCKET__BIND = _selinux.NETLINK_SOCKET__BIND
+NETLINK_SOCKET__CONNECT = _selinux.NETLINK_SOCKET__CONNECT
+NETLINK_SOCKET__LISTEN = _selinux.NETLINK_SOCKET__LISTEN
+NETLINK_SOCKET__ACCEPT = _selinux.NETLINK_SOCKET__ACCEPT
+NETLINK_SOCKET__GETOPT = _selinux.NETLINK_SOCKET__GETOPT
+NETLINK_SOCKET__SETOPT = _selinux.NETLINK_SOCKET__SETOPT
+NETLINK_SOCKET__SHUTDOWN = _selinux.NETLINK_SOCKET__SHUTDOWN
+NETLINK_SOCKET__RECVFROM = _selinux.NETLINK_SOCKET__RECVFROM
+NETLINK_SOCKET__SENDTO = _selinux.NETLINK_SOCKET__SENDTO
+NETLINK_SOCKET__RECV_MSG = _selinux.NETLINK_SOCKET__RECV_MSG
+NETLINK_SOCKET__SEND_MSG = _selinux.NETLINK_SOCKET__SEND_MSG
+NETLINK_SOCKET__NAME_BIND = _selinux.NETLINK_SOCKET__NAME_BIND
+PACKET_SOCKET__IOCTL = _selinux.PACKET_SOCKET__IOCTL
+PACKET_SOCKET__READ = _selinux.PACKET_SOCKET__READ
+PACKET_SOCKET__WRITE = _selinux.PACKET_SOCKET__WRITE
+PACKET_SOCKET__CREATE = _selinux.PACKET_SOCKET__CREATE
+PACKET_SOCKET__GETATTR = _selinux.PACKET_SOCKET__GETATTR
+PACKET_SOCKET__SETATTR = _selinux.PACKET_SOCKET__SETATTR
+PACKET_SOCKET__LOCK = _selinux.PACKET_SOCKET__LOCK
+PACKET_SOCKET__RELABELFROM = _selinux.PACKET_SOCKET__RELABELFROM
+PACKET_SOCKET__RELABELTO = _selinux.PACKET_SOCKET__RELABELTO
+PACKET_SOCKET__APPEND = _selinux.PACKET_SOCKET__APPEND
+PACKET_SOCKET__BIND = _selinux.PACKET_SOCKET__BIND
+PACKET_SOCKET__CONNECT = _selinux.PACKET_SOCKET__CONNECT
+PACKET_SOCKET__LISTEN = _selinux.PACKET_SOCKET__LISTEN
+PACKET_SOCKET__ACCEPT = _selinux.PACKET_SOCKET__ACCEPT
+PACKET_SOCKET__GETOPT = _selinux.PACKET_SOCKET__GETOPT
+PACKET_SOCKET__SETOPT = _selinux.PACKET_SOCKET__SETOPT
+PACKET_SOCKET__SHUTDOWN = _selinux.PACKET_SOCKET__SHUTDOWN
+PACKET_SOCKET__RECVFROM = _selinux.PACKET_SOCKET__RECVFROM
+PACKET_SOCKET__SENDTO = _selinux.PACKET_SOCKET__SENDTO
+PACKET_SOCKET__RECV_MSG = _selinux.PACKET_SOCKET__RECV_MSG
+PACKET_SOCKET__SEND_MSG = _selinux.PACKET_SOCKET__SEND_MSG
+PACKET_SOCKET__NAME_BIND = _selinux.PACKET_SOCKET__NAME_BIND
+KEY_SOCKET__IOCTL = _selinux.KEY_SOCKET__IOCTL
+KEY_SOCKET__READ = _selinux.KEY_SOCKET__READ
+KEY_SOCKET__WRITE = _selinux.KEY_SOCKET__WRITE
+KEY_SOCKET__CREATE = _selinux.KEY_SOCKET__CREATE
+KEY_SOCKET__GETATTR = _selinux.KEY_SOCKET__GETATTR
+KEY_SOCKET__SETATTR = _selinux.KEY_SOCKET__SETATTR
+KEY_SOCKET__LOCK = _selinux.KEY_SOCKET__LOCK
+KEY_SOCKET__RELABELFROM = _selinux.KEY_SOCKET__RELABELFROM
+KEY_SOCKET__RELABELTO = _selinux.KEY_SOCKET__RELABELTO
+KEY_SOCKET__APPEND = _selinux.KEY_SOCKET__APPEND
+KEY_SOCKET__BIND = _selinux.KEY_SOCKET__BIND
+KEY_SOCKET__CONNECT = _selinux.KEY_SOCKET__CONNECT
+KEY_SOCKET__LISTEN = _selinux.KEY_SOCKET__LISTEN
+KEY_SOCKET__ACCEPT = _selinux.KEY_SOCKET__ACCEPT
+KEY_SOCKET__GETOPT = _selinux.KEY_SOCKET__GETOPT
+KEY_SOCKET__SETOPT = _selinux.KEY_SOCKET__SETOPT
+KEY_SOCKET__SHUTDOWN = _selinux.KEY_SOCKET__SHUTDOWN
+KEY_SOCKET__RECVFROM = _selinux.KEY_SOCKET__RECVFROM
+KEY_SOCKET__SENDTO = _selinux.KEY_SOCKET__SENDTO
+KEY_SOCKET__RECV_MSG = _selinux.KEY_SOCKET__RECV_MSG
+KEY_SOCKET__SEND_MSG = _selinux.KEY_SOCKET__SEND_MSG
+KEY_SOCKET__NAME_BIND = _selinux.KEY_SOCKET__NAME_BIND
+UNIX_STREAM_SOCKET__IOCTL = _selinux.UNIX_STREAM_SOCKET__IOCTL
+UNIX_STREAM_SOCKET__READ = _selinux.UNIX_STREAM_SOCKET__READ
+UNIX_STREAM_SOCKET__WRITE = _selinux.UNIX_STREAM_SOCKET__WRITE
+UNIX_STREAM_SOCKET__CREATE = _selinux.UNIX_STREAM_SOCKET__CREATE
+UNIX_STREAM_SOCKET__GETATTR = _selinux.UNIX_STREAM_SOCKET__GETATTR
+UNIX_STREAM_SOCKET__SETATTR = _selinux.UNIX_STREAM_SOCKET__SETATTR
+UNIX_STREAM_SOCKET__LOCK = _selinux.UNIX_STREAM_SOCKET__LOCK
+UNIX_STREAM_SOCKET__RELABELFROM = _selinux.UNIX_STREAM_SOCKET__RELABELFROM
+UNIX_STREAM_SOCKET__RELABELTO = _selinux.UNIX_STREAM_SOCKET__RELABELTO
+UNIX_STREAM_SOCKET__APPEND = _selinux.UNIX_STREAM_SOCKET__APPEND
+UNIX_STREAM_SOCKET__BIND = _selinux.UNIX_STREAM_SOCKET__BIND
+UNIX_STREAM_SOCKET__CONNECT = _selinux.UNIX_STREAM_SOCKET__CONNECT
+UNIX_STREAM_SOCKET__LISTEN = _selinux.UNIX_STREAM_SOCKET__LISTEN
+UNIX_STREAM_SOCKET__ACCEPT = _selinux.UNIX_STREAM_SOCKET__ACCEPT
+UNIX_STREAM_SOCKET__GETOPT = _selinux.UNIX_STREAM_SOCKET__GETOPT
+UNIX_STREAM_SOCKET__SETOPT = _selinux.UNIX_STREAM_SOCKET__SETOPT
+UNIX_STREAM_SOCKET__SHUTDOWN = _selinux.UNIX_STREAM_SOCKET__SHUTDOWN
+UNIX_STREAM_SOCKET__RECVFROM = _selinux.UNIX_STREAM_SOCKET__RECVFROM
+UNIX_STREAM_SOCKET__SENDTO = _selinux.UNIX_STREAM_SOCKET__SENDTO
+UNIX_STREAM_SOCKET__RECV_MSG = _selinux.UNIX_STREAM_SOCKET__RECV_MSG
+UNIX_STREAM_SOCKET__SEND_MSG = _selinux.UNIX_STREAM_SOCKET__SEND_MSG
+UNIX_STREAM_SOCKET__NAME_BIND = _selinux.UNIX_STREAM_SOCKET__NAME_BIND
+UNIX_STREAM_SOCKET__CONNECTTO = _selinux.UNIX_STREAM_SOCKET__CONNECTTO
+UNIX_STREAM_SOCKET__NEWCONN = _selinux.UNIX_STREAM_SOCKET__NEWCONN
+UNIX_STREAM_SOCKET__ACCEPTFROM = _selinux.UNIX_STREAM_SOCKET__ACCEPTFROM
+UNIX_DGRAM_SOCKET__IOCTL = _selinux.UNIX_DGRAM_SOCKET__IOCTL
+UNIX_DGRAM_SOCKET__READ = _selinux.UNIX_DGRAM_SOCKET__READ
+UNIX_DGRAM_SOCKET__WRITE = _selinux.UNIX_DGRAM_SOCKET__WRITE
+UNIX_DGRAM_SOCKET__CREATE = _selinux.UNIX_DGRAM_SOCKET__CREATE
+UNIX_DGRAM_SOCKET__GETATTR = _selinux.UNIX_DGRAM_SOCKET__GETATTR
+UNIX_DGRAM_SOCKET__SETATTR = _selinux.UNIX_DGRAM_SOCKET__SETATTR
+UNIX_DGRAM_SOCKET__LOCK = _selinux.UNIX_DGRAM_SOCKET__LOCK
+UNIX_DGRAM_SOCKET__RELABELFROM = _selinux.UNIX_DGRAM_SOCKET__RELABELFROM
+UNIX_DGRAM_SOCKET__RELABELTO = _selinux.UNIX_DGRAM_SOCKET__RELABELTO
+UNIX_DGRAM_SOCKET__APPEND = _selinux.UNIX_DGRAM_SOCKET__APPEND
+UNIX_DGRAM_SOCKET__BIND = _selinux.UNIX_DGRAM_SOCKET__BIND
+UNIX_DGRAM_SOCKET__CONNECT = _selinux.UNIX_DGRAM_SOCKET__CONNECT
+UNIX_DGRAM_SOCKET__LISTEN = _selinux.UNIX_DGRAM_SOCKET__LISTEN
+UNIX_DGRAM_SOCKET__ACCEPT = _selinux.UNIX_DGRAM_SOCKET__ACCEPT
+UNIX_DGRAM_SOCKET__GETOPT = _selinux.UNIX_DGRAM_SOCKET__GETOPT
+UNIX_DGRAM_SOCKET__SETOPT = _selinux.UNIX_DGRAM_SOCKET__SETOPT
+UNIX_DGRAM_SOCKET__SHUTDOWN = _selinux.UNIX_DGRAM_SOCKET__SHUTDOWN
+UNIX_DGRAM_SOCKET__RECVFROM = _selinux.UNIX_DGRAM_SOCKET__RECVFROM
+UNIX_DGRAM_SOCKET__SENDTO = _selinux.UNIX_DGRAM_SOCKET__SENDTO
+UNIX_DGRAM_SOCKET__RECV_MSG = _selinux.UNIX_DGRAM_SOCKET__RECV_MSG
+UNIX_DGRAM_SOCKET__SEND_MSG = _selinux.UNIX_DGRAM_SOCKET__SEND_MSG
+UNIX_DGRAM_SOCKET__NAME_BIND = _selinux.UNIX_DGRAM_SOCKET__NAME_BIND
+PROCESS__FORK = _selinux.PROCESS__FORK
+PROCESS__TRANSITION = _selinux.PROCESS__TRANSITION
+PROCESS__SIGCHLD = _selinux.PROCESS__SIGCHLD
+PROCESS__SIGKILL = _selinux.PROCESS__SIGKILL
+PROCESS__SIGSTOP = _selinux.PROCESS__SIGSTOP
+PROCESS__SIGNULL = _selinux.PROCESS__SIGNULL
+PROCESS__SIGNAL = _selinux.PROCESS__SIGNAL
+PROCESS__PTRACE = _selinux.PROCESS__PTRACE
+PROCESS__GETSCHED = _selinux.PROCESS__GETSCHED
+PROCESS__SETSCHED = _selinux.PROCESS__SETSCHED
+PROCESS__GETSESSION = _selinux.PROCESS__GETSESSION
+PROCESS__GETPGID = _selinux.PROCESS__GETPGID
+PROCESS__SETPGID = _selinux.PROCESS__SETPGID
+PROCESS__GETCAP = _selinux.PROCESS__GETCAP
+PROCESS__SETCAP = _selinux.PROCESS__SETCAP
+PROCESS__SHARE = _selinux.PROCESS__SHARE
+PROCESS__GETATTR = _selinux.PROCESS__GETATTR
+PROCESS__SETEXEC = _selinux.PROCESS__SETEXEC
+PROCESS__SETFSCREATE = _selinux.PROCESS__SETFSCREATE
+PROCESS__NOATSECURE = _selinux.PROCESS__NOATSECURE
+PROCESS__SIGINH = _selinux.PROCESS__SIGINH
+PROCESS__SETRLIMIT = _selinux.PROCESS__SETRLIMIT
+PROCESS__RLIMITINH = _selinux.PROCESS__RLIMITINH
+PROCESS__DYNTRANSITION = _selinux.PROCESS__DYNTRANSITION
+PROCESS__SETCURRENT = _selinux.PROCESS__SETCURRENT
+PROCESS__EXECMEM = _selinux.PROCESS__EXECMEM
+PROCESS__EXECSTACK = _selinux.PROCESS__EXECSTACK
+PROCESS__EXECHEAP = _selinux.PROCESS__EXECHEAP
+PROCESS__SETKEYCREATE = _selinux.PROCESS__SETKEYCREATE
+PROCESS__SETSOCKCREATE = _selinux.PROCESS__SETSOCKCREATE
+IPC__CREATE = _selinux.IPC__CREATE
+IPC__DESTROY = _selinux.IPC__DESTROY
+IPC__GETATTR = _selinux.IPC__GETATTR
+IPC__SETATTR = _selinux.IPC__SETATTR
+IPC__READ = _selinux.IPC__READ
+IPC__WRITE = _selinux.IPC__WRITE
+IPC__ASSOCIATE = _selinux.IPC__ASSOCIATE
+IPC__UNIX_READ = _selinux.IPC__UNIX_READ
+IPC__UNIX_WRITE = _selinux.IPC__UNIX_WRITE
+SEM__CREATE = _selinux.SEM__CREATE
+SEM__DESTROY = _selinux.SEM__DESTROY
+SEM__GETATTR = _selinux.SEM__GETATTR
+SEM__SETATTR = _selinux.SEM__SETATTR
+SEM__READ = _selinux.SEM__READ
+SEM__WRITE = _selinux.SEM__WRITE
+SEM__ASSOCIATE = _selinux.SEM__ASSOCIATE
+SEM__UNIX_READ = _selinux.SEM__UNIX_READ
+SEM__UNIX_WRITE = _selinux.SEM__UNIX_WRITE
+MSGQ__CREATE = _selinux.MSGQ__CREATE
+MSGQ__DESTROY = _selinux.MSGQ__DESTROY
+MSGQ__GETATTR = _selinux.MSGQ__GETATTR
+MSGQ__SETATTR = _selinux.MSGQ__SETATTR
+MSGQ__READ = _selinux.MSGQ__READ
+MSGQ__WRITE = _selinux.MSGQ__WRITE
+MSGQ__ASSOCIATE = _selinux.MSGQ__ASSOCIATE
+MSGQ__UNIX_READ = _selinux.MSGQ__UNIX_READ
+MSGQ__UNIX_WRITE = _selinux.MSGQ__UNIX_WRITE
+MSGQ__ENQUEUE = _selinux.MSGQ__ENQUEUE
+MSG__SEND = _selinux.MSG__SEND
+MSG__RECEIVE = _selinux.MSG__RECEIVE
+SHM__CREATE = _selinux.SHM__CREATE
+SHM__DESTROY = _selinux.SHM__DESTROY
+SHM__GETATTR = _selinux.SHM__GETATTR
+SHM__SETATTR = _selinux.SHM__SETATTR
+SHM__READ = _selinux.SHM__READ
+SHM__WRITE = _selinux.SHM__WRITE
+SHM__ASSOCIATE = _selinux.SHM__ASSOCIATE
+SHM__UNIX_READ = _selinux.SHM__UNIX_READ
+SHM__UNIX_WRITE = _selinux.SHM__UNIX_WRITE
+SHM__LOCK = _selinux.SHM__LOCK
+SECURITY__COMPUTE_AV = _selinux.SECURITY__COMPUTE_AV
+SECURITY__COMPUTE_CREATE = _selinux.SECURITY__COMPUTE_CREATE
+SECURITY__COMPUTE_MEMBER = _selinux.SECURITY__COMPUTE_MEMBER
+SECURITY__CHECK_CONTEXT = _selinux.SECURITY__CHECK_CONTEXT
+SECURITY__LOAD_POLICY = _selinux.SECURITY__LOAD_POLICY
+SECURITY__COMPUTE_RELABEL = _selinux.SECURITY__COMPUTE_RELABEL
+SECURITY__COMPUTE_USER = _selinux.SECURITY__COMPUTE_USER
+SECURITY__SETENFORCE = _selinux.SECURITY__SETENFORCE
+SECURITY__SETBOOL = _selinux.SECURITY__SETBOOL
+SECURITY__SETSECPARAM = _selinux.SECURITY__SETSECPARAM
+SECURITY__SETCHECKREQPROT = _selinux.SECURITY__SETCHECKREQPROT
+SYSTEM__IPC_INFO = _selinux.SYSTEM__IPC_INFO
+SYSTEM__SYSLOG_READ = _selinux.SYSTEM__SYSLOG_READ
+SYSTEM__SYSLOG_MOD = _selinux.SYSTEM__SYSLOG_MOD
+SYSTEM__SYSLOG_CONSOLE = _selinux.SYSTEM__SYSLOG_CONSOLE
+CAPABILITY__CHOWN = _selinux.CAPABILITY__CHOWN
+CAPABILITY__DAC_OVERRIDE = _selinux.CAPABILITY__DAC_OVERRIDE
+CAPABILITY__DAC_READ_SEARCH = _selinux.CAPABILITY__DAC_READ_SEARCH
+CAPABILITY__FOWNER = _selinux.CAPABILITY__FOWNER
+CAPABILITY__FSETID = _selinux.CAPABILITY__FSETID
+CAPABILITY__KILL = _selinux.CAPABILITY__KILL
+CAPABILITY__SETGID = _selinux.CAPABILITY__SETGID
+CAPABILITY__SETUID = _selinux.CAPABILITY__SETUID
+CAPABILITY__SETPCAP = _selinux.CAPABILITY__SETPCAP
+CAPABILITY__LINUX_IMMUTABLE = _selinux.CAPABILITY__LINUX_IMMUTABLE
+CAPABILITY__NET_BIND_SERVICE = _selinux.CAPABILITY__NET_BIND_SERVICE
+CAPABILITY__NET_BROADCAST = _selinux.CAPABILITY__NET_BROADCAST
+CAPABILITY__NET_ADMIN = _selinux.CAPABILITY__NET_ADMIN
+CAPABILITY__NET_RAW = _selinux.CAPABILITY__NET_RAW
+CAPABILITY__IPC_LOCK = _selinux.CAPABILITY__IPC_LOCK
+CAPABILITY__IPC_OWNER = _selinux.CAPABILITY__IPC_OWNER
+CAPABILITY__SYS_MODULE = _selinux.CAPABILITY__SYS_MODULE
+CAPABILITY__SYS_RAWIO = _selinux.CAPABILITY__SYS_RAWIO
+CAPABILITY__SYS_CHROOT = _selinux.CAPABILITY__SYS_CHROOT
+CAPABILITY__SYS_PTRACE = _selinux.CAPABILITY__SYS_PTRACE
+CAPABILITY__SYS_PACCT = _selinux.CAPABILITY__SYS_PACCT
+CAPABILITY__SYS_ADMIN = _selinux.CAPABILITY__SYS_ADMIN
+CAPABILITY__SYS_BOOT = _selinux.CAPABILITY__SYS_BOOT
+CAPABILITY__SYS_NICE = _selinux.CAPABILITY__SYS_NICE
+CAPABILITY__SYS_RESOURCE = _selinux.CAPABILITY__SYS_RESOURCE
+CAPABILITY__SYS_TIME = _selinux.CAPABILITY__SYS_TIME
+CAPABILITY__SYS_TTY_CONFIG = _selinux.CAPABILITY__SYS_TTY_CONFIG
+CAPABILITY__MKNOD = _selinux.CAPABILITY__MKNOD
+CAPABILITY__LEASE = _selinux.CAPABILITY__LEASE
+CAPABILITY__AUDIT_WRITE = _selinux.CAPABILITY__AUDIT_WRITE
+CAPABILITY__AUDIT_CONTROL = _selinux.CAPABILITY__AUDIT_CONTROL
+CAPABILITY__SETFCAP = _selinux.CAPABILITY__SETFCAP
+CAPABILITY2__MAC_OVERRIDE = _selinux.CAPABILITY2__MAC_OVERRIDE
+CAPABILITY2__MAC_ADMIN = _selinux.CAPABILITY2__MAC_ADMIN
+PASSWD__PASSWD = _selinux.PASSWD__PASSWD
+PASSWD__CHFN = _selinux.PASSWD__CHFN
+PASSWD__CHSH = _selinux.PASSWD__CHSH
+PASSWD__ROOTOK = _selinux.PASSWD__ROOTOK
+PASSWD__CRONTAB = _selinux.PASSWD__CRONTAB
+X_DRAWABLE__CREATE = _selinux.X_DRAWABLE__CREATE
+X_DRAWABLE__DESTROY = _selinux.X_DRAWABLE__DESTROY
+X_DRAWABLE__READ = _selinux.X_DRAWABLE__READ
+X_DRAWABLE__WRITE = _selinux.X_DRAWABLE__WRITE
+X_DRAWABLE__BLEND = _selinux.X_DRAWABLE__BLEND
+X_DRAWABLE__GETATTR = _selinux.X_DRAWABLE__GETATTR
+X_DRAWABLE__SETATTR = _selinux.X_DRAWABLE__SETATTR
+X_DRAWABLE__LIST_CHILD = _selinux.X_DRAWABLE__LIST_CHILD
+X_DRAWABLE__ADD_CHILD = _selinux.X_DRAWABLE__ADD_CHILD
+X_DRAWABLE__REMOVE_CHILD = _selinux.X_DRAWABLE__REMOVE_CHILD
+X_DRAWABLE__LIST_PROPERTY = _selinux.X_DRAWABLE__LIST_PROPERTY
+X_DRAWABLE__GET_PROPERTY = _selinux.X_DRAWABLE__GET_PROPERTY
+X_DRAWABLE__SET_PROPERTY = _selinux.X_DRAWABLE__SET_PROPERTY
+X_DRAWABLE__MANAGE = _selinux.X_DRAWABLE__MANAGE
+X_DRAWABLE__OVERRIDE = _selinux.X_DRAWABLE__OVERRIDE
+X_DRAWABLE__SHOW = _selinux.X_DRAWABLE__SHOW
+X_DRAWABLE__HIDE = _selinux.X_DRAWABLE__HIDE
+X_DRAWABLE__SEND = _selinux.X_DRAWABLE__SEND
+X_DRAWABLE__RECEIVE = _selinux.X_DRAWABLE__RECEIVE
+X_SCREEN__GETATTR = _selinux.X_SCREEN__GETATTR
+X_SCREEN__SETATTR = _selinux.X_SCREEN__SETATTR
+X_SCREEN__HIDE_CURSOR = _selinux.X_SCREEN__HIDE_CURSOR
+X_SCREEN__SHOW_CURSOR = _selinux.X_SCREEN__SHOW_CURSOR
+X_SCREEN__SAVER_GETATTR = _selinux.X_SCREEN__SAVER_GETATTR
+X_SCREEN__SAVER_SETATTR = _selinux.X_SCREEN__SAVER_SETATTR
+X_SCREEN__SAVER_HIDE = _selinux.X_SCREEN__SAVER_HIDE
+X_SCREEN__SAVER_SHOW = _selinux.X_SCREEN__SAVER_SHOW
+X_GC__CREATE = _selinux.X_GC__CREATE
+X_GC__DESTROY = _selinux.X_GC__DESTROY
+X_GC__GETATTR = _selinux.X_GC__GETATTR
+X_GC__SETATTR = _selinux.X_GC__SETATTR
+X_GC__USE = _selinux.X_GC__USE
+X_FONT__CREATE = _selinux.X_FONT__CREATE
+X_FONT__DESTROY = _selinux.X_FONT__DESTROY
+X_FONT__GETATTR = _selinux.X_FONT__GETATTR
+X_FONT__ADD_GLYPH = _selinux.X_FONT__ADD_GLYPH
+X_FONT__REMOVE_GLYPH = _selinux.X_FONT__REMOVE_GLYPH
+X_FONT__USE = _selinux.X_FONT__USE
+X_COLORMAP__CREATE = _selinux.X_COLORMAP__CREATE
+X_COLORMAP__DESTROY = _selinux.X_COLORMAP__DESTROY
+X_COLORMAP__READ = _selinux.X_COLORMAP__READ
+X_COLORMAP__WRITE = _selinux.X_COLORMAP__WRITE
+X_COLORMAP__GETATTR = _selinux.X_COLORMAP__GETATTR
+X_COLORMAP__ADD_COLOR = _selinux.X_COLORMAP__ADD_COLOR
+X_COLORMAP__REMOVE_COLOR = _selinux.X_COLORMAP__REMOVE_COLOR
+X_COLORMAP__INSTALL = _selinux.X_COLORMAP__INSTALL
+X_COLORMAP__UNINSTALL = _selinux.X_COLORMAP__UNINSTALL
+X_COLORMAP__USE = _selinux.X_COLORMAP__USE
+X_PROPERTY__CREATE = _selinux.X_PROPERTY__CREATE
+X_PROPERTY__DESTROY = _selinux.X_PROPERTY__DESTROY
+X_PROPERTY__READ = _selinux.X_PROPERTY__READ
+X_PROPERTY__WRITE = _selinux.X_PROPERTY__WRITE
+X_PROPERTY__APPEND = _selinux.X_PROPERTY__APPEND
+X_PROPERTY__GETATTR = _selinux.X_PROPERTY__GETATTR
+X_PROPERTY__SETATTR = _selinux.X_PROPERTY__SETATTR
+X_SELECTION__READ = _selinux.X_SELECTION__READ
+X_SELECTION__WRITE = _selinux.X_SELECTION__WRITE
+X_SELECTION__GETATTR = _selinux.X_SELECTION__GETATTR
+X_SELECTION__SETATTR = _selinux.X_SELECTION__SETATTR
+X_CURSOR__CREATE = _selinux.X_CURSOR__CREATE
+X_CURSOR__DESTROY = _selinux.X_CURSOR__DESTROY
+X_CURSOR__READ = _selinux.X_CURSOR__READ
+X_CURSOR__WRITE = _selinux.X_CURSOR__WRITE
+X_CURSOR__GETATTR = _selinux.X_CURSOR__GETATTR
+X_CURSOR__SETATTR = _selinux.X_CURSOR__SETATTR
+X_CURSOR__USE = _selinux.X_CURSOR__USE
+X_CLIENT__DESTROY = _selinux.X_CLIENT__DESTROY
+X_CLIENT__GETATTR = _selinux.X_CLIENT__GETATTR
+X_CLIENT__SETATTR = _selinux.X_CLIENT__SETATTR
+X_CLIENT__MANAGE = _selinux.X_CLIENT__MANAGE
+X_DEVICE__GETATTR = _selinux.X_DEVICE__GETATTR
+X_DEVICE__SETATTR = _selinux.X_DEVICE__SETATTR
+X_DEVICE__USE = _selinux.X_DEVICE__USE
+X_DEVICE__READ = _selinux.X_DEVICE__READ
+X_DEVICE__WRITE = _selinux.X_DEVICE__WRITE
+X_DEVICE__GETFOCUS = _selinux.X_DEVICE__GETFOCUS
+X_DEVICE__SETFOCUS = _selinux.X_DEVICE__SETFOCUS
+X_DEVICE__BELL = _selinux.X_DEVICE__BELL
+X_DEVICE__FORCE_CURSOR = _selinux.X_DEVICE__FORCE_CURSOR
+X_DEVICE__FREEZE = _selinux.X_DEVICE__FREEZE
+X_DEVICE__GRAB = _selinux.X_DEVICE__GRAB
+X_DEVICE__MANAGE = _selinux.X_DEVICE__MANAGE
+X_SERVER__GETATTR = _selinux.X_SERVER__GETATTR
+X_SERVER__SETATTR = _selinux.X_SERVER__SETATTR
+X_SERVER__RECORD = _selinux.X_SERVER__RECORD
+X_SERVER__DEBUG = _selinux.X_SERVER__DEBUG
+X_SERVER__GRAB = _selinux.X_SERVER__GRAB
+X_SERVER__MANAGE = _selinux.X_SERVER__MANAGE
+X_EXTENSION__QUERY = _selinux.X_EXTENSION__QUERY
+X_EXTENSION__USE = _selinux.X_EXTENSION__USE
+X_RESOURCE__READ = _selinux.X_RESOURCE__READ
+X_RESOURCE__WRITE = _selinux.X_RESOURCE__WRITE
+X_EVENT__SEND = _selinux.X_EVENT__SEND
+X_EVENT__RECEIVE = _selinux.X_EVENT__RECEIVE
+X_SYNTHETIC_EVENT__SEND = _selinux.X_SYNTHETIC_EVENT__SEND
+X_SYNTHETIC_EVENT__RECEIVE = _selinux.X_SYNTHETIC_EVENT__RECEIVE
+NETLINK_ROUTE_SOCKET__IOCTL = _selinux.NETLINK_ROUTE_SOCKET__IOCTL
+NETLINK_ROUTE_SOCKET__READ = _selinux.NETLINK_ROUTE_SOCKET__READ
+NETLINK_ROUTE_SOCKET__WRITE = _selinux.NETLINK_ROUTE_SOCKET__WRITE
+NETLINK_ROUTE_SOCKET__CREATE = _selinux.NETLINK_ROUTE_SOCKET__CREATE
+NETLINK_ROUTE_SOCKET__GETATTR = _selinux.NETLINK_ROUTE_SOCKET__GETATTR
+NETLINK_ROUTE_SOCKET__SETATTR = _selinux.NETLINK_ROUTE_SOCKET__SETATTR
+NETLINK_ROUTE_SOCKET__LOCK = _selinux.NETLINK_ROUTE_SOCKET__LOCK
+NETLINK_ROUTE_SOCKET__RELABELFROM = _selinux.NETLINK_ROUTE_SOCKET__RELABELFROM
+NETLINK_ROUTE_SOCKET__RELABELTO = _selinux.NETLINK_ROUTE_SOCKET__RELABELTO
+NETLINK_ROUTE_SOCKET__APPEND = _selinux.NETLINK_ROUTE_SOCKET__APPEND
+NETLINK_ROUTE_SOCKET__BIND = _selinux.NETLINK_ROUTE_SOCKET__BIND
+NETLINK_ROUTE_SOCKET__CONNECT = _selinux.NETLINK_ROUTE_SOCKET__CONNECT
+NETLINK_ROUTE_SOCKET__LISTEN = _selinux.NETLINK_ROUTE_SOCKET__LISTEN
+NETLINK_ROUTE_SOCKET__ACCEPT = _selinux.NETLINK_ROUTE_SOCKET__ACCEPT
+NETLINK_ROUTE_SOCKET__GETOPT = _selinux.NETLINK_ROUTE_SOCKET__GETOPT
+NETLINK_ROUTE_SOCKET__SETOPT = _selinux.NETLINK_ROUTE_SOCKET__SETOPT
+NETLINK_ROUTE_SOCKET__SHUTDOWN = _selinux.NETLINK_ROUTE_SOCKET__SHUTDOWN
+NETLINK_ROUTE_SOCKET__RECVFROM = _selinux.NETLINK_ROUTE_SOCKET__RECVFROM
+NETLINK_ROUTE_SOCKET__SENDTO = _selinux.NETLINK_ROUTE_SOCKET__SENDTO
+NETLINK_ROUTE_SOCKET__RECV_MSG = _selinux.NETLINK_ROUTE_SOCKET__RECV_MSG
+NETLINK_ROUTE_SOCKET__SEND_MSG = _selinux.NETLINK_ROUTE_SOCKET__SEND_MSG
+NETLINK_ROUTE_SOCKET__NAME_BIND = _selinux.NETLINK_ROUTE_SOCKET__NAME_BIND
+NETLINK_ROUTE_SOCKET__NLMSG_READ = _selinux.NETLINK_ROUTE_SOCKET__NLMSG_READ
+NETLINK_ROUTE_SOCKET__NLMSG_WRITE = _selinux.NETLINK_ROUTE_SOCKET__NLMSG_WRITE
+NETLINK_FIREWALL_SOCKET__IOCTL = _selinux.NETLINK_FIREWALL_SOCKET__IOCTL
+NETLINK_FIREWALL_SOCKET__READ = _selinux.NETLINK_FIREWALL_SOCKET__READ
+NETLINK_FIREWALL_SOCKET__WRITE = _selinux.NETLINK_FIREWALL_SOCKET__WRITE
+NETLINK_FIREWALL_SOCKET__CREATE = _selinux.NETLINK_FIREWALL_SOCKET__CREATE
+NETLINK_FIREWALL_SOCKET__GETATTR = _selinux.NETLINK_FIREWALL_SOCKET__GETATTR
+NETLINK_FIREWALL_SOCKET__SETATTR = _selinux.NETLINK_FIREWALL_SOCKET__SETATTR
+NETLINK_FIREWALL_SOCKET__LOCK = _selinux.NETLINK_FIREWALL_SOCKET__LOCK
+NETLINK_FIREWALL_SOCKET__RELABELFROM = _selinux.NETLINK_FIREWALL_SOCKET__RELABELFROM
+NETLINK_FIREWALL_SOCKET__RELABELTO = _selinux.NETLINK_FIREWALL_SOCKET__RELABELTO
+NETLINK_FIREWALL_SOCKET__APPEND = _selinux.NETLINK_FIREWALL_SOCKET__APPEND
+NETLINK_FIREWALL_SOCKET__BIND = _selinux.NETLINK_FIREWALL_SOCKET__BIND
+NETLINK_FIREWALL_SOCKET__CONNECT = _selinux.NETLINK_FIREWALL_SOCKET__CONNECT
+NETLINK_FIREWALL_SOCKET__LISTEN = _selinux.NETLINK_FIREWALL_SOCKET__LISTEN
+NETLINK_FIREWALL_SOCKET__ACCEPT = _selinux.NETLINK_FIREWALL_SOCKET__ACCEPT
+NETLINK_FIREWALL_SOCKET__GETOPT = _selinux.NETLINK_FIREWALL_SOCKET__GETOPT
+NETLINK_FIREWALL_SOCKET__SETOPT = _selinux.NETLINK_FIREWALL_SOCKET__SETOPT
+NETLINK_FIREWALL_SOCKET__SHUTDOWN = _selinux.NETLINK_FIREWALL_SOCKET__SHUTDOWN
+NETLINK_FIREWALL_SOCKET__RECVFROM = _selinux.NETLINK_FIREWALL_SOCKET__RECVFROM
+NETLINK_FIREWALL_SOCKET__SENDTO = _selinux.NETLINK_FIREWALL_SOCKET__SENDTO
+NETLINK_FIREWALL_SOCKET__RECV_MSG = _selinux.NETLINK_FIREWALL_SOCKET__RECV_MSG
+NETLINK_FIREWALL_SOCKET__SEND_MSG = _selinux.NETLINK_FIREWALL_SOCKET__SEND_MSG
+NETLINK_FIREWALL_SOCKET__NAME_BIND = _selinux.NETLINK_FIREWALL_SOCKET__NAME_BIND
+NETLINK_FIREWALL_SOCKET__NLMSG_READ = _selinux.NETLINK_FIREWALL_SOCKET__NLMSG_READ
+NETLINK_FIREWALL_SOCKET__NLMSG_WRITE = _selinux.NETLINK_FIREWALL_SOCKET__NLMSG_WRITE
+NETLINK_TCPDIAG_SOCKET__IOCTL = _selinux.NETLINK_TCPDIAG_SOCKET__IOCTL
+NETLINK_TCPDIAG_SOCKET__READ = _selinux.NETLINK_TCPDIAG_SOCKET__READ
+NETLINK_TCPDIAG_SOCKET__WRITE = _selinux.NETLINK_TCPDIAG_SOCKET__WRITE
+NETLINK_TCPDIAG_SOCKET__CREATE = _selinux.NETLINK_TCPDIAG_SOCKET__CREATE
+NETLINK_TCPDIAG_SOCKET__GETATTR = _selinux.NETLINK_TCPDIAG_SOCKET__GETATTR
+NETLINK_TCPDIAG_SOCKET__SETATTR = _selinux.NETLINK_TCPDIAG_SOCKET__SETATTR
+NETLINK_TCPDIAG_SOCKET__LOCK = _selinux.NETLINK_TCPDIAG_SOCKET__LOCK
+NETLINK_TCPDIAG_SOCKET__RELABELFROM = _selinux.NETLINK_TCPDIAG_SOCKET__RELABELFROM
+NETLINK_TCPDIAG_SOCKET__RELABELTO = _selinux.NETLINK_TCPDIAG_SOCKET__RELABELTO
+NETLINK_TCPDIAG_SOCKET__APPEND = _selinux.NETLINK_TCPDIAG_SOCKET__APPEND
+NETLINK_TCPDIAG_SOCKET__BIND = _selinux.NETLINK_TCPDIAG_SOCKET__BIND
+NETLINK_TCPDIAG_SOCKET__CONNECT = _selinux.NETLINK_TCPDIAG_SOCKET__CONNECT
+NETLINK_TCPDIAG_SOCKET__LISTEN = _selinux.NETLINK_TCPDIAG_SOCKET__LISTEN
+NETLINK_TCPDIAG_SOCKET__ACCEPT = _selinux.NETLINK_TCPDIAG_SOCKET__ACCEPT
+NETLINK_TCPDIAG_SOCKET__GETOPT = _selinux.NETLINK_TCPDIAG_SOCKET__GETOPT
+NETLINK_TCPDIAG_SOCKET__SETOPT = _selinux.NETLINK_TCPDIAG_SOCKET__SETOPT
+NETLINK_TCPDIAG_SOCKET__SHUTDOWN = _selinux.NETLINK_TCPDIAG_SOCKET__SHUTDOWN
+NETLINK_TCPDIAG_SOCKET__RECVFROM = _selinux.NETLINK_TCPDIAG_SOCKET__RECVFROM
+NETLINK_TCPDIAG_SOCKET__SENDTO = _selinux.NETLINK_TCPDIAG_SOCKET__SENDTO
+NETLINK_TCPDIAG_SOCKET__RECV_MSG = _selinux.NETLINK_TCPDIAG_SOCKET__RECV_MSG
+NETLINK_TCPDIAG_SOCKET__SEND_MSG = _selinux.NETLINK_TCPDIAG_SOCKET__SEND_MSG
+NETLINK_TCPDIAG_SOCKET__NAME_BIND = _selinux.NETLINK_TCPDIAG_SOCKET__NAME_BIND
+NETLINK_TCPDIAG_SOCKET__NLMSG_READ = _selinux.NETLINK_TCPDIAG_SOCKET__NLMSG_READ
+NETLINK_TCPDIAG_SOCKET__NLMSG_WRITE = _selinux.NETLINK_TCPDIAG_SOCKET__NLMSG_WRITE
+NETLINK_NFLOG_SOCKET__IOCTL = _selinux.NETLINK_NFLOG_SOCKET__IOCTL
+NETLINK_NFLOG_SOCKET__READ = _selinux.NETLINK_NFLOG_SOCKET__READ
+NETLINK_NFLOG_SOCKET__WRITE = _selinux.NETLINK_NFLOG_SOCKET__WRITE
+NETLINK_NFLOG_SOCKET__CREATE = _selinux.NETLINK_NFLOG_SOCKET__CREATE
+NETLINK_NFLOG_SOCKET__GETATTR = _selinux.NETLINK_NFLOG_SOCKET__GETATTR
+NETLINK_NFLOG_SOCKET__SETATTR = _selinux.NETLINK_NFLOG_SOCKET__SETATTR
+NETLINK_NFLOG_SOCKET__LOCK = _selinux.NETLINK_NFLOG_SOCKET__LOCK
+NETLINK_NFLOG_SOCKET__RELABELFROM = _selinux.NETLINK_NFLOG_SOCKET__RELABELFROM
+NETLINK_NFLOG_SOCKET__RELABELTO = _selinux.NETLINK_NFLOG_SOCKET__RELABELTO
+NETLINK_NFLOG_SOCKET__APPEND = _selinux.NETLINK_NFLOG_SOCKET__APPEND
+NETLINK_NFLOG_SOCKET__BIND = _selinux.NETLINK_NFLOG_SOCKET__BIND
+NETLINK_NFLOG_SOCKET__CONNECT = _selinux.NETLINK_NFLOG_SOCKET__CONNECT
+NETLINK_NFLOG_SOCKET__LISTEN = _selinux.NETLINK_NFLOG_SOCKET__LISTEN
+NETLINK_NFLOG_SOCKET__ACCEPT = _selinux.NETLINK_NFLOG_SOCKET__ACCEPT
+NETLINK_NFLOG_SOCKET__GETOPT = _selinux.NETLINK_NFLOG_SOCKET__GETOPT
+NETLINK_NFLOG_SOCKET__SETOPT = _selinux.NETLINK_NFLOG_SOCKET__SETOPT
+NETLINK_NFLOG_SOCKET__SHUTDOWN = _selinux.NETLINK_NFLOG_SOCKET__SHUTDOWN
+NETLINK_NFLOG_SOCKET__RECVFROM = _selinux.NETLINK_NFLOG_SOCKET__RECVFROM
+NETLINK_NFLOG_SOCKET__SENDTO = _selinux.NETLINK_NFLOG_SOCKET__SENDTO
+NETLINK_NFLOG_SOCKET__RECV_MSG = _selinux.NETLINK_NFLOG_SOCKET__RECV_MSG
+NETLINK_NFLOG_SOCKET__SEND_MSG = _selinux.NETLINK_NFLOG_SOCKET__SEND_MSG
+NETLINK_NFLOG_SOCKET__NAME_BIND = _selinux.NETLINK_NFLOG_SOCKET__NAME_BIND
+NETLINK_XFRM_SOCKET__IOCTL = _selinux.NETLINK_XFRM_SOCKET__IOCTL
+NETLINK_XFRM_SOCKET__READ = _selinux.NETLINK_XFRM_SOCKET__READ
+NETLINK_XFRM_SOCKET__WRITE = _selinux.NETLINK_XFRM_SOCKET__WRITE
+NETLINK_XFRM_SOCKET__CREATE = _selinux.NETLINK_XFRM_SOCKET__CREATE
+NETLINK_XFRM_SOCKET__GETATTR = _selinux.NETLINK_XFRM_SOCKET__GETATTR
+NETLINK_XFRM_SOCKET__SETATTR = _selinux.NETLINK_XFRM_SOCKET__SETATTR
+NETLINK_XFRM_SOCKET__LOCK = _selinux.NETLINK_XFRM_SOCKET__LOCK
+NETLINK_XFRM_SOCKET__RELABELFROM = _selinux.NETLINK_XFRM_SOCKET__RELABELFROM
+NETLINK_XFRM_SOCKET__RELABELTO = _selinux.NETLINK_XFRM_SOCKET__RELABELTO
+NETLINK_XFRM_SOCKET__APPEND = _selinux.NETLINK_XFRM_SOCKET__APPEND
+NETLINK_XFRM_SOCKET__BIND = _selinux.NETLINK_XFRM_SOCKET__BIND
+NETLINK_XFRM_SOCKET__CONNECT = _selinux.NETLINK_XFRM_SOCKET__CONNECT
+NETLINK_XFRM_SOCKET__LISTEN = _selinux.NETLINK_XFRM_SOCKET__LISTEN
+NETLINK_XFRM_SOCKET__ACCEPT = _selinux.NETLINK_XFRM_SOCKET__ACCEPT
+NETLINK_XFRM_SOCKET__GETOPT = _selinux.NETLINK_XFRM_SOCKET__GETOPT
+NETLINK_XFRM_SOCKET__SETOPT = _selinux.NETLINK_XFRM_SOCKET__SETOPT
+NETLINK_XFRM_SOCKET__SHUTDOWN = _selinux.NETLINK_XFRM_SOCKET__SHUTDOWN
+NETLINK_XFRM_SOCKET__RECVFROM = _selinux.NETLINK_XFRM_SOCKET__RECVFROM
+NETLINK_XFRM_SOCKET__SENDTO = _selinux.NETLINK_XFRM_SOCKET__SENDTO
+NETLINK_XFRM_SOCKET__RECV_MSG = _selinux.NETLINK_XFRM_SOCKET__RECV_MSG
+NETLINK_XFRM_SOCKET__SEND_MSG = _selinux.NETLINK_XFRM_SOCKET__SEND_MSG
+NETLINK_XFRM_SOCKET__NAME_BIND = _selinux.NETLINK_XFRM_SOCKET__NAME_BIND
+NETLINK_XFRM_SOCKET__NLMSG_READ = _selinux.NETLINK_XFRM_SOCKET__NLMSG_READ
+NETLINK_XFRM_SOCKET__NLMSG_WRITE = _selinux.NETLINK_XFRM_SOCKET__NLMSG_WRITE
+NETLINK_SELINUX_SOCKET__IOCTL = _selinux.NETLINK_SELINUX_SOCKET__IOCTL
+NETLINK_SELINUX_SOCKET__READ = _selinux.NETLINK_SELINUX_SOCKET__READ
+NETLINK_SELINUX_SOCKET__WRITE = _selinux.NETLINK_SELINUX_SOCKET__WRITE
+NETLINK_SELINUX_SOCKET__CREATE = _selinux.NETLINK_SELINUX_SOCKET__CREATE
+NETLINK_SELINUX_SOCKET__GETATTR = _selinux.NETLINK_SELINUX_SOCKET__GETATTR
+NETLINK_SELINUX_SOCKET__SETATTR = _selinux.NETLINK_SELINUX_SOCKET__SETATTR
+NETLINK_SELINUX_SOCKET__LOCK = _selinux.NETLINK_SELINUX_SOCKET__LOCK
+NETLINK_SELINUX_SOCKET__RELABELFROM = _selinux.NETLINK_SELINUX_SOCKET__RELABELFROM
+NETLINK_SELINUX_SOCKET__RELABELTO = _selinux.NETLINK_SELINUX_SOCKET__RELABELTO
+NETLINK_SELINUX_SOCKET__APPEND = _selinux.NETLINK_SELINUX_SOCKET__APPEND
+NETLINK_SELINUX_SOCKET__BIND = _selinux.NETLINK_SELINUX_SOCKET__BIND
+NETLINK_SELINUX_SOCKET__CONNECT = _selinux.NETLINK_SELINUX_SOCKET__CONNECT
+NETLINK_SELINUX_SOCKET__LISTEN = _selinux.NETLINK_SELINUX_SOCKET__LISTEN
+NETLINK_SELINUX_SOCKET__ACCEPT = _selinux.NETLINK_SELINUX_SOCKET__ACCEPT
+NETLINK_SELINUX_SOCKET__GETOPT = _selinux.NETLINK_SELINUX_SOCKET__GETOPT
+NETLINK_SELINUX_SOCKET__SETOPT = _selinux.NETLINK_SELINUX_SOCKET__SETOPT
+NETLINK_SELINUX_SOCKET__SHUTDOWN = _selinux.NETLINK_SELINUX_SOCKET__SHUTDOWN
+NETLINK_SELINUX_SOCKET__RECVFROM = _selinux.NETLINK_SELINUX_SOCKET__RECVFROM
+NETLINK_SELINUX_SOCKET__SENDTO = _selinux.NETLINK_SELINUX_SOCKET__SENDTO
+NETLINK_SELINUX_SOCKET__RECV_MSG = _selinux.NETLINK_SELINUX_SOCKET__RECV_MSG
+NETLINK_SELINUX_SOCKET__SEND_MSG = _selinux.NETLINK_SELINUX_SOCKET__SEND_MSG
+NETLINK_SELINUX_SOCKET__NAME_BIND = _selinux.NETLINK_SELINUX_SOCKET__NAME_BIND
+NETLINK_AUDIT_SOCKET__IOCTL = _selinux.NETLINK_AUDIT_SOCKET__IOCTL
+NETLINK_AUDIT_SOCKET__READ = _selinux.NETLINK_AUDIT_SOCKET__READ
+NETLINK_AUDIT_SOCKET__WRITE = _selinux.NETLINK_AUDIT_SOCKET__WRITE
+NETLINK_AUDIT_SOCKET__CREATE = _selinux.NETLINK_AUDIT_SOCKET__CREATE
+NETLINK_AUDIT_SOCKET__GETATTR = _selinux.NETLINK_AUDIT_SOCKET__GETATTR
+NETLINK_AUDIT_SOCKET__SETATTR = _selinux.NETLINK_AUDIT_SOCKET__SETATTR
+NETLINK_AUDIT_SOCKET__LOCK = _selinux.NETLINK_AUDIT_SOCKET__LOCK
+NETLINK_AUDIT_SOCKET__RELABELFROM = _selinux.NETLINK_AUDIT_SOCKET__RELABELFROM
+NETLINK_AUDIT_SOCKET__RELABELTO = _selinux.NETLINK_AUDIT_SOCKET__RELABELTO
+NETLINK_AUDIT_SOCKET__APPEND = _selinux.NETLINK_AUDIT_SOCKET__APPEND
+NETLINK_AUDIT_SOCKET__BIND = _selinux.NETLINK_AUDIT_SOCKET__BIND
+NETLINK_AUDIT_SOCKET__CONNECT = _selinux.NETLINK_AUDIT_SOCKET__CONNECT
+NETLINK_AUDIT_SOCKET__LISTEN = _selinux.NETLINK_AUDIT_SOCKET__LISTEN
+NETLINK_AUDIT_SOCKET__ACCEPT = _selinux.NETLINK_AUDIT_SOCKET__ACCEPT
+NETLINK_AUDIT_SOCKET__GETOPT = _selinux.NETLINK_AUDIT_SOCKET__GETOPT
+NETLINK_AUDIT_SOCKET__SETOPT = _selinux.NETLINK_AUDIT_SOCKET__SETOPT
+NETLINK_AUDIT_SOCKET__SHUTDOWN = _selinux.NETLINK_AUDIT_SOCKET__SHUTDOWN
+NETLINK_AUDIT_SOCKET__RECVFROM = _selinux.NETLINK_AUDIT_SOCKET__RECVFROM
+NETLINK_AUDIT_SOCKET__SENDTO = _selinux.NETLINK_AUDIT_SOCKET__SENDTO
+NETLINK_AUDIT_SOCKET__RECV_MSG = _selinux.NETLINK_AUDIT_SOCKET__RECV_MSG
+NETLINK_AUDIT_SOCKET__SEND_MSG = _selinux.NETLINK_AUDIT_SOCKET__SEND_MSG
+NETLINK_AUDIT_SOCKET__NAME_BIND = _selinux.NETLINK_AUDIT_SOCKET__NAME_BIND
+NETLINK_AUDIT_SOCKET__NLMSG_READ = _selinux.NETLINK_AUDIT_SOCKET__NLMSG_READ
+NETLINK_AUDIT_SOCKET__NLMSG_WRITE = _selinux.NETLINK_AUDIT_SOCKET__NLMSG_WRITE
+NETLINK_AUDIT_SOCKET__NLMSG_RELAY = _selinux.NETLINK_AUDIT_SOCKET__NLMSG_RELAY
+NETLINK_AUDIT_SOCKET__NLMSG_READPRIV = _selinux.NETLINK_AUDIT_SOCKET__NLMSG_READPRIV
+NETLINK_AUDIT_SOCKET__NLMSG_TTY_AUDIT = _selinux.NETLINK_AUDIT_SOCKET__NLMSG_TTY_AUDIT
+NETLINK_IP6FW_SOCKET__IOCTL = _selinux.NETLINK_IP6FW_SOCKET__IOCTL
+NETLINK_IP6FW_SOCKET__READ = _selinux.NETLINK_IP6FW_SOCKET__READ
+NETLINK_IP6FW_SOCKET__WRITE = _selinux.NETLINK_IP6FW_SOCKET__WRITE
+NETLINK_IP6FW_SOCKET__CREATE = _selinux.NETLINK_IP6FW_SOCKET__CREATE
+NETLINK_IP6FW_SOCKET__GETATTR = _selinux.NETLINK_IP6FW_SOCKET__GETATTR
+NETLINK_IP6FW_SOCKET__SETATTR = _selinux.NETLINK_IP6FW_SOCKET__SETATTR
+NETLINK_IP6FW_SOCKET__LOCK = _selinux.NETLINK_IP6FW_SOCKET__LOCK
+NETLINK_IP6FW_SOCKET__RELABELFROM = _selinux.NETLINK_IP6FW_SOCKET__RELABELFROM
+NETLINK_IP6FW_SOCKET__RELABELTO = _selinux.NETLINK_IP6FW_SOCKET__RELABELTO
+NETLINK_IP6FW_SOCKET__APPEND = _selinux.NETLINK_IP6FW_SOCKET__APPEND
+NETLINK_IP6FW_SOCKET__BIND = _selinux.NETLINK_IP6FW_SOCKET__BIND
+NETLINK_IP6FW_SOCKET__CONNECT = _selinux.NETLINK_IP6FW_SOCKET__CONNECT
+NETLINK_IP6FW_SOCKET__LISTEN = _selinux.NETLINK_IP6FW_SOCKET__LISTEN
+NETLINK_IP6FW_SOCKET__ACCEPT = _selinux.NETLINK_IP6FW_SOCKET__ACCEPT
+NETLINK_IP6FW_SOCKET__GETOPT = _selinux.NETLINK_IP6FW_SOCKET__GETOPT
+NETLINK_IP6FW_SOCKET__SETOPT = _selinux.NETLINK_IP6FW_SOCKET__SETOPT
+NETLINK_IP6FW_SOCKET__SHUTDOWN = _selinux.NETLINK_IP6FW_SOCKET__SHUTDOWN
+NETLINK_IP6FW_SOCKET__RECVFROM = _selinux.NETLINK_IP6FW_SOCKET__RECVFROM
+NETLINK_IP6FW_SOCKET__SENDTO = _selinux.NETLINK_IP6FW_SOCKET__SENDTO
+NETLINK_IP6FW_SOCKET__RECV_MSG = _selinux.NETLINK_IP6FW_SOCKET__RECV_MSG
+NETLINK_IP6FW_SOCKET__SEND_MSG = _selinux.NETLINK_IP6FW_SOCKET__SEND_MSG
+NETLINK_IP6FW_SOCKET__NAME_BIND = _selinux.NETLINK_IP6FW_SOCKET__NAME_BIND
+NETLINK_IP6FW_SOCKET__NLMSG_READ = _selinux.NETLINK_IP6FW_SOCKET__NLMSG_READ
+NETLINK_IP6FW_SOCKET__NLMSG_WRITE = _selinux.NETLINK_IP6FW_SOCKET__NLMSG_WRITE
+NETLINK_DNRT_SOCKET__IOCTL = _selinux.NETLINK_DNRT_SOCKET__IOCTL
+NETLINK_DNRT_SOCKET__READ = _selinux.NETLINK_DNRT_SOCKET__READ
+NETLINK_DNRT_SOCKET__WRITE = _selinux.NETLINK_DNRT_SOCKET__WRITE
+NETLINK_DNRT_SOCKET__CREATE = _selinux.NETLINK_DNRT_SOCKET__CREATE
+NETLINK_DNRT_SOCKET__GETATTR = _selinux.NETLINK_DNRT_SOCKET__GETATTR
+NETLINK_DNRT_SOCKET__SETATTR = _selinux.NETLINK_DNRT_SOCKET__SETATTR
+NETLINK_DNRT_SOCKET__LOCK = _selinux.NETLINK_DNRT_SOCKET__LOCK
+NETLINK_DNRT_SOCKET__RELABELFROM = _selinux.NETLINK_DNRT_SOCKET__RELABELFROM
+NETLINK_DNRT_SOCKET__RELABELTO = _selinux.NETLINK_DNRT_SOCKET__RELABELTO
+NETLINK_DNRT_SOCKET__APPEND = _selinux.NETLINK_DNRT_SOCKET__APPEND
+NETLINK_DNRT_SOCKET__BIND = _selinux.NETLINK_DNRT_SOCKET__BIND
+NETLINK_DNRT_SOCKET__CONNECT = _selinux.NETLINK_DNRT_SOCKET__CONNECT
+NETLINK_DNRT_SOCKET__LISTEN = _selinux.NETLINK_DNRT_SOCKET__LISTEN
+NETLINK_DNRT_SOCKET__ACCEPT = _selinux.NETLINK_DNRT_SOCKET__ACCEPT
+NETLINK_DNRT_SOCKET__GETOPT = _selinux.NETLINK_DNRT_SOCKET__GETOPT
+NETLINK_DNRT_SOCKET__SETOPT = _selinux.NETLINK_DNRT_SOCKET__SETOPT
+NETLINK_DNRT_SOCKET__SHUTDOWN = _selinux.NETLINK_DNRT_SOCKET__SHUTDOWN
+NETLINK_DNRT_SOCKET__RECVFROM = _selinux.NETLINK_DNRT_SOCKET__RECVFROM
+NETLINK_DNRT_SOCKET__SENDTO = _selinux.NETLINK_DNRT_SOCKET__SENDTO
+NETLINK_DNRT_SOCKET__RECV_MSG = _selinux.NETLINK_DNRT_SOCKET__RECV_MSG
+NETLINK_DNRT_SOCKET__SEND_MSG = _selinux.NETLINK_DNRT_SOCKET__SEND_MSG
+NETLINK_DNRT_SOCKET__NAME_BIND = _selinux.NETLINK_DNRT_SOCKET__NAME_BIND
+DBUS__ACQUIRE_SVC = _selinux.DBUS__ACQUIRE_SVC
+DBUS__SEND_MSG = _selinux.DBUS__SEND_MSG
+NSCD__GETPWD = _selinux.NSCD__GETPWD
+NSCD__GETGRP = _selinux.NSCD__GETGRP
+NSCD__GETHOST = _selinux.NSCD__GETHOST
+NSCD__GETSTAT = _selinux.NSCD__GETSTAT
+NSCD__ADMIN = _selinux.NSCD__ADMIN
+NSCD__SHMEMPWD = _selinux.NSCD__SHMEMPWD
+NSCD__SHMEMGRP = _selinux.NSCD__SHMEMGRP
+NSCD__SHMEMHOST = _selinux.NSCD__SHMEMHOST
+NSCD__GETSERV = _selinux.NSCD__GETSERV
+NSCD__SHMEMSERV = _selinux.NSCD__SHMEMSERV
+ASSOCIATION__SENDTO = _selinux.ASSOCIATION__SENDTO
+ASSOCIATION__RECVFROM = _selinux.ASSOCIATION__RECVFROM
+ASSOCIATION__SETCONTEXT = _selinux.ASSOCIATION__SETCONTEXT
+ASSOCIATION__POLMATCH = _selinux.ASSOCIATION__POLMATCH
+NETLINK_KOBJECT_UEVENT_SOCKET__IOCTL = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__IOCTL
+NETLINK_KOBJECT_UEVENT_SOCKET__READ = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__READ
+NETLINK_KOBJECT_UEVENT_SOCKET__WRITE = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__WRITE
+NETLINK_KOBJECT_UEVENT_SOCKET__CREATE = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__CREATE
+NETLINK_KOBJECT_UEVENT_SOCKET__GETATTR = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__GETATTR
+NETLINK_KOBJECT_UEVENT_SOCKET__SETATTR = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__SETATTR
+NETLINK_KOBJECT_UEVENT_SOCKET__LOCK = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__LOCK
+NETLINK_KOBJECT_UEVENT_SOCKET__RELABELFROM = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__RELABELFROM
+NETLINK_KOBJECT_UEVENT_SOCKET__RELABELTO = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__RELABELTO
+NETLINK_KOBJECT_UEVENT_SOCKET__APPEND = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__APPEND
+NETLINK_KOBJECT_UEVENT_SOCKET__BIND = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__BIND
+NETLINK_KOBJECT_UEVENT_SOCKET__CONNECT = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__CONNECT
+NETLINK_KOBJECT_UEVENT_SOCKET__LISTEN = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__LISTEN
+NETLINK_KOBJECT_UEVENT_SOCKET__ACCEPT = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__ACCEPT
+NETLINK_KOBJECT_UEVENT_SOCKET__GETOPT = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__GETOPT
+NETLINK_KOBJECT_UEVENT_SOCKET__SETOPT = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__SETOPT
+NETLINK_KOBJECT_UEVENT_SOCKET__SHUTDOWN = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__SHUTDOWN
+NETLINK_KOBJECT_UEVENT_SOCKET__RECVFROM = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__RECVFROM
+NETLINK_KOBJECT_UEVENT_SOCKET__SENDTO = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__SENDTO
+NETLINK_KOBJECT_UEVENT_SOCKET__RECV_MSG = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__RECV_MSG
+NETLINK_KOBJECT_UEVENT_SOCKET__SEND_MSG = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__SEND_MSG
+NETLINK_KOBJECT_UEVENT_SOCKET__NAME_BIND = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__NAME_BIND
+APPLETALK_SOCKET__IOCTL = _selinux.APPLETALK_SOCKET__IOCTL
+APPLETALK_SOCKET__READ = _selinux.APPLETALK_SOCKET__READ
+APPLETALK_SOCKET__WRITE = _selinux.APPLETALK_SOCKET__WRITE
+APPLETALK_SOCKET__CREATE = _selinux.APPLETALK_SOCKET__CREATE
+APPLETALK_SOCKET__GETATTR = _selinux.APPLETALK_SOCKET__GETATTR
+APPLETALK_SOCKET__SETATTR = _selinux.APPLETALK_SOCKET__SETATTR
+APPLETALK_SOCKET__LOCK = _selinux.APPLETALK_SOCKET__LOCK
+APPLETALK_SOCKET__RELABELFROM = _selinux.APPLETALK_SOCKET__RELABELFROM
+APPLETALK_SOCKET__RELABELTO = _selinux.APPLETALK_SOCKET__RELABELTO
+APPLETALK_SOCKET__APPEND = _selinux.APPLETALK_SOCKET__APPEND
+APPLETALK_SOCKET__BIND = _selinux.APPLETALK_SOCKET__BIND
+APPLETALK_SOCKET__CONNECT = _selinux.APPLETALK_SOCKET__CONNECT
+APPLETALK_SOCKET__LISTEN = _selinux.APPLETALK_SOCKET__LISTEN
+APPLETALK_SOCKET__ACCEPT = _selinux.APPLETALK_SOCKET__ACCEPT
+APPLETALK_SOCKET__GETOPT = _selinux.APPLETALK_SOCKET__GETOPT
+APPLETALK_SOCKET__SETOPT = _selinux.APPLETALK_SOCKET__SETOPT
+APPLETALK_SOCKET__SHUTDOWN = _selinux.APPLETALK_SOCKET__SHUTDOWN
+APPLETALK_SOCKET__RECVFROM = _selinux.APPLETALK_SOCKET__RECVFROM
+APPLETALK_SOCKET__SENDTO = _selinux.APPLETALK_SOCKET__SENDTO
+APPLETALK_SOCKET__RECV_MSG = _selinux.APPLETALK_SOCKET__RECV_MSG
+APPLETALK_SOCKET__SEND_MSG = _selinux.APPLETALK_SOCKET__SEND_MSG
+APPLETALK_SOCKET__NAME_BIND = _selinux.APPLETALK_SOCKET__NAME_BIND
+PACKET__SEND = _selinux.PACKET__SEND
+PACKET__RECV = _selinux.PACKET__RECV
+PACKET__RELABELTO = _selinux.PACKET__RELABELTO
+PACKET__FLOW_IN = _selinux.PACKET__FLOW_IN
+PACKET__FLOW_OUT = _selinux.PACKET__FLOW_OUT
+PACKET__FORWARD_IN = _selinux.PACKET__FORWARD_IN
+PACKET__FORWARD_OUT = _selinux.PACKET__FORWARD_OUT
+KEY__VIEW = _selinux.KEY__VIEW
+KEY__READ = _selinux.KEY__READ
+KEY__WRITE = _selinux.KEY__WRITE
+KEY__SEARCH = _selinux.KEY__SEARCH
+KEY__LINK = _selinux.KEY__LINK
+KEY__SETATTR = _selinux.KEY__SETATTR
+KEY__CREATE = _selinux.KEY__CREATE
+CONTEXT__TRANSLATE = _selinux.CONTEXT__TRANSLATE
+CONTEXT__CONTAINS = _selinux.CONTEXT__CONTAINS
+DCCP_SOCKET__IOCTL = _selinux.DCCP_SOCKET__IOCTL
+DCCP_SOCKET__READ = _selinux.DCCP_SOCKET__READ
+DCCP_SOCKET__WRITE = _selinux.DCCP_SOCKET__WRITE
+DCCP_SOCKET__CREATE = _selinux.DCCP_SOCKET__CREATE
+DCCP_SOCKET__GETATTR = _selinux.DCCP_SOCKET__GETATTR
+DCCP_SOCKET__SETATTR = _selinux.DCCP_SOCKET__SETATTR
+DCCP_SOCKET__LOCK = _selinux.DCCP_SOCKET__LOCK
+DCCP_SOCKET__RELABELFROM = _selinux.DCCP_SOCKET__RELABELFROM
+DCCP_SOCKET__RELABELTO = _selinux.DCCP_SOCKET__RELABELTO
+DCCP_SOCKET__APPEND = _selinux.DCCP_SOCKET__APPEND
+DCCP_SOCKET__BIND = _selinux.DCCP_SOCKET__BIND
+DCCP_SOCKET__CONNECT = _selinux.DCCP_SOCKET__CONNECT
+DCCP_SOCKET__LISTEN = _selinux.DCCP_SOCKET__LISTEN
+DCCP_SOCKET__ACCEPT = _selinux.DCCP_SOCKET__ACCEPT
+DCCP_SOCKET__GETOPT = _selinux.DCCP_SOCKET__GETOPT
+DCCP_SOCKET__SETOPT = _selinux.DCCP_SOCKET__SETOPT
+DCCP_SOCKET__SHUTDOWN = _selinux.DCCP_SOCKET__SHUTDOWN
+DCCP_SOCKET__RECVFROM = _selinux.DCCP_SOCKET__RECVFROM
+DCCP_SOCKET__SENDTO = _selinux.DCCP_SOCKET__SENDTO
+DCCP_SOCKET__RECV_MSG = _selinux.DCCP_SOCKET__RECV_MSG
+DCCP_SOCKET__SEND_MSG = _selinux.DCCP_SOCKET__SEND_MSG
+DCCP_SOCKET__NAME_BIND = _selinux.DCCP_SOCKET__NAME_BIND
+DCCP_SOCKET__NODE_BIND = _selinux.DCCP_SOCKET__NODE_BIND
+DCCP_SOCKET__NAME_CONNECT = _selinux.DCCP_SOCKET__NAME_CONNECT
+MEMPROTECT__MMAP_ZERO = _selinux.MEMPROTECT__MMAP_ZERO
+DB_DATABASE__CREATE = _selinux.DB_DATABASE__CREATE
+DB_DATABASE__DROP = _selinux.DB_DATABASE__DROP
+DB_DATABASE__GETATTR = _selinux.DB_DATABASE__GETATTR
+DB_DATABASE__SETATTR = _selinux.DB_DATABASE__SETATTR
+DB_DATABASE__RELABELFROM = _selinux.DB_DATABASE__RELABELFROM
+DB_DATABASE__RELABELTO = _selinux.DB_DATABASE__RELABELTO
+DB_DATABASE__ACCESS = _selinux.DB_DATABASE__ACCESS
+DB_DATABASE__INSTALL_MODULE = _selinux.DB_DATABASE__INSTALL_MODULE
+DB_DATABASE__LOAD_MODULE = _selinux.DB_DATABASE__LOAD_MODULE
+DB_DATABASE__GET_PARAM = _selinux.DB_DATABASE__GET_PARAM
+DB_DATABASE__SET_PARAM = _selinux.DB_DATABASE__SET_PARAM
+DB_TABLE__CREATE = _selinux.DB_TABLE__CREATE
+DB_TABLE__DROP = _selinux.DB_TABLE__DROP
+DB_TABLE__GETATTR = _selinux.DB_TABLE__GETATTR
+DB_TABLE__SETATTR = _selinux.DB_TABLE__SETATTR
+DB_TABLE__RELABELFROM = _selinux.DB_TABLE__RELABELFROM
+DB_TABLE__RELABELTO = _selinux.DB_TABLE__RELABELTO
+DB_TABLE__USE = _selinux.DB_TABLE__USE
+DB_TABLE__SELECT = _selinux.DB_TABLE__SELECT
+DB_TABLE__UPDATE = _selinux.DB_TABLE__UPDATE
+DB_TABLE__INSERT = _selinux.DB_TABLE__INSERT
+DB_TABLE__DELETE = _selinux.DB_TABLE__DELETE
+DB_TABLE__LOCK = _selinux.DB_TABLE__LOCK
+DB_PROCEDURE__CREATE = _selinux.DB_PROCEDURE__CREATE
+DB_PROCEDURE__DROP = _selinux.DB_PROCEDURE__DROP
+DB_PROCEDURE__GETATTR = _selinux.DB_PROCEDURE__GETATTR
+DB_PROCEDURE__SETATTR = _selinux.DB_PROCEDURE__SETATTR
+DB_PROCEDURE__RELABELFROM = _selinux.DB_PROCEDURE__RELABELFROM
+DB_PROCEDURE__RELABELTO = _selinux.DB_PROCEDURE__RELABELTO
+DB_PROCEDURE__EXECUTE = _selinux.DB_PROCEDURE__EXECUTE
+DB_PROCEDURE__ENTRYPOINT = _selinux.DB_PROCEDURE__ENTRYPOINT
+DB_COLUMN__CREATE = _selinux.DB_COLUMN__CREATE
+DB_COLUMN__DROP = _selinux.DB_COLUMN__DROP
+DB_COLUMN__GETATTR = _selinux.DB_COLUMN__GETATTR
+DB_COLUMN__SETATTR = _selinux.DB_COLUMN__SETATTR
+DB_COLUMN__RELABELFROM = _selinux.DB_COLUMN__RELABELFROM
+DB_COLUMN__RELABELTO = _selinux.DB_COLUMN__RELABELTO
+DB_COLUMN__USE = _selinux.DB_COLUMN__USE
+DB_COLUMN__SELECT = _selinux.DB_COLUMN__SELECT
+DB_COLUMN__UPDATE = _selinux.DB_COLUMN__UPDATE
+DB_COLUMN__INSERT = _selinux.DB_COLUMN__INSERT
+DB_TUPLE__RELABELFROM = _selinux.DB_TUPLE__RELABELFROM
+DB_TUPLE__RELABELTO = _selinux.DB_TUPLE__RELABELTO
+DB_TUPLE__USE = _selinux.DB_TUPLE__USE
+DB_TUPLE__SELECT = _selinux.DB_TUPLE__SELECT
+DB_TUPLE__UPDATE = _selinux.DB_TUPLE__UPDATE
+DB_TUPLE__INSERT = _selinux.DB_TUPLE__INSERT
+DB_TUPLE__DELETE = _selinux.DB_TUPLE__DELETE
+DB_BLOB__CREATE = _selinux.DB_BLOB__CREATE
+DB_BLOB__DROP = _selinux.DB_BLOB__DROP
+DB_BLOB__GETATTR = _selinux.DB_BLOB__GETATTR
+DB_BLOB__SETATTR = _selinux.DB_BLOB__SETATTR
+DB_BLOB__RELABELFROM = _selinux.DB_BLOB__RELABELFROM
+DB_BLOB__RELABELTO = _selinux.DB_BLOB__RELABELTO
+DB_BLOB__READ = _selinux.DB_BLOB__READ
+DB_BLOB__WRITE = _selinux.DB_BLOB__WRITE
+DB_BLOB__IMPORT = _selinux.DB_BLOB__IMPORT
+DB_BLOB__EXPORT = _selinux.DB_BLOB__EXPORT
+PEER__RECV = _selinux.PEER__RECV
+X_APPLICATION_DATA__PASTE = _selinux.X_APPLICATION_DATA__PASTE
+X_APPLICATION_DATA__PASTE_AFTER_CONFIRM = _selinux.X_APPLICATION_DATA__PASTE_AFTER_CONFIRM
+X_APPLICATION_DATA__COPY = _selinux.X_APPLICATION_DATA__COPY
+class context_s_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, context_s_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, context_s_t, name)
+ __repr__ = _swig_repr
+ __swig_setmethods__["ptr"] = _selinux.context_s_t_ptr_set
+ __swig_getmethods__["ptr"] = _selinux.context_s_t_ptr_get
+ if _newclass:ptr = _swig_property(_selinux.context_s_t_ptr_get, _selinux.context_s_t_ptr_set)
+ def __init__(self):
+ this = _selinux.new_context_s_t()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _selinux.delete_context_s_t
+ __del__ = lambda self : None;
+context_s_t_swigregister = _selinux.context_s_t_swigregister
+context_s_t_swigregister(context_s_t)
+
+
+def context_new(*args):
+ return _selinux.context_new(*args)
+context_new = _selinux.context_new
+
+def context_str(*args):
+ return _selinux.context_str(*args)
+context_str = _selinux.context_str
+
+def context_free(*args):
+ return _selinux.context_free(*args)
+context_free = _selinux.context_free
+
+def context_type_get(*args):
+ return _selinux.context_type_get(*args)
+context_type_get = _selinux.context_type_get
+
+def context_range_get(*args):
+ return _selinux.context_range_get(*args)
+context_range_get = _selinux.context_range_get
+
+def context_role_get(*args):
+ return _selinux.context_role_get(*args)
+context_role_get = _selinux.context_role_get
+
+def context_user_get(*args):
+ return _selinux.context_user_get(*args)
+context_user_get = _selinux.context_user_get
+
+def context_type_set(*args):
+ return _selinux.context_type_set(*args)
+context_type_set = _selinux.context_type_set
+
+def context_range_set(*args):
+ return _selinux.context_range_set(*args)
+context_range_set = _selinux.context_range_set
+
+def context_role_set(*args):
+ return _selinux.context_role_set(*args)
+context_role_set = _selinux.context_role_set
+
+def context_user_set(*args):
+ return _selinux.context_user_set(*args)
+context_user_set = _selinux.context_user_set
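+# Illustrative usage of the context_*() wrappers above (a minimal sketch;
+# the context string is a made-up example). They mirror libselinux's
+# context(3) API for picking apart and editing a security context:
+#
+#     con = context_new("system_u:object_r:etc_t:s0")
+#     context_type_get(con)           # -> "etc_t"
+#     context_type_set(con, "bin_t")  # replace the type field
+#     context_str(con)                # -> "system_u:object_r:bin_t:s0"
+#     context_free(con)               # release the underlying struct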
+SECCLASS_SECURITY = _selinux.SECCLASS_SECURITY
+SECCLASS_PROCESS = _selinux.SECCLASS_PROCESS
+SECCLASS_SYSTEM = _selinux.SECCLASS_SYSTEM
+SECCLASS_CAPABILITY = _selinux.SECCLASS_CAPABILITY
+SECCLASS_FILESYSTEM = _selinux.SECCLASS_FILESYSTEM
+SECCLASS_FILE = _selinux.SECCLASS_FILE
+SECCLASS_DIR = _selinux.SECCLASS_DIR
+SECCLASS_FD = _selinux.SECCLASS_FD
+SECCLASS_LNK_FILE = _selinux.SECCLASS_LNK_FILE
+SECCLASS_CHR_FILE = _selinux.SECCLASS_CHR_FILE
+SECCLASS_BLK_FILE = _selinux.SECCLASS_BLK_FILE
+SECCLASS_SOCK_FILE = _selinux.SECCLASS_SOCK_FILE
+SECCLASS_FIFO_FILE = _selinux.SECCLASS_FIFO_FILE
+SECCLASS_SOCKET = _selinux.SECCLASS_SOCKET
+SECCLASS_TCP_SOCKET = _selinux.SECCLASS_TCP_SOCKET
+SECCLASS_UDP_SOCKET = _selinux.SECCLASS_UDP_SOCKET
+SECCLASS_RAWIP_SOCKET = _selinux.SECCLASS_RAWIP_SOCKET
+SECCLASS_NODE = _selinux.SECCLASS_NODE
+SECCLASS_NETIF = _selinux.SECCLASS_NETIF
+SECCLASS_NETLINK_SOCKET = _selinux.SECCLASS_NETLINK_SOCKET
+SECCLASS_PACKET_SOCKET = _selinux.SECCLASS_PACKET_SOCKET
+SECCLASS_KEY_SOCKET = _selinux.SECCLASS_KEY_SOCKET
+SECCLASS_UNIX_STREAM_SOCKET = _selinux.SECCLASS_UNIX_STREAM_SOCKET
+SECCLASS_UNIX_DGRAM_SOCKET = _selinux.SECCLASS_UNIX_DGRAM_SOCKET
+SECCLASS_SEM = _selinux.SECCLASS_SEM
+SECCLASS_MSG = _selinux.SECCLASS_MSG
+SECCLASS_MSGQ = _selinux.SECCLASS_MSGQ
+SECCLASS_SHM = _selinux.SECCLASS_SHM
+SECCLASS_IPC = _selinux.SECCLASS_IPC
+SECCLASS_PASSWD = _selinux.SECCLASS_PASSWD
+SECCLASS_X_DRAWABLE = _selinux.SECCLASS_X_DRAWABLE
+SECCLASS_X_SCREEN = _selinux.SECCLASS_X_SCREEN
+SECCLASS_X_GC = _selinux.SECCLASS_X_GC
+SECCLASS_X_FONT = _selinux.SECCLASS_X_FONT
+SECCLASS_X_COLORMAP = _selinux.SECCLASS_X_COLORMAP
+SECCLASS_X_PROPERTY = _selinux.SECCLASS_X_PROPERTY
+SECCLASS_X_SELECTION = _selinux.SECCLASS_X_SELECTION
+SECCLASS_X_CURSOR = _selinux.SECCLASS_X_CURSOR
+SECCLASS_X_CLIENT = _selinux.SECCLASS_X_CLIENT
+SECCLASS_X_DEVICE = _selinux.SECCLASS_X_DEVICE
+SECCLASS_X_SERVER = _selinux.SECCLASS_X_SERVER
+SECCLASS_X_EXTENSION = _selinux.SECCLASS_X_EXTENSION
+SECCLASS_NETLINK_ROUTE_SOCKET = _selinux.SECCLASS_NETLINK_ROUTE_SOCKET
+SECCLASS_NETLINK_FIREWALL_SOCKET = _selinux.SECCLASS_NETLINK_FIREWALL_SOCKET
+SECCLASS_NETLINK_TCPDIAG_SOCKET = _selinux.SECCLASS_NETLINK_TCPDIAG_SOCKET
+SECCLASS_NETLINK_NFLOG_SOCKET = _selinux.SECCLASS_NETLINK_NFLOG_SOCKET
+SECCLASS_NETLINK_XFRM_SOCKET = _selinux.SECCLASS_NETLINK_XFRM_SOCKET
+SECCLASS_NETLINK_SELINUX_SOCKET = _selinux.SECCLASS_NETLINK_SELINUX_SOCKET
+SECCLASS_NETLINK_AUDIT_SOCKET = _selinux.SECCLASS_NETLINK_AUDIT_SOCKET
+SECCLASS_NETLINK_IP6FW_SOCKET = _selinux.SECCLASS_NETLINK_IP6FW_SOCKET
+SECCLASS_NETLINK_DNRT_SOCKET = _selinux.SECCLASS_NETLINK_DNRT_SOCKET
+SECCLASS_DBUS = _selinux.SECCLASS_DBUS
+SECCLASS_NSCD = _selinux.SECCLASS_NSCD
+SECCLASS_ASSOCIATION = _selinux.SECCLASS_ASSOCIATION
+SECCLASS_NETLINK_KOBJECT_UEVENT_SOCKET = _selinux.SECCLASS_NETLINK_KOBJECT_UEVENT_SOCKET
+SECCLASS_APPLETALK_SOCKET = _selinux.SECCLASS_APPLETALK_SOCKET
+SECCLASS_PACKET = _selinux.SECCLASS_PACKET
+SECCLASS_KEY = _selinux.SECCLASS_KEY
+SECCLASS_CONTEXT = _selinux.SECCLASS_CONTEXT
+SECCLASS_DCCP_SOCKET = _selinux.SECCLASS_DCCP_SOCKET
+SECCLASS_MEMPROTECT = _selinux.SECCLASS_MEMPROTECT
+SECCLASS_DB_DATABASE = _selinux.SECCLASS_DB_DATABASE
+SECCLASS_DB_TABLE = _selinux.SECCLASS_DB_TABLE
+SECCLASS_DB_PROCEDURE = _selinux.SECCLASS_DB_PROCEDURE
+SECCLASS_DB_COLUMN = _selinux.SECCLASS_DB_COLUMN
+SECCLASS_DB_TUPLE = _selinux.SECCLASS_DB_TUPLE
+SECCLASS_DB_BLOB = _selinux.SECCLASS_DB_BLOB
+SECCLASS_PEER = _selinux.SECCLASS_PEER
+SECCLASS_CAPABILITY2 = _selinux.SECCLASS_CAPABILITY2
+SECCLASS_X_RESOURCE = _selinux.SECCLASS_X_RESOURCE
+SECCLASS_X_EVENT = _selinux.SECCLASS_X_EVENT
+SECCLASS_X_SYNTHETIC_EVENT = _selinux.SECCLASS_X_SYNTHETIC_EVENT
+SECCLASS_X_APPLICATION_DATA = _selinux.SECCLASS_X_APPLICATION_DATA
+SECINITSID_KERNEL = _selinux.SECINITSID_KERNEL
+SECINITSID_SECURITY = _selinux.SECINITSID_SECURITY
+SECINITSID_UNLABELED = _selinux.SECINITSID_UNLABELED
+SECINITSID_FS = _selinux.SECINITSID_FS
+SECINITSID_FILE = _selinux.SECINITSID_FILE
+SECINITSID_FILE_LABELS = _selinux.SECINITSID_FILE_LABELS
+SECINITSID_INIT = _selinux.SECINITSID_INIT
+SECINITSID_ANY_SOCKET = _selinux.SECINITSID_ANY_SOCKET
+SECINITSID_PORT = _selinux.SECINITSID_PORT
+SECINITSID_NETIF = _selinux.SECINITSID_NETIF
+SECINITSID_NETMSG = _selinux.SECINITSID_NETMSG
+SECINITSID_NODE = _selinux.SECINITSID_NODE
+SECINITSID_IGMP_PACKET = _selinux.SECINITSID_IGMP_PACKET
+SECINITSID_ICMP_SOCKET = _selinux.SECINITSID_ICMP_SOCKET
+SECINITSID_TCP_SOCKET = _selinux.SECINITSID_TCP_SOCKET
+SECINITSID_SYSCTL_MODPROBE = _selinux.SECINITSID_SYSCTL_MODPROBE
+SECINITSID_SYSCTL = _selinux.SECINITSID_SYSCTL
+SECINITSID_SYSCTL_FS = _selinux.SECINITSID_SYSCTL_FS
+SECINITSID_SYSCTL_KERNEL = _selinux.SECINITSID_SYSCTL_KERNEL
+SECINITSID_SYSCTL_NET = _selinux.SECINITSID_SYSCTL_NET
+SECINITSID_SYSCTL_NET_UNIX = _selinux.SECINITSID_SYSCTL_NET_UNIX
+SECINITSID_SYSCTL_VM = _selinux.SECINITSID_SYSCTL_VM
+SECINITSID_SYSCTL_DEV = _selinux.SECINITSID_SYSCTL_DEV
+SECINITSID_KMOD = _selinux.SECINITSID_KMOD
+SECINITSID_POLICY = _selinux.SECINITSID_POLICY
+SECINITSID_SCMP_PACKET = _selinux.SECINITSID_SCMP_PACKET
+SECINITSID_DEVNULL = _selinux.SECINITSID_DEVNULL
+SECINITSID_NUM = _selinux.SECINITSID_NUM
+SELINUX_DEFAULTUSER = _selinux.SELINUX_DEFAULTUSER
+
+def get_ordered_context_list(*args):
+ return _selinux.get_ordered_context_list(*args)
+get_ordered_context_list = _selinux.get_ordered_context_list
+
+def get_ordered_context_list_with_level(*args):
+ return _selinux.get_ordered_context_list_with_level(*args)
+get_ordered_context_list_with_level = _selinux.get_ordered_context_list_with_level
+
+def get_default_context(*args):
+ return _selinux.get_default_context(*args)
+get_default_context = _selinux.get_default_context
+
+def get_default_context_with_level(*args):
+ return _selinux.get_default_context_with_level(*args)
+get_default_context_with_level = _selinux.get_default_context_with_level
+
+def get_default_context_with_role(*args):
+ return _selinux.get_default_context_with_role(*args)
+get_default_context_with_role = _selinux.get_default_context_with_role
+
+def get_default_context_with_rolelevel(*args):
+ return _selinux.get_default_context_with_rolelevel(*args)
+get_default_context_with_rolelevel = _selinux.get_default_context_with_rolelevel
+
+def query_user_context():
+ return _selinux.query_user_context()
+query_user_context = _selinux.query_user_context
+
+def manual_user_enter_context(*args):
+ return _selinux.manual_user_enter_context(*args)
+manual_user_enter_context = _selinux.manual_user_enter_context
+
+def selinux_default_type_path():
+ return _selinux.selinux_default_type_path()
+selinux_default_type_path = _selinux.selinux_default_type_path
+
+def get_default_type(*args):
+ return _selinux.get_default_type(*args)
+get_default_type = _selinux.get_default_type
+SELABEL_CTX_FILE = _selinux.SELABEL_CTX_FILE
+SELABEL_CTX_MEDIA = _selinux.SELABEL_CTX_MEDIA
+SELABEL_CTX_X = _selinux.SELABEL_CTX_X
+SELABEL_CTX_DB = _selinux.SELABEL_CTX_DB
+SELABEL_CTX_ANDROID_PROP = _selinux.SELABEL_CTX_ANDROID_PROP
+SELABEL_OPT_UNUSED = _selinux.SELABEL_OPT_UNUSED
+SELABEL_OPT_VALIDATE = _selinux.SELABEL_OPT_VALIDATE
+SELABEL_OPT_BASEONLY = _selinux.SELABEL_OPT_BASEONLY
+SELABEL_OPT_PATH = _selinux.SELABEL_OPT_PATH
+SELABEL_OPT_SUBSET = _selinux.SELABEL_OPT_SUBSET
+SELABEL_NOPT = _selinux.SELABEL_NOPT
+
+def selabel_open(*args):
+ return _selinux.selabel_open(*args)
+selabel_open = _selinux.selabel_open
+
+def selabel_close(*args):
+ return _selinux.selabel_close(*args)
+selabel_close = _selinux.selabel_close
+
+def selabel_lookup(*args):
+ return _selinux.selabel_lookup(*args)
+selabel_lookup = _selinux.selabel_lookup
+
+def selabel_lookup_raw(*args):
+ return _selinux.selabel_lookup_raw(*args)
+selabel_lookup_raw = _selinux.selabel_lookup_raw
+
+def selabel_partial_match(*args):
+ return _selinux.selabel_partial_match(*args)
+selabel_partial_match = _selinux.selabel_partial_match
+
+def selabel_lookup_best_match(*args):
+ return _selinux.selabel_lookup_best_match(*args)
+selabel_lookup_best_match = _selinux.selabel_lookup_best_match
+
+def selabel_lookup_best_match_raw(*args):
+ return _selinux.selabel_lookup_best_match_raw(*args)
+selabel_lookup_best_match_raw = _selinux.selabel_lookup_best_match_raw
+
+def selabel_stats(*args):
+ return _selinux.selabel_stats(*args)
+selabel_stats = _selinux.selabel_stats
+SELABEL_X_PROP = _selinux.SELABEL_X_PROP
+SELABEL_X_EXT = _selinux.SELABEL_X_EXT
+SELABEL_X_CLIENT = _selinux.SELABEL_X_CLIENT
+SELABEL_X_EVENT = _selinux.SELABEL_X_EVENT
+SELABEL_X_SELN = _selinux.SELABEL_X_SELN
+SELABEL_X_POLYPROP = _selinux.SELABEL_X_POLYPROP
+SELABEL_X_POLYSELN = _selinux.SELABEL_X_POLYSELN
+SELABEL_DB_DATABASE = _selinux.SELABEL_DB_DATABASE
+SELABEL_DB_SCHEMA = _selinux.SELABEL_DB_SCHEMA
+SELABEL_DB_TABLE = _selinux.SELABEL_DB_TABLE
+SELABEL_DB_COLUMN = _selinux.SELABEL_DB_COLUMN
+SELABEL_DB_SEQUENCE = _selinux.SELABEL_DB_SEQUENCE
+SELABEL_DB_VIEW = _selinux.SELABEL_DB_VIEW
+SELABEL_DB_PROCEDURE = _selinux.SELABEL_DB_PROCEDURE
+SELABEL_DB_BLOB = _selinux.SELABEL_DB_BLOB
+SELABEL_DB_TUPLE = _selinux.SELABEL_DB_TUPLE
+SELABEL_DB_LANGUAGE = _selinux.SELABEL_DB_LANGUAGE
+SELABEL_DB_EXCEPTION = _selinux.SELABEL_DB_EXCEPTION
+SELABEL_DB_DATATYPE = _selinux.SELABEL_DB_DATATYPE
+
+def is_selinux_enabled():
+ return _selinux.is_selinux_enabled()
+is_selinux_enabled = _selinux.is_selinux_enabled
+
+def is_selinux_mls_enabled():
+ return _selinux.is_selinux_mls_enabled()
+is_selinux_mls_enabled = _selinux.is_selinux_mls_enabled
+
+def getcon():
+ return _selinux.getcon()
+getcon = _selinux.getcon
+
+def getcon_raw():
+ return _selinux.getcon_raw()
+getcon_raw = _selinux.getcon_raw
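+# Illustrative usage (a minimal sketch; assumes an SELinux-enabled host).
+# In these SWIG bindings getcon() returns a [rc, context] pair rather than
+# writing through an out-pointer as the C API does:
+#
+#     rc, con = getcon()
+#     if rc == 0:
+#         print con   # e.g. "u:r:shell:s0" on Android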
+
+def setcon(*args):
+ return _selinux.setcon(*args)
+setcon = _selinux.setcon
+
+def setcon_raw(*args):
+ return _selinux.setcon_raw(*args)
+setcon_raw = _selinux.setcon_raw
+
+def getpidcon(*args):
+ return _selinux.getpidcon(*args)
+getpidcon = _selinux.getpidcon
+
+def getpidcon_raw(*args):
+ return _selinux.getpidcon_raw(*args)
+getpidcon_raw = _selinux.getpidcon_raw
+
+def getprevcon():
+ return _selinux.getprevcon()
+getprevcon = _selinux.getprevcon
+
+def getprevcon_raw():
+ return _selinux.getprevcon_raw()
+getprevcon_raw = _selinux.getprevcon_raw
+
+def getexeccon():
+ return _selinux.getexeccon()
+getexeccon = _selinux.getexeccon
+
+def getexeccon_raw():
+ return _selinux.getexeccon_raw()
+getexeccon_raw = _selinux.getexeccon_raw
+
+def setexeccon(*args):
+ return _selinux.setexeccon(*args)
+setexeccon = _selinux.setexeccon
+
+def setexeccon_raw(*args):
+ return _selinux.setexeccon_raw(*args)
+setexeccon_raw = _selinux.setexeccon_raw
+
+def getfscreatecon():
+ return _selinux.getfscreatecon()
+getfscreatecon = _selinux.getfscreatecon
+
+def getfscreatecon_raw():
+ return _selinux.getfscreatecon_raw()
+getfscreatecon_raw = _selinux.getfscreatecon_raw
+
+def setfscreatecon(*args):
+ return _selinux.setfscreatecon(*args)
+setfscreatecon = _selinux.setfscreatecon
+
+def setfscreatecon_raw(*args):
+ return _selinux.setfscreatecon_raw(*args)
+setfscreatecon_raw = _selinux.setfscreatecon_raw
+
+def getkeycreatecon():
+ return _selinux.getkeycreatecon()
+getkeycreatecon = _selinux.getkeycreatecon
+
+def getkeycreatecon_raw():
+ return _selinux.getkeycreatecon_raw()
+getkeycreatecon_raw = _selinux.getkeycreatecon_raw
+
+def setkeycreatecon(*args):
+ return _selinux.setkeycreatecon(*args)
+setkeycreatecon = _selinux.setkeycreatecon
+
+def setkeycreatecon_raw(*args):
+ return _selinux.setkeycreatecon_raw(*args)
+setkeycreatecon_raw = _selinux.setkeycreatecon_raw
+
+def getsockcreatecon():
+ return _selinux.getsockcreatecon()
+getsockcreatecon = _selinux.getsockcreatecon
+
+def getsockcreatecon_raw():
+ return _selinux.getsockcreatecon_raw()
+getsockcreatecon_raw = _selinux.getsockcreatecon_raw
+
+def setsockcreatecon(*args):
+ return _selinux.setsockcreatecon(*args)
+setsockcreatecon = _selinux.setsockcreatecon
+
+def setsockcreatecon_raw(*args):
+ return _selinux.setsockcreatecon_raw(*args)
+setsockcreatecon_raw = _selinux.setsockcreatecon_raw
+
+def getfilecon(*args):
+ return _selinux.getfilecon(*args)
+getfilecon = _selinux.getfilecon
+
+def getfilecon_raw(*args):
+ return _selinux.getfilecon_raw(*args)
+getfilecon_raw = _selinux.getfilecon_raw
+
+def lgetfilecon(*args):
+ return _selinux.lgetfilecon(*args)
+lgetfilecon = _selinux.lgetfilecon
+
+def lgetfilecon_raw(*args):
+ return _selinux.lgetfilecon_raw(*args)
+lgetfilecon_raw = _selinux.lgetfilecon_raw
+
+def fgetfilecon(*args):
+ return _selinux.fgetfilecon(*args)
+fgetfilecon = _selinux.fgetfilecon
+
+def fgetfilecon_raw(*args):
+ return _selinux.fgetfilecon_raw(*args)
+fgetfilecon_raw = _selinux.fgetfilecon_raw
+
+def setfilecon(*args):
+ return _selinux.setfilecon(*args)
+setfilecon = _selinux.setfilecon
+
+def setfilecon_raw(*args):
+ return _selinux.setfilecon_raw(*args)
+setfilecon_raw = _selinux.setfilecon_raw
+
+def lsetfilecon(*args):
+ return _selinux.lsetfilecon(*args)
+lsetfilecon = _selinux.lsetfilecon
+
+def lsetfilecon_raw(*args):
+ return _selinux.lsetfilecon_raw(*args)
+lsetfilecon_raw = _selinux.lsetfilecon_raw
+
+def fsetfilecon(*args):
+ return _selinux.fsetfilecon(*args)
+fsetfilecon = _selinux.fsetfilecon
+
+def fsetfilecon_raw(*args):
+ return _selinux.fsetfilecon_raw(*args)
+fsetfilecon_raw = _selinux.fsetfilecon_raw
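+# Illustrative usage (a minimal sketch; the path and context are examples).
+# getfilecon() follows symlinks, lgetfilecon() does not, and fgetfilecon()
+# works on an open file descriptor; the getters return a [size, context]
+# pair, while the setters return a plain status code:
+#
+#     rc, con = getfilecon("/etc/passwd")
+#     rc = lsetfilecon("/tmp/foo", "u:object_r:shell_data_file:s0")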
+
+def getpeercon(*args):
+ return _selinux.getpeercon(*args)
+getpeercon = _selinux.getpeercon
+
+def getpeercon_raw(*args):
+ return _selinux.getpeercon_raw(*args)
+getpeercon_raw = _selinux.getpeercon_raw
+class av_decision(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, av_decision, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, av_decision, name)
+ __repr__ = _swig_repr
+ __swig_setmethods__["allowed"] = _selinux.av_decision_allowed_set
+ __swig_getmethods__["allowed"] = _selinux.av_decision_allowed_get
+ if _newclass:allowed = _swig_property(_selinux.av_decision_allowed_get, _selinux.av_decision_allowed_set)
+ __swig_setmethods__["decided"] = _selinux.av_decision_decided_set
+ __swig_getmethods__["decided"] = _selinux.av_decision_decided_get
+ if _newclass:decided = _swig_property(_selinux.av_decision_decided_get, _selinux.av_decision_decided_set)
+ __swig_setmethods__["auditallow"] = _selinux.av_decision_auditallow_set
+ __swig_getmethods__["auditallow"] = _selinux.av_decision_auditallow_get
+ if _newclass:auditallow = _swig_property(_selinux.av_decision_auditallow_get, _selinux.av_decision_auditallow_set)
+ __swig_setmethods__["auditdeny"] = _selinux.av_decision_auditdeny_set
+ __swig_getmethods__["auditdeny"] = _selinux.av_decision_auditdeny_get
+ if _newclass:auditdeny = _swig_property(_selinux.av_decision_auditdeny_get, _selinux.av_decision_auditdeny_set)
+ __swig_setmethods__["seqno"] = _selinux.av_decision_seqno_set
+ __swig_getmethods__["seqno"] = _selinux.av_decision_seqno_get
+ if _newclass:seqno = _swig_property(_selinux.av_decision_seqno_get, _selinux.av_decision_seqno_set)
+ __swig_setmethods__["flags"] = _selinux.av_decision_flags_set
+ __swig_getmethods__["flags"] = _selinux.av_decision_flags_get
+ if _newclass:flags = _swig_property(_selinux.av_decision_flags_get, _selinux.av_decision_flags_set)
+ def __init__(self):
+ this = _selinux.new_av_decision()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _selinux.delete_av_decision
+ __del__ = lambda self : None;
+av_decision_swigregister = _selinux.av_decision_swigregister
+av_decision_swigregister(av_decision)
+
+SELINUX_AVD_FLAGS_PERMISSIVE = _selinux.SELINUX_AVD_FLAGS_PERMISSIVE
+class selinux_opt(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, selinux_opt, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, selinux_opt, name)
+ __repr__ = _swig_repr
+ __swig_setmethods__["type"] = _selinux.selinux_opt_type_set
+ __swig_getmethods__["type"] = _selinux.selinux_opt_type_get
+ if _newclass:type = _swig_property(_selinux.selinux_opt_type_get, _selinux.selinux_opt_type_set)
+ __swig_setmethods__["value"] = _selinux.selinux_opt_value_set
+ __swig_getmethods__["value"] = _selinux.selinux_opt_value_get
+ if _newclass:value = _swig_property(_selinux.selinux_opt_value_get, _selinux.selinux_opt_value_set)
+ def __init__(self):
+ this = _selinux.new_selinux_opt()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _selinux.delete_selinux_opt
+ __del__ = lambda self : None;
+selinux_opt_swigregister = _selinux.selinux_opt_swigregister
+selinux_opt_swigregister(selinux_opt)
+
+class selinux_callback(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, selinux_callback, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, selinux_callback, name)
+ __repr__ = _swig_repr
+ __swig_setmethods__["func_log"] = _selinux.selinux_callback_func_log_set
+ __swig_getmethods__["func_log"] = _selinux.selinux_callback_func_log_get
+ if _newclass:func_log = _swig_property(_selinux.selinux_callback_func_log_get, _selinux.selinux_callback_func_log_set)
+ __swig_setmethods__["func_audit"] = _selinux.selinux_callback_func_audit_set
+ __swig_getmethods__["func_audit"] = _selinux.selinux_callback_func_audit_get
+ if _newclass:func_audit = _swig_property(_selinux.selinux_callback_func_audit_get, _selinux.selinux_callback_func_audit_set)
+ __swig_setmethods__["func_validate"] = _selinux.selinux_callback_func_validate_set
+ __swig_getmethods__["func_validate"] = _selinux.selinux_callback_func_validate_get
+ if _newclass:func_validate = _swig_property(_selinux.selinux_callback_func_validate_get, _selinux.selinux_callback_func_validate_set)
+ __swig_setmethods__["func_setenforce"] = _selinux.selinux_callback_func_setenforce_set
+ __swig_getmethods__["func_setenforce"] = _selinux.selinux_callback_func_setenforce_get
+ if _newclass:func_setenforce = _swig_property(_selinux.selinux_callback_func_setenforce_get, _selinux.selinux_callback_func_setenforce_set)
+ __swig_setmethods__["func_policyload"] = _selinux.selinux_callback_func_policyload_set
+ __swig_getmethods__["func_policyload"] = _selinux.selinux_callback_func_policyload_get
+ if _newclass:func_policyload = _swig_property(_selinux.selinux_callback_func_policyload_get, _selinux.selinux_callback_func_policyload_set)
+ def __init__(self):
+ this = _selinux.new_selinux_callback()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _selinux.delete_selinux_callback
+ __del__ = lambda self : None;
+selinux_callback_swigregister = _selinux.selinux_callback_swigregister
+selinux_callback_swigregister(selinux_callback)
+
+SELINUX_CB_LOG = _selinux.SELINUX_CB_LOG
+SELINUX_CB_AUDIT = _selinux.SELINUX_CB_AUDIT
+SELINUX_CB_VALIDATE = _selinux.SELINUX_CB_VALIDATE
+SELINUX_CB_SETENFORCE = _selinux.SELINUX_CB_SETENFORCE
+SELINUX_CB_POLICYLOAD = _selinux.SELINUX_CB_POLICYLOAD
+
+def selinux_get_callback(*args):
+ return _selinux.selinux_get_callback(*args)
+selinux_get_callback = _selinux.selinux_get_callback
+
+def selinux_set_callback(*args):
+ return _selinux.selinux_set_callback(*args)
+selinux_set_callback = _selinux.selinux_set_callback
+SELINUX_ERROR = _selinux.SELINUX_ERROR
+SELINUX_WARNING = _selinux.SELINUX_WARNING
+SELINUX_INFO = _selinux.SELINUX_INFO
+SELINUX_AVC = _selinux.SELINUX_AVC
+SELINUX_TRANS_DIR = _selinux.SELINUX_TRANS_DIR
+
+def security_compute_av(*args):
+ return _selinux.security_compute_av(*args)
+security_compute_av = _selinux.security_compute_av
+
+def security_compute_av_raw(*args):
+ return _selinux.security_compute_av_raw(*args)
+security_compute_av_raw = _selinux.security_compute_av_raw
+
+def security_compute_av_flags(*args):
+ return _selinux.security_compute_av_flags(*args)
+security_compute_av_flags = _selinux.security_compute_av_flags
+
+def security_compute_av_flags_raw(*args):
+ return _selinux.security_compute_av_flags_raw(*args)
+security_compute_av_flags_raw = _selinux.security_compute_av_flags_raw
+
+def security_compute_create(*args):
+ return _selinux.security_compute_create(*args)
+security_compute_create = _selinux.security_compute_create
+
+def security_compute_create_raw(*args):
+ return _selinux.security_compute_create_raw(*args)
+security_compute_create_raw = _selinux.security_compute_create_raw
+
+def security_compute_create_name(*args):
+ return _selinux.security_compute_create_name(*args)
+security_compute_create_name = _selinux.security_compute_create_name
+
+def security_compute_create_name_raw(*args):
+ return _selinux.security_compute_create_name_raw(*args)
+security_compute_create_name_raw = _selinux.security_compute_create_name_raw
+
+def security_compute_relabel(*args):
+ return _selinux.security_compute_relabel(*args)
+security_compute_relabel = _selinux.security_compute_relabel
+
+def security_compute_relabel_raw(*args):
+ return _selinux.security_compute_relabel_raw(*args)
+security_compute_relabel_raw = _selinux.security_compute_relabel_raw
+
+def security_compute_member(*args):
+ return _selinux.security_compute_member(*args)
+security_compute_member = _selinux.security_compute_member
+
+def security_compute_member_raw(*args):
+ return _selinux.security_compute_member_raw(*args)
+security_compute_member_raw = _selinux.security_compute_member_raw
+
+def security_compute_user(*args):
+ return _selinux.security_compute_user(*args)
+security_compute_user = _selinux.security_compute_user
+
+def security_compute_user_raw(*args):
+ return _selinux.security_compute_user_raw(*args)
+security_compute_user_raw = _selinux.security_compute_user_raw
+
+def security_load_policy(*args):
+ return _selinux.security_load_policy(*args)
+security_load_policy = _selinux.security_load_policy
+
+def security_get_initial_context(*args):
+ return _selinux.security_get_initial_context(*args)
+security_get_initial_context = _selinux.security_get_initial_context
+
+def security_get_initial_context_raw(*args):
+ return _selinux.security_get_initial_context_raw(*args)
+security_get_initial_context_raw = _selinux.security_get_initial_context_raw
+
+def selinux_mkload_policy(*args):
+ return _selinux.selinux_mkload_policy(*args)
+selinux_mkload_policy = _selinux.selinux_mkload_policy
+
+def selinux_init_load_policy():
+ return _selinux.selinux_init_load_policy()
+selinux_init_load_policy = _selinux.selinux_init_load_policy
+class SELboolean(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, SELboolean, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, SELboolean, name)
+ __repr__ = _swig_repr
+ __swig_setmethods__["name"] = _selinux.SELboolean_name_set
+ __swig_getmethods__["name"] = _selinux.SELboolean_name_get
+ if _newclass:name = _swig_property(_selinux.SELboolean_name_get, _selinux.SELboolean_name_set)
+ __swig_setmethods__["value"] = _selinux.SELboolean_value_set
+ __swig_getmethods__["value"] = _selinux.SELboolean_value_get
+ if _newclass:value = _swig_property(_selinux.SELboolean_value_get, _selinux.SELboolean_value_set)
+ def __init__(self):
+ this = _selinux.new_SELboolean()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _selinux.delete_SELboolean
+ __del__ = lambda self : None;
+SELboolean_swigregister = _selinux.SELboolean_swigregister
+SELboolean_swigregister(SELboolean)
+
+
+def security_set_boolean_list(*args):
+ return _selinux.security_set_boolean_list(*args)
+security_set_boolean_list = _selinux.security_set_boolean_list
+
+def security_load_booleans(*args):
+ return _selinux.security_load_booleans(*args)
+security_load_booleans = _selinux.security_load_booleans
+
+def security_check_context(*args):
+ return _selinux.security_check_context(*args)
+security_check_context = _selinux.security_check_context
+
+def security_check_context_raw(*args):
+ return _selinux.security_check_context_raw(*args)
+security_check_context_raw = _selinux.security_check_context_raw
+
+def security_canonicalize_context(*args):
+ return _selinux.security_canonicalize_context(*args)
+security_canonicalize_context = _selinux.security_canonicalize_context
+
+def security_canonicalize_context_raw(*args):
+ return _selinux.security_canonicalize_context_raw(*args)
+security_canonicalize_context_raw = _selinux.security_canonicalize_context_raw
+
+def security_getenforce():
+ return _selinux.security_getenforce()
+security_getenforce = _selinux.security_getenforce
+
+def security_setenforce(*args):
+ return _selinux.security_setenforce(*args)
+security_setenforce = _selinux.security_setenforce
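+# Illustrative usage (a minimal sketch). security_getenforce() reports the
+# kernel's current mode and security_setenforce() switches it, which
+# normally requires privilege:
+#
+#     if security_getenforce() == 1:   # 1 = enforcing, 0 = permissive
+#         security_setenforce(0)       # drop to permissive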
+
+def security_deny_unknown():
+ return _selinux.security_deny_unknown()
+security_deny_unknown = _selinux.security_deny_unknown
+
+def security_disable():
+ return _selinux.security_disable()
+security_disable = _selinux.security_disable
+
+def security_policyvers():
+ return _selinux.security_policyvers()
+security_policyvers = _selinux.security_policyvers
+
+def security_get_boolean_names():
+ return _selinux.security_get_boolean_names()
+security_get_boolean_names = _selinux.security_get_boolean_names
+
+def security_get_boolean_pending(*args):
+ return _selinux.security_get_boolean_pending(*args)
+security_get_boolean_pending = _selinux.security_get_boolean_pending
+
+def security_get_boolean_active(*args):
+ return _selinux.security_get_boolean_active(*args)
+security_get_boolean_active = _selinux.security_get_boolean_active
+
+def security_set_boolean(*args):
+ return _selinux.security_set_boolean(*args)
+security_set_boolean = _selinux.security_set_boolean
+
+def security_commit_booleans():
+ return _selinux.security_commit_booleans()
+security_commit_booleans = _selinux.security_commit_booleans
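+# Illustrative usage (a minimal sketch; assumes these bindings wrap the
+# boolean-name query as a [rc, names] pair, matching the other out-pointer
+# typemaps in this module):
+#
+#     rc, names = security_get_boolean_names()
+#     for name in names:
+#         print name, security_get_boolean_active(name)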
+class security_class_mapping(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, security_class_mapping, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, security_class_mapping, name)
+ __repr__ = _swig_repr
+ __swig_setmethods__["name"] = _selinux.security_class_mapping_name_set
+ __swig_getmethods__["name"] = _selinux.security_class_mapping_name_get
+ if _newclass:name = _swig_property(_selinux.security_class_mapping_name_get, _selinux.security_class_mapping_name_set)
+ __swig_setmethods__["perms"] = _selinux.security_class_mapping_perms_set
+ __swig_getmethods__["perms"] = _selinux.security_class_mapping_perms_get
+ if _newclass:perms = _swig_property(_selinux.security_class_mapping_perms_get, _selinux.security_class_mapping_perms_set)
+ def __init__(self):
+ this = _selinux.new_security_class_mapping()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _selinux.delete_security_class_mapping
+ __del__ = lambda self : None;
+security_class_mapping_swigregister = _selinux.security_class_mapping_swigregister
+security_class_mapping_swigregister(security_class_mapping)
+
+
+def selinux_set_mapping(*args):
+ return _selinux.selinux_set_mapping(*args)
+selinux_set_mapping = _selinux.selinux_set_mapping
+
+def mode_to_security_class(*args):
+ return _selinux.mode_to_security_class(*args)
+mode_to_security_class = _selinux.mode_to_security_class
+
+def string_to_security_class(*args):
+ return _selinux.string_to_security_class(*args)
+string_to_security_class = _selinux.string_to_security_class
+
+def security_class_to_string(*args):
+ return _selinux.security_class_to_string(*args)
+security_class_to_string = _selinux.security_class_to_string
+
+def security_av_perm_to_string(*args):
+ return _selinux.security_av_perm_to_string(*args)
+security_av_perm_to_string = _selinux.security_av_perm_to_string
+
+def string_to_av_perm(*args):
+ return _selinux.string_to_av_perm(*args)
+string_to_av_perm = _selinux.string_to_av_perm
+
+def security_av_string(*args):
+ return _selinux.security_av_string(*args)
+security_av_string = _selinux.security_av_string
+
+def print_access_vector(*args):
+ return _selinux.print_access_vector(*args)
+print_access_vector = _selinux.print_access_vector
+MATCHPATHCON_BASEONLY = _selinux.MATCHPATHCON_BASEONLY
+MATCHPATHCON_NOTRANS = _selinux.MATCHPATHCON_NOTRANS
+MATCHPATHCON_VALIDATE = _selinux.MATCHPATHCON_VALIDATE
+
+def set_matchpathcon_flags(*args):
+ return _selinux.set_matchpathcon_flags(*args)
+set_matchpathcon_flags = _selinux.set_matchpathcon_flags
+
+def matchpathcon_init(*args):
+ return _selinux.matchpathcon_init(*args)
+matchpathcon_init = _selinux.matchpathcon_init
+
+def matchpathcon_init_prefix(*args):
+ return _selinux.matchpathcon_init_prefix(*args)
+matchpathcon_init_prefix = _selinux.matchpathcon_init_prefix
+
+def matchpathcon_fini():
+ return _selinux.matchpathcon_fini()
+matchpathcon_fini = _selinux.matchpathcon_fini
+
+def realpath_not_final(*args):
+ return _selinux.realpath_not_final(*args)
+realpath_not_final = _selinux.realpath_not_final
+
+def matchpathcon(*args):
+ return _selinux.matchpathcon(*args)
+matchpathcon = _selinux.matchpathcon
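+# Illustrative usage (a minimal sketch; the path is an example).
+# matchpathcon() looks up the context that the file_contexts configuration
+# assigns to a path; the second argument is the file's mode (0 if unknown):
+#
+#     rc, con = matchpathcon("/system/bin/sh", 0)
+#     if rc == 0:
+#         print con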
+
+def matchpathcon_index(*args):
+ return _selinux.matchpathcon_index(*args)
+matchpathcon_index = _selinux.matchpathcon_index
+
+def matchpathcon_filespec_add(*args):
+ return _selinux.matchpathcon_filespec_add(*args)
+matchpathcon_filespec_add = _selinux.matchpathcon_filespec_add
+
+def matchpathcon_filespec_destroy():
+ return _selinux.matchpathcon_filespec_destroy()
+matchpathcon_filespec_destroy = _selinux.matchpathcon_filespec_destroy
+
+def matchpathcon_filespec_eval():
+ return _selinux.matchpathcon_filespec_eval()
+matchpathcon_filespec_eval = _selinux.matchpathcon_filespec_eval
+
+def matchpathcon_checkmatches(*args):
+ return _selinux.matchpathcon_checkmatches(*args)
+matchpathcon_checkmatches = _selinux.matchpathcon_checkmatches
+
+def matchmediacon(*args):
+ return _selinux.matchmediacon(*args)
+matchmediacon = _selinux.matchmediacon
+
+def selinux_getenforcemode():
+ return _selinux.selinux_getenforcemode()
+selinux_getenforcemode = _selinux.selinux_getenforcemode
+
+def selinux_boolean_sub(*args):
+ return _selinux.selinux_boolean_sub(*args)
+selinux_boolean_sub = _selinux.selinux_boolean_sub
+
+def selinux_getpolicytype():
+ return _selinux.selinux_getpolicytype()
+selinux_getpolicytype = _selinux.selinux_getpolicytype
+
+def selinux_policy_root():
+ return _selinux.selinux_policy_root()
+selinux_policy_root = _selinux.selinux_policy_root
+
+def selinux_set_policy_root(*args):
+ return _selinux.selinux_set_policy_root(*args)
+selinux_set_policy_root = _selinux.selinux_set_policy_root
+
+def selinux_current_policy_path():
+ return _selinux.selinux_current_policy_path()
+selinux_current_policy_path = _selinux.selinux_current_policy_path
+
+def selinux_binary_policy_path():
+ return _selinux.selinux_binary_policy_path()
+selinux_binary_policy_path = _selinux.selinux_binary_policy_path
+
+def selinux_failsafe_context_path():
+ return _selinux.selinux_failsafe_context_path()
+selinux_failsafe_context_path = _selinux.selinux_failsafe_context_path
+
+def selinux_removable_context_path():
+ return _selinux.selinux_removable_context_path()
+selinux_removable_context_path = _selinux.selinux_removable_context_path
+
+def selinux_default_context_path():
+ return _selinux.selinux_default_context_path()
+selinux_default_context_path = _selinux.selinux_default_context_path
+
+def selinux_user_contexts_path():
+ return _selinux.selinux_user_contexts_path()
+selinux_user_contexts_path = _selinux.selinux_user_contexts_path
+
+def selinux_file_context_path():
+ return _selinux.selinux_file_context_path()
+selinux_file_context_path = _selinux.selinux_file_context_path
+
+def selinux_file_context_homedir_path():
+ return _selinux.selinux_file_context_homedir_path()
+selinux_file_context_homedir_path = _selinux.selinux_file_context_homedir_path
+
+def selinux_file_context_local_path():
+ return _selinux.selinux_file_context_local_path()
+selinux_file_context_local_path = _selinux.selinux_file_context_local_path
+
+def selinux_file_context_subs_path():
+ return _selinux.selinux_file_context_subs_path()
+selinux_file_context_subs_path = _selinux.selinux_file_context_subs_path
+
+def selinux_file_context_subs_dist_path():
+ return _selinux.selinux_file_context_subs_dist_path()
+selinux_file_context_subs_dist_path = _selinux.selinux_file_context_subs_dist_path
+
+def selinux_homedir_context_path():
+ return _selinux.selinux_homedir_context_path()
+selinux_homedir_context_path = _selinux.selinux_homedir_context_path
+
+def selinux_media_context_path():
+ return _selinux.selinux_media_context_path()
+selinux_media_context_path = _selinux.selinux_media_context_path
+
+def selinux_virtual_domain_context_path():
+ return _selinux.selinux_virtual_domain_context_path()
+selinux_virtual_domain_context_path = _selinux.selinux_virtual_domain_context_path
+
+def selinux_virtual_image_context_path():
+ return _selinux.selinux_virtual_image_context_path()
+selinux_virtual_image_context_path = _selinux.selinux_virtual_image_context_path
+
+def selinux_lxc_contexts_path():
+ return _selinux.selinux_lxc_contexts_path()
+selinux_lxc_contexts_path = _selinux.selinux_lxc_contexts_path
+
+def selinux_x_context_path():
+ return _selinux.selinux_x_context_path()
+selinux_x_context_path = _selinux.selinux_x_context_path
+
+def selinux_sepgsql_context_path():
+ return _selinux.selinux_sepgsql_context_path()
+selinux_sepgsql_context_path = _selinux.selinux_sepgsql_context_path
+
+def selinux_systemd_contexts_path():
+ return _selinux.selinux_systemd_contexts_path()
+selinux_systemd_contexts_path = _selinux.selinux_systemd_contexts_path
+
+def selinux_contexts_path():
+ return _selinux.selinux_contexts_path()
+selinux_contexts_path = _selinux.selinux_contexts_path
+
+def selinux_securetty_types_path():
+ return _selinux.selinux_securetty_types_path()
+selinux_securetty_types_path = _selinux.selinux_securetty_types_path
+
+def selinux_booleans_subs_path():
+ return _selinux.selinux_booleans_subs_path()
+selinux_booleans_subs_path = _selinux.selinux_booleans_subs_path
+
+def selinux_booleans_path():
+ return _selinux.selinux_booleans_path()
+selinux_booleans_path = _selinux.selinux_booleans_path
+
+def selinux_customizable_types_path():
+ return _selinux.selinux_customizable_types_path()
+selinux_customizable_types_path = _selinux.selinux_customizable_types_path
+
+def selinux_users_path():
+ return _selinux.selinux_users_path()
+selinux_users_path = _selinux.selinux_users_path
+
+def selinux_usersconf_path():
+ return _selinux.selinux_usersconf_path()
+selinux_usersconf_path = _selinux.selinux_usersconf_path
+
+def selinux_translations_path():
+ return _selinux.selinux_translations_path()
+selinux_translations_path = _selinux.selinux_translations_path
+
+def selinux_colors_path():
+ return _selinux.selinux_colors_path()
+selinux_colors_path = _selinux.selinux_colors_path
+
+def selinux_netfilter_context_path():
+ return _selinux.selinux_netfilter_context_path()
+selinux_netfilter_context_path = _selinux.selinux_netfilter_context_path
+
+def selinux_path():
+ return _selinux.selinux_path()
+selinux_path = _selinux.selinux_path
+
+def selinux_check_access(*args):
+ return _selinux.selinux_check_access(*args)
+selinux_check_access = _selinux.selinux_check_access
+
+def selinux_check_passwd_access(*args):
+ return _selinux.selinux_check_passwd_access(*args)
+selinux_check_passwd_access = _selinux.selinux_check_passwd_access
+
+def checkPasswdAccess(*args):
+ return _selinux.checkPasswdAccess(*args)
+checkPasswdAccess = _selinux.checkPasswdAccess
+
+def selinux_check_securetty_context(*args):
+ return _selinux.selinux_check_securetty_context(*args)
+selinux_check_securetty_context = _selinux.selinux_check_securetty_context
+
+def set_selinuxmnt(*args):
+ return _selinux.set_selinuxmnt(*args)
+set_selinuxmnt = _selinux.set_selinuxmnt
+
+def selinuxfs_exists():
+ return _selinux.selinuxfs_exists()
+selinuxfs_exists = _selinux.selinuxfs_exists
+
+def fini_selinuxmnt():
+ return _selinux.fini_selinuxmnt()
+fini_selinuxmnt = _selinux.fini_selinuxmnt
+
+def setexecfilecon(*args):
+ return _selinux.setexecfilecon(*args)
+setexecfilecon = _selinux.setexecfilecon
+
+def rpm_execcon(*args):
+ return _selinux.rpm_execcon(*args)
+rpm_execcon = _selinux.rpm_execcon
+
+def is_context_customizable(*args):
+ return _selinux.is_context_customizable(*args)
+is_context_customizable = _selinux.is_context_customizable
+
+def selinux_trans_to_raw_context(*args):
+ return _selinux.selinux_trans_to_raw_context(*args)
+selinux_trans_to_raw_context = _selinux.selinux_trans_to_raw_context
+
+def selinux_raw_to_trans_context(*args):
+ return _selinux.selinux_raw_to_trans_context(*args)
+selinux_raw_to_trans_context = _selinux.selinux_raw_to_trans_context
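+# Illustrative usage (a minimal sketch). The trans/raw pair converts between
+# human-readable (translated) contexts and the raw kernel form; without a
+# translation daemon the context comes back unchanged:
+#
+#     rc, raw = selinux_trans_to_raw_context(con)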
+
+def selinux_raw_context_to_color(*args):
+ return _selinux.selinux_raw_context_to_color(*args)
+selinux_raw_context_to_color = _selinux.selinux_raw_context_to_color
+
+def getseuserbyname(*args):
+ return _selinux.getseuserbyname(*args)
+getseuserbyname = _selinux.getseuserbyname
+
+def getseuser(*args):
+ return _selinux.getseuser(*args)
+getseuser = _selinux.getseuser
+
+def selinux_file_context_cmp(*args):
+ return _selinux.selinux_file_context_cmp(*args)
+selinux_file_context_cmp = _selinux.selinux_file_context_cmp
+
+def selinux_file_context_verify(*args):
+ return _selinux.selinux_file_context_verify(*args)
+selinux_file_context_verify = _selinux.selinux_file_context_verify
+
+def selinux_lsetfilecon_default(*args):
+ return _selinux.selinux_lsetfilecon_default(*args)
+selinux_lsetfilecon_default = _selinux.selinux_lsetfilecon_default
+
+def selinux_reset_config():
+ return _selinux.selinux_reset_config()
+selinux_reset_config = _selinux.selinux_reset_config
+# This file is compatible with both classic and new-style classes.
+
+
diff --git a/lib/python2.7/site-packages/setoolsgui/selinux/_selinux.so b/lib/python2.7/site-packages/setoolsgui/selinux/_selinux.so
new file mode 100755
index 0000000..f4a045f
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/selinux/_selinux.so
Binary files differ
diff --git a/lib/python2.7/site-packages/setoolsgui/selinux/audit2why.so b/lib/python2.7/site-packages/setoolsgui/selinux/audit2why.so
new file mode 100755
index 0000000..e7daaab
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/selinux/audit2why.so
Binary files differ
diff --git a/lib/python2.7/site-packages/setoolsgui/sepolgen/__init__.py b/lib/python2.7/site-packages/setoolsgui/sepolgen/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/sepolgen/__init__.py
diff --git a/lib/python2.7/site-packages/setoolsgui/sepolgen/access.py b/lib/python2.7/site-packages/setoolsgui/sepolgen/access.py
new file mode 100644
index 0000000..cf13210
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/sepolgen/access.py
@@ -0,0 +1,331 @@
+# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
+#
+# Copyright (C) 2006 Red Hat
+# see file 'COPYING' for use and warranty information
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2 only
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+
+"""
+Classes representing basic access.
+
+SELinux - at the most basic level - represents access as
+the 4-tuple subject (type or context), target (type or context),
+object class, permission. The policy language elaborates this basic
+access to facilitate more concise rules (e.g., allow rules can have multiple
+source or target types - see refpolicy for more information).
+
+This module has objects for representing the most basic access (AccessVector)
+and sets of that access (AccessVectorSet). These objects are used in Madison
+in a variety of ways, but they are the fundamental representation of access.
+"""
+
+import refpolicy
+from selinux import audit2why
+
+def is_idparam(id):
+ """Determine if an id is a paramater in the form $N, where N is
+ an integer.
+
+ Returns:
+ True if the id is a paramater
+ False if the id is not a paramater
+ """
+ if len(id) > 1 and id[0] == '$':
+ try:
+ int(id[1:])
+ except ValueError:
+ return False
+ return True
+ else:
+ return False
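+# For example: is_idparam("$1") and is_idparam("$42") return True, while
+# is_idparam("foo_t") and is_idparam("$x") return False.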
+
+class AccessVector:
+ """
+ An access vector is the basic unit of access in SELinux.
+
+ Access vectors are the most basic representation of access within
+ SELinux. It represents the access a source type has to a target
+ type in terms of an object class and a set of permissions.
+
+ Access vectors are distinct from AVRules in that they can only
+ store a single source type, target type, and object class. The
+ simplicity of AccessVectors makes them useful for storing access
+ in a form that is easy to search and compare.
+
+    The source, target, and object are stored as strings. No checking is
+    done to verify that the strings are valid SELinux identifiers.
+ Identifiers in the form $N (where N is an integer) are reserved as
+ interface parameters and are treated as wild cards in many
+ circumstances.
+
+ Properties:
+ .src_type - The source type allowed access. [String or None]
+ .tgt_type - The target type to which access is allowed. [String or None]
+ .obj_class - The object class to which access is allowed. [String or None]
+ .perms - The permissions allowed to the object class. [IdSet]
+ .audit_msgs - The audit messages that generated this access vector [List of strings]
+ """
+ def __init__(self, init_list=None):
+ if init_list:
+ self.from_list(init_list)
+ else:
+ self.src_type = None
+ self.tgt_type = None
+ self.obj_class = None
+ self.perms = refpolicy.IdSet()
+ self.audit_msgs = []
+ self.type = audit2why.TERULE
+ self.data = []
+
+ # The direction of the information flow represented by this
+ # access vector - used for matching
+ self.info_flow_dir = None
+
+ def from_list(self, list):
+ """Initialize an access vector from a list.
+
+ Initialize an access vector from a list treating the list as
+ positional arguments - i.e., 0 = src_type, 1 = tgt_type, etc.
+        All list elements at index 3 and greater are treated as perms.
+        For example, the list ['foo_t', 'bar_t', 'file', 'read', 'write']
+        would create an access vector with the source type 'foo_t',
+ target type 'bar_t', object class 'file', and permissions 'read'
+ and 'write'.
+
+        This format is useful for very simple storage to strings or disk
+ (see to_list) and for initializing access vectors.
+ """
+ if len(list) < 4:
+ raise ValueError("List must contain at least four elements %s" % str(list))
+ self.src_type = list[0]
+ self.tgt_type = list[1]
+ self.obj_class = list[2]
+ self.perms = refpolicy.IdSet(list[3:])
+
+ def to_list(self):
+ """
+ Convert an access vector to a list.
+
+ Convert an access vector to a list treating the list as positional
+ values. See from_list for more information on how an access vector
+ is represented in a list.
+ """
+ l = [self.src_type, self.tgt_type, self.obj_class]
+ l.extend(self.perms)
+ return l
+
+ def __str__(self):
+ return self.to_string()
+
+ def to_string(self):
+ return "allow %s %s:%s %s;" % (self.src_type, self.tgt_type,
+ self.obj_class, self.perms.to_space_str())
+
+ def __cmp__(self, other):
+ if self.src_type != other.src_type:
+ return cmp(self.src_type, other.src_type)
+ if self.tgt_type != other.tgt_type:
+ return cmp(self.tgt_type, other.tgt_type)
+        if self.obj_class != other.obj_class:
+ return cmp(self.obj_class, other.obj_class)
+ if len(self.perms) != len(other.perms):
+ return cmp(len(self.perms), len(other.perms))
+ x = list(self.perms)
+ x.sort()
+ y = list(other.perms)
+ y.sort()
+ for pa, pb in zip(x, y):
+ if pa != pb:
+ return cmp(pa, pb)
+ return 0
+
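+# Illustrative usage (not part of the original module): round-tripping
+# an access vector through the positional list format described in
+# from_list/to_list. The exact perm ordering and brace formatting come
+# from refpolicy.IdSet and may vary.
+#
+#   av = AccessVector(['foo_t', 'bar_t', 'file', 'read', 'write'])
+#   av.to_string()  # -> e.g. "allow foo_t bar_t:file { read write };"
+#   av.to_list()    # -> ['foo_t', 'bar_t', 'file', 'read', 'write']
+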
+def avrule_to_access_vectors(avrule):
+ """Convert an avrule into a list of access vectors.
+
+    AccessVectors and AVRules are similar, but differ in that
+    an AVRule can have more than one source type, target type, and
+    object class. This function expands a single avrule into a
+    list of one or more AccessVectors representing the access
+    defined in the AVRule.
+    """
+ if isinstance(avrule, AccessVector):
+ return [avrule]
+ a = []
+ for src_type in avrule.src_types:
+ for tgt_type in avrule.tgt_types:
+ for obj_class in avrule.obj_classes:
+ access = AccessVector()
+ access.src_type = src_type
+ access.tgt_type = tgt_type
+ access.obj_class = obj_class
+ access.perms = avrule.perms.copy()
+ a.append(access)
+ return a
+
+class AccessVectorSet:
+ """A non-overlapping set of access vectors.
+
+ An AccessVectorSet is designed to store one or more access vectors
+ that are non-overlapping. Access can be added to the set
+ incrementally and access vectors will be added or merged as
+ necessary. For example, adding the following access vectors using
+ add_av:
+       allow $1 etc_t : file read;
+       allow $1 etc_t : file write;
+       allow $1 var_log_t : file read;
+    Would result in an access vector set with the access vectors:
+       allow $1 etc_t : file { read write };
+       allow $1 var_log_t : file read;
+ """
+ def __init__(self):
+ """Initialize an access vector set.
+ """
+ self.src = {}
+ # The information flow direction of this access vector
+ # set - see objectmodel.py for more information. This
+        # is stored here to speed up searching - see matching.py.
+ self.info_dir = None
+
+ def __iter__(self):
+ """Iterate over all of the unique access vectors in the set."""
+ for tgts in self.src.values():
+ for objs in tgts.values():
+ for av in objs.values():
+ yield av
+
+ def __len__(self):
+ """Return the number of unique access vectors in the set.
+
+        Because of the internal representation of the access vector set,
+ __len__ is not a constant time operation. Worst case is O(N)
+ where N is the number of unique access vectors, but the common
+ case is probably better.
+ """
+ l = 0
+ for tgts in self.src.values():
+ for objs in tgts.values():
+ l += len(objs)
+ return l
+
+ def to_list(self):
+ """Return the unique access vectors in the set as a list.
+
+ The format of the returned list is a set of nested lists,
+ each access vector represented by a list. This format is
+ designed to be simply serializable to a file.
+
+ For example, consider an access vector set with the following
+ access vectors:
+ allow $1 user_t : file read;
+ allow $1 etc_t : file { read write};
+ to_list would return the following:
+           [['$1', 'user_t', 'file', 'read'],
+            ['$1', 'etc_t', 'file', 'read', 'write']]
+
+ See AccessVector.to_list for more information.
+ """
+ l = []
+ for av in self:
+ l.append(av.to_list())
+
+ return l
+
+ def from_list(self, l):
+ """Add access vectors stored in a list.
+
+        See to_list for more information on the list format that this
+ method accepts.
+
+ This will add all of the access from the list. Any existing
+ access vectors in the set will be retained.
+ """
+ for av in l:
+ self.add_av(AccessVector(av))
+
+    def add(self, src_type, tgt_type, obj_class, perms, audit_msg=None, avc_type=audit2why.TERULE, data=None):
+        """Add an access vector to the set.
+        """
+        if data is None:
+            # Avoid sharing a mutable default argument between calls.
+            data = []
+        tgt = self.src.setdefault(src_type, { })
+ cls = tgt.setdefault(tgt_type, { })
+
+ if cls.has_key((obj_class, avc_type)):
+ access = cls[obj_class, avc_type]
+ else:
+ access = AccessVector()
+ access.src_type = src_type
+ access.tgt_type = tgt_type
+ access.obj_class = obj_class
+ access.data = data
+ access.type = avc_type
+ cls[obj_class, avc_type] = access
+
+ access.perms.update(perms)
+ if audit_msg:
+ access.audit_msgs.append(audit_msg)
+
+    def add_av(self, av, audit_msg=None):
+        """Add an access vector to the set."""
+        self.add(av.src_type, av.tgt_type, av.obj_class, av.perms,
+                 audit_msg)
+
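+# Illustrative usage (not part of the original module): successive adds
+# for the same (source, target, class) merge into one vector, as the
+# class docstring describes.
+#
+#   avs = AccessVectorSet()
+#   avs.add('$1', 'etc_t', 'file', ['read'])
+#   avs.add('$1', 'etc_t', 'file', ['write'])
+#   len(avs)  # -> 1, a single vector with perms { read write }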
+
+def avs_extract_types(avs):
+ types = refpolicy.IdSet()
+ for av in avs:
+ types.add(av.src_type)
+ types.add(av.tgt_type)
+
+ return types
+
+def avs_extract_obj_perms(avs):
+ perms = { }
+ for av in avs:
+ if perms.has_key(av.obj_class):
+ s = perms[av.obj_class]
+ else:
+ s = refpolicy.IdSet()
+ perms[av.obj_class] = s
+ s.update(av.perms)
+ return perms
+
+class RoleTypeSet:
+ """A non-overlapping set of role type statements.
+
+    This class allows the incremental addition of role type statements and
+ maintains a non-overlapping list of statements.
+ """
+ def __init__(self):
+ """Initialize an access vector set."""
+ self.role_types = {}
+
+ def __iter__(self):
+ """Iterate over all of the unique role allows statements in the set."""
+ for role_type in self.role_types.values():
+ yield role_type
+
+ def __len__(self):
+ """Return the unique number of role allow statements."""
+ return len(self.role_types.keys())
+
+ def add(self, role, type):
+ if self.role_types.has_key(role):
+ role_type = self.role_types[role]
+ else:
+ role_type = refpolicy.RoleType()
+ role_type.role = role
+ self.role_types[role] = role_type
+
+ role_type.types.add(type)
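+
+# Illustrative usage (not part of the original module): statements for
+# the same role are merged rather than duplicated.
+#
+#   rts = RoleTypeSet()
+#   rts.add('user_r', 'user_t')
+#   rts.add('user_r', 'mozilla_t')
+#   len(rts)  # -> 1; the single statement's .types holds both types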
diff --git a/lib/python2.7/site-packages/setoolsgui/sepolgen/audit.py b/lib/python2.7/site-packages/setoolsgui/sepolgen/audit.py
new file mode 100644
index 0000000..56919be
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/sepolgen/audit.py
@@ -0,0 +1,549 @@
+# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
+#
+# Copyright (C) 2006 Red Hat
+# see file 'COPYING' for use and warranty information
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2 only
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+
+import refpolicy
+import access
+import re
+import sys
+
+# Convenience functions
+
+def get_audit_boot_msgs():
+ """Obtain all of the avc and policy load messages from the audit
+ log. This function uses ausearch and requires that the current
+ process have sufficient rights to run ausearch.
+
+ Returns:
+       string containing all of the audit messages returned by ausearch.
+ """
+ import subprocess
+ import time
+    fd = open("/proc/uptime", "r")
+    off = float(fd.read().split()[0])
+    fd.close()
+ s = time.localtime(time.time() - off)
+ bootdate = time.strftime("%x", s)
+ boottime = time.strftime("%X", s)
+ output = subprocess.Popen(["/sbin/ausearch", "-m", "AVC,USER_AVC,MAC_POLICY_LOAD,DAEMON_START,SELINUX_ERR", "-ts", bootdate, boottime],
+ stdout=subprocess.PIPE).communicate()[0]
+ return output
+
+def get_audit_msgs():
+ """Obtain all of the avc and policy load messages from the audit
+ log. This function uses ausearch and requires that the current
+ process have sufficient rights to run ausearch.
+
+ Returns:
+       string containing all of the audit messages returned by ausearch.
+ """
+ import subprocess
+ output = subprocess.Popen(["/sbin/ausearch", "-m", "AVC,USER_AVC,MAC_POLICY_LOAD,DAEMON_START,SELINUX_ERR"],
+ stdout=subprocess.PIPE).communicate()[0]
+ return output
+
+def get_dmesg_msgs():
+ """Obtain all of the avc and policy load messages from /bin/dmesg.
+
+ Returns:
+       string containing all of the audit messages returned by dmesg.
+ """
+ import subprocess
+ output = subprocess.Popen(["/bin/dmesg"],
+ stdout=subprocess.PIPE).communicate()[0]
+ return output
+
+# Classes representing audit messages
+
+class AuditMessage:
+ """Base class for all objects representing audit messages.
+
+ AuditMessage is a base class for all audit messages and only
+ provides storage for the raw message (as a string) and a
+ parsing function that does nothing.
+ """
+ def __init__(self, message):
+ self.message = message
+ self.header = ""
+
+ def from_split_string(self, recs):
+ """Parse a string that has been split into records by space into
+ an audit message.
+
+ This method should be overridden by subclasses. Error reporting
+        should be done by raising ValueError exceptions.
+ """
+ for msg in recs:
+ fields = msg.split("=")
+ if len(fields) != 2:
+ if msg[:6] == "audit(":
+ self.header = msg
+ return
+ else:
+ continue
+
+ if fields[0] == "msg":
+ self.header = fields[1]
+ return
+
+
+class InvalidMessage(AuditMessage):
+ """Class representing invalid audit messages. This is used to differentiate
+ between audit messages that aren't recognized (that should return None from
+ the audit message parser) and a message that is recognized but is malformed
+ in some way.
+ """
+ def __init__(self, message):
+ AuditMessage.__init__(self, message)
+
+class PathMessage(AuditMessage):
+ """Class representing a path message"""
+ def __init__(self, message):
+ AuditMessage.__init__(self, message)
+ self.path = ""
+
+ def from_split_string(self, recs):
+ AuditMessage.from_split_string(self, recs)
+
+ for msg in recs:
+ fields = msg.split("=")
+ if len(fields) != 2:
+ continue
+ if fields[0] == "path":
+ self.path = fields[1][1:-1]
+ return
+
+import selinux.audit2why as audit2why
+
+avcdict = {}
+
+class AVCMessage(AuditMessage):
+ """AVC message representing an access denial or granted message.
+
+ This is a very basic class and does not represent all possible fields
+ in an avc message. Currently the fields are:
+ scontext - context for the source (process) that generated the message
+ tcontext - context for the target
+ tclass - object class for the target (only one)
+ comm - the process name
+       exe - the on-disk binary
+ path - the path of the target
+       accesses - list of accesses that were allowed or denied
+ denial - boolean indicating whether this was a denial (True) or granted
+ (False) message.
+
+ An example audit message generated from the audit daemon looks like (line breaks
+ added):
+ 'type=AVC msg=audit(1155568085.407:10877): avc: denied { search } for
+ pid=677 comm="python" name="modules" dev=dm-0 ino=13716388
+ scontext=user_u:system_r:setroubleshootd_t:s0
+ tcontext=system_u:object_r:modules_object_t:s0 tclass=dir'
+
+ An example audit message stored in syslog (not processed by the audit daemon - line
+ breaks added):
+ 'Sep 12 08:26:43 dhcp83-5 kernel: audit(1158064002.046:4): avc: denied { read }
+     for pid=2496 comm="bluez-pin" name=".gdm1K3IFT" dev=dm-0 ino=3601333
+     scontext=user_u:system_r:bluetooth_helper_t:s0-s0:c0
+     tcontext=system_u:object_r:xdm_tmp_t:s0 tclass=file'
+ """
+ def __init__(self, message):
+ AuditMessage.__init__(self, message)
+ self.scontext = refpolicy.SecurityContext()
+ self.tcontext = refpolicy.SecurityContext()
+ self.tclass = ""
+ self.comm = ""
+ self.exe = ""
+ self.path = ""
+ self.name = ""
+ self.accesses = []
+ self.denial = True
+ self.type = audit2why.TERULE
+
+ def __parse_access(self, recs, start):
+        # The access permissions appear in a space separated list like
+        # '{ read write }', which does not fit well with splitting the
+        # string on spaces. This function takes the list of recs and a
+        # starting position one beyond the open brace. It then adds the
+        # accesses until it finds the close brace or the end of the list
+        # (which is an error if reached without seeing a close brace).
+ found_close = False
+ i = start
+ if i == (len(recs) - 1):
+ raise ValueError("AVC message in invalid format [%s]\n" % self.message)
+ while i < len(recs):
+ if recs[i] == "}":
+ found_close = True
+ break
+ self.accesses.append(recs[i])
+ i = i + 1
+ if not found_close:
+ raise ValueError("AVC message in invalid format [%s]\n" % self.message)
+ return i + 1
+
+
+ def from_split_string(self, recs):
+ AuditMessage.from_split_string(self, recs)
+ # FUTURE - fully parse avc messages and store all possible fields
+ # Required fields
+ found_src = False
+ found_tgt = False
+ found_class = False
+ found_access = False
+
+ for i in range(len(recs)):
+ if recs[i] == "{":
+ i = self.__parse_access(recs, i + 1)
+ found_access = True
+ continue
+ elif recs[i] == "granted":
+ self.denial = False
+
+ fields = recs[i].split("=")
+ if len(fields) != 2:
+ continue
+ if fields[0] == "scontext":
+ self.scontext = refpolicy.SecurityContext(fields[1])
+ found_src = True
+ elif fields[0] == "tcontext":
+ self.tcontext = refpolicy.SecurityContext(fields[1])
+ found_tgt = True
+ elif fields[0] == "tclass":
+ self.tclass = fields[1]
+ found_class = True
+ elif fields[0] == "comm":
+ self.comm = fields[1][1:-1]
+ elif fields[0] == "exe":
+ self.exe = fields[1][1:-1]
+ elif fields[0] == "name":
+ self.name = fields[1][1:-1]
+
+ if not found_src or not found_tgt or not found_class or not found_access:
+ raise ValueError("AVC message in invalid format [%s]\n" % self.message)
+ self.analyze()
+
+ def analyze(self):
+ tcontext = self.tcontext.to_string()
+ scontext = self.scontext.to_string()
+        access_tuple = tuple(self.accesses)
+ self.data = []
+
+        if (scontext, tcontext, self.tclass, access_tuple) in avcdict:
+            self.type, self.data = avcdict[(scontext, tcontext, self.tclass, access_tuple)]
+        else:
+            self.type, self.data = audit2why.analyze(scontext, tcontext, self.tclass, self.accesses)
+ if self.type == audit2why.NOPOLICY:
+ self.type = audit2why.TERULE
+ if self.type == audit2why.BADTCON:
+ raise ValueError("Invalid Target Context %s\n" % tcontext)
+ if self.type == audit2why.BADSCON:
+ raise ValueError("Invalid Source Context %s\n" % scontext)
+            if self.type == audit2why.BADTCLASS:
+                raise ValueError("Invalid Type Class %s\n" % self.tclass)
+ if self.type == audit2why.BADPERM:
+ raise ValueError("Invalid permission %s\n" % " ".join(self.accesses))
+ if self.type == audit2why.BADCOMPUTE:
+ raise ValueError("Error during access vector computation")
+
+ if self.type == audit2why.CONSTRAINT:
+ self.data = [ self.data ]
+ if self.scontext.user != self.tcontext.user:
+ self.data.append(("user (%s)" % self.scontext.user, 'user (%s)' % self.tcontext.user))
+ if self.scontext.role != self.tcontext.role and self.tcontext.role != "object_r":
+ self.data.append(("role (%s)" % self.scontext.role, 'role (%s)' % self.tcontext.role))
+ if self.scontext.level != self.tcontext.level:
+ self.data.append(("level (%s)" % self.scontext.level, 'level (%s)' % self.tcontext.level))
+
+ avcdict[(scontext, tcontext, self.tclass, access_tuple)] = (self.type, self.data)
+
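+# Illustrative usage (not part of the original module): feeding a raw
+# denial line to AVCMessage. from_split_string() ends by calling
+# analyze(), which consults the loaded policy through audit2why, so
+# this only works on a host with an SELinux policy loaded.
+#
+#   line = ('type=AVC msg=audit(1155568085.407:10877): avc: denied '
+#           '{ search } for pid=677 comm="python" '
+#           'scontext=user_u:system_r:setroubleshootd_t:s0 '
+#           'tcontext=system_u:object_r:modules_object_t:s0 tclass=dir')
+#   msg = AVCMessage(line)
+#   msg.from_split_string(line.split())
+#   msg.scontext.type, msg.tclass, msg.accesses
+#   # -> ('setroubleshootd_t', 'dir', ['search'])
+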
+class PolicyLoadMessage(AuditMessage):
+ """Audit message indicating that the policy was reloaded."""
+ def __init__(self, message):
+ AuditMessage.__init__(self, message)
+
+class DaemonStartMessage(AuditMessage):
+ """Audit message indicating that a daemon was started."""
+ def __init__(self, message):
+ AuditMessage.__init__(self, message)
+ self.auditd = False
+
+ def from_split_string(self, recs):
+ AuditMessage.from_split_string(self, recs)
+ if "auditd" in recs:
+ self.auditd = True
+
+
+class ComputeSidMessage(AuditMessage):
+ """Audit message indicating that a sid was not valid.
+
+ Compute sid messages are generated on attempting to create a security
+ context that is not valid. Security contexts are invalid if the role is
+ not authorized for the user or the type is not authorized for the role.
+
+ This class does not store all of the fields from the compute sid message -
+ just the type and role.
+ """
+ def __init__(self, message):
+ AuditMessage.__init__(self, message)
+ self.invalid_context = refpolicy.SecurityContext()
+ self.scontext = refpolicy.SecurityContext()
+ self.tcontext = refpolicy.SecurityContext()
+ self.tclass = ""
+
+ def from_split_string(self, recs):
+ AuditMessage.from_split_string(self, recs)
+ if len(recs) < 10:
+ raise ValueError("Split string does not represent a valid compute sid message")
+
+ try:
+ self.invalid_context = refpolicy.SecurityContext(recs[5])
+ self.scontext = refpolicy.SecurityContext(recs[7].split("=")[1])
+ self.tcontext = refpolicy.SecurityContext(recs[8].split("=")[1])
+ self.tclass = recs[9].split("=")[1]
+        except (IndexError, ValueError):
+            raise ValueError("Split string does not represent a valid compute sid message")
+
+    def output(self):
+        return "role %s types %s;\n" % (self.invalid_context.role,
+                                        self.invalid_context.type)
+
+# Parser for audit messages
+
+class AuditParser:
+ """Parser for audit messages.
+
+ This class parses audit messages and stores them according to their message
+ type. This is not a general purpose audit message parser - it only extracts
+ selinux related messages.
+
+    Each audit message is stored in one of five lists:
+       avc_msgs - avc denial or granted messages. Messages are stored in
+          AVCMessage objects.
+       compute_sid_msgs - invalid sid messages. Messages are stored in
+          ComputeSidMessage objects.
+       invalid_msgs - selinux related messages that are not valid. Messages
+          are stored in InvalidMessage objects.
+       policy_load_msgs - policy load messages. Messages are stored in
+          PolicyLoadMessage objects.
+       path_msgs - avc path messages. Messages are stored in PathMessage
+          objects.
+
+ These lists will be reset when a policy load message is seen if
+ AuditParser.last_load_only is set to true. It is assumed that messages
+ are fed to the parser in chronological order - time stamps are not
+ parsed.
+ """
+ def __init__(self, last_load_only=False):
+ self.__initialize()
+ self.last_load_only = last_load_only
+
+ def __initialize(self):
+ self.avc_msgs = []
+ self.compute_sid_msgs = []
+ self.invalid_msgs = []
+ self.policy_load_msgs = []
+ self.path_msgs = []
+ self.by_header = { }
+ self.check_input_file = False
+
+ # Low-level parsing function - tries to determine if this audit
+ # message is an SELinux related message and then parses it into
+ # the appropriate AuditMessage subclass. This function deliberately
+ # does not impose policy (e.g., on policy load message) or store
+    # messages, to make it as simple and reusable as possible.
+ #
+ # Return values:
+ # None - no recognized audit message found in this line
+ #
+ # InvalidMessage - a recognized but invalid message was found.
+ #
+ # AuditMessage (or subclass) - object representing a parsed
+ # and valid audit message.
+ def __parse_line(self, line):
+ rec = line.split()
+ for i in rec:
+ found = False
+ if i == "avc:" or i == "message=avc:" or i == "msg='avc:":
+ msg = AVCMessage(line)
+ found = True
+ elif i == "security_compute_sid:":
+ msg = ComputeSidMessage(line)
+ found = True
+ elif i == "type=MAC_POLICY_LOAD" or i == "type=1403":
+ msg = PolicyLoadMessage(line)
+ found = True
+ elif i == "type=AVC_PATH":
+ msg = PathMessage(line)
+ found = True
+ elif i == "type=DAEMON_START":
+            msg = DaemonStartMessage(line)
+ found = True
+
+ if found:
+ self.check_input_file = True
+ try:
+ msg.from_split_string(rec)
+ except ValueError:
+ msg = InvalidMessage(line)
+ return msg
+ return None
+
+ # Higher-level parse function - take a line, parse it into an
+ # AuditMessage object, and store it in the appropriate list.
+ # This function will optionally reset all of the lists when
+ # it sees a load policy message depending on the value of
+ # self.last_load_only.
+ def __parse(self, line):
+ msg = self.__parse_line(line)
+ if msg is None:
+ return
+
+ # Append to the correct list
+ if isinstance(msg, PolicyLoadMessage):
+ if self.last_load_only:
+ self.__initialize()
+ elif isinstance(msg, DaemonStartMessage):
+ # We initialize every time the auditd is started. This
+ # is less than ideal, but unfortunately it is the only
+ # way to catch reboots since the initial policy load
+ # by init is not stored in the audit log.
+ if msg.auditd and self.last_load_only:
+ self.__initialize()
+ self.policy_load_msgs.append(msg)
+ elif isinstance(msg, AVCMessage):
+ self.avc_msgs.append(msg)
+ elif isinstance(msg, ComputeSidMessage):
+ self.compute_sid_msgs.append(msg)
+ elif isinstance(msg, InvalidMessage):
+ self.invalid_msgs.append(msg)
+ elif isinstance(msg, PathMessage):
+ self.path_msgs.append(msg)
+
+ # Group by audit header
+ if msg.header != "":
+ if self.by_header.has_key(msg.header):
+ self.by_header[msg.header].append(msg)
+ else:
+ self.by_header[msg.header] = [msg]
+
+
+ # Post processing will add additional information from AVC messages
+ # from related messages - only works on messages generated by
+ # the audit system.
+ def __post_process(self):
+ for value in self.by_header.values():
+ avc = []
+ path = None
+ for msg in value:
+ if isinstance(msg, PathMessage):
+ path = msg
+ elif isinstance(msg, AVCMessage):
+ avc.append(msg)
+ if len(avc) > 0 and path:
+ for a in avc:
+ a.path = path.path
+
+ def parse_file(self, input):
+ """Parse the contents of a file object. This method can be called
+ multiple times (along with parse_string)."""
+ line = input.readline()
+ while line:
+ self.__parse(line)
+ line = input.readline()
+ if not self.check_input_file:
+ sys.stderr.write("Nothing to do\n")
+ sys.exit(0)
+ self.__post_process()
+
+ def parse_string(self, input):
+ """Parse a string containing audit messages - messages should
+ be separated by new lines. This method can be called multiple
+ times (along with parse_file)."""
+ lines = input.split('\n')
+ for l in lines:
+ self.__parse(l)
+ self.__post_process()
+
+ def to_role(self, role_filter=None):
+ """Return RoleAllowSet statements matching the specified filter
+
+ Filter out types that match the filer, or all roles
+
+ Params:
+ role_filter - [optional] Filter object used to filter the
+ output.
+ Returns:
+ Access vector set representing the denied access in the
+ audit logs parsed by this object.
+ """
+ role_types = access.RoleTypeSet()
+ for cs in self.compute_sid_msgs:
+ if not role_filter or role_filter.filter(cs):
+ role_types.add(cs.invalid_context.role, cs.invalid_context.type)
+
+ return role_types
+
+ def to_access(self, avc_filter=None, only_denials=True):
+ """Convert the audit logs access into a an access vector set.
+
+ Convert the audit logs into an access vector set, optionally
+ filtering the restults with the passed in filter object.
+
+ Filter objects are object instances with a .filter method
+ that takes and access vector and returns True if the message
+ should be included in the final output and False otherwise.
+
+ Params:
+ avc_filter - [optional] Filter object used to filter the
+ output.
+ Returns:
+ Access vector set representing the denied access in the
+ audit logs parsed by this object.
+ """
+        av_set = access.AccessVectorSet()
+        for avc in self.avc_msgs:
+            if only_denials and not avc.denial:
+                continue
+            if avc_filter and not avc_filter.filter(avc):
+                continue
+            av_set.add(avc.scontext.type, avc.tcontext.type, avc.tclass,
+                       avc.accesses, avc, avc_type=avc.type, data=avc.data)
+        return av_set
+
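+# Illustrative usage (not part of the original module): parse the audit
+# log (needs privileges to run ausearch) and print the denied access as
+# allow rules.
+#
+#   parser = AuditParser()
+#   parser.parse_string(get_audit_msgs())
+#   for av in parser.to_access():
+#       print av.to_string()
+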
+class AVCTypeFilter:
+ def __init__(self, regex):
+ self.regex = re.compile(regex)
+
+ def filter(self, avc):
+ if self.regex.match(avc.scontext.type):
+ return True
+ if self.regex.match(avc.tcontext.type):
+ return True
+ return False
+
+class ComputeSidTypeFilter:
+ def __init__(self, regex):
+ self.regex = re.compile(regex)
+
+ def filter(self, avc):
+ if self.regex.match(avc.invalid_context.type):
+ return True
+ if self.regex.match(avc.scontext.type):
+ return True
+ if self.regex.match(avc.tcontext.type):
+ return True
+ return False
+
+
diff --git a/lib/python2.7/site-packages/setoolsgui/sepolgen/classperms.py b/lib/python2.7/site-packages/setoolsgui/sepolgen/classperms.py
new file mode 100644
index 0000000..c925dee
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/sepolgen/classperms.py
@@ -0,0 +1,116 @@
+# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
+#
+# Copyright (C) 2006 Red Hat
+# see file 'COPYING' for use and warranty information
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2 only
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+import sys
+
+tokens = ('DEFINE',
+ 'NAME',
+ 'TICK',
+ 'SQUOTE',
+ 'OBRACE',
+ 'CBRACE',
+ 'SEMI',
+ 'OPAREN',
+ 'CPAREN',
+ 'COMMA')
+
+reserved = {
+ 'define' : 'DEFINE' }
+
+t_TICK = r'\`'
+t_SQUOTE = r'\''
+t_OBRACE = r'\{'
+t_CBRACE = r'\}'
+t_SEMI = r'\;'
+t_OPAREN = r'\('
+t_CPAREN = r'\)'
+t_COMMA = r'\,'
+
+t_ignore = " \t\n"
+
+def t_NAME(t):
+ r'[a-zA-Z_][a-zA-Z0-9_]*'
+ t.type = reserved.get(t.value,'NAME')
+ return t
+
+def t_error(t):
+ print "Illegal character '%s'" % t.value[0]
+ t.skip(1)
+
+import lex
+lex.lex()
+
+def p_statements(p):
+ '''statements : define_stmt
+ | define_stmt statements
+ '''
+ if len(p) == 2:
+ p[0] = [p[1]]
+ else:
+        p[0] = [p[1]] + p[2]
+
+def p_define_stmt(p):
+    # Corresponds to: define(`foo',`{ read write }')
+ '''define_stmt : DEFINE OPAREN TICK NAME SQUOTE COMMA TICK list SQUOTE CPAREN
+ '''
+
+ p[0] = [p[4], p[8]]
+
+def p_list(p):
+ '''list : NAME
+ | OBRACE names CBRACE
+ '''
+ if p[1] == "{":
+ p[0] = p[2]
+ else:
+ p[0] = [p[1]]
+
+def p_names(p):
+ '''names : NAME
+ | NAME names
+ '''
+ if len(p) == 2:
+ p[0] = [p[1]]
+ else:
+ p[0] = [p[1]] + p[2]
+
+def p_error(p):
+ print "Syntax error on line %d %s [type=%s]" % (p.lineno, p.value, p.type)
+
+import yacc
+yacc.yacc()
+
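+# Illustrative usage (not part of the original module): the grammar
+# above turns a single m4 define into a [name, [perms...]] pair.
+#
+#   yacc.parse("define(`foo',`{ read write append }')")
+#   # -> [['foo', ['read', 'write', 'append']]]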
+
+if __name__ == "__main__":
+    # Self-test: parse the all_perms.spt definitions and print the
+    # result. Guarded so that importing this module does not require
+    # the file to be present.
+    f = open("all_perms.spt")
+    txt = f.read()
+    f.close()
+
+    #lex.input(txt)
+    #while 1:
+    #    tok = lex.token()
+    #    if not tok:
+    #        break
+    #    print tok
+
+    test = "define(`foo',`{ read write append }')"
+    test2 = """define(`all_filesystem_perms',`{ mount remount unmount getattr relabelfrom relabelto transition associate quotamod quotaget }')
+define(`all_security_perms',`{ compute_av compute_create compute_member check_context load_policy compute_relabel compute_user setenforce setbool setsecparam setcheckreqprot }')
+"""
+    result = yacc.parse(txt)
+    print result
+
diff --git a/lib/python2.7/site-packages/setoolsgui/sepolgen/defaults.py b/lib/python2.7/site-packages/setoolsgui/sepolgen/defaults.py
new file mode 100644
index 0000000..218bc7c
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/sepolgen/defaults.py
@@ -0,0 +1,77 @@
+# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
+#
+# Copyright (C) 2006 Red Hat
+# see file 'COPYING' for use and warranty information
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2 only
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+
+import os
+import re
+
+# Select the correct location for the development files based on a
+# path variable (optionally read from a configuration file)
+class PathChoooser(object):
+ def __init__(self, pathname):
+ self.config = dict()
+ if not os.path.exists(pathname):
+ self.config_pathname = "(defaults)"
+ self.config["SELINUX_DEVEL_PATH"] = "/usr/share/selinux/default:/usr/share/selinux/mls:/usr/share/selinux/devel"
+ return
+ self.config_pathname = pathname
+ ignore = re.compile(r"^\s*(?:#.+)?$")
+ consider = re.compile(r"^\s*(\w+)\s*=\s*(.+?)\s*$")
+ for lineno, line in enumerate(open(pathname)):
+ if ignore.match(line): continue
+ mo = consider.match(line)
+ if not mo:
+                raise ValueError("%s:%d: line is not in key = value format" % (pathname, lineno+1))
+ self.config[mo.group(1)] = mo.group(2)
+
+ # We're only exporting one useful function, so why not be a function
+ def __call__(self, testfilename, pathset="SELINUX_DEVEL_PATH"):
+ paths = self.config.get(pathset, None)
+ if paths is None:
+            raise ValueError("%s was not in %s" % (pathset, self.config_pathname))
+ paths = paths.split(":")
+ for p in paths:
+ target = os.path.join(p, testfilename)
+ if os.path.exists(target): return target
+ return os.path.join(paths[0], testfilename)
+
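+# Illustrative usage (not part of the original module): resolve a
+# development file against the configured search path; the first
+# existing match wins, otherwise the first path entry is used.
+#
+#   chooser = PathChoooser("/etc/selinux/sepolgen.conf")
+#   makefile = chooser("Makefile", pathset="SELINUX_DEVEL_PATH")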
+
+"""
+Various default settings, including file and directory locations.
+"""
+
+def data_dir():
+ return "/var/lib/sepolgen"
+
+def perm_map():
+ return data_dir() + "/perm_map"
+
+def interface_info():
+ return data_dir() + "/interface_info"
+
+def attribute_info():
+ return data_dir() + "/attribute_info"
+
+def refpolicy_makefile():
+ chooser = PathChoooser("/etc/selinux/sepolgen.conf")
+ return chooser("Makefile")
+
+def headers():
+ chooser = PathChoooser("/etc/selinux/sepolgen.conf")
+ return chooser("include")
+
diff --git a/lib/python2.7/site-packages/setoolsgui/sepolgen/interfaces.py b/lib/python2.7/site-packages/setoolsgui/sepolgen/interfaces.py
new file mode 100644
index 0000000..88a6dc3
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/sepolgen/interfaces.py
@@ -0,0 +1,509 @@
+# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
+#
+# Copyright (C) 2006 Red Hat
+# see file 'COPYING' for use and warranty information
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2 only
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+
+"""
+Classes for representing and manipulating interfaces.
+"""
+
+import access
+import refpolicy
+import itertools
+import objectmodel
+import matching
+
+from sepolgeni18n import _
+
+import copy
+
+class Param:
+ """
+    Object representing a parameter for an interface.
+ """
+ def __init__(self):
+ self.__name = ""
+ self.type = refpolicy.SRC_TYPE
+ self.obj_classes = refpolicy.IdSet()
+ self.required = True
+
+ def set_name(self, name):
+ if not access.is_idparam(name):
+ raise ValueError("Name [%s] is not a param" % name)
+ self.__name = name
+
+ def get_name(self):
+ return self.__name
+
+ name = property(get_name, set_name)
+
+ num = property(fget=lambda self: int(self.name[1:]))
+
+ def __repr__(self):
+ return "<sepolgen.policygen.Param instance [%s, %s, %s]>" % \
+ (self.name, refpolicy.field_to_str[self.type], " ".join(self.obj_classes))
+
+
+# Helper for extract perms
+def __param_insert(name, type, av, params):
+ ret = 0
+ if name in params:
+ p = params[name]
+ # The entries are identical - we're done
+ if type == p.type:
+ return
+        # Handle implicitly typed objects (like process)
+ if (type == refpolicy.SRC_TYPE or type == refpolicy.TGT_TYPE) and \
+ (p.type == refpolicy.TGT_TYPE or p.type == refpolicy.SRC_TYPE):
+ #print name, refpolicy.field_to_str[p.type]
+ # If the object is not implicitly typed, tell the
+ # caller there is a likely conflict.
+ ret = 1
+ if av:
+ avobjs = [av.obj_class]
+ else:
+ avobjs = []
+ for obj in itertools.chain(p.obj_classes, avobjs):
+ if obj in objectmodel.implicitly_typed_objects:
+ ret = 0
+ break
+ # "Promote" to a SRC_TYPE as this is the likely usage.
+ # We do this even if the above test fails on purpose
+ # as there is really no sane way to resolve the conflict
+ # here. The caller can take other actions if needed.
+ p.type = refpolicy.SRC_TYPE
+ else:
+ # There is some conflict - no way to resolve it really
+ # so we just leave the first entry and tell the caller
+ # there was a conflict.
+ ret = 1
+ else:
+ p = Param()
+ p.name = name
+ p.type = type
+ params[p.name] = p
+
+ if av:
+ p.obj_classes.add(av.obj_class)
+ return ret
+
+
+
+def av_extract_params(av, params):
+ """Extract the paramaters from an access vector.
+
+ Extract the paramaters (in the form $N) from an access
+ vector, storing them as Param objects in a dictionary.
+ Some attempt is made at resolving conflicts with other
+ entries in the dict, but if an unresolvable conflict is
+ found it is reported to the caller.
+
+    The goal here is to figure out how interface parameters are
+ actually used in the interface - e.g., that $1 is a domain used as
+ a SRC_TYPE. In general an interface will look like this:
+
+ interface(`foo', `
+ allow $1 foo : file read;
+ ')
+
+ This is simple to figure out - $1 is a SRC_TYPE. A few interfaces
+ are more complex, for example:
+
+ interface(`foo_trans',`
+ domain_auto_trans($1,fingerd_exec_t,fingerd_t)
+
+ allow $1 fingerd_t:fd use;
+ allow fingerd_t $1:fd use;
+ allow fingerd_t $1:fifo_file rw_file_perms;
+ allow fingerd_t $1:process sigchld;
+ ')
+
+    Here the usage seems ambiguous, but it is not. $1 is still a domain
+ and therefore should be returned as a SRC_TYPE.
+
+ Returns:
+ 0 - success
+ 1 - conflict found
+ """
+    ret = 0
+ if access.is_idparam(av.src_type):
+ if __param_insert(av.src_type, refpolicy.SRC_TYPE, av, params) == 1:
+ ret = 1
+
+ if access.is_idparam(av.tgt_type):
+ if __param_insert(av.tgt_type, refpolicy.TGT_TYPE, av, params) == 1:
+ ret = 1
+
+ if access.is_idparam(av.obj_class):
+ if __param_insert(av.obj_class, refpolicy.OBJ_CLASS, av, params) == 1:
+ ret = 1
+
+    for perm in av.perms:
+        if access.is_idparam(perm):
+            # Permission parameters are recorded with refpolicy's PERMS
+            # field constant (assumed to match the other field constants
+            # used above).
+            if __param_insert(perm, refpolicy.PERMS, av, params) == 1:
+                ret = 1
+
+ return ret
+
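+# Illustrative usage (not part of the original module): a $1 source in
+# an access vector is recorded as a SRC_TYPE parameter.
+#
+#   av = access.AccessVector(['$1', 'foo_exec_t', 'file', 'read'])
+#   params = {}
+#   av_extract_params(av, params)
+#   params['$1'].type == refpolicy.SRC_TYPE  # -> True
+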
+def role_extract_params(role, params):
+ if access.is_idparam(role.role):
+ return __param_insert(role.role, refpolicy.ROLE, None, params)
+
+def type_rule_extract_params(rule, params):
+ def extract_from_set(set, type):
+ ret = 0
+ for x in set:
+ if access.is_idparam(x):
+ if __param_insert(x, type, None, params):
+ ret = 1
+ return ret
+
+ ret = 0
+ if extract_from_set(rule.src_types, refpolicy.SRC_TYPE):
+ ret = 1
+
+ if extract_from_set(rule.tgt_types, refpolicy.TGT_TYPE):
+ ret = 1
+
+ if extract_from_set(rule.obj_classes, refpolicy.OBJ_CLASS):
+ ret = 1
+
+ if access.is_idparam(rule.dest_type):
+ if __param_insert(rule.dest_type, refpolicy.DEST_TYPE, None, params):
+ ret = 1
+
+ return ret
+
+def ifcall_extract_params(ifcall, params):
+ ret = 0
+ for arg in ifcall.args:
+ if access.is_idparam(arg):
+ # Assume interface arguments are source types. Fairly safe
+ # assumption for most interfaces
+ if __param_insert(arg, refpolicy.SRC_TYPE, None, params):
+ ret = 1
+
+ return ret
+
+class AttributeVector:
+ def __init__(self):
+ self.name = ""
+ self.access = access.AccessVectorSet()
+
+ def add_av(self, av):
+ self.access.add_av(av)
+
+class AttributeSet:
+ def __init__(self):
+ self.attributes = { }
+
+ def add_attr(self, attr):
+ self.attributes[attr.name] = attr
+
+ def from_file(self, fd):
+ def parse_attr(line):
+ fields = line[1:-1].split()
+ if len(fields) != 2 or fields[0] != "Attribute":
+ raise SyntaxError("Syntax error Attribute statement %s" % line)
+ a = AttributeVector()
+ a.name = fields[1]
+
+ return a
+
+ a = None
+ for line in fd:
+ line = line[:-1]
+ if line[0] == "[":
+ if a:
+ self.add_attr(a)
+ a = parse_attr(line)
+ elif a:
+ l = line.split(",")
+ av = access.AccessVector(l)
+ a.add_av(av)
+ if a:
+ self.add_attr(a)
+
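+# Illustrative sketch (not part of the original module) of the file
+# format AttributeSet.from_file parses: a bracketed Attribute header
+# followed by comma separated access vector lines, e.g.:
+#
+#   [Attribute file_type]
+#   $1,file_type,file,read
+#   $1,file_type,file,getattr
+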
+class InterfaceVector:
+ def __init__(self, interface=None, attributes={}):
+ # Enabled is a loose concept currently - we are essentially
+ # not enabling interfaces that we can't handle currently.
+ # See InterfaceVector.add_ifv for more information.
+ self.enabled = True
+ self.name = ""
+ # The access that is enabled by this interface - eventually
+ # this will include indirect access from typeattribute
+ # statements.
+ self.access = access.AccessVectorSet()
+        # Parameters are stored in a dictionary (key: param name
+        # value: Param object).
+ self.params = { }
+ if interface:
+ self.from_interface(interface, attributes)
+ self.expanded = False
+
+ def from_interface(self, interface, attributes={}):
+ self.name = interface.name
+
+ # Add allow rules
+ for avrule in interface.avrules():
+ if avrule.rule_type != refpolicy.AVRule.ALLOW:
+ continue
+ # Handle some policy bugs
+ if "dontaudit" in interface.name:
+ #print "allow rule in interface: %s" % interface
+ continue
+ avs = access.avrule_to_access_vectors(avrule)
+ for av in avs:
+ self.add_av(av)
+
+ # Add typeattribute access
+ if attributes:
+ for typeattribute in interface.typeattributes():
+ for attr in typeattribute.attributes:
+ if not attributes.attributes.has_key(attr):
+ # print "missing attribute " + attr
+ continue
+ attr_vec = attributes.attributes[attr]
+ for a in attr_vec.access:
+ av = copy.copy(a)
+ if av.src_type == attr_vec.name:
+ av.src_type = typeattribute.type
+ if av.tgt_type == attr_vec.name:
+ av.tgt_type = typeattribute.type
+ self.add_av(av)
+
+
+        # Extract parameters from roles
+ for role in interface.roles():
+ if role_extract_params(role, self.params):
+ pass
+ #print "found conflicting role param %s for interface %s" % \
+ # (role.name, interface.name)
+        # Extract parameters from type rules
+ for rule in interface.typerules():
+ if type_rule_extract_params(rule, self.params):
+ pass
+ #print "found conflicting params in rule %s in interface %s" % \
+ # (str(rule), interface.name)
+
+ for ifcall in interface.interface_calls():
+ if ifcall_extract_params(ifcall, self.params):
+ pass
+ #print "found conflicting params in ifcall %s in interface %s" % \
+ # (str(ifcall), interface.name)
+
+
+ def add_av(self, av):
+ if av_extract_params(av, self.params) == 1:
+ pass
+ #print "found conflicting perms [%s]" % str(av)
+ self.access.add_av(av)
+
+ def to_string(self):
+ s = []
+ s.append("[InterfaceVector %s]" % self.name)
+ for av in self.access:
+ s.append(str(av))
+ return "\n".join(s)
+
+ def __str__(self):
+ return self.__repr__()
+
+ def __repr__(self):
+ return "<InterfaceVector %s:%s>" % (self.name, self.enabled)
+
+
+class InterfaceSet:
+ def __init__(self, output=None):
+ self.interfaces = { }
+ self.tgt_type_map = { }
+ self.tgt_type_all = []
+ self.output = output
+
+ def o(self, str):
+ if self.output:
+ self.output.write(str + "\n")
+
+ def to_file(self, fd):
+ for iv in self.interfaces.values():
+ fd.write("[InterfaceVector %s " % iv.name)
+ for param in iv.params.values():
+ fd.write("%s:%s " % (param.name, refpolicy.field_to_str[param.type]))
+ fd.write("]\n")
+ avl = iv.access.to_list()
+ for av in avl:
+ fd.write(",".join(av))
+ fd.write("\n")
+
+ def from_file(self, fd):
+ def parse_ifv(line):
+ fields = line[1:-1].split()
+ if len(fields) < 2 or fields[0] != "InterfaceVector":
+ raise SyntaxError("Syntax error InterfaceVector statement %s" % line)
+ ifv = InterfaceVector()
+ ifv.name = fields[1]
+ if len(fields) == 2:
+                return ifv
+ for field in fields[2:]:
+ p = field.split(":")
+ if len(p) != 2:
+ raise SyntaxError("Invalid param in InterfaceVector statement %s" % line)
+ param = Param()
+ param.name = p[0]
+ param.type = refpolicy.str_to_field[p[1]]
+ ifv.params[param.name] = param
+ return ifv
+
+ ifv = None
+ for line in fd:
+ line = line[:-1]
+ if line[0] == "[":
+ if ifv:
+ self.add_ifv(ifv)
+ ifv = parse_ifv(line)
+ elif ifv:
+ l = line.split(",")
+ av = access.AccessVector(l)
+ ifv.add_av(av)
+ if ifv:
+ self.add_ifv(ifv)
+
+ self.index()
+
+ def add_ifv(self, ifv):
+ self.interfaces[ifv.name] = ifv
+
+ def index(self):
+ for ifv in self.interfaces.values():
+ tgt_types = set()
+ for av in ifv.access:
+ if access.is_idparam(av.tgt_type):
+ self.tgt_type_all.append(ifv)
+ tgt_types = set()
+ break
+ tgt_types.add(av.tgt_type)
+
+ for type in tgt_types:
+ l = self.tgt_type_map.setdefault(type, [])
+ l.append(ifv)
+
+ def add(self, interface, attributes={}):
+ ifv = InterfaceVector(interface, attributes)
+ self.add_ifv(ifv)
+
+ def add_headers(self, headers, output=None, attributes={}):
+ for i in itertools.chain(headers.interfaces(), headers.templates()):
+ self.add(i, attributes)
+
+ self.expand_ifcalls(headers)
+ self.index()
+
+ def map_param(self, id, ifcall):
+ if access.is_idparam(id):
+ num = int(id[1:])
+ if num > len(ifcall.args):
+ # Tell caller to drop this because it must have
+ # been generated from an optional param.
+ return None
+ else:
+ arg = ifcall.args[num - 1]
+ if isinstance(arg, list):
+ return arg
+ else:
+ return [arg]
+ else:
+ return [id]
+
+ def map_add_av(self, ifv, av, ifcall):
+ src_types = self.map_param(av.src_type, ifcall)
+ if src_types is None:
+ return
+
+ tgt_types = self.map_param(av.tgt_type, ifcall)
+ if tgt_types is None:
+ return
+
+ obj_classes = self.map_param(av.obj_class, ifcall)
+ if obj_classes is None:
+ return
+
+ new_perms = refpolicy.IdSet()
+ for perm in av.perms:
+ p = self.map_param(perm, ifcall)
+ if p is None:
+ continue
+ else:
+ new_perms.update(p)
+ if len(new_perms) == 0:
+ return
+
+ for src_type in src_types:
+ for tgt_type in tgt_types:
+ for obj_class in obj_classes:
+ ifv.access.add(src_type, tgt_type, obj_class, new_perms)
+
+ def do_expand_ifcalls(self, interface, if_by_name):
+ # Descend an interface call tree adding the access
+ # from each interface. This is a depth first walk
+ # of the tree.
+
+ stack = [(interface, None)]
+ ifv = self.interfaces[interface.name]
+ ifv.expanded = True
+
+ while len(stack) > 0:
+ cur, cur_ifcall = stack.pop(-1)
+
+ cur_ifv = self.interfaces[cur.name]
+ if cur != interface:
+
+ for av in cur_ifv.access:
+ self.map_add_av(ifv, av, cur_ifcall)
+
+ # If we have already fully expanded this interface
+ # there is no reason to descend further.
+ if cur_ifv.expanded:
+ continue
+
+ for ifcall in cur.interface_calls():
+ if ifcall.ifname == interface.name:
+ self.o(_("Found circular interface class"))
+ return
+ try:
+ newif = if_by_name[ifcall.ifname]
+ except KeyError:
+ self.o(_("Missing interface definition for %s" % ifcall.ifname))
+ continue
+
+ stack.append((newif, ifcall))
+
+
+ def expand_ifcalls(self, headers):
+ # Create a map of interface names to interfaces -
+ # this mirrors the interface vector map we already
+ # have.
+ if_by_name = { }
+
+ for i in itertools.chain(headers.interfaces(), headers.templates()):
+ if_by_name[i.name] = i
+
+
+ for interface in itertools.chain(headers.interfaces(), headers.templates()):
+ self.do_expand_ifcalls(interface, if_by_name)
+
diff --git a/lib/python2.7/site-packages/setoolsgui/sepolgen/lex.py b/lib/python2.7/site-packages/setoolsgui/sepolgen/lex.py
new file mode 100644
index 0000000..c149366
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/sepolgen/lex.py
@@ -0,0 +1,866 @@
+#-----------------------------------------------------------------------------
+# ply: lex.py
+#
+# Author: David M. Beazley (dave@dabeaz.com)
+#
+# Copyright (C) 2001-2006, David M. Beazley
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+# See the file COPYING for a complete copy of the LGPL.
+#-----------------------------------------------------------------------------
+
+__version__ = "2.2"
+
+import re, sys, types
+
+# Regular expression used to match valid token names
+_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
+
+# Available instance types. This is used when lexers are defined by a class.
+# It's a little funky because I want to preserve backwards compatibility
+# with Python 2.0 where types.ObjectType is undefined.
+
+try:
+ _INSTANCETYPE = (types.InstanceType, types.ObjectType)
+except AttributeError:
+ _INSTANCETYPE = types.InstanceType
+ class object: pass # Note: needed if no new-style classes present
+
+# Exception thrown when invalid token encountered and no default error
+# handler is defined.
+class LexError(Exception):
+ def __init__(self,message,s):
+ self.args = (message,)
+ self.text = s
+
+# Token class
+class LexToken(object):
+ def __str__(self):
+ return "LexToken(%s,%r,%d,%d)" % (self.type,self.value,self.lineno,self.lexpos)
+ def __repr__(self):
+ return str(self)
+ def skip(self,n):
+ self.lexer.skip(n)
+
+# -----------------------------------------------------------------------------
+# Lexer class
+#
+# This class encapsulates all of the methods and data associated with a lexer.
+#
+# input() - Store a new string in the lexer
+# token() - Get the next token
+# -----------------------------------------------------------------------------
+
+class Lexer:
+ def __init__(self):
+ self.lexre = None # Master regular expression. This is a list of
+ # tuples (re,findex) where re is a compiled
+ # regular expression and findex is a list
+ # mapping regex group numbers to rules
+ self.lexretext = None # Current regular expression strings
+ self.lexstatere = {} # Dictionary mapping lexer states to master regexs
+ self.lexstateretext = {} # Dictionary mapping lexer states to regex strings
+ self.lexstate = "INITIAL" # Current lexer state
+ self.lexstatestack = [] # Stack of lexer states
+ self.lexstateinfo = None # State information
+ self.lexstateignore = {} # Dictionary of ignored characters for each state
+ self.lexstateerrorf = {} # Dictionary of error functions for each state
+ self.lexreflags = 0 # Optional re compile flags
+ self.lexdata = None # Actual input data (as a string)
+ self.lexpos = 0 # Current position in input text
+ self.lexlen = 0 # Length of the input text
+ self.lexerrorf = None # Error rule (if any)
+ self.lextokens = None # List of valid tokens
+ self.lexignore = "" # Ignored characters
+ self.lexliterals = "" # Literal characters that can be passed through
+ self.lexmodule = None # Module
+ self.lineno = 1 # Current line number
+ self.lexdebug = 0 # Debugging mode
+ self.lexoptimize = 0 # Optimized mode
+
+ def clone(self,object=None):
+ c = Lexer()
+ c.lexstatere = self.lexstatere
+ c.lexstateinfo = self.lexstateinfo
+ c.lexstateretext = self.lexstateretext
+ c.lexstate = self.lexstate
+ c.lexstatestack = self.lexstatestack
+ c.lexstateignore = self.lexstateignore
+ c.lexstateerrorf = self.lexstateerrorf
+ c.lexreflags = self.lexreflags
+ c.lexdata = self.lexdata
+ c.lexpos = self.lexpos
+ c.lexlen = self.lexlen
+ c.lextokens = self.lextokens
+ c.lexdebug = self.lexdebug
+ c.lineno = self.lineno
+ c.lexoptimize = self.lexoptimize
+ c.lexliterals = self.lexliterals
+ c.lexmodule = self.lexmodule
+
+ # If the object parameter has been supplied, it means we are attaching the
+ # lexer to a new object. In this case, we have to rebind all methods in
+ # the lexstatere and lexstateerrorf tables.
+
+ if object:
+ newtab = { }
+ for key, ritem in self.lexstatere.items():
+ newre = []
+ for cre, findex in ritem:
+ newfindex = []
+ for f in findex:
+ if not f or not f[0]:
+ newfindex.append(f)
+ continue
+ newfindex.append((getattr(object,f[0].__name__),f[1]))
+ newre.append((cre,newfindex))
+ newtab[key] = newre
+ c.lexstatere = newtab
+ c.lexstateerrorf = { }
+ for key, ef in self.lexstateerrorf.items():
+ c.lexstateerrorf[key] = getattr(object,ef.__name__)
+ c.lexmodule = object
+
+ # Set up other attributes
+ c.begin(c.lexstate)
+ return c
+
+ # ------------------------------------------------------------
+ # writetab() - Write lexer information to a table file
+ # ------------------------------------------------------------
+ def writetab(self,tabfile):
+ tf = open(tabfile+".py","w")
+ tf.write("# %s.py. This file automatically created by PLY (version %s). Don't edit!\n" % (tabfile,__version__))
+ tf.write("_lextokens = %s\n" % repr(self.lextokens))
+ tf.write("_lexreflags = %s\n" % repr(self.lexreflags))
+ tf.write("_lexliterals = %s\n" % repr(self.lexliterals))
+ tf.write("_lexstateinfo = %s\n" % repr(self.lexstateinfo))
+
+ tabre = { }
+ for key, lre in self.lexstatere.items():
+ titem = []
+ for i in range(len(lre)):
+ titem.append((self.lexstateretext[key][i],_funcs_to_names(lre[i][1])))
+ tabre[key] = titem
+
+ tf.write("_lexstatere = %s\n" % repr(tabre))
+ tf.write("_lexstateignore = %s\n" % repr(self.lexstateignore))
+
+ taberr = { }
+ for key, ef in self.lexstateerrorf.items():
+ if ef:
+ taberr[key] = ef.__name__
+ else:
+ taberr[key] = None
+ tf.write("_lexstateerrorf = %s\n" % repr(taberr))
+ tf.close()
+
+ # ------------------------------------------------------------
+ # readtab() - Read lexer information from a tab file
+ # ------------------------------------------------------------
+ def readtab(self,tabfile,fdict):
+ exec "import %s as lextab" % tabfile
+ self.lextokens = lextab._lextokens
+ self.lexreflags = lextab._lexreflags
+ self.lexliterals = lextab._lexliterals
+ self.lexstateinfo = lextab._lexstateinfo
+ self.lexstateignore = lextab._lexstateignore
+ self.lexstatere = { }
+ self.lexstateretext = { }
+ for key,lre in lextab._lexstatere.items():
+ titem = []
+ txtitem = []
+ for i in range(len(lre)):
+ titem.append((re.compile(lre[i][0],lextab._lexreflags),_names_to_funcs(lre[i][1],fdict)))
+ txtitem.append(lre[i][0])
+ self.lexstatere[key] = titem
+ self.lexstateretext[key] = txtitem
+ self.lexstateerrorf = { }
+ for key,ef in lextab._lexstateerrorf.items():
+ self.lexstateerrorf[key] = fdict[ef]
+ self.begin('INITIAL')
+
+ # ------------------------------------------------------------
+ # input() - Push a new string into the lexer
+ # ------------------------------------------------------------
+ def input(self,s):
+ if not (isinstance(s,types.StringType) or isinstance(s,types.UnicodeType)):
+ raise ValueError, "Expected a string"
+ self.lexdata = s
+ self.lexpos = 0
+ self.lexlen = len(s)
+
+ # ------------------------------------------------------------
+ # begin() - Changes the lexing state
+ # ------------------------------------------------------------
+ def begin(self,state):
+ if not self.lexstatere.has_key(state):
+ raise ValueError, "Undefined state"
+ self.lexre = self.lexstatere[state]
+ self.lexretext = self.lexstateretext[state]
+ self.lexignore = self.lexstateignore.get(state,"")
+ self.lexerrorf = self.lexstateerrorf.get(state,None)
+ self.lexstate = state
+
+ # ------------------------------------------------------------
+ # push_state() - Changes the lexing state and saves old on stack
+ # ------------------------------------------------------------
+ def push_state(self,state):
+ self.lexstatestack.append(self.lexstate)
+ self.begin(state)
+
+ # ------------------------------------------------------------
+ # pop_state() - Restores the previous state
+ # ------------------------------------------------------------
+ def pop_state(self):
+ self.begin(self.lexstatestack.pop())
+
+ # ------------------------------------------------------------
+ # current_state() - Returns the current lexing state
+ # ------------------------------------------------------------
+ def current_state(self):
+ return self.lexstate
+
+ # ------------------------------------------------------------
+ # skip() - Skip ahead n characters
+ # ------------------------------------------------------------
+ def skip(self,n):
+ self.lexpos += n
+
+ # ------------------------------------------------------------
+ # token() - Return the next token from the Lexer
+ #
+ # Note: This function has been carefully implemented to be as fast
+ # as possible. Don't make changes unless you really know what
+ # you are doing
+ # ------------------------------------------------------------
+ def token(self):
+ # Make local copies of frequently referenced attributes
+ lexpos = self.lexpos
+ lexlen = self.lexlen
+ lexignore = self.lexignore
+ lexdata = self.lexdata
+
+ while lexpos < lexlen:
+ # This code provides some short-circuit code for whitespace, tabs, and other ignored characters
+ if lexdata[lexpos] in lexignore:
+ lexpos += 1
+ continue
+
+ # Look for a regular expression match
+ for lexre,lexindexfunc in self.lexre:
+ m = lexre.match(lexdata,lexpos)
+ if not m: continue
+
+ # Set last match in lexer so that rules can access it if they want
+ self.lexmatch = m
+
+ # Create a token for return
+ tok = LexToken()
+ tok.value = m.group()
+ tok.lineno = self.lineno
+ tok.lexpos = lexpos
+ tok.lexer = self
+
+ lexpos = m.end()
+ i = m.lastindex
+ func,tok.type = lexindexfunc[i]
+ self.lexpos = lexpos
+
+ if not func:
+ # If no token type was set, it's an ignored token
+ if tok.type: return tok
+ break
+
+ # if func not callable, it means it's an ignored token
+ if not callable(func):
+ break
+
+ # If token is processed by a function, call it
+ newtok = func(tok)
+
+                # Every function must return a token; if it returns nothing, we move on to the next token
+ if not newtok:
+ lexpos = self.lexpos # This is here in case user has updated lexpos.
+ break
+
+ # Verify type of the token. If not in the token map, raise an error
+ if not self.lexoptimize:
+ if not self.lextokens.has_key(newtok.type):
+ raise LexError, ("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
+ func.func_code.co_filename, func.func_code.co_firstlineno,
+ func.__name__, newtok.type),lexdata[lexpos:])
+
+ return newtok
+ else:
+ # No match, see if in literals
+ if lexdata[lexpos] in self.lexliterals:
+ tok = LexToken()
+ tok.value = lexdata[lexpos]
+ tok.lineno = self.lineno
+ tok.lexer = self
+ tok.type = tok.value
+ tok.lexpos = lexpos
+ self.lexpos = lexpos + 1
+ return tok
+
+ # No match. Call t_error() if defined.
+ if self.lexerrorf:
+ tok = LexToken()
+ tok.value = self.lexdata[lexpos:]
+ tok.lineno = self.lineno
+ tok.type = "error"
+ tok.lexer = self
+ tok.lexpos = lexpos
+ self.lexpos = lexpos
+ newtok = self.lexerrorf(tok)
+ if lexpos == self.lexpos:
+ # Error method didn't change text position at all. This is an error.
+ raise LexError, ("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
+ lexpos = self.lexpos
+ if not newtok: continue
+ return newtok
+
+ self.lexpos = lexpos
+ raise LexError, ("Illegal character '%s' at index %d" % (lexdata[lexpos],lexpos), lexdata[lexpos:])
+
+ self.lexpos = lexpos + 1
+ if self.lexdata is None:
+ raise RuntimeError, "No input string given with input()"
+ return None
+
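+# Illustrative usage (not part of the original module): the usual PLY
+# flow drives this class through the module-level helpers (lex() below;
+# input()/token() wrappers are assumed to follow the standard PLY
+# module interface).
+#
+#   lexer = lex.lex()      # build a Lexer from the caller's rules
+#   lexer.input("define")  # hand it a string to tokenize
+#   tok = lexer.token()    # fetch LexTokens until None is returned
+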
+# -----------------------------------------------------------------------------
+# _validate_file()
+#
+# This checks to see if there are duplicated t_rulename() functions or strings
+# in the parser input file. This is done using a simple regular expression
+# match on each line in the filename.
+# -----------------------------------------------------------------------------
+
+def _validate_file(filename):
+ import os.path
+ base,ext = os.path.splitext(filename)
+ if ext != '.py': return 1 # No idea what the file is. Return OK
+
+ try:
+ f = open(filename)
+ lines = f.readlines()
+ f.close()
+ except IOError:
+ return 1 # Oh well
+
+ fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
+ sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
+ counthash = { }
+ linen = 1
+ noerror = 1
+ for l in lines:
+ m = fre.match(l)
+ if not m:
+ m = sre.match(l)
+ if m:
+ name = m.group(1)
+ prev = counthash.get(name)
+ if not prev:
+ counthash[name] = linen
+ else:
+ print "%s:%d: Rule %s redefined. Previously defined on line %d" % (filename,linen,name,prev)
+ noerror = 0
+ linen += 1
+ return noerror
+
+# -----------------------------------------------------------------------------
+# _funcs_to_names()
+#
+# Given a list of regular expression functions, this converts it to a list
+# suitable for output to a table file
+# -----------------------------------------------------------------------------
+
+def _funcs_to_names(funclist):
+ result = []
+ for f in funclist:
+ if f and f[0]:
+ result.append((f[0].__name__,f[1]))
+ else:
+ result.append(f)
+ return result
+
+# -----------------------------------------------------------------------------
+# _names_to_funcs()
+#
+# Given a list of regular expression function names, this converts it back to
+# functions.
+# -----------------------------------------------------------------------------
+
+def _names_to_funcs(namelist,fdict):
+ result = []
+ for n in namelist:
+ if n and n[0]:
+ result.append((fdict[n[0]],n[1]))
+ else:
+ result.append(n)
+ return result
+
+# -----------------------------------------------------------------------------
+# _form_master_re()
+#
+# This function takes a list of all of the regex components and attempts to
+# form the master regular expression. Given limitations in the Python re
+# module, it may be necessary to break the master regex into separate expressions.
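+#
+# For example (an illustrative sketch, not from the original source): two
+# rules t_NUM = r'\d+' and t_PLUS = r'\+' are joined into the single master
+# pattern "(?P<t_NUM>\d+)|(?P<t_PLUS>\+)", and the named groups are mapped
+# back to their rules via lexre.groupindex.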
+# -----------------------------------------------------------------------------
+
+def _form_master_re(relist,reflags,ldict):
+ if not relist: return []
+ regex = "|".join(relist)
+ try:
+ lexre = re.compile(regex,re.VERBOSE | reflags)
+
+ # Build the index to function map for the matching engine
+ lexindexfunc = [ None ] * (max(lexre.groupindex.values())+1)
+ for f,i in lexre.groupindex.items():
+ handle = ldict.get(f,None)
+ if type(handle) in (types.FunctionType, types.MethodType):
+ lexindexfunc[i] = (handle,handle.__name__[2:])
+ elif handle is not None:
+ # If rule was specified as a string, we build an anonymous
+ # callback function to carry out the action
+ if f.find("ignore_") > 0:
+ lexindexfunc[i] = (None,None)
+ print "IGNORE", f
+ else:
+ lexindexfunc[i] = (None, f[2:])
+
+ return [(lexre,lexindexfunc)],[regex]
+ except Exception,e:
+ m = int(len(relist)/2)
+ if m == 0: m = 1
+ llist, lre = _form_master_re(relist[:m],reflags,ldict)
+ rlist, rre = _form_master_re(relist[m:],reflags,ldict)
+ return llist+rlist, lre+rre
+
+# -----------------------------------------------------------------------------
+# def _statetoken(s,names)
+#
+# Given a declaration name s of the form "t_" and a dictionary whose keys are
+# state names, this function returns a tuple (states,tokenname) where states
+# is a tuple of state names and tokenname is the name of the token. For example,
+# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
+# -----------------------------------------------------------------------------
+
+def _statetoken(s,names):
+ nonstate = 1
+ parts = s.split("_")
+ for i in range(1,len(parts)):
+ if not names.has_key(parts[i]) and parts[i] != 'ANY': break
+ if i > 1:
+ states = tuple(parts[1:i])
+ else:
+ states = ('INITIAL',)
+
+ if 'ANY' in states:
+ states = tuple(names.keys())
+
+ tokenname = "_".join(parts[i:])
+ return (states,tokenname)
+
+# -----------------------------------------------------------------------------
+# lex(module)
+#
+# Build all of the regular expression rules from definitions in the supplied module
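+#
+# A minimal usage sketch (illustrative only; the token names and t_* rules
+# below are the caller's, not defined in this file). lex() with no arguments
+# builds the rules from the calling module's globals:
+#
+#     tokens = ('NUMBER', 'PLUS')
+#     t_PLUS = r'\+'
+#     t_ignore = ' \t'
+#     def t_NUMBER(t):
+#         r'\d+'
+#         t.value = int(t.value)
+#         return t
+#     def t_error(t):
+#         t.skip(1)
+#     lexer = lex()
+#     lexer.input("1 + 2")
+#     while 1:
+#         tok = lexer.token()
+#         if not tok: break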
+# -----------------------------------------------------------------------------
+def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0):
+ global lexer
+ ldict = None
+ stateinfo = { 'INITIAL' : 'inclusive'}
+ error = 0
+ files = { }
+ lexobj = Lexer()
+ lexobj.lexdebug = debug
+ lexobj.lexoptimize = optimize
+ global token,input
+
+ if nowarn: warn = 0
+ else: warn = 1
+
+ if object: module = object
+
+ if module:
+ # User supplied a module object.
+ if isinstance(module, types.ModuleType):
+ ldict = module.__dict__
+ elif isinstance(module, _INSTANCETYPE):
+ _items = [(k,getattr(module,k)) for k in dir(module)]
+ ldict = { }
+ for (i,v) in _items:
+ ldict[i] = v
+ else:
+ raise ValueError,"Expected a module or instance"
+ lexobj.lexmodule = module
+
+ else:
+ # No module given. We might be able to get information from the caller.
+ try:
+ raise RuntimeError
+ except RuntimeError:
+ e,b,t = sys.exc_info()
+ f = t.tb_frame
+ f = f.f_back # Walk out to our calling function
+ ldict = f.f_globals # Grab its globals dictionary
+
+ if optimize and lextab:
+ try:
+ lexobj.readtab(lextab,ldict)
+ token = lexobj.token
+ input = lexobj.input
+ lexer = lexobj
+ return lexobj
+
+ except ImportError:
+ pass
+
+ # Get the tokens, states, and literals variables (if any)
+ if (module and isinstance(module,_INSTANCETYPE)):
+ tokens = getattr(module,"tokens",None)
+ states = getattr(module,"states",None)
+ literals = getattr(module,"literals","")
+ else:
+ tokens = ldict.get("tokens",None)
+ states = ldict.get("states",None)
+ literals = ldict.get("literals","")
+
+ if not tokens:
+ raise SyntaxError,"lex: module does not define 'tokens'"
+ if not (isinstance(tokens,types.ListType) or isinstance(tokens,types.TupleType)):
+ raise SyntaxError,"lex: tokens must be a list or tuple."
+
+ # Build a dictionary of valid token names
+ lexobj.lextokens = { }
+ if not optimize:
+ for n in tokens:
+ if not _is_identifier.match(n):
+ print "lex: Bad token name '%s'" % n
+ error = 1
+ if warn and lexobj.lextokens.has_key(n):
+ print "lex: Warning. Token '%s' multiply defined." % n
+ lexobj.lextokens[n] = None
+ else:
+ for n in tokens: lexobj.lextokens[n] = None
+
+ if debug:
+ print "lex: tokens = '%s'" % lexobj.lextokens.keys()
+
+ try:
+ for c in literals:
+ if not (isinstance(c,types.StringType) or isinstance(c,types.UnicodeType)) or len(c) > 1:
+ print "lex: Invalid literal %s. Must be a single character" % repr(c)
+ error = 1
+ continue
+
+ except TypeError:
+ print "lex: Invalid literals specification. literals must be a sequence of characters."
+ error = 1
+
+ lexobj.lexliterals = literals
+
+ # Build statemap
+ if states:
+ if not (isinstance(states,types.TupleType) or isinstance(states,types.ListType)):
+ print "lex: states must be defined as a tuple or list."
+ error = 1
+ else:
+ for s in states:
+ if not isinstance(s,types.TupleType) or len(s) != 2:
+ print "lex: invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')" % repr(s)
+ error = 1
+ continue
+ name, statetype = s
+ if not isinstance(name,types.StringType):
+ print "lex: state name %s must be a string" % repr(name)
+ error = 1
+ continue
+ if not (statetype == 'inclusive' or statetype == 'exclusive'):
+ print "lex: state type for state %s must be 'inclusive' or 'exclusive'" % name
+ error = 1
+ continue
+ if stateinfo.has_key(name):
+ print "lex: state '%s' already defined." % name
+ error = 1
+ continue
+ stateinfo[name] = statetype
+
+ # Get a list of symbols with the t_ prefix
+ tsymbols = [f for f in ldict.keys() if f[:2] == 't_' ]
+
+ # Now build up a list of functions and a list of strings
+
+ funcsym = { } # Symbols defined as functions
+ strsym = { } # Symbols defined as strings
+ toknames = { } # Mapping of symbols to token names
+
+ for s in stateinfo.keys():
+ funcsym[s] = []
+ strsym[s] = []
+
+ ignore = { } # Ignore strings by state
+ errorf = { } # Error functions by state
+
+ if len(tsymbols) == 0:
+ raise SyntaxError,"lex: no rules of the form t_rulename are defined."
+
+ for f in tsymbols:
+ t = ldict[f]
+ states, tokname = _statetoken(f,stateinfo)
+ toknames[f] = tokname
+
+ if callable(t):
+ for s in states: funcsym[s].append((f,t))
+ elif (isinstance(t, types.StringType) or isinstance(t,types.UnicodeType)):
+ for s in states: strsym[s].append((f,t))
+ else:
+ print "lex: %s not defined as a function or string" % f
+ error = 1
+
+ # Sort the functions by line number
+ for f in funcsym.values():
+ f.sort(lambda x,y: cmp(x[1].func_code.co_firstlineno,y[1].func_code.co_firstlineno))
+
+ # Sort the strings by regular expression length
+ for s in strsym.values():
+ s.sort(lambda x,y: (len(x[1]) < len(y[1])) - (len(x[1]) > len(y[1])))
+
+ regexs = { }
+
+ # Build the master regular expressions
+ for state in stateinfo.keys():
+ regex_list = []
+
+ # Add rules defined by functions first
+ for fname, f in funcsym[state]:
+ line = f.func_code.co_firstlineno
+ file = f.func_code.co_filename
+ files[file] = None
+ tokname = toknames[fname]
+
+ ismethod = isinstance(f, types.MethodType)
+
+ if not optimize:
+ nargs = f.func_code.co_argcount
+ if ismethod:
+ reqargs = 2
+ else:
+ reqargs = 1
+ if nargs > reqargs:
+ print "%s:%d: Rule '%s' has too many arguments." % (file,line,f.__name__)
+ error = 1
+ continue
+
+ if nargs < reqargs:
+ print "%s:%d: Rule '%s' requires an argument." % (file,line,f.__name__)
+ error = 1
+ continue
+
+ if tokname == 'ignore':
+ print "%s:%d: Rule '%s' must be defined as a string." % (file,line,f.__name__)
+ error = 1
+ continue
+
+ if tokname == 'error':
+ errorf[state] = f
+ continue
+
+ if f.__doc__:
+ if not optimize:
+ try:
+ c = re.compile("(?P<%s>%s)" % (f.__name__,f.__doc__), re.VERBOSE | reflags)
+ if c.match(""):
+ print "%s:%d: Regular expression for rule '%s' matches empty string." % (file,line,f.__name__)
+ error = 1
+ continue
+ except re.error,e:
+ print "%s:%d: Invalid regular expression for rule '%s'. %s" % (file,line,f.__name__,e)
+ if '#' in f.__doc__:
+ print "%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'." % (file,line, f.__name__)
+ error = 1
+ continue
+
+ if debug:
+ print "lex: Adding rule %s -> '%s' (state '%s')" % (f.__name__,f.__doc__, state)
+
+ # Okay. The regular expression seemed okay. Let's append it to the master regular
+ # expression we're building
+
+ regex_list.append("(?P<%s>%s)" % (f.__name__,f.__doc__))
+ else:
+ print "%s:%d: No regular expression defined for rule '%s'" % (file,line,f.__name__)
+
+ # Now add all of the simple rules
+ for name,r in strsym[state]:
+ tokname = toknames[name]
+
+ if tokname == 'ignore':
+ ignore[state] = r
+ continue
+
+ if not optimize:
+ if tokname == 'error':
+ raise SyntaxError,"lex: Rule '%s' must be defined as a function" % name
+ error = 1
+ continue
+
+ if not lexobj.lextokens.has_key(tokname) and tokname.find("ignore_") < 0:
+ print "lex: Rule '%s' defined for an unspecified token %s." % (name,tokname)
+ error = 1
+ continue
+ try:
+ c = re.compile("(?P<%s>%s)" % (name,r),re.VERBOSE | reflags)
+ if (c.match("")):
+ print "lex: Regular expression for rule '%s' matches empty string." % name
+ error = 1
+ continue
+ except re.error,e:
+ print "lex: Invalid regular expression for rule '%s'. %s" % (name,e)
+ if '#' in r:
+ print "lex: Make sure '#' in rule '%s' is escaped with '\\#'." % name
+
+ error = 1
+ continue
+ if debug:
+ print "lex: Adding rule %s -> '%s' (state '%s')" % (name,r,state)
+
+ regex_list.append("(?P<%s>%s)" % (name,r))
+
+ if not regex_list:
+ print "lex: No rules defined for state '%s'" % state
+ error = 1
+
+ regexs[state] = regex_list
+
+
+ if not optimize:
+ for f in files.keys():
+ if not _validate_file(f):
+ error = 1
+
+ if error:
+ raise SyntaxError,"lex: Unable to build lexer."
+
+ # From this point forward, we're reasonably confident that we can build the lexer.
+ # No more errors will be generated, but there might be some warning messages.
+
+ # Build the master regular expressions
+
+ for state in regexs.keys():
+ lexre, re_text = _form_master_re(regexs[state],reflags,ldict)
+ lexobj.lexstatere[state] = lexre
+ lexobj.lexstateretext[state] = re_text
+ if debug:
+ for i in range(len(re_text)):
+ print "lex: state '%s'. regex[%d] = '%s'" % (state, i, re_text[i])
+
+ # For inclusive states, we need to add the INITIAL state
+ for state,type in stateinfo.items():
+ if state != "INITIAL" and type == 'inclusive':
+ lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
+ lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
+
+ lexobj.lexstateinfo = stateinfo
+ lexobj.lexre = lexobj.lexstatere["INITIAL"]
+ lexobj.lexretext = lexobj.lexstateretext["INITIAL"]
+
+ # Set up ignore variables
+ lexobj.lexstateignore = ignore
+ lexobj.lexignore = lexobj.lexstateignore.get("INITIAL","")
+
+ # Set up error functions
+ lexobj.lexstateerrorf = errorf
+ lexobj.lexerrorf = errorf.get("INITIAL",None)
+ if warn and not lexobj.lexerrorf:
+ print "lex: Warning. no t_error rule is defined."
+
+ # Check state information for ignore and error rules
+ for s,stype in stateinfo.items():
+ if stype == 'exclusive':
+ if warn and not errorf.has_key(s):
+ print "lex: Warning. no error rule is defined for exclusive state '%s'" % s
+ if warn and not ignore.has_key(s) and lexobj.lexignore:
+ print "lex: Warning. no ignore rule is defined for exclusive state '%s'" % s
+ elif stype == 'inclusive':
+ if not errorf.has_key(s):
+ errorf[s] = errorf.get("INITIAL",None)
+ if not ignore.has_key(s):
+ ignore[s] = ignore.get("INITIAL","")
+
+
+ # Create global versions of the token() and input() functions
+ token = lexobj.token
+ input = lexobj.input
+ lexer = lexobj
+
+ # If in optimize mode, we write the lextab
+ if lextab and optimize:
+ lexobj.writetab(lextab)
+
+ return lexobj
+
+# -----------------------------------------------------------------------------
+# runmain()
+#
+# This runs the lexer as a main program
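+#
+# Usage sketch (illustrative; "mylexer.py" is a hypothetical module built
+# with lex()): running "python mylexer.py input.txt" tokenizes the file and
+# prints one (type,value,lineno,lexpos) tuple per token; with no argument,
+# input is read from stdin.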
+# -----------------------------------------------------------------------------
+
+def runmain(lexer=None,data=None):
+ if not data:
+ try:
+ filename = sys.argv[1]
+ f = open(filename)
+ data = f.read()
+ f.close()
+ except IndexError:
+ print "Reading from standard input (type EOF to end):"
+ data = sys.stdin.read()
+
+ if lexer:
+ _input = lexer.input
+ else:
+ _input = input
+ _input(data)
+ if lexer:
+ _token = lexer.token
+ else:
+ _token = token
+
+ while 1:
+ tok = _token()
+ if not tok: break
+ print "(%s,%r,%d,%d)" % (tok.type, tok.value, tok.lineno,tok.lexpos)
+
+
+# -----------------------------------------------------------------------------
+# @TOKEN(regex)
+#
+# This decorator sets the regular expression for a token rule function when
+# supplying it via the docstring is inconvenient
+# -----------------------------------------------------------------------------
+
+def TOKEN(r):
+ def set_doc(f):
+ f.__doc__ = r
+ return f
+ return set_doc
+
+# Alternative spelling of the TOKEN decorator
+Token = TOKEN
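+
+# A usage sketch for @TOKEN (illustrative; the 'identifier' pattern below is
+# an assumption, not part of this module). It is useful when the regex is
+# built at runtime and so cannot be written as a literal docstring:
+#
+#     identifier = r'[A-Za-z_][A-Za-z0-9_]*'
+#
+#     @TOKEN(identifier)
+#     def t_ID(t):
+#         return t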
+
diff --git a/lib/python2.7/site-packages/setoolsgui/sepolgen/matching.py b/lib/python2.7/site-packages/setoolsgui/sepolgen/matching.py
new file mode 100644
index 0000000..d56dd92
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/sepolgen/matching.py
@@ -0,0 +1,255 @@
+# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
+#
+# Copyright (C) 2006 Red Hat
+# see file 'COPYING' for use and warranty information
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2 only
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+
+"""
+Classes and algorithms for matching requested access to access vectors.
+"""
+
+import access
+import objectmodel
+import itertools
+
+class Match:
+ def __init__(self, interface=None, dist=0):
+ self.interface = interface
+ self.dist = dist
+ self.info_dir_change = False
+
+ def __cmp__(self, other):
+ if self.dist == other.dist:
+ if self.info_dir_change:
+ if other.info_dir_change:
+ return 0
+ else:
+ return 1
+ else:
+ if other.info_dir_change:
+ return -1
+ else:
+ return 0
+ else:
+ if self.dist < other.dist:
+ return -1
+ else:
+ return 1
+
+class MatchList:
+ DEFAULT_THRESHOLD = 150
+ def __init__(self):
+ # Match objects that pass the threshold
+ self.children = []
+ # Match objects over the threshold
+ self.bastards = []
+ self.threshold = self.DEFAULT_THRESHOLD
+ self.allow_info_dir_change = False
+ self.av = None
+
+ def best(self):
+ if len(self.children):
+ return self.children[0]
+ if len(self.bastards):
+ return self.bastards[0]
+ return None
+
+ def __len__(self):
+ # Only return the length of the matches so
+ # that this can be used to test if there is
+ # a match.
+ return len(self.children) + len(self.bastards)
+
+ def __iter__(self):
+ return iter(self.children)
+
+ def all(self):
+ return itertools.chain(self.children, self.bastards)
+
+ def append(self, match):
+ if match.dist <= self.threshold:
+ if not match.info_dir_change or self.allow_info_dir_change:
+ self.children.append(match)
+ else:
+ self.bastards.append(match)
+ else:
+ self.bastards.append(match)
+
+ def sort(self):
+ self.children.sort()
+ self.bastards.sort()
+
+
+class AccessMatcher:
+ def __init__(self, perm_maps=None):
+ self.type_penalty = 10
+ self.obj_penalty = 10
+ if perm_maps:
+ self.perm_maps = perm_maps
+ else:
+ self.perm_maps = objectmodel.PermMappings()
+ # We want a change in the information flow direction
+ # to be a strong penalty - stronger than access to
+ # a few unrelated types.
+ self.info_dir_penalty = 100
+
+ def type_distance(self, a, b):
+ if a == b or access.is_idparam(b):
+ return 0
+ else:
+ return -self.type_penalty
+
+
+ def perm_distance(self, av_req, av_prov):
+ # First check that we have enough perms
+ diff = av_req.perms.difference(av_prov.perms)
+
+ if len(diff) != 0:
+ total = self.perm_maps.getdefault_distance(av_req.obj_class, diff)
+ return -total
+ else:
+ diff = av_prov.perms.difference(av_req.perms)
+ return self.perm_maps.getdefault_distance(av_req.obj_class, diff)
+
+ def av_distance(self, req, prov):
+ """Determine the 'distance' between 2 access vectors.
+
+ This function is used to find an access vector that matches
+ a 'required' access. To do this we compute a signed numeric
+ value that indicates how close the req access is to the
+ 'provided' access vector. The closer the value is to 0
+ the closer the match, with 0 being an exact match.
+
+ A value over 0 indicates that the prov access vector provides more
+ access than the req (in practice, this means that the source type,
+ target type, and object class are the same and the perms in prov are
+ a superset of those in req).
+
+ A value under 0 indicates that the prov av provides less - or
+ unrelated - access than the req access. A different type or object class will
+ result in a very low value.
+
+ The values other than 0 should only be interpreted relative to
+ one another - they have no exact meaning and are likely to
+ change.
+
+ Params:
+ req - [AccessVector] The access that is required. This is the
+ access being matched.
+ prov - [AccessVector] The access provided. This is the potential
+ match that is being evaluated for req.
+ Returns:
+ 0 : Exact match between the access vectors.
+
+ < 0 : The prov av does not provide all of the access in req.
+ A smaller value indicates that the provided access is further from matching req.
+
+ > 0 : The prov av provides more access than req. The larger
+ the value the more access over req.
+ """
+ # FUTURE - this is _very_ expensive and probably needs some
+ # thorough performance work. This version is meant to give
+ # meaningful results relatively simply.
+ dist = 0
+
+ # Get the difference between the types. The addition is safe
+ # here because type_distance only returns 0 or negative.
+ dist += self.type_distance(req.src_type, prov.src_type)
+ dist += self.type_distance(req.tgt_type, prov.tgt_type)
+
+ # Object class distance
+ if req.obj_class != prov.obj_class and not access.is_idparam(prov.obj_class):
+ dist -= self.obj_penalty
+
+ # Permission distance
+
+ # If this av doesn't have a matching source type, target type, and object class
+ # count all of the permissions against it. Otherwise determine the perm
+ # distance and dir.
+ if dist < 0:
+ pdist = self.perm_maps.getdefault_distance(prov.obj_class, prov.perms)
+ else:
+ pdist = self.perm_distance(req, prov)
+
+ # Combine the perm and other distance
+ if dist < 0:
+ if pdist < 0:
+ return dist + pdist
+ else:
+ return dist - pdist
+ elif dist >= 0:
+ if pdist < 0:
+ return pdist - dist
+ else:
+ return dist + pdist
+
+ def av_set_match(self, av_set, av):
+ """
+
+ """
+ dist = None
+
+ # Get the distance for each access vector
+ for x in av_set:
+ tmp = self.av_distance(av, x)
+ if dist is None:
+ dist = tmp
+ elif tmp >= 0:
+ if dist >= 0:
+ dist += tmp
+ else:
+ dist = tmp + -dist
+ else:
+ if dist < 0:
+ dist += tmp
+ else:
+ dist -= tmp
+
+ # Penalize for information flow - we want to prevent the
+ # addition of a write if the requested is read none. We are
+ # much less concerned about the reverse.
+ av_dir = self.perm_maps.getdefault_direction(av.obj_class, av.perms)
+
+ if av_set.info_dir is None:
+ av_set.info_dir = objectmodel.FLOW_NONE
+ for x in av_set:
+ av_set.info_dir = av_set.info_dir | \
+ self.perm_maps.getdefault_direction(x.obj_class, x.perms)
+ if (av_dir & objectmodel.FLOW_WRITE == 0) and (av_set.info_dir & objectmodel.FLOW_WRITE):
+ if dist < 0:
+ dist -= self.info_dir_penalty
+ else:
+ dist += self.info_dir_penalty
+
+ return dist
+
+ def search_ifs(self, ifset, av, match_list):
+ match_list.av = av
+ for iv in itertools.chain(ifset.tgt_type_all,
+ ifset.tgt_type_map.get(av.tgt_type, [])):
+ if not iv.enabled:
+ #print "iv %s not enabled" % iv.name
+ continue
+
+ dist = self.av_set_match(iv.access, av)
+ if dist >= 0:
+ m = Match(iv, dist)
+ match_list.append(m)
+
+
+ match_list.sort()
+
+
diff --git a/lib/python2.7/site-packages/setoolsgui/sepolgen/module.py b/lib/python2.7/site-packages/setoolsgui/sepolgen/module.py
new file mode 100644
index 0000000..7fc9443
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/sepolgen/module.py
@@ -0,0 +1,213 @@
+# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
+#
+# Copyright (C) 2006 Red Hat
+# see file 'COPYING' for use and warranty information
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2 only
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+
+"""
+Utilities for dealing with the compilation of modules and creation
+of module trees.
+"""
+
+import defaults
+
+import selinux
+
+import re
+import tempfile
+import commands
+import os
+import os.path
+import subprocess
+import shutil
+
+def is_valid_name(modname):
+ """Check that a module name is valid.
+ """
+ m = re.findall("[^a-zA-Z0-9_\-\.]", modname)
+ if len(m) == 0 and modname[0].isalpha():
+ return True
+ else:
+ return False
+
+class ModuleTree:
+ def __init__(self, modname):
+ self.modname = modname
+ self.dirname = None
+
+ def dir_name(self):
+ return self.dirname
+
+ def te_name(self):
+ return self.dirname + "/" + self.modname + ".te"
+
+ def fc_name(self):
+ return self.dirname + "/" + self.modname + ".fc"
+
+ def if_name(self):
+ return self.dirname + "/" + self.modname + ".if"
+
+ def package_name(self):
+ return self.dirname + "/" + self.modname + ".pp"
+
+ def makefile_name(self):
+ return self.dirname + "/Makefile"
+
+ def create(self, parent_dirname, makefile_include=None):
+ self.dirname = parent_dirname + "/" + self.modname
+ os.mkdir(self.dirname)
+ fd = open(self.makefile_name(), "w")
+ if makefile_include:
+ fd.write("include " + makefile_include)
+ else:
+ fd.write("include " + defaults.refpolicy_makefile())
+ fd.close()
+
+ # Create empty files for the standard refpolicy
+ # module files
+ open(self.te_name(), "w").close()
+ open(self.fc_name(), "w").close()
+ open(self.if_name(), "w").close()
+
+def modname_from_sourcename(sourcename):
+ return os.path.splitext(os.path.split(sourcename)[1])[0]
+
+class ModuleCompiler:
+ """ModuleCompiler eases running of the module compiler.
+
+ The ModuleCompiler class encapsulates running the commandline
+ module compiler (checkmodule) and module packager (semodule_package).
+ You are likely interested in the create_module_package method.
+
+ Several options are controlled via parameters (these only affect
+ non-refpol builds):
+
+ .mls [boolean] Generate an MLS module (by passing -M to
+ checkmodule). True to generate an MLS module, false
+ otherwise.
+
+ .module [boolean] Generate a module instead of a base module.
+ True to generate a module, false to generate a base.
+
+ .checkmodule [string] Fully qualified path to the module compiler.
+ Default is /usr/bin/checkmodule.
+
+ .semodule_package [string] Fully qualified path to the module
+ packager. Defaults to /usr/bin/semodule_package.
+ .output [file object] File object used to write verbose
+ output of the compilation and packaging process.
+ """
+ def __init__(self, output=None):
+ """Create a ModuleCompiler instance, optionally with an
+ output file object for verbose output of the compilation process.
+ """
+ self.mls = selinux.is_selinux_mls_enabled()
+ self.module = True
+ self.checkmodule = "/usr/bin/checkmodule"
+ self.semodule_package = "/usr/bin/semodule_package"
+ self.output = output
+ self.last_output = ""
+ self.refpol_makefile = defaults.refpolicy_makefile()
+ self.make = "/usr/bin/make"
+
+ def o(self, str):
+ if self.output:
+ self.output.write(str + "\n")
+ self.last_output = str
+
+ def run(self, command):
+ self.o(command)
+ rc, output = commands.getstatusoutput(command)
+ self.o(output)
+
+ return rc
+
+ def gen_filenames(self, sourcename):
+ """Generate the module and policy package filenames from
+ a source file name. The source file must be in the form
+ of "foo.te". This will generate "foo.mod" and "foo.pp".
+
+ Returns a tuple with (modname, policypackage).
+ """
+ splitname = sourcename.split(".")
+ if len(splitname) < 2:
+ raise RuntimeError("invalid sourcefile name %s (must end in .te)", sourcename)
+ # Handle other periods in the filename correctly
+ basename = ".".join(splitname[0:-1])
+ modname = basename + ".mod"
+ packagename = basename + ".pp"
+
+ return (modname, packagename)
+
+ def create_module_package(self, sourcename, refpolicy=True):
+ """Create a module package saved in a packagename from a
+ sourcename.
+
+ The create_module_package creates a module package saved in a
+ file named sourcename (.pp is the standard extension) from a
+ source file (.te is the standard extension). The source file
+ should contain SELinux policy statements appropriate for a
+ base or non-base module (depending on the setting of .module).
+
+ Only file names are accepted, not open file objects or
+ descriptors because the command line SELinux tools are used.
+
+ On error a RuntimeError will be raised with a descriptive
+ error message.
+ """
+ if refpolicy:
+ self.refpol_build(sourcename)
+ else:
+ modname, packagename = self.gen_filenames(sourcename)
+ self.compile(sourcename, modname)
+ self.package(modname, packagename)
+ os.unlink(modname)
+
+ def refpol_build(self, sourcename):
+ # Compile
+ command = self.make + " -f " + self.refpol_makefile
+ rc = self.run(command)
+
+ # Raise an error if the process failed
+ if rc != 0:
+ raise RuntimeError("compilation failed:\n%s" % self.last_output)
+
+ def compile(self, sourcename, modname):
+ s = [self.checkmodule]
+ if self.mls:
+ s.append("-M")
+ if self.module:
+ s.append("-m")
+ s.append("-o")
+ s.append(modname)
+ s.append(sourcename)
+
+ rc = self.run(" ".join(s))
+ if rc != 0:
+ raise RuntimeError("compilation failed:\n%s" % self.last_output)
+
+ def package(self, modname, packagename):
+ s = [self.semodule_package]
+ s.append("-o")
+ s.append(packagename)
+ s.append("-m")
+ s.append(modname)
+
+ rc = self.run(" ".join(s))
+ if rc != 0:
+ raise RuntimeError("packaging failed [%s]" % self.last_output)
+
+
diff --git a/lib/python2.7/site-packages/setoolsgui/sepolgen/objectmodel.py b/lib/python2.7/site-packages/setoolsgui/sepolgen/objectmodel.py
new file mode 100644
index 0000000..88c8a1f
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/sepolgen/objectmodel.py
@@ -0,0 +1,172 @@
+# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
+#
+# Copyright (C) 2006 Red Hat
+# see file 'COPYING' for use and warranty information
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2 only
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+
+"""
+This module provides knowledge of object classes and permissions. It should
+be used to keep this knowledge from leaking into the more generic parts of
+the policy generation.
+"""
+
+# Objects that can be implicitly typed - these objects do
+# not _have_ to be implicitly typed (e.g., sockets can be
+# explicitly labeled), but they often are.
+#
+# File is in this list for /proc/self
+#
+# This list is useful when dealing with rules that have a
+# type (or param) used as both a subject and object. For
+# example:
+#
+# allow httpd_t httpd_t : socket read;
+#
+# This rule makes sense because the socket was (presumably) created
+# by a process with the type httpd_t.
+implicitly_typed_objects = ["socket", "fd", "process", "file", "lnk_file", "fifo_file",
+ "dbus", "capability", "unix_stream_socket"]
+
+#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+#
+#Information Flow
+#
+# All of the permissions in SELinux can be described in terms of
+# information flow. For example, a read of a file is a flow of
+# information from that file to the process reading. Viewing
+# permissions in these terms can be used to model a variety of
+# security properties.
+#
+# Here we have some infrastructure for understanding permissions
+# in terms of information flow
+#
+#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+
+# Information flow deals with information either flowing from a subject
+# to an object ("write") or to a subject from an object ("read"). Read
+# or write is described from the subject's point of view. It is also possible
+# for a permission to represent both a read and a write (though the flow is
+# typically asymmetric in terms of bandwidth). It is also possible for a
+# permission to not flow information at all (meaning that the result is a pure
+# side-effect).
+#
+# The following constants are for representing the directionality
+# of information flow.
+FLOW_NONE = 0
+FLOW_READ = 1
+FLOW_WRITE = 2
+FLOW_BOTH = FLOW_READ | FLOW_WRITE
+
+# These are used by the parser and for nice display of the directions
+str_to_dir = { "n" : FLOW_NONE, "r" : FLOW_READ, "w" : FLOW_WRITE, "b" : FLOW_BOTH }
+dir_to_str = { FLOW_NONE : "n", FLOW_READ : "r", FLOW_WRITE : "w", FLOW_BOTH : "b" }
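+
+# Directions combine with bitwise OR, so for example
+# FLOW_READ | FLOW_WRITE == FLOW_BOTH; PermMappings.getdefault_direction
+# below relies on exactly this to aggregate a set of permissions.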
+
+class PermMap:
+ """A mapping between a permission and its information flow properties.
+
+ PermMap represents the information flow properties of a single permission
+ including the direction (read, write, etc.) and an abstract representation
+ of the bandwidth of the flow (weight).
+ """
+ def __init__(self, perm, dir, weight):
+ self.perm = perm
+ self.dir = dir
+ self.weight = weight
+
+ def __repr__(self):
+ return "<sepolgen.objectmodel.PermMap %s %s %d>" % (self.perm,
+ dir_to_str[self.dir],
+ self.weight)
+
+class PermMappings:
+ """The information flow properties of a set of object classes and permissions.
+
+ PermMappings maps one or more classes and permissions to their PermMap objects
+ describing their information flow characteristics.
+ """
+ def __init__(self):
+ self.classes = { }
+ self.default_weight = 5
+ self.default_dir = FLOW_BOTH
+
+ def from_file(self, fd):
+ """Read the permission mappings from a file. This reads the format used
+ by Apol in the setools suite.
+ """
+ # This parsing is deliberately picky and bails at the slightest error. It
+ # is assumed that the permission map file will be shipped as part
+ # of sepolgen and not user modified, so this is a reasonable design
+ # choice. If user supplied permission mappings are needed the parser
+ # should be made a little more robust and give better error messages.
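+ #
+ # The file format, as inferred from the parsing below (shown here
+ # only as an illustration):
+ #
+ # class file
+ # read r 10
+ # write w 10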
+ cur = None
+ for line in fd:
+ fields = line.split()
+ if len(fields) == 0 or len(fields) == 1 or fields[0] == "#":
+ continue
+ if fields[0] == "class":
+ c = fields[1]
+ if self.classes.has_key(c):
+ raise ValueError("duplicate class in perm map")
+ self.classes[c] = { }
+ cur = self.classes[c]
+ else:
+ if len(fields) != 3:
+ raise ValueError("error in object classs permissions")
+ if cur is None:
+ raise ValueError("permission outside of class")
+ pm = PermMap(fields[0], str_to_dir[fields[1]], int(fields[2]))
+ cur[pm.perm] = pm
+
+ def get(self, obj, perm):
+ """Get the permission map for the object permission.
+
+ Returns:
+ PermMap representing the permission
+ Raises:
+ KeyError if the object or permission is not defined
+ """
+ return self.classes[obj][perm]
+
+ def getdefault(self, obj, perm):
+ """Get the permission map for the object permission or a default.
+
+ getdefault is the same as get except that a default PermMap is
+ returned if the object class or permission is not defined. The
+ default is FLOW_BOTH with a weight of 5.
+ """
+ try:
+ pm = self.classes[obj][perm]
+ except KeyError:
+ return PermMap(perm, self.default_dir, self.default_weight)
+ return pm
+
+ def getdefault_direction(self, obj, perms):
+ dir = FLOW_NONE
+ for perm in perms:
+ pm = self.getdefault(obj, perm)
+ dir = dir | pm.dir
+ return dir
+
+ def getdefault_distance(self, obj, perms):
+ total = 0
+ for perm in perms:
+ pm = self.getdefault(obj, perm)
+ total += pm.weight
+
+ return total
+
+
+
diff --git a/lib/python2.7/site-packages/setoolsgui/sepolgen/output.py b/lib/python2.7/site-packages/setoolsgui/sepolgen/output.py
new file mode 100644
index 0000000..739452d
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/sepolgen/output.py
@@ -0,0 +1,173 @@
+# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
+#
+# Copyright (C) 2006 Red Hat
+# see file 'COPYING' for use and warranty information
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2 only
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+
+"""
+Classes and functions for the output of reference policy modules.
+
+This module takes a refpolicy.Module object and formats it for
+output using the ModuleWriter object. By separating the output
+in this way the other parts of Madison can focus solely on
+generating policy. This keeps the semantic / syntactic issues
+cleanly separated from the formatting issues.
+"""
+
+import refpolicy
+import util
+
+class ModuleWriter:
+ def __init__(self):
+ self.fd = None
+ self.module = None
+ self.sort = True
+ self.requires = True
+
+ def write(self, module, fd):
+ self.module = module
+
+ if self.sort:
+ sort_filter(self.module)
+
+ # FIXME - make this handle nesting
+ for node, depth in refpolicy.walktree(self.module, showdepth=True):
+ fd.write("%s\n" % str(node))
+
+# Helper functions for sort_filter - this is all done old school
+# C style rather than with polymorphic methods because this sorting
+# is specific to output. It is not necessarily the comparison you
+# want generally.
+
+# Compare two IdSets - we could probably do something clever
+# with set difference here, but this works.
+def id_set_cmp(x, y):
+ xl = util.set_to_list(x)
+ xl.sort()
+ yl = util.set_to_list(y)
+ yl.sort()
+
+ if len(xl) != len(yl):
+ return cmp(xl[0], yl[0])
+ for v in zip(xl, yl):
+ if v[0] != v[1]:
+ return cmp(v[0], v[1])
+ return 0
+
+# Compare two avrules
+def avrule_cmp(a, b):
+ ret = id_set_cmp(a.src_types, b.src_types)
+ if ret != 0:
+ return ret
+ ret = id_set_cmp(a.tgt_types, b.tgt_types)
+ if ret != 0:
+ return ret
+ ret = id_set_cmp(a.obj_classes, b.obj_classes)
+ if ret != 0:
+ return ret
+
+ # At this point, who cares - just return something
+ return cmp(len(a.perms), len(b.perms))
+
+# Compare two interface calls
+def ifcall_cmp(a, b):
+ if a.args[0] != b.args[0]:
+ return cmp(a.args[0], b.args[0])
+ return cmp(a.ifname, b.ifname)
+
+# Compare two avrules or interface calls
+def rule_cmp(a, b):
+ if isinstance(a, refpolicy.InterfaceCall):
+ if isinstance(b, refpolicy.InterfaceCall):
+ return ifcall_cmp(a, b)
+ else:
+ return id_set_cmp([a.args[0]], b.src_types)
+ else:
+ if isinstance(b, refpolicy.AVRule):
+ return avrule_cmp(a,b)
+ else:
+ return id_set_cmp(a.src_types, [b.args[0]])
+
+def role_type_cmp(a, b):
+ return cmp(a.role, b.role)
+
+def sort_filter(module):
+ """Sort and group the output for readability.
+ """
+ def sort_node(node):
+ c = []
+
+ # Module statement
+ for mod in node.module_declarations():
+ c.append(mod)
+ c.append(refpolicy.Comment())
+
+ # Requires
+ for require in node.requires():
+ c.append(require)
+ c.append(refpolicy.Comment())
+
+ # Rules
+ #
+ # We are going to group output by source type (which
+ # we assume is the first argument for interfaces).
+ rules = []
+ rules.extend(node.avrules())
+ rules.extend(node.interface_calls())
+ rules.sort(rule_cmp)
+
+ cur = None
+ sep_rules = []
+ for rule in rules:
+ if isinstance(rule, refpolicy.InterfaceCall):
+ x = rule.args[0]
+ else:
+ x = util.first(rule.src_types)
+
+ if cur != x:
+ if cur:
+ sep_rules.append(refpolicy.Comment())
+ cur = x
+ comment = refpolicy.Comment()
+ comment.lines.append("============= %s ==============" % cur)
+ sep_rules.append(comment)
+ sep_rules.append(rule)
+
+ c.extend(sep_rules)
+
+
+ ras = []
+ ras.extend(node.role_types())
+ ras.sort(role_type_cmp)
+ if len(ras):
+ comment = refpolicy.Comment()
+ comment.lines.append("============= ROLES ==============")
+ c.append(comment)
+
+
+ c.extend(ras)
+
+ # Everything else
+ for child in node.children:
+ if child not in c:
+ c.append(child)
+
+ node.children = c
+
+ for node in module.nodes():
+ sort_node(node)
+
+
diff --git a/lib/python2.7/site-packages/setoolsgui/sepolgen/policygen.py b/lib/python2.7/site-packages/setoolsgui/sepolgen/policygen.py
new file mode 100644
index 0000000..5f38577
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/sepolgen/policygen.py
@@ -0,0 +1,402 @@
+# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
+#
+# Copyright (C) 2006 Red Hat
+# see file 'COPYING' for use and warranty information
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2 only
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+
+"""
+Classes and algorithms for the generation of SELinux policy.
+"""
+
+import itertools
+import textwrap
+
+import refpolicy
+import objectmodel
+import access
+import interfaces
+import matching
+import selinux.audit2why as audit2why
+try:
+ from setools import *
+except:
+ pass
+
+# Constants for the level of explanation from the generation
+# routines
+NO_EXPLANATION = 0
+SHORT_EXPLANATION = 1
+LONG_EXPLANATION = 2
+
+class PolicyGenerator:
+ """Generate a reference policy module from access vectors.
+
+ PolicyGenerator generates a new reference policy module
+ or updates an existing module based on requested access
+ in the form of access vectors.
+
+ It generates allow rules and optionally module require
+ statements and reference policy interfaces. By default
+ only allow rules are generated. The methods .set_gen_refpol
+ and .set_gen_requires turns on interface generation and
+ requires generation respectively.
+
+ PolicyGenerator can also optionally add comments explaining
+ why a particular access was allowed based on the audit
+ messages that generated the access. The access vectors
+ passed in must have the .audit_msgs field set correctly
+ and .explain set to SHORT|LONG_EXPLANATION to enable this
+ feature.
+
+ The module created by PolicyGenerator can be passed to
+ output.ModuleWriter to output a text representation.
+ """
+ def __init__(self, module=None):
+ """Initialize a PolicyGenerator with an optional
+ existing module.
+
+ If the module parameter is not None then access
+ will be added to the passed in module. Otherwise
+ a new reference policy module will be created.
+ """
+ self.ifgen = None
+ self.explain = NO_EXPLANATION
+ self.gen_requires = False
+ if module:
+ self.module = module
+ else:
+ self.module = refpolicy.Module()
+
+ self.dontaudit = False
+
+ self.domains = None
+
+ def set_gen_refpol(self, if_set=None, perm_maps=None):
+ """Set whether reference policy interfaces are generated.
+
+ To turn on interface generation pass in an interface set
+ to use for interface generation. To turn off interface
+ generation pass in None.
+
+ If interface generation is enabled requires generation
+ will also be enabled.
+ """
+ if if_set:
+ self.ifgen = InterfaceGenerator(if_set, perm_maps)
+ self.gen_requires = True
+ else:
+ self.ifgen = None
+ self.__set_module_style()
+
+
+ def set_gen_requires(self, status=True):
+ """Set whether module requires are generated.
+
+ Passing in true will turn on requires generation and
+ False will disable generation. If requires generation is
+ disabled interface generation will also be disabled and
+ can only be re-enabled via .set_gen_refpol.
+ """
+ self.gen_requires = status
+
+ def set_gen_explain(self, explain=SHORT_EXPLANATION):
+ """Set whether access is explained.
+ """
+ self.explain = explain
+
+ def set_gen_dontaudit(self, dontaudit):
+ self.dontaudit = dontaudit
+
+ def __set_module_style(self):
+ if self.ifgen:
+ refpolicy = True
+ else:
+ refpolicy = False
+ for mod in self.module.module_declarations():
+ mod.refpolicy = refpolicy
+
+ def set_module_name(self, name, version="1.0"):
+ """Set the name of the module and optionally the version.
+ """
+ # find an existing module declaration
+ m = None
+ for mod in self.module.module_declarations():
+ m = mod
+ if not m:
+ m = refpolicy.ModuleDeclaration()
+ self.module.children.insert(0, m)
+ m.name = name
+ m.version = version
+ if self.ifgen:
+ m.refpolicy = True
+ else:
+ m.refpolicy = False
+
+ def get_module(self):
+ """Return the generated module, generating requires first if enabled."""
+ if self.gen_requires:
+ gen_requires(self.module)
+
+ return self.module
+
+ def __add_allow_rules(self, avs):
+ for av in avs:
+ rule = refpolicy.AVRule(av)
+ if self.dontaudit:
+ rule.rule_type = rule.DONTAUDIT
+ rule.comment = ""
+ if self.explain:
+ rule.comment = str(refpolicy.Comment(explain_access(av, verbosity=self.explain)))
+ if av.type == audit2why.ALLOW:
+ rule.comment += "\n#!!!! This avc is allowed in the current policy"
+ if av.type == audit2why.DONTAUDIT:
+ rule.comment += "\n#!!!! This avc has a dontaudit rule in the current policy"
+
+ if av.type == audit2why.BOOLEAN:
+ if len(av.data) > 1:
+ rule.comment += "\n#!!!! This avc can be allowed using one of the these booleans:\n# %s" % ", ".join(map(lambda x: x[0], av.data))
+ else:
+ rule.comment += "\n#!!!! This avc can be allowed using the boolean '%s'" % av.data[0][0]
+
+ if av.type == audit2why.CONSTRAINT:
+ rule.comment += "\n#!!!! This avc is a constraint violation. You would need to modify the attributes of either the source or target types to allow this access."
+ rule.comment += "\n#Constraint rule: "
+ rule.comment += "\n\t" + av.data[0]
+ for reason in av.data[1:]:
+ rule.comment += "\n#\tPossible cause is the source %s and target %s are different." % reason
+
+ try:
+ if ( av.type == audit2why.TERULE and
+ "write" in av.perms and
+ ( "dir" in av.obj_class or "open" in av.perms )):
+ if not self.domains:
+ self.domains = seinfo(ATTRIBUTE, name="domain")[0]["types"]
+ types=[]
+
+ for i in map(lambda x: x[TCONTEXT], sesearch([ALLOW], {SCONTEXT: av.src_type, CLASS: av.obj_class, PERMS: av.perms})):
+ if i not in self.domains:
+ types.append(i)
+ if len(types) == 1:
+ rule.comment += "\n#!!!! The source type '%s' can write to a '%s' of the following type:\n# %s\n" % ( av.src_type, av.obj_class, ", ".join(types))
+ elif len(types) >= 1:
+ rule.comment += "\n#!!!! The source type '%s' can write to a '%s' of the following types:\n# %s\n" % ( av.src_type, av.obj_class, ", ".join(types))
+ except:
+ pass
+ self.module.children.append(rule)
+
+
+ def add_access(self, av_set):
+ """Add the access from the access vector set to this
+ module.
+ """
+ # Use the interface generator to split the access
+ # into raw allow rules and interfaces. After this
+ # a will contain a list of access that should be
+ # used as raw allow rules and the interfaces will
+ # be added to the module.
+ if self.ifgen:
+ raw_allow, ifcalls = self.ifgen.gen(av_set, self.explain)
+ self.module.children.extend(ifcalls)
+ else:
+ raw_allow = av_set
+
+ # Generate the raw allow rules from the filtered list
+ self.__add_allow_rules(raw_allow)
+
+ def add_role_types(self, role_type_set):
+ for role_type in role_type_set:
+ self.module.children.append(role_type)
+
+def explain_access(av, ml=None, verbosity=SHORT_EXPLANATION):
+ """Explain why a policy statement was generated.
+
+ Return a string containing a text explanation of
+ why a policy statement was generated. The string is
+ commented and wrapped and can be directly inserted
+ into a policy.
+
+ Params:
+ av - access vector representing the access. Should
+ have .audit_msgs set appropriately.
+ verbosity - the amount of explanation provided. Should
+ be set to NO_EXPLANATION, SHORT_EXPLANATION, or
+ LONG_EXPLANATION.
+ Returns:
+ list of strings - strings explaining the access or an empty
+ string if verbosity=NO_EXPLANATION or there is not sufficient
+ information to provide an explanation.
+ """
+ s = []
+
+ def explain_interfaces():
+ if not ml:
+ return
+ s.append(" Interface options:")
+ for match in ml.all():
+ ifcall = call_interface(match.interface, ml.av)
+ s.append(' %s # [%d]' % (ifcall.to_string(), match.dist))
+
+
+ # Format the raw audit data to explain why the
+ # access was requested - either long or short.
+ if verbosity == LONG_EXPLANATION:
+ for msg in av.audit_msgs:
+ s.append(' %s' % msg.header)
+ s.append(' scontext="%s" tcontext="%s"' %
+ (str(msg.scontext), str(msg.tcontext)))
+ s.append(' class="%s" perms="%s"' %
+ (msg.tclass, refpolicy.list_to_space_str(msg.accesses)))
+ s.append(' comm="%s" exe="%s" path="%s"' % (msg.comm, msg.exe, msg.path))
+ s.extend(textwrap.wrap('message="' + msg.message + '"', 80, initial_indent=" ",
+ subsequent_indent=" "))
+ explain_interfaces()
+ elif verbosity:
+ s.append(' src="%s" tgt="%s" class="%s", perms="%s"' %
+ (av.src_type, av.tgt_type, av.obj_class, av.perms.to_space_str()))
+ # For the short display we are only going to use the additional information
+ # from the first audit message. For the vast majority of cases this info
+ # will always be the same anyway.
+ if len(av.audit_msgs) > 0:
+ msg = av.audit_msgs[0]
+ s.append(' comm="%s" exe="%s" path="%s"' % (msg.comm, msg.exe, msg.path))
+ explain_interfaces()
+ return s
+
+def param_comp(a, b):
+ return cmp(b.num, a.num)
+
+def call_interface(interface, av):
+ params = []
+ args = []
+
+ params.extend(interface.params.values())
+ params.sort(param_comp)
+
+ ifcall = refpolicy.InterfaceCall()
+ ifcall.ifname = interface.name
+
+ for i in range(len(params)):
+ if params[i].type == refpolicy.SRC_TYPE:
+ ifcall.args.append(av.src_type)
+ elif params[i].type == refpolicy.TGT_TYPE:
+ ifcall.args.append(av.tgt_type)
+ elif params[i].type == refpolicy.OBJ_CLASS:
+ ifcall.args.append(av.obj_class)
+ else:
+ print params[i].type
+ assert(0)
+
+ assert(len(ifcall.args) > 0)
+
+ return ifcall
+
+class InterfaceGenerator:
+ def __init__(self, ifs, perm_maps=None):
+ self.ifs = ifs
+ self.hack_check_ifs(ifs)
+ self.matcher = matching.AccessMatcher(perm_maps)
+ self.calls = []
+
+ def hack_check_ifs(self, ifs):
+ # FIXME: Disable interfaces we can't call - this is a hack.
+ # Because we don't handle roles, multiple parameters, etc.,
+ # etc., we must make certain we can actually use a returned
+ # interface.
+ for x in ifs.interfaces.values():
+ params = []
+ params.extend(x.params.values())
+ params.sort(param_comp)
+ for i in range(len(params)):
+ # Check that the parameter position matches
+ # the number (e.g., $1 is the first arg). This
+ # will fail if the parser missed something.
+ if (i + 1) != params[i].num:
+ x.enabled = False
+ break
+ # Check that we can handle the param type (currently excludes
+ # roles).
+ if params[i].type not in [refpolicy.SRC_TYPE, refpolicy.TGT_TYPE,
+ refpolicy.OBJ_CLASS]:
+ x.enabled = False
+ break
+
+ def gen(self, avs, verbosity):
+ raw_av = self.match(avs)
+ ifcalls = []
+ for ml in self.calls:
+ ifcall = call_interface(ml.best().interface, ml.av)
+ if verbosity:
+ ifcall.comment = refpolicy.Comment(explain_access(ml.av, ml, verbosity))
+ ifcalls.append((ifcall, ml))
+
+ d = []
+ for ifcall, ifs in ifcalls:
+ found = False
+ for o_ifcall in d:
+ if o_ifcall.matches(ifcall):
+ if o_ifcall.comment and ifcall.comment:
+ o_ifcall.comment.merge(ifcall.comment)
+ found = True
+ if not found:
+ d.append(ifcall)
+
+ return (raw_av, d)
+
+
+ def match(self, avs):
+ raw_av = []
+ for av in avs:
+ ans = matching.MatchList()
+ self.matcher.search_ifs(self.ifs, av, ans)
+ if len(ans):
+ self.calls.append(ans)
+ else:
+ raw_av.append(av)
+
+ return raw_av
+
+
+def gen_requires(module):
+ """Add require statements to the module.
+ """
+ def collect_requires(node):
+ r = refpolicy.Require()
+ for avrule in node.avrules():
+ r.types.update(avrule.src_types)
+ r.types.update(avrule.tgt_types)
+ for obj in avrule.obj_classes:
+ r.add_obj_class(obj, avrule.perms)
+
+ for ifcall in node.interface_calls():
+ for arg in ifcall.args:
+ # FIXME - handle non-type arguments when we
+ # can actually figure those out.
+ r.types.add(arg)
+
+ for role_type in node.role_types():
+ r.roles.add(role_type.role)
+ r.types.update(role_type.types)
+
+ r.types.discard("self")
+
+ node.children.insert(0, r)
+
+ # FUTURE - this is untested on modules with any sort of
+ # nesting
+ for node in module.nodes():
+ collect_requires(node)
+
+
diff --git a/lib/python2.7/site-packages/setoolsgui/sepolgen/refparser.py b/lib/python2.7/site-packages/setoolsgui/sepolgen/refparser.py
new file mode 100644
index 0000000..83542d3
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/sepolgen/refparser.py
@@ -0,0 +1,1128 @@
+# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
+#
+# Copyright (C) 2006-2007 Red Hat
+# see file 'COPYING' for use and warranty information
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2 only
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+
+# OVERVIEW
+#
+#
+# This is a parser for the refpolicy policy "language" - i.e., the
+# normal SELinux policy language plus the refpolicy style M4 macro
+# constructs on top of that base language. This parser is primarily
+# aimed at parsing the policy headers in order to create an abstract
+# policy representation suitable for generating policy.
+#
+# Both the lexer and parser are included in this file. They are implemented
+# using the Ply library (included with sepolgen).
+
+import sys
+import os
+import re
+import traceback
+
+import refpolicy
+import access
+import defaults
+
+import lex
+import yacc
+
+# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+#
+# lexer
+#
+# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+
+tokens = (
+ # basic tokens, punctuation
+ 'TICK',
+ 'SQUOTE',
+ 'OBRACE',
+ 'CBRACE',
+ 'SEMI',
+ 'COLON',
+ 'OPAREN',
+ 'CPAREN',
+ 'COMMA',
+ 'MINUS',
+ 'TILDE',
+ 'ASTERISK',
+ 'AMP',
+ 'BAR',
+ 'EXPL',
+ 'EQUAL',
+ 'FILENAME',
+ 'IDENTIFIER',
+ 'NUMBER',
+ 'PATH',
+ 'IPV6_ADDR',
+ # reserved words
+ # module
+ 'MODULE',
+ 'POLICY_MODULE',
+ 'REQUIRE',
+ # flask
+ 'SID',
+ 'GENFSCON',
+ 'FS_USE_XATTR',
+ 'FS_USE_TRANS',
+ 'FS_USE_TASK',
+ 'PORTCON',
+ 'NODECON',
+ 'NETIFCON',
+ 'PIRQCON',
+ 'IOMEMCON',
+ 'IOPORTCON',
+ 'PCIDEVICECON',
+ 'DEVICETREECON',
+ # object classes
+ 'CLASS',
+ # types and attributes
+ 'TYPEATTRIBUTE',
+ 'ROLEATTRIBUTE',
+ 'TYPE',
+ 'ATTRIBUTE',
+ 'ATTRIBUTE_ROLE',
+ 'ALIAS',
+ 'TYPEALIAS',
+ # conditional policy
+ 'BOOL',
+ 'TRUE',
+ 'FALSE',
+ 'IF',
+ 'ELSE',
+ # users and roles
+ 'ROLE',
+ 'TYPES',
+ # rules
+ 'ALLOW',
+ 'DONTAUDIT',
+ 'AUDITALLOW',
+ 'NEVERALLOW',
+ 'PERMISSIVE',
+ 'TYPE_TRANSITION',
+ 'TYPE_CHANGE',
+ 'TYPE_MEMBER',
+ 'RANGE_TRANSITION',
+ 'ROLE_TRANSITION',
+ # refpolicy keywords
+ 'OPT_POLICY',
+ 'INTERFACE',
+ 'TUNABLE_POLICY',
+ 'GEN_REQ',
+ 'TEMPLATE',
+ 'GEN_CONTEXT',
+ # m4
+ 'IFELSE',
+ 'IFDEF',
+ 'IFNDEF',
+ 'DEFINE'
+ )
+
+# All reserved keywords - see t_IDENTIFIER for how these are matched in
+# the lexer.
+reserved = {
+ # module
+ 'module' : 'MODULE',
+ 'policy_module' : 'POLICY_MODULE',
+ 'require' : 'REQUIRE',
+ # flask
+ 'sid' : 'SID',
+ 'genfscon' : 'GENFSCON',
+ 'fs_use_xattr' : 'FS_USE_XATTR',
+ 'fs_use_trans' : 'FS_USE_TRANS',
+ 'fs_use_task' : 'FS_USE_TASK',
+ 'portcon' : 'PORTCON',
+ 'nodecon' : 'NODECON',
+ 'netifcon' : 'NETIFCON',
+ 'pirqcon' : 'PIRQCON',
+ 'iomemcon' : 'IOMEMCON',
+ 'ioportcon' : 'IOPORTCON',
+ 'pcidevicecon' : 'PCIDEVICECON',
+ 'devicetreecon' : 'DEVICETREECON',
+ # object classes
+ 'class' : 'CLASS',
+ # types and attributes
+ 'typeattribute' : 'TYPEATTRIBUTE',
+ 'roleattribute' : 'ROLEATTRIBUTE',
+ 'type' : 'TYPE',
+ 'attribute' : 'ATTRIBUTE',
+ 'attribute_role' : 'ATTRIBUTE_ROLE',
+ 'alias' : 'ALIAS',
+ 'typealias' : 'TYPEALIAS',
+ # conditional policy
+ 'bool' : 'BOOL',
+ 'true' : 'TRUE',
+ 'false' : 'FALSE',
+ 'if' : 'IF',
+ 'else' : 'ELSE',
+ # users and roles
+ 'role' : 'ROLE',
+ 'types' : 'TYPES',
+ # rules
+ 'allow' : 'ALLOW',
+ 'dontaudit' : 'DONTAUDIT',
+ 'auditallow' : 'AUDITALLOW',
+ 'neverallow' : 'NEVERALLOW',
+ 'permissive' : 'PERMISSIVE',
+ 'type_transition' : 'TYPE_TRANSITION',
+ 'type_change' : 'TYPE_CHANGE',
+ 'type_member' : 'TYPE_MEMBER',
+ 'range_transition' : 'RANGE_TRANSITION',
+ 'role_transition' : 'ROLE_TRANSITION',
+ # refpolicy keywords
+ 'optional_policy' : 'OPT_POLICY',
+ 'interface' : 'INTERFACE',
+ 'tunable_policy' : 'TUNABLE_POLICY',
+ 'gen_require' : 'GEN_REQ',
+ 'template' : 'TEMPLATE',
+ 'gen_context' : 'GEN_CONTEXT',
+ # M4
+ 'ifelse' : 'IFELSE',
+ 'ifndef' : 'IFNDEF',
+ 'ifdef' : 'IFDEF',
+ 'define' : 'DEFINE'
+ }
+
+# The ply lexer allows definition of tokens in 2 ways: regular expressions
+# or functions.
+
+# Simple regex tokens
+t_TICK = r'\`'
+t_SQUOTE = r'\''
+t_OBRACE = r'\{'
+t_CBRACE = r'\}'
+# This will handle spurious extra ';' via the +
+t_SEMI = r'\;+'
+t_COLON = r'\:'
+t_OPAREN = r'\('
+t_CPAREN = r'\)'
+t_COMMA = r'\,'
+t_MINUS = r'\-'
+t_TILDE = r'\~'
+t_ASTERISK = r'\*'
+t_AMP = r'\&'
+t_BAR = r'\|'
+t_EXPL = r'\!'
+t_EQUAL = r'\='
+t_NUMBER = r'[0-9\.]+'
+t_PATH = r'/[a-zA-Z0-9)_\.\*/]*'
+#t_IPV6_ADDR = r'[a-fA-F0-9]{0,4}:[a-fA-F0-9]{0,4}:([a-fA-F0-9]{0,4}:)*'
+
+# Ignore whitespace - this is a special token for ply that more efficiently
+# ignores uninteresting tokens.
+t_ignore = " \t"
+
+# More complex tokens
+def t_IPV6_ADDR(t):
+ r'[a-fA-F0-9]{0,4}:[a-fA-F0-9]{0,4}:([a-fA-F0-9]|:)*'
+    # Defined as a function only so that ply gives it precedence over
+    # the simple string rules above.
+ return t
+
+def t_m4comment(t):
+ r'dnl.*\n'
+ # Ignore all comments
+ t.lexer.lineno += 1
+
+def t_refpolicywarn1(t):
+ r'define.*refpolicywarn\(.*\n'
+ # Ignore refpolicywarn statements - they sometimes
+ # contain text that we can't parse.
+    t.lexer.lineno += 1
+
+def t_refpolicywarn(t):
+ r'refpolicywarn\(.*\n'
+ # Ignore refpolicywarn statements - they sometimes
+ # contain text that we can't parse.
+ t.lexer.lineno += 1
+
+def t_IDENTIFIER(t):
+ r'[a-zA-Z_\$][a-zA-Z0-9_\-\+\.\$\*~]*'
+ # Handle any keywords
+ t.type = reserved.get(t.value,'IDENTIFIER')
+ return t
+
+def t_FILENAME(t):
+ r'\"[a-zA-Z0-9_\-\+\.\$\*~ :]+\"'
+    # A quoted filename never matches a reserved word; the lookup is
+    # kept for symmetry with t_IDENTIFIER.
+ t.type = reserved.get(t.value,'FILENAME')
+ return t
+
+def t_comment(t):
+ r'\#.*\n'
+ # Ignore all comments
+ t.lexer.lineno += 1
+
+def t_error(t):
+ print "Illegal character '%s'" % t.value[0]
+ t.skip(1)
+
+def t_newline(t):
+ r'\n+'
+ t.lexer.lineno += len(t.value)
+
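+# Illustrative sketch (hypothetical helper, not part of upstream
+# sepolgen): how the token rules above are exercised. lex.lex() builds
+# a scanner from this module's rules; iter(l.token, None) drains it.
+def _demo_tokenize(text="allow foo_t bar_t:file { read write };"):
+    l = lex.lex()
+    l.input(text)
+    # Each token carries .type (e.g. ALLOW, IDENTIFIER) and .value.
+    return [(tok.type, tok.value) for tok in iter(l.token, None)]
+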
+# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+#
+# Parser
+#
+# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+
+# Global data used during parsing - making it global is easier than
+# passing the state through the parsing functions.
+
+# m is the top-level data structure (stands for modules).
+m = None
+# error is either None (indicating no error) or a string error message.
+error = None
+parse_file = ""
+# spt is the support macros (e.g., obj/perm sets) - it is an instance of
+# refpolicy.SupportMacros and should always be present during parsing
+# though it may not contain any macros.
+spt = None
+success = True
+
+# utilities
+def collect(stmts, parent, val=None):
+ if stmts is None:
+ return
+ for s in stmts:
+ if s is None:
+ continue
+ s.parent = parent
+ if val is not None:
+ parent.children.insert(0, (val, s))
+ else:
+ parent.children.insert(0, s)
+
+def expand(ids, s):
+ for id in ids:
+ if spt.has_key(id):
+ s.update(spt.by_name(id))
+ else:
+ s.add(id)
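+
+# Editor's note (illustrative): expand() resolves support-macro names via
+# the global spt (a refpolicy.SupportMacros). E.g. with a macro
+# 'rw_file_perms' defined, expand(['rw_file_perms', 'lock'], s) leaves s
+# holding the union of that macro's permissions plus the literal 'lock'.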
+
+# Top-level non-terminal
+def p_statements(p):
+ '''statements : statement
+ | statements statement
+ | empty
+ '''
+ if len(p) == 2 and p[1]:
+ m.children.append(p[1])
+ elif len(p) > 2 and p[2]:
+ m.children.append(p[2])
+
+def p_statement(p):
+ '''statement : interface
+ | template
+ | obj_perm_set
+ | policy
+ | policy_module_stmt
+ | module_stmt
+ '''
+ p[0] = p[1]
+
+def p_empty(p):
+ 'empty :'
+ pass
+
+#
+# Reference policy language constructs
+#
+
+# This is for the policy module statement (e.g., policy_module(foo,1.2.0)).
+# We use separate terminals for the basic language module statement and for
+# interface calls to make them easier to identify.
+def p_policy_module_stmt(p):
+ 'policy_module_stmt : POLICY_MODULE OPAREN IDENTIFIER COMMA NUMBER CPAREN'
+ m = refpolicy.ModuleDeclaration()
+ m.name = p[3]
+ m.version = p[5]
+ m.refpolicy = True
+ p[0] = m
+
+def p_interface(p):
+ '''interface : INTERFACE OPAREN TICK IDENTIFIER SQUOTE COMMA TICK interface_stmts SQUOTE CPAREN
+ '''
+ x = refpolicy.Interface(p[4])
+ collect(p[8], x)
+ p[0] = x
+
+def p_template(p):
+ '''template : TEMPLATE OPAREN TICK IDENTIFIER SQUOTE COMMA TICK interface_stmts SQUOTE CPAREN
+ | DEFINE OPAREN TICK IDENTIFIER SQUOTE COMMA TICK interface_stmts SQUOTE CPAREN
+ '''
+ x = refpolicy.Template(p[4])
+ collect(p[8], x)
+ p[0] = x
+
+def p_define(p):
+ '''define : DEFINE OPAREN TICK IDENTIFIER SQUOTE CPAREN'''
+ # This is for defining single M4 values (to be used later in ifdef statements).
+ # Example: define(`sulogin_no_pam'). We don't currently do anything with these
+ # but we should in the future when we correctly resolve ifdef statements.
+ p[0] = None
+
+def p_interface_stmts(p):
+ '''interface_stmts : policy
+ | interface_stmts policy
+ | empty
+ '''
+ if len(p) == 2 and p[1]:
+ p[0] = p[1]
+ elif len(p) > 2:
+ if not p[1]:
+ if p[2]:
+ p[0] = p[2]
+ elif not p[2]:
+ p[0] = p[1]
+ else:
+ p[0] = p[1] + p[2]
+
+def p_optional_policy(p):
+ '''optional_policy : OPT_POLICY OPAREN TICK interface_stmts SQUOTE CPAREN
+ | OPT_POLICY OPAREN TICK interface_stmts SQUOTE COMMA TICK interface_stmts SQUOTE CPAREN
+ '''
+ o = refpolicy.OptionalPolicy()
+ collect(p[4], o, val=True)
+ if len(p) > 7:
+ collect(p[8], o, val=False)
+ p[0] = [o]
+
+def p_tunable_policy(p):
+ '''tunable_policy : TUNABLE_POLICY OPAREN TICK cond_expr SQUOTE COMMA TICK interface_stmts SQUOTE CPAREN
+ | TUNABLE_POLICY OPAREN TICK cond_expr SQUOTE COMMA TICK interface_stmts SQUOTE COMMA TICK interface_stmts SQUOTE CPAREN
+ '''
+ x = refpolicy.TunablePolicy()
+ x.cond_expr = p[4]
+ collect(p[8], x, val=True)
+ if len(p) > 11:
+ collect(p[12], x, val=False)
+ p[0] = [x]
+
+def p_ifelse(p):
+ '''ifelse : IFELSE OPAREN TICK IDENTIFIER SQUOTE COMMA COMMA TICK IDENTIFIER SQUOTE COMMA TICK interface_stmts SQUOTE CPAREN optional_semi
+ | IFELSE OPAREN TICK IDENTIFIER SQUOTE COMMA TICK IDENTIFIER SQUOTE COMMA TICK interface_stmts SQUOTE COMMA TICK interface_stmts SQUOTE CPAREN optional_semi
+ '''
+# x = refpolicy.IfDef(p[4])
+# v = True
+# collect(p[8], x, val=v)
+# if len(p) > 12:
+# collect(p[12], x, val=False)
+# p[0] = [x]
+ pass
+
+
+def p_ifdef(p):
+ '''ifdef : IFDEF OPAREN TICK IDENTIFIER SQUOTE COMMA TICK interface_stmts SQUOTE CPAREN optional_semi
+ | IFNDEF OPAREN TICK IDENTIFIER SQUOTE COMMA TICK interface_stmts SQUOTE CPAREN optional_semi
+ | IFDEF OPAREN TICK IDENTIFIER SQUOTE COMMA TICK interface_stmts SQUOTE COMMA TICK interface_stmts SQUOTE CPAREN optional_semi
+ '''
+ x = refpolicy.IfDef(p[4])
+ if p[1] == 'ifdef':
+ v = True
+ else:
+ v = False
+ collect(p[8], x, val=v)
+ if len(p) > 12:
+ collect(p[12], x, val=False)
+ p[0] = [x]
+
+def p_interface_call(p):
+ '''interface_call : IDENTIFIER OPAREN interface_call_param_list CPAREN
+ | IDENTIFIER OPAREN CPAREN
+ | IDENTIFIER OPAREN interface_call_param_list CPAREN SEMI'''
+ # Allow spurious semi-colons at the end of interface calls
+ i = refpolicy.InterfaceCall(ifname=p[1])
+ if len(p) > 4:
+ i.args.extend(p[3])
+ p[0] = i
+
+def p_interface_call_param(p):
+ '''interface_call_param : IDENTIFIER
+ | IDENTIFIER MINUS IDENTIFIER
+ | nested_id_set
+ | TRUE
+ | FALSE
+ | FILENAME
+ '''
+    # Intentionally let single identifiers pass through unchanged; a list
+    # denotes a set, a bare (non-list) value a single identifier.
+ if len(p) == 2:
+ p[0] = p[1]
+ else:
+ p[0] = [p[1], "-" + p[3]]
+
+def p_interface_call_param_list(p):
+ '''interface_call_param_list : interface_call_param
+ | interface_call_param_list COMMA interface_call_param
+ '''
+ if len(p) == 2:
+ p[0] = [p[1]]
+ else:
+ p[0] = p[1] + [p[3]]
+
+
+def p_obj_perm_set(p):
+ 'obj_perm_set : DEFINE OPAREN TICK IDENTIFIER SQUOTE COMMA TICK names SQUOTE CPAREN'
+ s = refpolicy.ObjPermSet(p[4])
+ s.perms = p[8]
+ p[0] = s
+
+#
+# Basic SELinux policy language
+#
+
+def p_policy(p):
+ '''policy : policy_stmt
+ | optional_policy
+ | tunable_policy
+ | ifdef
+ | ifelse
+ | conditional
+ '''
+ p[0] = p[1]
+
+def p_policy_stmt(p):
+ '''policy_stmt : gen_require
+ | avrule_def
+ | typerule_def
+ | typeattribute_def
+ | roleattribute_def
+ | interface_call
+ | role_def
+ | role_allow
+ | permissive
+ | type_def
+ | typealias_def
+ | attribute_def
+ | attribute_role_def
+ | range_transition_def
+ | role_transition_def
+ | bool
+ | define
+ | initial_sid
+ | genfscon
+ | fs_use
+ | portcon
+ | nodecon
+ | netifcon
+ | pirqcon
+ | iomemcon
+ | ioportcon
+ | pcidevicecon
+ | devicetreecon
+ '''
+ if p[1]:
+ p[0] = [p[1]]
+
+def p_module_stmt(p):
+ 'module_stmt : MODULE IDENTIFIER NUMBER SEMI'
+ m = refpolicy.ModuleDeclaration()
+ m.name = p[2]
+ m.version = p[3]
+ m.refpolicy = False
+ p[0] = m
+
+def p_gen_require(p):
+ '''gen_require : GEN_REQ OPAREN TICK requires SQUOTE CPAREN
+ | REQUIRE OBRACE requires CBRACE'''
+ # We ignore the require statements - they are redundant data from our point-of-view.
+ # Checkmodule will verify them later anyway so we just assume that they match what
+ # is in the rest of the interface.
+ pass
+
+def p_requires(p):
+ '''requires : require
+ | requires require
+ | ifdef
+ | requires ifdef
+ '''
+ pass
+
+def p_require(p):
+ '''require : TYPE comma_list SEMI
+ | ROLE comma_list SEMI
+ | ATTRIBUTE comma_list SEMI
+ | ATTRIBUTE_ROLE comma_list SEMI
+ | CLASS comma_list SEMI
+ | BOOL comma_list SEMI
+ '''
+ pass
+
+def p_security_context(p):
+ '''security_context : IDENTIFIER COLON IDENTIFIER COLON IDENTIFIER
+ | IDENTIFIER COLON IDENTIFIER COLON IDENTIFIER COLON mls_range_def'''
+ # This will likely need some updates to handle complex levels
+ s = refpolicy.SecurityContext()
+ s.user = p[1]
+ s.role = p[3]
+ s.type = p[5]
+ if len(p) > 6:
+ s.level = p[7]
+
+ p[0] = s
+
+def p_gen_context(p):
+ '''gen_context : GEN_CONTEXT OPAREN security_context COMMA mls_range_def CPAREN
+ '''
+ # We actually store gen_context statements in a SecurityContext
+ # object - it knows how to output either a bare context or a
+ # gen_context statement.
+ s = p[3]
+ s.level = p[5]
+
+ p[0] = s
+
+def p_context(p):
+ '''context : security_context
+ | gen_context
+ '''
+ p[0] = p[1]
+
+def p_initial_sid(p):
+ '''initial_sid : SID IDENTIFIER context'''
+ s = refpolicy.InitialSid()
+ s.name = p[2]
+ s.context = p[3]
+ p[0] = s
+
+def p_genfscon(p):
+ '''genfscon : GENFSCON IDENTIFIER PATH context'''
+
+ g = refpolicy.GenfsCon()
+ g.filesystem = p[2]
+ g.path = p[3]
+ g.context = p[4]
+
+ p[0] = g
+
+def p_fs_use(p):
+ '''fs_use : FS_USE_XATTR IDENTIFIER context SEMI
+ | FS_USE_TASK IDENTIFIER context SEMI
+ | FS_USE_TRANS IDENTIFIER context SEMI
+ '''
+ f = refpolicy.FilesystemUse()
+ if p[1] == "fs_use_xattr":
+ f.type = refpolicy.FilesystemUse.XATTR
+ elif p[1] == "fs_use_task":
+ f.type = refpolicy.FilesystemUse.TASK
+ elif p[1] == "fs_use_trans":
+ f.type = refpolicy.FilesystemUse.TRANS
+
+ f.filesystem = p[2]
+ f.context = p[3]
+
+ p[0] = f
+
+def p_portcon(p):
+ '''portcon : PORTCON IDENTIFIER NUMBER context
+ | PORTCON IDENTIFIER NUMBER MINUS NUMBER context'''
+ c = refpolicy.PortCon()
+ c.port_type = p[2]
+ if len(p) == 5:
+ c.port_number = p[3]
+ c.context = p[4]
+ else:
+        # Range form: p[3] is the low port, p[4] the '-', p[5] the high
+        # port, and p[6] the context.
+        c.port_number = p[3] + "-" + p[5]
+        c.context = p[6]
+
+ p[0] = c
+
+def p_nodecon(p):
+ '''nodecon : NODECON NUMBER NUMBER context
+ | NODECON IPV6_ADDR IPV6_ADDR context
+ '''
+ n = refpolicy.NodeCon()
+ n.start = p[2]
+ n.end = p[3]
+ n.context = p[4]
+
+ p[0] = n
+
+def p_netifcon(p):
+ 'netifcon : NETIFCON IDENTIFIER context context'
+ n = refpolicy.NetifCon()
+ n.interface = p[2]
+ n.interface_context = p[3]
+ n.packet_context = p[4]
+
+ p[0] = n
+
+def p_pirqcon(p):
+ 'pirqcon : PIRQCON NUMBER context'
+ c = refpolicy.PirqCon()
+ c.pirq_number = p[2]
+ c.context = p[3]
+
+ p[0] = c
+
+def p_iomemcon(p):
+ '''iomemcon : IOMEMCON NUMBER context
+ | IOMEMCON NUMBER MINUS NUMBER context'''
+ c = refpolicy.IomemCon()
+ if len(p) == 4:
+ c.device_mem = p[2]
+ c.context = p[3]
+ else:
+        # Range form: p[2] is the start, p[3] the '-', p[4] the end,
+        # and p[5] the context.
+        c.device_mem = p[2] + "-" + p[4]
+        c.context = p[5]
+
+ p[0] = c
+
+def p_ioportcon(p):
+ '''ioportcon : IOPORTCON NUMBER context
+ | IOPORTCON NUMBER MINUS NUMBER context'''
+ c = refpolicy.IoportCon()
+ if len(p) == 4:
+ c.ioport = p[2]
+ c.context = p[3]
+ else:
+        # Range form: p[2] is the start, p[3] the '-', p[4] the end,
+        # and p[5] the context.
+        c.ioport = p[2] + "-" + p[4]
+        c.context = p[5]
+
+ p[0] = c
+
+def p_pcidevicecon(p):
+ 'pcidevicecon : PCIDEVICECON NUMBER context'
+ c = refpolicy.PciDeviceCon()
+ c.device = p[2]
+ c.context = p[3]
+
+ p[0] = c
+
+def p_devicetreecon(p):
+ 'devicetreecon : DEVICETREECON NUMBER context'
+    c = refpolicy.DeviceTreeCon()
+ c.path = p[2]
+ c.context = p[3]
+
+ p[0] = c
+
+def p_mls_range_def(p):
+ '''mls_range_def : mls_level_def MINUS mls_level_def
+ | mls_level_def
+ '''
+ p[0] = p[1]
+ if len(p) > 2:
+ p[0] = p[0] + "-" + p[3]
+
+def p_mls_level_def(p):
+ '''mls_level_def : IDENTIFIER COLON comma_list
+ | IDENTIFIER
+ '''
+ p[0] = p[1]
+ if len(p) > 2:
+ p[0] = p[0] + ":" + ",".join(p[3])
+
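+# Editor's note (illustrative): the two MLS rules above flatten levels and
+# ranges back into strings - e.g. 's0:c0,c1 - s1' yields "s0:c0,c1-s1"
+# and a bare level 's0' passes through unchanged.
+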
+def p_type_def(p):
+ '''type_def : TYPE IDENTIFIER COMMA comma_list SEMI
+ | TYPE IDENTIFIER SEMI
+ | TYPE IDENTIFIER ALIAS names SEMI
+ | TYPE IDENTIFIER ALIAS names COMMA comma_list SEMI
+ '''
+ t = refpolicy.Type(p[2])
+ if len(p) == 6:
+ if p[3] == ',':
+ t.attributes.update(p[4])
+ else:
+ t.aliases = p[4]
+ elif len(p) > 4:
+ t.aliases = p[4]
+ if len(p) == 8:
+ t.attributes.update(p[6])
+ p[0] = t
+
+def p_attribute_def(p):
+ 'attribute_def : ATTRIBUTE IDENTIFIER SEMI'
+ a = refpolicy.Attribute(p[2])
+ p[0] = a
+
+def p_attribute_role_def(p):
+ 'attribute_role_def : ATTRIBUTE_ROLE IDENTIFIER SEMI'
+ a = refpolicy.Attribute_Role(p[2])
+ p[0] = a
+
+def p_typealias_def(p):
+ 'typealias_def : TYPEALIAS IDENTIFIER ALIAS names SEMI'
+ t = refpolicy.TypeAlias()
+ t.type = p[2]
+ t.aliases = p[4]
+ p[0] = t
+
+def p_role_def(p):
+ '''role_def : ROLE IDENTIFIER TYPES comma_list SEMI
+ | ROLE IDENTIFIER SEMI'''
+ r = refpolicy.Role()
+ r.role = p[2]
+ if len(p) > 4:
+ r.types.update(p[4])
+ p[0] = r
+
+def p_role_allow(p):
+ 'role_allow : ALLOW names names SEMI'
+ r = refpolicy.RoleAllow()
+ r.src_roles = p[2]
+ r.tgt_roles = p[3]
+ p[0] = r
+
+def p_permissive(p):
+ 'permissive : PERMISSIVE names SEMI'
+    # Permissive statements are parsed but ignored.
+    pass
+
+def p_avrule_def(p):
+ '''avrule_def : ALLOW names names COLON names names SEMI
+ | DONTAUDIT names names COLON names names SEMI
+ | AUDITALLOW names names COLON names names SEMI
+ | NEVERALLOW names names COLON names names SEMI
+ '''
+ a = refpolicy.AVRule()
+ if p[1] == 'dontaudit':
+ a.rule_type = refpolicy.AVRule.DONTAUDIT
+ elif p[1] == 'auditallow':
+ a.rule_type = refpolicy.AVRule.AUDITALLOW
+ elif p[1] == 'neverallow':
+ a.rule_type = refpolicy.AVRule.NEVERALLOW
+ a.src_types = p[2]
+ a.tgt_types = p[3]
+ a.obj_classes = p[5]
+ a.perms = p[6]
+ p[0] = a
+
+def p_typerule_def(p):
+ '''typerule_def : TYPE_TRANSITION names names COLON names IDENTIFIER SEMI
+ | TYPE_TRANSITION names names COLON names IDENTIFIER FILENAME SEMI
+ | TYPE_TRANSITION names names COLON names IDENTIFIER IDENTIFIER SEMI
+ | TYPE_CHANGE names names COLON names IDENTIFIER SEMI
+ | TYPE_MEMBER names names COLON names IDENTIFIER SEMI
+ '''
+ t = refpolicy.TypeRule()
+ if p[1] == 'type_change':
+ t.rule_type = refpolicy.TypeRule.TYPE_CHANGE
+ elif p[1] == 'type_member':
+ t.rule_type = refpolicy.TypeRule.TYPE_MEMBER
+ t.src_types = p[2]
+ t.tgt_types = p[3]
+ t.obj_classes = p[5]
+    t.dest_type = p[6]
+    if len(p) == 9:
+        # Only the type_transition variants with a trailing file name
+        # field (FILENAME or a second IDENTIFIER) carry p[7].
+        t.file_name = p[7]
+ p[0] = t
+
+def p_bool(p):
+ '''bool : BOOL IDENTIFIER TRUE SEMI
+ | BOOL IDENTIFIER FALSE SEMI'''
+ b = refpolicy.Bool()
+ b.name = p[2]
+ if p[3] == "true":
+ b.state = True
+ else:
+ b.state = False
+ p[0] = b
+
+def p_conditional(p):
+ ''' conditional : IF OPAREN cond_expr CPAREN OBRACE interface_stmts CBRACE
+ | IF OPAREN cond_expr CPAREN OBRACE interface_stmts CBRACE ELSE OBRACE interface_stmts CBRACE
+ '''
+ c = refpolicy.Conditional()
+ c.cond_expr = p[3]
+ collect(p[6], c, val=True)
+ if len(p) > 8:
+ collect(p[10], c, val=False)
+ p[0] = [c]
+
+def p_typeattribute_def(p):
+ '''typeattribute_def : TYPEATTRIBUTE IDENTIFIER comma_list SEMI'''
+ t = refpolicy.TypeAttribute()
+ t.type = p[2]
+ t.attributes.update(p[3])
+ p[0] = t
+
+def p_roleattribute_def(p):
+ '''roleattribute_def : ROLEATTRIBUTE IDENTIFIER comma_list SEMI'''
+ t = refpolicy.RoleAttribute()
+ t.role = p[2]
+ t.roleattributes.update(p[3])
+ p[0] = t
+
+def p_range_transition_def(p):
+ '''range_transition_def : RANGE_TRANSITION names names COLON names mls_range_def SEMI
+ | RANGE_TRANSITION names names names SEMI'''
+ pass
+
+def p_role_transition_def(p):
+ '''role_transition_def : ROLE_TRANSITION names names names SEMI'''
+ pass
+
+def p_cond_expr(p):
+ '''cond_expr : IDENTIFIER
+ | EXPL cond_expr
+ | cond_expr AMP AMP cond_expr
+ | cond_expr BAR BAR cond_expr
+ | cond_expr EQUAL EQUAL cond_expr
+ | cond_expr EXPL EQUAL cond_expr
+ '''
+ l = len(p)
+ if l == 2:
+ p[0] = [p[1]]
+ elif l == 3:
+ p[0] = [p[1]] + p[2]
+ else:
+ p[0] = p[1] + [p[2] + p[3]] + p[4]
+
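+# Editor's note (illustrative): cond_expr flattens to a list of strings,
+# e.g. '!foo && bar' becomes ['!', 'foo', '&&', 'bar'] and 'foo == bar'
+# becomes ['foo', '==', 'bar'].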
+
+#
+# Basic terminals
+#
+
+# Identifiers and lists of identifiers. These must be handled somewhat
+# gracefully. names returns an IdSet, and care must be taken that the set is
+# _assigned_ to an object (as opposed to passed to update) so that all of
+# the flags (e.g., IdSet.compliment) are carried along. The other terminals
+# return lists - this preserves ordering when it is important for
+# parsing (for example, interface_call must retain the argument ordering).
+# Otherwise the list should be used to update an IdSet.
+
+def p_names(p):
+ '''names : identifier
+ | nested_id_set
+ | asterisk
+ | TILDE identifier
+ | TILDE nested_id_set
+ | IDENTIFIER MINUS IDENTIFIER
+ '''
+ s = refpolicy.IdSet()
+ if len(p) < 3:
+ expand(p[1], s)
+ elif len(p) == 3:
+ expand(p[2], s)
+ s.compliment = True
+ else:
+        expand([p[1]], s)
+ s.add("-" + p[3])
+ p[0] = s
+
+def p_identifier(p):
+ 'identifier : IDENTIFIER'
+ p[0] = [p[1]]
+
+def p_asterisk(p):
+ 'asterisk : ASTERISK'
+ p[0] = [p[1]]
+
+def p_nested_id_set(p):
+ '''nested_id_set : OBRACE nested_id_list CBRACE
+ '''
+ p[0] = p[2]
+
+def p_nested_id_list(p):
+ '''nested_id_list : nested_id_element
+ | nested_id_list nested_id_element
+ '''
+ if len(p) == 2:
+ p[0] = p[1]
+ else:
+ p[0] = p[1] + p[2]
+
+def p_nested_id_element(p):
+ '''nested_id_element : identifier
+ | MINUS IDENTIFIER
+ | nested_id_set
+ '''
+ if len(p) == 2:
+ p[0] = p[1]
+ else:
+        # For now just leave the '-' prefix attached to the identifier.
+        p[0] = ["-" + p[2]]
+
+def p_comma_list(p):
+ '''comma_list : nested_id_list
+ | comma_list COMMA nested_id_list
+ '''
+ if len(p) > 2:
+ p[1] = p[1] + p[3]
+ p[0] = p[1]
+
+def p_optional_semi(p):
+ '''optional_semi : SEMI
+ | empty'''
+ pass
+
+
+#
+# Interface to the parser
+#
+
+def p_error(tok):
+ global error, parse_file, success, parser
+ error = "%s: Syntax error on line %d %s [type=%s]" % (parse_file, tok.lineno, tok.value, tok.type)
+ print error
+ success = False
+
+def prep_spt(spt):
+    if not spt:
+        return { }
+    spt_map = {}
+    for x in spt:
+        spt_map[x.name] = x
+    return spt_map
+
+parser = None
+lexer = None
+def create_globals(module, support, debug):
+ global parser, lexer, m, spt
+
+ if not parser:
+ lexer = lex.lex()
+ parser = yacc.yacc(method="LALR", debug=debug, write_tables=0)
+
+ if module is not None:
+ m = module
+ else:
+ m = refpolicy.Module()
+
+ if not support:
+ spt = refpolicy.SupportMacros()
+ else:
+ spt = support
+
+def parse(text, module=None, support=None, debug=False):
+ create_globals(module, support, debug)
+ global error, parser, lexer, success
+
+ success = True
+
+ try:
+ parser.parse(text, debug=debug, lexer=lexer)
+ except Exception, e:
+ parser = None
+ lexer = None
+ error = "internal parser error: %s" % str(e) + "\n" + traceback.format_exc()
+
+ if not success:
+ # force the parser and lexer to be rebuilt - we have some problems otherwise
+ parser = None
+ msg = 'could not parse text: "%s"' % error
+ raise ValueError(msg)
+ return m
+
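+# Illustrative sketch (hypothetical helper, not part of upstream
+# sepolgen): parse() is the typical entry point; it returns the
+# refpolicy.Module (the global m) with the parsed statements collected
+# in its children.
+def _demo_parse():
+    mod = parse("type foo_t;\nallow foo_t self:file { read write };\n")
+    return mod
+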
+def list_headers(root):
+ modules = []
+ support_macros = None
+
+ for dirpath, dirnames, filenames in os.walk(root):
+ for name in filenames:
+ modname = os.path.splitext(name)
+ filename = os.path.join(dirpath, name)
+
+ if modname[1] == '.spt':
+ if name == "obj_perm_sets.spt":
+ support_macros = filename
+ elif len(re.findall("patterns", modname[0])):
+ modules.append((modname[0], filename))
+ elif modname[1] == '.if':
+ modules.append((modname[0], filename))
+
+ return (modules, support_macros)
+
+
+def parse_headers(root, output=None, expand=True, debug=False):
+ import util
+
+ headers = refpolicy.Headers()
+
+ modules = []
+ support_macros = None
+
+ if os.path.isfile(root):
+ name = os.path.split(root)[1]
+ if name == '':
+ raise ValueError("Invalid file name %s" % root)
+ modname = os.path.splitext(name)
+ modules.append((modname[0], root))
+ all_modules, support_macros = list_headers(defaults.headers())
+ else:
+ modules, support_macros = list_headers(root)
+
+ if expand and not support_macros:
+ raise ValueError("could not find support macros (obj_perm_sets.spt)")
+
+ def o(msg):
+ if output:
+ output.write(msg)
+
+ def parse_file(f, module, spt=None):
+ global parse_file
+ if debug:
+ o("parsing file %s\n" % f)
+ try:
+ fd = open(f)
+ txt = fd.read()
+ fd.close()
+ parse_file = f
+ parse(txt, module, spt, debug)
+ except IOError, e:
+ return
+ except ValueError, e:
+ raise ValueError("error parsing file %s: %s" % (f, str(e)))
+
+ spt = None
+ if support_macros:
+ o("Parsing support macros (%s): " % support_macros)
+ spt = refpolicy.SupportMacros()
+ parse_file(support_macros, spt)
+
+ headers.children.append(spt)
+
+ # FIXME: Total hack - add in can_exec rather than parse the insanity
+ # of misc_macros. We are just going to pretend that this is an interface
+ # to make the expansion work correctly.
+ can_exec = refpolicy.Interface("can_exec")
+ av = access.AccessVector(["$1","$2","file","execute_no_trans","open", "read",
+ "getattr","lock","execute","ioctl"])
+
+ can_exec.children.append(refpolicy.AVRule(av))
+ headers.children.append(can_exec)
+
+ o("done.\n")
+
+ if output and not debug:
+ status = util.ConsoleProgressBar(sys.stdout, steps=len(modules))
+ status.start("Parsing interface files")
+
+ failures = []
+ for x in modules:
+ m = refpolicy.Module()
+ m.name = x[0]
+ try:
+ if expand:
+ parse_file(x[1], m, spt)
+ else:
+ parse_file(x[1], m)
+ except ValueError, e:
+ o(str(e) + "\n")
+ failures.append(x[1])
+ continue
+
+ headers.children.append(m)
+ if output and not debug:
+ status.step()
+
+    if len(failures):
+        o("failed to parse some headers: %s\n" % ", ".join(failures))
+
+ return headers
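+
+# Editor's note (illustrative): a typical invocation is
+# parse_headers(defaults.headers()), which parses obj_perm_sets.spt
+# first (so object/permission macros can be expanded) and returns a
+# refpolicy.Headers tree whose children are the parsed modules.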
diff --git a/lib/python2.7/site-packages/setoolsgui/sepolgen/refpolicy.py b/lib/python2.7/site-packages/setoolsgui/sepolgen/refpolicy.py
new file mode 100644
index 0000000..b8ed5c1
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/sepolgen/refpolicy.py
@@ -0,0 +1,917 @@
+# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
+#
+# Copyright (C) 2006 Red Hat
+# see file 'COPYING' for use and warranty information
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2 only
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+
+import string
+import itertools
+import selinux
+
+# OVERVIEW
+#
+# This file contains objects and functions used to represent the reference
+# policy (including the headers, M4 macros, and policy language statements).
+#
+# This representation is very different from the semantic representation
+# used in libsepol. Instead, it is a more typical abstract representation
+# used by the first stage of compilers. It is basically a parse tree.
+#
+# This choice is intentional as it allows us to handle the unprocessed
+# M4 statements - including the $1 style arguments - and to more easily generate
+# the data structures that we need for policy generation.
+#
+
+# Constants for referring to fields
+SRC_TYPE = 0
+TGT_TYPE = 1
+OBJ_CLASS = 2
+PERMS = 3
+ROLE = 4
+DEST_TYPE = 5
+
+# String representations of the above constants
+field_to_str = ["source", "target", "object", "permission", "role", "destination" ]
+str_to_field = { "source" : SRC_TYPE, "target" : TGT_TYPE, "object" : OBJ_CLASS,
+ "permission" : PERMS, "role" : ROLE, "destination" : DEST_TYPE }
+
+# Base Classes
+
+class PolicyBase:
+ def __init__(self, parent=None):
+        self.parent = parent
+ self.comment = None
+
+class Node(PolicyBase):
+ """Base class objects produced from parsing the reference policy.
+
+ The Node class is used as the base class for any non-leaf
+ object produced by parsing the reference policy. This object
+ should contain a reference to its parent (or None for a top-level
+ object) and 0 or more children.
+
+ The general idea here is to have a very simple tree structure. Children
+ are not separated out by type. Instead the tree structure represents
+ fairly closely the real structure of the policy statements.
+
+ The object should be iterable - by default over all children but
+ subclasses are free to provide additional iterators over a subset
+    of their children (see Interface for example).
+ """
+
+ def __init__(self, parent=None):
+ PolicyBase.__init__(self, parent)
+ self.children = []
+
+ def __iter__(self):
+ return iter(self.children)
+
+ # Not all of the iterators will return something on all Nodes, but
+ # they won't explode either. Putting them here is just easier.
+
+ # Top level nodes
+
+ def nodes(self):
+ return itertools.ifilter(lambda x: isinstance(x, Node), walktree(self))
+
+ def modules(self):
+ return itertools.ifilter(lambda x: isinstance(x, Module), walktree(self))
+
+ def interfaces(self):
+ return itertools.ifilter(lambda x: isinstance(x, Interface), walktree(self))
+
+ def templates(self):
+ return itertools.ifilter(lambda x: isinstance(x, Template), walktree(self))
+
+ def support_macros(self):
+ return itertools.ifilter(lambda x: isinstance(x, SupportMacros), walktree(self))
+
+ # Common policy statements
+
+ def module_declarations(self):
+ return itertools.ifilter(lambda x: isinstance(x, ModuleDeclaration), walktree(self))
+
+ def interface_calls(self):
+ return itertools.ifilter(lambda x: isinstance(x, InterfaceCall), walktree(self))
+
+ def avrules(self):
+ return itertools.ifilter(lambda x: isinstance(x, AVRule), walktree(self))
+
+ def typerules(self):
+ return itertools.ifilter(lambda x: isinstance(x, TypeRule), walktree(self))
+
+ def typeattributes(self):
+ """Iterate over all of the TypeAttribute children of this Interface."""
+ return itertools.ifilter(lambda x: isinstance(x, TypeAttribute), walktree(self))
+
+ def roleattributes(self):
+ """Iterate over all of the RoleAttribute children of this Interface."""
+ return itertools.ifilter(lambda x: isinstance(x, RoleAttribute), walktree(self))
+
+ def requires(self):
+ return itertools.ifilter(lambda x: isinstance(x, Require), walktree(self))
+
+ def roles(self):
+ return itertools.ifilter(lambda x: isinstance(x, Role), walktree(self))
+
+ def role_allows(self):
+ return itertools.ifilter(lambda x: isinstance(x, RoleAllow), walktree(self))
+
+ def role_types(self):
+ return itertools.ifilter(lambda x: isinstance(x, RoleType), walktree(self))
+
+ def __str__(self):
+ if self.comment:
+ return str(self.comment) + "\n" + self.to_string()
+ else:
+ return self.to_string()
+
+ def __repr__(self):
+ return "<%s(%s)>" % (self.__class__.__name__, self.to_string())
+
+ def to_string(self):
+ return ""
+
+
+class Leaf(PolicyBase):
+ def __init__(self, parent=None):
+ PolicyBase.__init__(self, parent)
+
+ def __str__(self):
+ if self.comment:
+ return str(self.comment) + "\n" + self.to_string()
+ else:
+ return self.to_string()
+
+ def __repr__(self):
+ return "<%s(%s)>" % (self.__class__.__name__, self.to_string())
+
+ def to_string(self):
+ return ""
+
+
+
+# Utility functions
+
+def walktree(node, depthfirst=True, showdepth=False, type=None):
+ """Iterate over a Node and its Children.
+
+ The walktree function iterates over a tree containing Nodes and
+ leaf objects. The iteration can perform a depth first or a breadth
+ first traversal of the tree (controlled by the depthfirst
+    parameter). The passed-in node will be returned.
+
+ This function will only work correctly for trees - arbitrary graphs
+ will likely cause infinite looping.
+ """
+ # We control depth first / versus breadth first by
+ # how we pop items off of the node stack.
+ if depthfirst:
+ index = -1
+ else:
+ index = 0
+
+ stack = [(node, 0)]
+ while len(stack) > 0:
+ cur, depth = stack.pop(index)
+ if showdepth:
+ yield cur, depth
+ else:
+ yield cur
+
+ # If the node is not a Node instance it must
+ # be a leaf - so no need to add it to the stack
+ if isinstance(cur, Node):
+ items = []
+ i = len(cur.children) - 1
+ while i >= 0:
+ if type is None or isinstance(cur.children[i], type):
+ items.append((cur.children[i], depth + 1))
+ i -= 1
+
+ stack.extend(items)
+
+def walknode(node, type=None):
+ """Iterate over the direct children of a Node.
+
+    The walknode function iterates over the direct children of a Node.
+    Unlike walktree it does not return the passed-in node or descend
+    into child Node objects (that is, it does not go beyond the current
+    level in the tree).
+ """
+ for x in node:
+ if type is None or isinstance(x, type):
+ yield x
+
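+# Illustrative sketch (hypothetical helper, not part of upstream
+# sepolgen): the iterators defined on Node are thin wrappers over
+# walktree, e.g. counting interface definitions in a parsed tree.
+def _count_interfaces(head):
+    # head is any parsed Node tree (e.g. the result of parse_headers).
+    return len(list(head.interfaces()))
+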
+
+def list_to_space_str(s, cont=('{', '}')):
+ """Convert a set (or any sequence type) into a string representation
+ formatted to match SELinux space separated list conventions.
+
+ For example the list ['read', 'write'] would be converted into:
+ '{ read write }'
+ """
+    l = len(s)
+    if l < 1:
+        raise ValueError("cannot convert 0 len set to string")
+    joined = " ".join(s)
+    if l == 1:
+        return joined
+    else:
+        return cont[0] + " " + joined + " " + cont[1]
+
+def list_to_comma_str(s):
+ l = len(s)
+ if l < 1:
+ raise ValueError("cannot conver 0 len set to comma string")
+
+ return ", ".join(s)
+
+# Basic SELinux types
+
+class IdSet(set):
+ def __init__(self, list=None):
+ if list:
+ set.__init__(self, list)
+ else:
+ set.__init__(self)
+ self.compliment = False
+
+ def to_space_str(self):
+ return list_to_space_str(self)
+
+ def to_comma_str(self):
+ return list_to_comma_str(self)
+
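+# Editor's note (illustrative): IdSet renders in SELinux space-separated
+# list syntax - IdSet(["read", "write"]).to_space_str() gives
+# '{ read write }' (element order unspecified), while a single-element
+# set renders bare, e.g. 'read'.
+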
+class SecurityContext(Leaf):
+ """An SELinux security context with optional MCS / MLS fields."""
+ def __init__(self, context=None, parent=None):
+ """Create a SecurityContext object, optionally from a string.
+
+ Parameters:
+ [context] - string representing a security context. Same format
+ as a string passed to the from_string method.
+ """
+ Leaf.__init__(self, parent)
+ self.user = ""
+ self.role = ""
+ self.type = ""
+ self.level = None
+ if context is not None:
+ self.from_string(context)
+
+ def from_string(self, context):
+ """Parse a string representing a context into a SecurityContext.
+
+ The string should be in the standard format - e.g.,
+ 'user:role:type:level'.
+
+ Raises ValueError if the string is not parsable as a security context.
+ """
+ fields = context.split(":")
+ if len(fields) < 3:
+ raise ValueError("context string [%s] not in a valid format" % context)
+
+ self.user = fields[0]
+ self.role = fields[1]
+ self.type = fields[2]
+ if len(fields) > 3:
+ # FUTURE - normalize level fields to allow more comparisons to succeed.
+ self.level = string.join(fields[3:], ':')
+ else:
+ self.level = None
+
+ def __eq__(self, other):
+ """Compare two SecurityContext objects - all fields must be exactly the
+ the same for the comparison to work. It is possible for the level fields
+ to be semantically the same yet syntactically different - in this case
+ this function will return false.
+ """
+ return self.user == other.user and \
+ self.role == other.role and \
+ self.type == other.type and \
+ self.level == other.level
+
+ def to_string(self, default_level=None):
+ """Return a string representing this security context.
+
+        By default the string will contain an MCS / MLS level, taken
+        from the default_level argument if none was set on this
+        object.
+
+ Arguments:
+ default_level - the default level to use if self.level is an
+ empty string.
+
+ Returns:
+        A string representing the security context in the form
+ 'user:role:type:level'.
+ """
+ fields = [self.user, self.role, self.type]
+ if self.level is None:
+ if default_level is None:
+ if selinux.is_selinux_mls_enabled() == 1:
+ fields.append("s0")
+ else:
+ fields.append(default_level)
+ else:
+ fields.append(self.level)
+ return ":".join(fields)
+
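+# Illustrative sketch (hypothetical helper, not part of upstream
+# sepolgen): round-tripping a context string through SecurityContext.
+def _demo_context():
+    sc = SecurityContext("user_u:object_r:etc_t:s0")
+    # from_string() split the four fields; to_string() reassembles them.
+    return (sc.user, sc.role, sc.type, sc.to_string())
+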
+class ObjectClass(Leaf):
+ """SELinux object class and permissions.
+
+ This class is a basic representation of an SELinux object
+ class - it does not represent separate common permissions -
+ just the union of the common and class specific permissions.
+ It is meant to be convenient for policy generation.
+ """
+ def __init__(self, name="", parent=None):
+ Leaf.__init__(self, parent)
+ self.name = name
+ self.perms = IdSet()
+
+# Basic statements
+
+class TypeAttribute(Leaf):
+ """SElinux typeattribute statement.
+
+ This class represents a typeattribute statement.
+ """
+ def __init__(self, parent=None):
+ Leaf.__init__(self, parent)
+ self.type = ""
+ self.attributes = IdSet()
+
+ def to_string(self):
+ return "typeattribute %s %s;" % (self.type, self.attributes.to_comma_str())
+
+class RoleAttribute(Leaf):
+ """SElinux roleattribute statement.
+
+ This class represents a roleattribute statement.
+ """
+ def __init__(self, parent=None):
+ Leaf.__init__(self, parent)
+ self.role = ""
+ self.roleattributes = IdSet()
+
+ def to_string(self):
+ return "roleattribute %s %s;" % (self.role, self.roleattributes.to_comma_str())
+
+
+class Role(Leaf):
+ def __init__(self, parent=None):
+ Leaf.__init__(self, parent)
+ self.role = ""
+ self.types = IdSet()
+
+ def to_string(self):
+ s = ""
+ for t in self.types:
+ s += "role %s types %s;\n" % (self.role, t)
+ return s
+
+class Type(Leaf):
+ def __init__(self, name="", parent=None):
+ Leaf.__init__(self, parent)
+ self.name = name
+ self.attributes = IdSet()
+ self.aliases = IdSet()
+
+ def to_string(self):
+ s = "type %s" % self.name
+ if len(self.aliases) > 0:
+ s = s + "alias %s" % self.aliases.to_space_str()
+ if len(self.attributes) > 0:
+ s = s + ", %s" % self.attributes.to_comma_str()
+ return s + ";"
+
+class TypeAlias(Leaf):
+ def __init__(self, parent=None):
+ Leaf.__init__(self, parent)
+ self.type = ""
+ self.aliases = IdSet()
+
+ def to_string(self):
+ return "typealias %s alias %s;" % (self.type, self.aliases.to_space_str())
+
+class Attribute(Leaf):
+ def __init__(self, name="", parent=None):
+ Leaf.__init__(self, parent)
+ self.name = name
+
+ def to_string(self):
+ return "attribute %s;" % self.name
+
+class Attribute_Role(Leaf):
+ def __init__(self, name="", parent=None):
+ Leaf.__init__(self, parent)
+ self.name = name
+
+ def to_string(self):
+ return "attribute_role %s;" % self.name
+
+
+# Classes representing rules
+
+class AVRule(Leaf):
+ """SELinux access vector (AV) rule.
+
+ The AVRule class represents all varieties of AV rules including
+    allow, dontaudit, auditallow, and neverallow (indicated by the flags
+    self.ALLOW, self.DONTAUDIT, self.AUDITALLOW, and self.NEVERALLOW
+    respectively).
+
+ The source and target types, object classes, and perms are all represented
+ by sets containing strings. Sets are used to make it simple to add
+ strings repeatedly while avoiding duplicates.
+
+ No checking is done to make certain that the symbols are valid or
+ consistent (e.g., perms that don't match the object classes). It is
+ even possible to put invalid types like '$1' into the rules to allow
+ storage of the reference policy interfaces.
+ """
+ ALLOW = 0
+ DONTAUDIT = 1
+ AUDITALLOW = 2
+ NEVERALLOW = 3
+
+ def __init__(self, av=None, parent=None):
+ Leaf.__init__(self, parent)
+ self.src_types = IdSet()
+ self.tgt_types = IdSet()
+ self.obj_classes = IdSet()
+ self.perms = IdSet()
+ self.rule_type = self.ALLOW
+ if av:
+ self.from_av(av)
+
+    def __rule_type_str(self):
+        if self.rule_type == self.ALLOW:
+            return "allow"
+        elif self.rule_type == self.DONTAUDIT:
+            return "dontaudit"
+        elif self.rule_type == self.AUDITALLOW:
+            return "auditallow"
+        else:
+            return "neverallow"
+
+ def from_av(self, av):
+ """Add the access from an access vector to this allow
+ rule.
+ """
+ self.src_types.add(av.src_type)
+ if av.src_type == av.tgt_type:
+ self.tgt_types.add("self")
+ else:
+ self.tgt_types.add(av.tgt_type)
+ self.obj_classes.add(av.obj_class)
+ self.perms.update(av.perms)
+
+ def to_string(self):
+ """Return a string representation of the rule
+ that is a valid policy language representation (assuming
+        that the types, object class, etc. are valid).
+ """
+ return "%s %s %s:%s %s;" % (self.__rule_type_str(),
+ self.src_types.to_space_str(),
+ self.tgt_types.to_space_str(),
+ self.obj_classes.to_space_str(),
+ self.perms.to_space_str())
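+
+# Illustrative sketch (hypothetical helper, not part of upstream
+# sepolgen): building an allow rule by hand. Perms render in an
+# unspecified order since IdSet is a plain set.
+def _demo_avrule():
+    r = AVRule()
+    r.src_types.add("foo_t")
+    r.tgt_types.add("bar_t")
+    r.obj_classes.add("file")
+    r.perms.update(["read", "getattr"])
+    return r.to_string()  # e.g. 'allow foo_t bar_t:file { read getattr };'
+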
+class TypeRule(Leaf):
+ """SELinux type rules.
+
+    This class is very similar to the AVRule class, but is for representing
+    the type rules (type_transition, type_change, and type_member). The major
+    difference is the lack of perms and the presence of a single destination
+    type.
+ """
+ TYPE_TRANSITION = 0
+ TYPE_CHANGE = 1
+ TYPE_MEMBER = 2
+
+ def __init__(self, parent=None):
+ Leaf.__init__(self, parent)
+ self.src_types = IdSet()
+ self.tgt_types = IdSet()
+ self.obj_classes = IdSet()
+ self.dest_type = ""
+ self.rule_type = self.TYPE_TRANSITION
+
+ def __rule_type_str(self):
+ if self.rule_type == self.TYPE_TRANSITION:
+ return "type_transition"
+ elif self.rule_type == self.TYPE_CHANGE:
+ return "type_change"
+ else:
+ return "type_member"
+
+ def to_string(self):
+ return "%s %s %s:%s %s;" % (self.__rule_type_str(),
+ self.src_types.to_space_str(),
+ self.tgt_types.to_space_str(),
+ self.obj_classes.to_space_str(),
+ self.dest_type)
+
+class RoleAllow(Leaf):
+ def __init__(self, parent=None):
+ Leaf.__init__(self, parent)
+ self.src_roles = IdSet()
+ self.tgt_roles = IdSet()
+
+ def to_string(self):
+ return "allow %s %s;" % (self.src_roles.to_comma_str(),
+ self.tgt_roles.to_comma_str())
+
+class RoleType(Leaf):
+ def __init__(self, parent=None):
+ Leaf.__init__(self, parent)
+ self.role = ""
+ self.types = IdSet()
+
+ def to_string(self):
+ s = ""
+ for t in self.types:
+ s += "role %s types %s;\n" % (self.role, t)
+ return s
+
+class ModuleDeclaration(Leaf):
+ def __init__(self, parent=None):
+ Leaf.__init__(self, parent)
+ self.name = ""
+ self.version = ""
+ self.refpolicy = False
+
+ def to_string(self):
+ if self.refpolicy:
+ return "policy_module(%s, %s)" % (self.name, self.version)
+ else:
+ return "module %s %s;" % (self.name, self.version)
+
+class Conditional(Node):
+ def __init__(self, parent=None):
+ Node.__init__(self, parent)
+ self.cond_expr = []
+
+ def to_string(self):
+ return "[If %s]" % list_to_space_str(self.cond_expr, cont=("", ""))
+
+class Bool(Leaf):
+ def __init__(self, parent=None):
+ Leaf.__init__(self, parent)
+ self.name = ""
+ self.state = False
+
+ def to_string(self):
+ s = "bool %s " % self.name
+        if self.state:
+ return s + "true"
+ else:
+ return s + "false"
+
+class InitialSid(Leaf):
+    def __init__(self, parent=None):
+ Leaf.__init__(self, parent)
+ self.name = ""
+ self.context = None
+
+ def to_string(self):
+ return "sid %s %s" % (self.name, str(self.context))
+
+class GenfsCon(Leaf):
+ def __init__(self, parent=None):
+ Leaf.__init__(self, parent)
+ self.filesystem = ""
+ self.path = ""
+ self.context = None
+
+ def to_string(self):
+ return "genfscon %s %s %s" % (self.filesystem, self.path, str(self.context))
+
+class FilesystemUse(Leaf):
+ XATTR = 1
+ TRANS = 2
+ TASK = 3
+
+ def __init__(self, parent=None):
+ Leaf.__init__(self, parent)
+ self.type = self.XATTR
+ self.filesystem = ""
+ self.context = None
+
+    def to_string(self):
+        s = ""
+        if self.type == self.XATTR:
+            s = "fs_use_xattr"
+        elif self.type == self.TRANS:
+            s = "fs_use_trans"
+        elif self.type == self.TASK:
+            s = "fs_use_task"
+
+        return "%s %s %s;" % (s, self.filesystem, str(self.context))
+
+class PortCon(Leaf):
+ def __init__(self, parent=None):
+ Leaf.__init__(self, parent)
+ self.port_type = ""
+ self.port_number = ""
+ self.context = None
+
+ def to_string(self):
+ return "portcon %s %s %s" % (self.port_type, self.port_number, str(self.context))
+
+class NodeCon(Leaf):
+ def __init__(self, parent=None):
+ Leaf.__init__(self, parent)
+ self.start = ""
+ self.end = ""
+ self.context = None
+
+ def to_string(self):
+ return "nodecon %s %s %s" % (self.start, self.end, str(self.context))
+
+class NetifCon(Leaf):
+ def __init__(self, parent=None):
+ Leaf.__init__(self, parent)
+ self.interface = ""
+ self.interface_context = None
+ self.packet_context = None
+
+ def to_string(self):
+ return "netifcon %s %s %s" % (self.interface, str(self.interface_context),
+ str(self.packet_context))
+
+class PirqCon(Leaf):
+ def __init__(self, parent=None):
+ Leaf.__init__(self, parent)
+ self.pirq_number = ""
+ self.context = None
+
+ def to_string(self):
+ return "pirqcon %s %s" % (self.pirq_number, str(self.context))
+
+class IomemCon(Leaf):
+ def __init__(self, parent=None):
+ Leaf.__init__(self, parent)
+ self.device_mem = ""
+ self.context = None
+
+ def to_string(self):
+ return "iomemcon %s %s" % (self.device_mem, str(self.context))
+
+class IoportCon(Leaf):
+ def __init__(self, parent=None):
+ Leaf.__init__(self, parent)
+ self.ioport = ""
+ self.context = None
+
+ def to_string(self):
+ return "ioportcon %s %s" % (self.ioport, str(self.context))
+
+class PciDeviceCon(Leaf):
+ def __init__(self, parent=None):
+ Leaf.__init__(self, parent)
+ self.device = ""
+ self.context = None
+
+ def to_string(self):
+ return "pcidevicecon %s %s" % (self.device, str(self.context))
+
+class DeviceTreeCon(Leaf):
+ def __init__(self, parent=None):
+ Leaf.__init__(self, parent)
+ self.path = ""
+ self.context = None
+
+ def to_string(self):
+ return "devicetreecon %s %s" % (self.path, str(self.context))
+
+# Reference policy specific types
+
+def print_tree(head):
+ for node, depth in walktree(head, showdepth=True):
+ s = ""
+ for i in range(depth):
+ s = s + "\t"
+ print s + str(node)
+
+
+class Headers(Node):
+ def __init__(self, parent=None):
+ Node.__init__(self, parent)
+
+ def to_string(self):
+ return "[Headers]"
+
+
+class Module(Node):
+ def __init__(self, parent=None):
+ Node.__init__(self, parent)
+
+ def to_string(self):
+ return ""
+
+class Interface(Node):
+ """A reference policy interface definition.
+
+ This class represents a reference policy interface definition.
+ """
+ def __init__(self, name="", parent=None):
+ Node.__init__(self, parent)
+ self.name = name
+
+ def to_string(self):
+ return "[Interface name: %s]" % self.name
+
+class TunablePolicy(Node):
+ def __init__(self, parent=None):
+ Node.__init__(self, parent)
+ self.cond_expr = []
+
+ def to_string(self):
+ return "[Tunable Policy %s]" % list_to_space_str(self.cond_expr, cont=("", ""))
+
+class Template(Node):
+ def __init__(self, name="", parent=None):
+ Node.__init__(self, parent)
+ self.name = name
+
+ def to_string(self):
+ return "[Template name: %s]" % self.name
+
+class IfDef(Node):
+ def __init__(self, name="", parent=None):
+ Node.__init__(self, parent)
+ self.name = name
+
+ def to_string(self):
+ return "[Ifdef name: %s]" % self.name
+
+class InterfaceCall(Leaf):
+ def __init__(self, ifname="", parent=None):
+ Leaf.__init__(self, parent)
+ self.ifname = ifname
+ self.args = []
+ self.comments = []
+
+ def matches(self, other):
+ if self.ifname != other.ifname:
+ return False
+ if len(self.args) != len(other.args):
+ return False
+ for a,b in zip(self.args, other.args):
+ if a != b:
+ return False
+ return True
+
+    def to_string(self):
+        s = "%s(" % self.ifname
+        i = 0
+        for a in self.args:
+            if isinstance(a, list):
+                arg = list_to_space_str(a)
+            else:
+                arg = a
+
+            if i != 0:
+                s = s + ", %s" % arg
+            else:
+                s = s + arg
+            i += 1
+        return s + ")"
+
+class OptionalPolicy(Node):
+ def __init__(self, parent=None):
+ Node.__init__(self, parent)
+
+ def to_string(self):
+ return "[Optional Policy]"
+
+class SupportMacros(Node):
+ def __init__(self, parent=None):
+ Node.__init__(self, parent)
+ self.map = None
+
+ def to_string(self):
+ return "[Support Macros]"
+
+ def __expand_perm(self, perm):
+ # Recursive expansion - the assumption is that these
+ # are ordered correctly so that no macro is used before
+ # it is defined
+ s = set()
+ if self.map.has_key(perm):
+ for p in self.by_name(perm):
+ s.update(self.__expand_perm(p))
+ else:
+ s.add(perm)
+ return s
+
+ def __gen_map(self):
+ self.map = {}
+ for x in self:
+ exp_perms = set()
+ for perm in x.perms:
+ exp_perms.update(self.__expand_perm(perm))
+ self.map[x.name] = exp_perms
+
+ def by_name(self, name):
+ if not self.map:
+ self.__gen_map()
+ return self.map[name]
+
+ def has_key(self, name):
+ if not self.map:
+ self.__gen_map()
+ return self.map.has_key(name)
+
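+# Editor's note (illustrative): the map is built lazily, so
+# by_name('read_file_perms') - assuming such a macro was parsed from
+# obj_perm_sets.spt - returns the fully expanded permission set, with
+# nested macros resolved recursively by __expand_perm.
+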
+class Require(Leaf):
+ def __init__(self, parent=None):
+ Leaf.__init__(self, parent)
+ self.types = IdSet()
+ self.obj_classes = { }
+ self.roles = IdSet()
+ self.data = IdSet()
+ self.users = IdSet()
+
+ def add_obj_class(self, obj_class, perms):
+ p = self.obj_classes.setdefault(obj_class, IdSet())
+ p.update(perms)
+
+
+ def to_string(self):
+ s = []
+ s.append("require {")
+ for type in self.types:
+ s.append("\ttype %s;" % type)
+ for obj_class, perms in self.obj_classes.items():
+ s.append("\tclass %s %s;" % (obj_class, perms.to_space_str()))
+ for role in self.roles:
+ s.append("\trole %s;" % role)
+ for bool in self.data:
+ s.append("\tbool %s;" % bool)
+ for user in self.users:
+ s.append("\tuser %s;" % user)
+ s.append("}")
+
+ # Handle empty requires
+ if len(s) == 2:
+ return ""
+
+ return "\n".join(s)
+
+
+class ObjPermSet:
+ def __init__(self, name):
+ self.name = name
+ self.perms = set()
+
+    def to_string(self):
+        # self.perms may be a plain set (no to_space_str method), so use
+        # the module-level helper.
+        return "define(`%s', `%s')" % (self.name, list_to_space_str(self.perms))
+
+class ClassMap:
+ def __init__(self, obj_class, perms):
+ self.obj_class = obj_class
+ self.perms = perms
+
+ def to_string(self):
+ return self.obj_class + ": " + self.perms
+
+class Comment:
+ def __init__(self, l=None):
+ if l:
+ self.lines = l
+ else:
+ self.lines = []
+
+ def to_string(self):
+ # If there are no lines, treat this as a spacer between
+ # policy statements and return a new line.
+ if len(self.lines) == 0:
+ return ""
+ else:
+ out = []
+ for line in self.lines:
+ out.append("#" + line)
+ return "\n".join(out)
+
+ def merge(self, other):
+ if len(other.lines):
+ for line in other.lines:
+ if line != "":
+ self.lines.append(line)
+
+ def __str__(self):
+ return self.to_string()
+
+
diff --git a/lib/python2.7/site-packages/setoolsgui/sepolgen/sepolgeni18n.py b/lib/python2.7/site-packages/setoolsgui/sepolgen/sepolgeni18n.py
new file mode 100644
index 0000000..998c435
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/sepolgen/sepolgeni18n.py
@@ -0,0 +1,26 @@
+# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
+#
+# Copyright (C) 2006 Red Hat
+# see file 'COPYING' for use and warranty information
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2 only
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+
+try:
+ import gettext
+ t = gettext.translation( 'yumex' )
+ _ = t.gettext
+except:
+ def _(str):
+ return str
diff --git a/lib/python2.7/site-packages/setoolsgui/sepolgen/util.py b/lib/python2.7/site-packages/setoolsgui/sepolgen/util.py
new file mode 100644
index 0000000..74a11f5
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/sepolgen/util.py
@@ -0,0 +1,87 @@
+# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
+#
+# Copyright (C) 2006 Red Hat
+# see file 'COPYING' for use and warranty information
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2 only
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+
+class ConsoleProgressBar:
+ def __init__(self, out, steps=100, indicator='#'):
+ self.blocks = 0
+ self.current = 0
+ self.steps = steps
+ self.indicator = indicator
+ self.out = out
+ self.done = False
+
+ def start(self, message=None):
+ self.done = False
+ if message:
+ self.out.write('\n%s:\n' % message)
+ self.out.write('%--10---20---30---40---50---60---70---80---90--100\n')
+
+ def step(self, n=1):
+ self.current += n
+
+ old = self.blocks
+ self.blocks = int(round(self.current / float(self.steps) * 100) / 2)
+
+ if self.blocks > 50:
+ self.blocks = 50
+
+ new = self.blocks - old
+
+ self.out.write(self.indicator * new)
+ self.out.flush()
+
+ if self.blocks == 50 and not self.done:
+ self.done = True
+ self.out.write("\n")
+
+def set_to_list(s):
+ l = []
+ l.extend(s)
+ return l
+
+def first(s, sorted=False):
+ """
+ Return the first element of a set.
+
+    It is sometimes useful to return the first element from a set but,
+    because sets are not indexable, this is rather hard. This function
+    will return the first element from a set. If sorted is True, then
+    the set will first be sorted (making this an expensive operation).
+    Otherwise an arbitrary element will be returned (as sets are not ordered).
+ """
+ if not len(s):
+ raise IndexError("empty containter")
+
+ if sorted:
+ l = set_to_list(s)
+ l.sort()
+ return l[0]
+ else:
+ for x in s:
+ return x
+
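+# Editor's note (illustrative): first(set([3, 1, 2]), sorted=True)
+# returns 1; without sorted=True whichever element iteration yields
+# first is returned.
+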
+if __name__ == "__main__":
+ import sys
+ import time
+ p = ConsoleProgressBar(sys.stdout, steps=999)
+ p.start("computing pi")
+ for i in range(999):
+ p.step()
+ time.sleep(0.001)
+
diff --git a/lib/python2.7/site-packages/setoolsgui/sepolgen/yacc.py b/lib/python2.7/site-packages/setoolsgui/sepolgen/yacc.py
new file mode 100644
index 0000000..bc4536d
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/sepolgen/yacc.py
@@ -0,0 +1,2209 @@
+#-----------------------------------------------------------------------------
+# ply: yacc.py
+#
+# Author(s): David M. Beazley (dave@dabeaz.com)
+#
+# Copyright (C) 2001-2006, David M. Beazley
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+# See the file COPYING for a complete copy of the LGPL.
+#
+#
+# This implements an LR parser that is constructed from grammar rules defined
+# as Python functions. The grammar is specified by supplying the BNF inside
+# Python documentation strings. The inspiration for this technique was borrowed
+# from John Aycock's Spark parsing system. PLY might be viewed as a cross between
+# Spark and the GNU bison utility.
+#
+# The current implementation is only somewhat object-oriented. The
+# LR parser itself is defined in terms of an object (which allows multiple
+# parsers to co-exist). However, most of the variables used during table
+# construction are defined in terms of global variables. Users shouldn't
+# notice unless they are trying to define multiple parsers at the same
+# time using threads (in which case they should have their head examined).
+#
+# This implementation supports both SLR and LALR(1) parsing. LALR(1)
+# support was originally implemented by Elias Ioup (ezioup@alumni.uchicago.edu),
+# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles,
+# Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced
+# by the more efficient DeRemer and Pennello algorithm.
+#
+# :::::::: WARNING :::::::
+#
+# Construction of LR parsing tables is fairly complicated and expensive.
+# To make this module run fast, a *LOT* of work has been put into
+# optimization---often at the expense of readability and what some might
+# consider to be good Python "coding style."   Modify the code at your
+# own risk!
+# ----------------------------------------------------------------------------
+
+__version__ = "2.2"
+
+#-----------------------------------------------------------------------------
+# === User configurable parameters ===
+#
+# Change these to modify the default behavior of yacc (if you wish)
+#-----------------------------------------------------------------------------
+
+yaccdebug = 1 # Debugging mode. If set, yacc generates a
+                               # 'parser.out' file in the current directory
+
+debug_file = 'parser.out' # Default name of the debugging file
+tab_module = 'parsetab' # Default name of the table module
+default_lr = 'LALR' # Default LR table generation method
+
+error_count = 3 # Number of symbols that must be shifted to leave recovery mode
+
+import re, types, sys, cStringIO, hashlib, os.path
+
+# Exception raised for yacc-related errors
+class YaccError(Exception): pass
+
+#-----------------------------------------------------------------------------
+# === LR Parsing Engine ===
+#
+# The following classes are used for the LR parser itself. These are not
+# used during table construction and are independent of the actual LR
+# table generation algorithm
+#-----------------------------------------------------------------------------
+
+# This class is used to hold non-terminal grammar symbols during parsing.
+# It normally has the following attributes set:
+# .type = Grammar symbol type
+# .value = Symbol value
+# .lineno = Starting line number
+# .endlineno = Ending line number (optional, set automatically)
+# .lexpos = Starting lex position
+# .endlexpos = Ending lex position (optional, set automatically)
+
+class YaccSymbol:
+ def __str__(self): return self.type
+ def __repr__(self): return str(self)
+
+# This class is a wrapper around the objects actually passed to each
+# grammar rule. Index lookup and assignment actually assign the
+# .value attribute of the underlying YaccSymbol object.
+# The lineno() method returns the line number of a given
+# item (or 0 if not defined). The linespan() method returns
+# a tuple of (startline,endline) representing the range of lines
+# for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos)
+# representing the range of positional information for a symbol.
+
+class YaccProduction:
+ def __init__(self,s,stack=None):
+ self.slice = s
+ self.pbstack = []
+ self.stack = stack
+
+ def __getitem__(self,n):
+ if type(n) == types.IntType:
+ if n >= 0: return self.slice[n].value
+ else: return self.stack[n].value
+ else:
+ return [s.value for s in self.slice[n.start:n.stop:n.step]]
+
+ def __setitem__(self,n,v):
+ self.slice[n].value = v
+
+ def __len__(self):
+ return len(self.slice)
+
+ def lineno(self,n):
+ return getattr(self.slice[n],"lineno",0)
+
+ def linespan(self,n):
+ startline = getattr(self.slice[n],"lineno",0)
+ endline = getattr(self.slice[n],"endlineno",startline)
+ return startline,endline
+
+ def lexpos(self,n):
+ return getattr(self.slice[n],"lexpos",0)
+
+ def lexspan(self,n):
+ startpos = getattr(self.slice[n],"lexpos",0)
+ endpos = getattr(self.slice[n],"endlexpos",startpos)
+ return startpos,endpos
+
+ def pushback(self,n):
+ if n <= 0:
+ raise ValueError, "Expected a positive value"
+ if n > (len(self.slice)-1):
+ raise ValueError, "Can't push %d tokens. Only %d are available." % (n,len(self.slice)-1)
+ for i in range(0,n):
+ self.pbstack.append(self.slice[-i-1])
+
+# The LR Parsing engine. This is defined as a class so that multiple parsers
+# can exist in the same process. A user never instantiates this directly.
+# Instead, the global yacc() function should be used to create a suitable Parser
+# object.
+
+class Parser:
+ def __init__(self,magic=None):
+
+ # This is a hack to keep users from trying to instantiate a Parser
+ # object directly.
+
+ if magic != "xyzzy":
+ raise YaccError, "Can't instantiate Parser. Use yacc() instead."
+
+ # Reset internal state
+ self.productions = None # List of productions
+ self.errorfunc = None # Error handling function
+ self.action = { } # LR Action table
+ self.goto = { } # LR goto table
+ self.require = { } # Attribute require table
+ self.method = "Unknown LR" # Table construction method used
+
+ def errok(self):
+ self.errorcount = 0
+
+ def restart(self):
+ del self.statestack[:]
+ del self.symstack[:]
+ sym = YaccSymbol()
+ sym.type = '$end'
+ self.symstack.append(sym)
+ self.statestack.append(0)
+
+ def parse(self,input=None,lexer=None,debug=0):
+ lookahead = None # Current lookahead symbol
+ lookaheadstack = [ ] # Stack of lookahead symbols
+ actions = self.action # Local reference to action table
+ goto = self.goto # Local reference to goto table
+ prod = self.productions # Local reference to production list
+ pslice = YaccProduction(None) # Production object passed to grammar rules
+ pslice.parser = self # Parser object
+ self.errorcount = 0 # Used during error recovery
+
+ # If no lexer was given, we will try to use the lex module
+ if not lexer:
+ import lex
+ lexer = lex.lexer
+
+ pslice.lexer = lexer
+
+ # If input was supplied, pass to lexer
+ if input:
+ lexer.input(input)
+
+ # Tokenize function
+ get_token = lexer.token
+
+ statestack = [ ] # Stack of parsing states
+ self.statestack = statestack
+ symstack = [ ] # Stack of grammar symbols
+ self.symstack = symstack
+
+ pslice.stack = symstack # Put in the production
+ errtoken = None # Err token
+
+ # The start state is assumed to be (0,$end)
+ statestack.append(0)
+ sym = YaccSymbol()
+ sym.type = '$end'
+ symstack.append(sym)
+
+ while 1:
+ # Get the next symbol on the input. If a lookahead symbol
+ # is already set, we just use that. Otherwise, we'll pull
+ # the next token off of the lookaheadstack or from the lexer
+ if debug > 1:
+ print 'state', statestack[-1]
+ if not lookahead:
+ if not lookaheadstack:
+ lookahead = get_token() # Get the next token
+ else:
+ lookahead = lookaheadstack.pop()
+ if not lookahead:
+ lookahead = YaccSymbol()
+ lookahead.type = '$end'
+ if debug:
+ errorlead = ("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip()
+
+ # Check the action table
+ s = statestack[-1]
+ ltype = lookahead.type
+ t = actions.get((s,ltype),None)
+
+ if debug > 1:
+ print 'action', t
+ if t is not None:
+ if t > 0:
+ # shift a symbol on the stack
+ if ltype == '$end':
+ # Error, end of input
+ sys.stderr.write("yacc: Parse error. EOF\n")
+ return
+ statestack.append(t)
+ if debug > 1:
+ sys.stderr.write("%-60s shift state %s\n" % (errorlead, t))
+ symstack.append(lookahead)
+ lookahead = None
+
+ # Decrease error count on successful shift
+ if self.errorcount > 0:
+ self.errorcount -= 1
+
+ continue
+
+ if t < 0:
+ # reduce a symbol on the stack, emit a production
+ p = prod[-t]
+ pname = p.name
+ plen = p.len
+
+ # Get production function
+ sym = YaccSymbol()
+ sym.type = pname # Production name
+ sym.value = None
+ if debug > 1:
+ sys.stderr.write("%-60s reduce %d\n" % (errorlead, -t))
+
+ if plen:
+ targ = symstack[-plen-1:]
+ targ[0] = sym
+ try:
+ sym.lineno = targ[1].lineno
+ sym.endlineno = getattr(targ[-1],"endlineno",targ[-1].lineno)
+ sym.lexpos = targ[1].lexpos
+ sym.endlexpos = getattr(targ[-1],"endlexpos",targ[-1].lexpos)
+ except AttributeError:
+ sym.lineno = 0
+ del symstack[-plen:]
+ del statestack[-plen:]
+ else:
+ sym.lineno = 0
+ targ = [ sym ]
+ pslice.slice = targ
+ pslice.pbstack = []
+ # Call the grammar rule with our special slice object
+ p.func(pslice)
+
+ # If there was a pushback, put that on the stack
+ if pslice.pbstack:
+ lookaheadstack.append(lookahead)
+ for _t in pslice.pbstack:
+ lookaheadstack.append(_t)
+ lookahead = None
+
+ symstack.append(sym)
+ statestack.append(goto[statestack[-1],pname])
+ continue
+
+ if t == 0:
+ n = symstack[-1]
+ return getattr(n,"value",None)
+
+            if t is None:
+ if debug:
+ sys.stderr.write(errorlead + "\n")
+ # We have some kind of parsing error here. To handle
+ # this, we are going to push the current token onto
+                # the lookahead stack and replace it with an 'error' token.
+ # If there are any synchronization rules, they may
+ # catch it.
+ #
+                # In addition to pushing the error token, we call
+                # the user-defined p_error() function if this is the
+ # first syntax error. This function is only called if
+ # errorcount == 0.
+ if not self.errorcount:
+ self.errorcount = error_count
+ errtoken = lookahead
+ if errtoken.type == '$end':
+ errtoken = None # End of file!
+ if self.errorfunc:
+ global errok,token,restart
+ errok = self.errok # Set some special functions available in error recovery
+ token = get_token
+ restart = self.restart
+ tok = self.errorfunc(errtoken)
+ del errok, token, restart # Delete special functions
+
+ if not self.errorcount:
+ # User must have done some kind of panic
+ # mode recovery on their own. The
+ # returned token is the next lookahead
+ lookahead = tok
+ errtoken = None
+ continue
+ else:
+ if errtoken:
+ if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
+ else: lineno = 0
+ if lineno:
+ sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
+ else:
+ sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
+ else:
+ sys.stderr.write("yacc: Parse error in input. EOF\n")
+ return
+
+ else:
+ self.errorcount = error_count
+
+ # case 1: the statestack only has 1 entry on it. If we're in this state, the
+ # entire parse has been rolled back and we're completely hosed. The token is
+ # discarded and we just keep going.
+
+ if len(statestack) <= 1 and lookahead.type != '$end':
+ lookahead = None
+ errtoken = None
+ # Nuke the pushback stack
+ del lookaheadstack[:]
+ continue
+
+ # case 2: the statestack has a couple of entries on it, but we're
+ # at the end of the file. nuke the top entry and generate an error token
+
+ # Start nuking entries on the stack
+ if lookahead.type == '$end':
+ # Whoa. We're really hosed here. Bail out
+ return
+
+ if lookahead.type != 'error':
+ sym = symstack[-1]
+ if sym.type == 'error':
+ # Hmmm. Error is on top of stack, we'll just nuke input
+ # symbol and continue
+ lookahead = None
+ continue
+ t = YaccSymbol()
+ t.type = 'error'
+ if hasattr(lookahead,"lineno"):
+ t.lineno = lookahead.lineno
+ t.value = lookahead
+ lookaheadstack.append(lookahead)
+ lookahead = t
+ else:
+ symstack.pop()
+ statestack.pop()
+
+ continue
+
+ # Call an error function here
+ raise RuntimeError, "yacc: internal parser error!!!\n"
+
+# -----------------------------------------------------------------------------
+# === Parser Construction ===
+#
+# The following functions and variables are used to implement the yacc() function
+# itself. This is pretty hairy stuff involving lots of error checking,
+# construction of LR items, kernels, and so forth. Although a lot of
+# this work is done using global variables, the resulting Parser object
+# is completely self contained--meaning that it is safe to repeatedly
+# call yacc() with different grammars in the same application.
+# -----------------------------------------------------------------------------
+
+# -----------------------------------------------------------------------------
+# validate_file()
+#
+# This function checks to see if there are duplicated p_rulename() functions
+# in the parser module file. Without this function, it is really easy for
+# users to make mistakes by cutting and pasting code fragments (and it's a real
+# bugger to try and figure out why the resulting parser doesn't work). Therefore,
+# we just do a little regular expression pattern matching of def statements
+# to try and detect duplicates.
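+#
+# For example, pasting a second def p_statement(...) into the same module
+# file triggers a "Function p_statement redefined" message (illustrative
+# rule name).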
+# -----------------------------------------------------------------------------
+
+def validate_file(filename):
+ base,ext = os.path.splitext(filename)
+ if ext != '.py': return 1 # No idea. Assume it's okay.
+
+ try:
+ f = open(filename)
+ lines = f.readlines()
+ f.close()
+ except IOError:
+ return 1 # Oh well
+
+ # Match def p_funcname(
+ fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')
+ counthash = { }
+ linen = 1
+ noerror = 1
+ for l in lines:
+ m = fre.match(l)
+ if m:
+ name = m.group(1)
+ prev = counthash.get(name)
+ if not prev:
+ counthash[name] = linen
+ else:
+ sys.stderr.write("%s:%d: Function %s redefined. Previously defined on line %d\n" % (filename,linen,name,prev))
+ noerror = 0
+ linen += 1
+ return noerror
+
+# This function looks for functions that might be grammar rules, but which don't have the proper p_suffix.
+def validate_dict(d):
+ for n,v in d.items():
+ if n[0:2] == 'p_' and type(v) in (types.FunctionType, types.MethodType): continue
+ if n[0:2] == 't_': continue
+
+ if n[0:2] == 'p_':
+ sys.stderr.write("yacc: Warning. '%s' not defined as a function\n" % n)
+            if isinstance(v,types.FunctionType) and v.func_code.co_argcount == 1:
+ try:
+ doc = v.__doc__.split(" ")
+ if doc[1] == ':':
+ sys.stderr.write("%s:%d: Warning. Possible grammar rule '%s' defined without p_ prefix.\n" % (v.func_code.co_filename, v.func_code.co_firstlineno,n))
+ except StandardError:
+ pass
+
+# -----------------------------------------------------------------------------
+# === GRAMMAR FUNCTIONS ===
+#
+# The following global variables and functions are used to store, manipulate,
+# and verify the grammar rules specified by the user.
+# -----------------------------------------------------------------------------
+
+# Initialize all of the global variables used during grammar construction
+def initialize_vars():
+ global Productions, Prodnames, Prodmap, Terminals
+ global Nonterminals, First, Follow, Precedence, LRitems
+ global Errorfunc, Signature, Requires
+
+ Productions = [None] # A list of all of the productions. The first
+ # entry is always reserved for the purpose of
+ # building an augmented grammar
+
+ Prodnames = { } # A dictionary mapping the names of nonterminals to a list of all
+ # productions of that nonterminal.
+
+ Prodmap = { } # A dictionary that is only used to detect duplicate
+ # productions.
+
+ Terminals = { } # A dictionary mapping the names of terminal symbols to a
+ # list of the rules where they are used.
+
+ Nonterminals = { } # A dictionary mapping names of nonterminals to a list
+ # of rule numbers where they are used.
+
+ First = { } # A dictionary of precomputed FIRST(x) symbols
+
+ Follow = { } # A dictionary of precomputed FOLLOW(x) symbols
+
+ Precedence = { } # Precedence rules for each terminal. Contains tuples of the
+ # form ('right',level) or ('nonassoc', level) or ('left',level)
+
+ LRitems = [ ] # A list of all LR items for the grammar. These are the
+ # productions with the "dot" like E -> E . PLUS E
+
+ Errorfunc = None # User defined error handler
+
+ Signature = hashlib.sha256() # Digital signature of the grammar rules, precedence
+ # and other information. Used to determined when a
+ # parsing table needs to be regenerated.
+
+ Requires = { } # Requires list
+
+ # File objects used when creating the parser.out debugging file
+ global _vf, _vfc
+ _vf = cStringIO.StringIO()
+ _vfc = cStringIO.StringIO()
+
+# -----------------------------------------------------------------------------
+# class Production:
+#
+# This class stores the raw information about a single production or grammar rule.
+# It has a few required attributes:
+#
+# name - Name of the production (nonterminal)
+# prod - A list of symbols making up its production
+# number - Production number.
+#
+# In addition, a few additional attributes are used to help with debugging or
+# optimization of table generation.
+#
+# file - File where production action is defined.
+# lineno - Line number where action is defined
+# func - Action function
+# prec - Precedence level
+# lr_next - Next LR item. Example, if we are ' E -> E . PLUS E'
+# then lr_next refers to 'E -> E PLUS . E'
+# lr_index - LR item index (location of the ".") in the prod list.
+# lookaheads - LALR lookahead symbols for this item
+# len - Length of the production (number of symbols on right hand side)
+# -----------------------------------------------------------------------------
+
+class Production:
+ def __init__(self,**kw):
+ for k,v in kw.items():
+ setattr(self,k,v)
+ self.lr_index = -1
+ self.lr0_added = 0 # Flag indicating whether or not added to LR0 closure
+ self.lr1_added = 0 # Flag indicating whether or not added to LR1
+ self.usyms = [ ]
+ self.lookaheads = { }
+ self.lk_added = { }
+ self.setnumbers = [ ]
+
+ def __str__(self):
+ if self.prod:
+ s = "%s -> %s" % (self.name," ".join(self.prod))
+ else:
+ s = "%s -> <empty>" % self.name
+ return s
+
+ def __repr__(self):
+ return str(self)
+
+ # Compute lr_items from the production
+ def lr_item(self,n):
+ if n > len(self.prod): return None
+ p = Production()
+ p.name = self.name
+ p.prod = list(self.prod)
+ p.number = self.number
+ p.lr_index = n
+ p.lookaheads = { }
+ p.setnumbers = self.setnumbers
+ p.prod.insert(n,".")
+ p.prod = tuple(p.prod)
+ p.len = len(p.prod)
+ p.usyms = self.usyms
+
+ # Precompute list of productions immediately following
+ try:
+ p.lrafter = Prodnames[p.prod[n+1]]
+ except (IndexError,KeyError),e:
+ p.lrafter = []
+ try:
+ p.lrbefore = p.prod[n-1]
+ except IndexError:
+ p.lrbefore = None
+
+ return p
+
+class MiniProduction:
+ pass
+
+# regex matching identifiers
+_is_identifier = re.compile(r'^[a-zA-Z0-9_\-~]+$')
+
+# -----------------------------------------------------------------------------
+# add_production()
+#
+# Given an action function, this function assembles a production rule.
+# The production rule is assumed to be found in the function's docstring.
+# This rule has the general syntax:
+#
+# name1 ::= production1
+# | production2
+# | production3
+# ...
+# | productionn
+# name2 ::= production1
+# | production2
+# ...
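+#
+# For example (illustrative names), a rule function whose docstring reads
+#
+#     statement ::= expression
+#                 | assignment
+#
+# contributes two productions for the nonterminal 'statement'.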
+# -----------------------------------------------------------------------------
+
+def add_production(f,file,line,prodname,syms):
+
+ if Terminals.has_key(prodname):
+ sys.stderr.write("%s:%d: Illegal rule name '%s'. Already defined as a token.\n" % (file,line,prodname))
+ return -1
+ if prodname == 'error':
+ sys.stderr.write("%s:%d: Illegal rule name '%s'. error is a reserved word.\n" % (file,line,prodname))
+ return -1
+
+ if not _is_identifier.match(prodname):
+ sys.stderr.write("%s:%d: Illegal rule name '%s'\n" % (file,line,prodname))
+ return -1
+
+ for x in range(len(syms)):
+ s = syms[x]
+ if s[0] in "'\"":
+ try:
+ c = eval(s)
+ if (len(c) > 1):
+ sys.stderr.write("%s:%d: Literal token %s in rule '%s' may only be a single character\n" % (file,line,s, prodname))
+ return -1
+ if not Terminals.has_key(c):
+ Terminals[c] = []
+ syms[x] = c
+ continue
+ except SyntaxError:
+ pass
+ if not _is_identifier.match(s) and s != '%prec':
+ sys.stderr.write("%s:%d: Illegal name '%s' in rule '%s'\n" % (file,line,s, prodname))
+ return -1
+
+ # See if the rule is already in the rulemap
+ map = "%s -> %s" % (prodname,syms)
+ if Prodmap.has_key(map):
+ m = Prodmap[map]
+ sys.stderr.write("%s:%d: Duplicate rule %s.\n" % (file,line, m))
+ sys.stderr.write("%s:%d: Previous definition at %s:%d\n" % (file,line, m.file, m.line))
+ return -1
+
+ p = Production()
+ p.name = prodname
+ p.prod = syms
+ p.file = file
+ p.line = line
+ p.func = f
+ p.number = len(Productions)
+
+
+ Productions.append(p)
+ Prodmap[map] = p
+ if not Nonterminals.has_key(prodname):
+ Nonterminals[prodname] = [ ]
+
+ # Add all terminals to Terminals
+ i = 0
+ while i < len(p.prod):
+ t = p.prod[i]
+ if t == '%prec':
+ try:
+ precname = p.prod[i+1]
+ except IndexError:
+ sys.stderr.write("%s:%d: Syntax error. Nothing follows %%prec.\n" % (p.file,p.line))
+ return -1
+
+ prec = Precedence.get(precname,None)
+ if not prec:
+ sys.stderr.write("%s:%d: Nothing known about the precedence of '%s'\n" % (p.file,p.line,precname))
+ return -1
+ else:
+ p.prec = prec
+ del p.prod[i]
+ del p.prod[i]
+ continue
+
+ if Terminals.has_key(t):
+ Terminals[t].append(p.number)
+ # Is a terminal. We'll assign a precedence to p based on this
+ if not hasattr(p,"prec"):
+ p.prec = Precedence.get(t,('right',0))
+ else:
+ if not Nonterminals.has_key(t):
+ Nonterminals[t] = [ ]
+ Nonterminals[t].append(p.number)
+ i += 1
+
+ if not hasattr(p,"prec"):
+ p.prec = ('right',0)
+
+ # Set final length of productions
+ p.len = len(p.prod)
+ p.prod = tuple(p.prod)
+
+ # Calculate unique syms in the production
+ p.usyms = [ ]
+ for s in p.prod:
+ if s not in p.usyms:
+ p.usyms.append(s)
+
+ # Add to the global productions list
+ try:
+ Prodnames[p.name].append(p)
+ except KeyError:
+ Prodnames[p.name] = [ p ]
+ return 0
+
+# Given a raw rule function, this function rips out its doc string
+# and adds rules to the grammar
+
+def add_function(f):
+ line = f.func_code.co_firstlineno
+ file = f.func_code.co_filename
+ error = 0
+
+ if isinstance(f,types.MethodType):
+ reqdargs = 2
+ else:
+ reqdargs = 1
+
+ if f.func_code.co_argcount > reqdargs:
+ sys.stderr.write("%s:%d: Rule '%s' has too many arguments.\n" % (file,line,f.__name__))
+ return -1
+
+ if f.func_code.co_argcount < reqdargs:
+ sys.stderr.write("%s:%d: Rule '%s' requires an argument.\n" % (file,line,f.__name__))
+ return -1
+
+ if f.__doc__:
+ # Split the doc string into lines
+ pstrings = f.__doc__.splitlines()
+ lastp = None
+ dline = line
+ for ps in pstrings:
+ dline += 1
+ p = ps.split()
+ if not p: continue
+ try:
+ if p[0] == '|':
+ # This is a continuation of a previous rule
+ if not lastp:
+ sys.stderr.write("%s:%d: Misplaced '|'.\n" % (file,dline))
+ return -1
+ prodname = lastp
+ if len(p) > 1:
+ syms = p[1:]
+ else:
+ syms = [ ]
+ else:
+ prodname = p[0]
+ lastp = prodname
+ assign = p[1]
+ if len(p) > 2:
+ syms = p[2:]
+ else:
+ syms = [ ]
+ if assign != ':' and assign != '::=':
+ sys.stderr.write("%s:%d: Syntax error. Expected ':'\n" % (file,dline))
+ return -1
+
+
+ e = add_production(f,file,dline,prodname,syms)
+ error += e
+
+
+ except StandardError:
+ sys.stderr.write("%s:%d: Syntax error in rule '%s'\n" % (file,dline,ps))
+ error -= 1
+ else:
+ sys.stderr.write("%s:%d: No documentation string specified in function '%s'\n" % (file,line,f.__name__))
+ return error
+
+
+# Cycle checking code (Michael Dyck)
+
+def compute_reachable():
+ '''
+ Find each symbol that can be reached from the start symbol.
+ Print a warning for any nonterminals that can't be reached.
+ (Unused terminals have already had their warning.)
+ '''
+ Reachable = { }
+ for s in Terminals.keys() + Nonterminals.keys():
+ Reachable[s] = 0
+
+ mark_reachable_from( Productions[0].prod[0], Reachable )
+
+ for s in Nonterminals.keys():
+ if not Reachable[s]:
+ sys.stderr.write("yacc: Symbol '%s' is unreachable.\n" % s)
+
+def mark_reachable_from(s, Reachable):
+ '''
+ Mark all symbols that are reachable from symbol s.
+ '''
+ if Reachable[s]:
+ # We've already reached symbol s.
+ return
+ Reachable[s] = 1
+ for p in Prodnames.get(s,[]):
+ for r in p.prod:
+ mark_reachable_from(r, Reachable)
+
+# -----------------------------------------------------------------------------
+# compute_terminates()
+#
+# This function looks at the various parsing rules and tries to detect
+# infinite recursion cycles (grammar rules where there is no possible way
+# to derive a string of only terminals).
+# -----------------------------------------------------------------------------
+def compute_terminates():
+ '''
+ Raise an error for any symbols that don't terminate.
+ '''
+ Terminates = {}
+
+ # Terminals:
+ for t in Terminals.keys():
+ Terminates[t] = 1
+
+ Terminates['$end'] = 1
+
+ # Nonterminals:
+
+ # Initialize to false:
+ for n in Nonterminals.keys():
+ Terminates[n] = 0
+
+ # Then propagate termination until no change:
+ while 1:
+ some_change = 0
+ for (n,pl) in Prodnames.items():
+ # Nonterminal n terminates iff any of its productions terminates.
+ for p in pl:
+ # Production p terminates iff all of its rhs symbols terminate.
+ for s in p.prod:
+ if not Terminates[s]:
+ # The symbol s does not terminate,
+ # so production p does not terminate.
+ p_terminates = 0
+ break
+ else:
+ # didn't break from the loop,
+ # so every symbol s terminates
+ # so production p terminates.
+ p_terminates = 1
+
+ if p_terminates:
+ # symbol n terminates!
+ if not Terminates[n]:
+ Terminates[n] = 1
+ some_change = 1
+ # Don't need to consider any more productions for this n.
+ break
+
+ if not some_change:
+ break
+
+ some_error = 0
+ for (s,terminates) in Terminates.items():
+ if not terminates:
+ if not Prodnames.has_key(s) and not Terminals.has_key(s) and s != 'error':
+ # s is used-but-not-defined, and we've already warned of that,
+ # so it would be overkill to say that it's also non-terminating.
+ pass
+ else:
+ sys.stderr.write("yacc: Infinite recursion detected for symbol '%s'.\n" % s)
+ some_error = 1
+
+ return some_error
+
+# -----------------------------------------------------------------------------
+# verify_productions()
+#
+# This function examines all of the supplied rules to see if they seem valid.
+# -----------------------------------------------------------------------------
+def verify_productions(cycle_check=1):
+ error = 0
+ for p in Productions:
+ if not p: continue
+
+ for s in p.prod:
+ if not Prodnames.has_key(s) and not Terminals.has_key(s) and s != 'error':
+ sys.stderr.write("%s:%d: Symbol '%s' used, but not defined as a token or a rule.\n" % (p.file,p.line,s))
+ error = 1
+ continue
+
+ unused_tok = 0
+ # Now verify all of the tokens
+ if yaccdebug:
+ _vf.write("Unused terminals:\n\n")
+ for s,v in Terminals.items():
+ if s != 'error' and not v:
+ sys.stderr.write("yacc: Warning. Token '%s' defined, but not used.\n" % s)
+ if yaccdebug: _vf.write(" %s\n"% s)
+ unused_tok += 1
+
+ # Print out all of the productions
+ if yaccdebug:
+ _vf.write("\nGrammar\n\n")
+ for i in range(1,len(Productions)):
+ _vf.write("Rule %-5d %s\n" % (i, Productions[i]))
+
+ unused_prod = 0
+ # Verify the use of all productions
+ for s,v in Nonterminals.items():
+ if not v:
+ p = Prodnames[s][0]
+ sys.stderr.write("%s:%d: Warning. Rule '%s' defined, but not used.\n" % (p.file,p.line, s))
+ unused_prod += 1
+
+
+ if unused_tok == 1:
+ sys.stderr.write("yacc: Warning. There is 1 unused token.\n")
+ if unused_tok > 1:
+ sys.stderr.write("yacc: Warning. There are %d unused tokens.\n" % unused_tok)
+
+ if unused_prod == 1:
+ sys.stderr.write("yacc: Warning. There is 1 unused rule.\n")
+ if unused_prod > 1:
+ sys.stderr.write("yacc: Warning. There are %d unused rules.\n" % unused_prod)
+
+ if yaccdebug:
+ _vf.write("\nTerminals, with rules where they appear\n\n")
+ ks = Terminals.keys()
+ ks.sort()
+ for k in ks:
+ _vf.write("%-20s : %s\n" % (k, " ".join([str(s) for s in Terminals[k]])))
+ _vf.write("\nNonterminals, with rules where they appear\n\n")
+ ks = Nonterminals.keys()
+ ks.sort()
+ for k in ks:
+ _vf.write("%-20s : %s\n" % (k, " ".join([str(s) for s in Nonterminals[k]])))
+
+ if (cycle_check):
+ compute_reachable()
+ error += compute_terminates()
+# error += check_cycles()
+ return error
+
+# -----------------------------------------------------------------------------
+# build_lritems()
+#
+# This function walks the list of productions and builds a complete set of the
+# LR items. The LR items are stored in two ways: First, they are uniquely
+# numbered and placed in the list _lritems. Second, a linked list of LR items
+# is built for each production. For example:
+#
+# E -> E PLUS E
+#
+# Creates the list
+#
+# [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
+# -----------------------------------------------------------------------------
+
+def build_lritems():
+ for p in Productions:
+ lastlri = p
+ lri = p.lr_item(0)
+ i = 0
+ while 1:
+ lri = p.lr_item(i)
+ lastlri.lr_next = lri
+ if not lri: break
+ lri.lr_num = len(LRitems)
+ LRitems.append(lri)
+ lastlri = lri
+ i += 1
+
+ # In order for the rest of the parser generator to work, we need to
+ # guarantee that no more lritems are generated. Therefore, we nuke
+ # the p.lr_item method. (Only used in debugging)
+ # Production.lr_item = None
+
+# -----------------------------------------------------------------------------
+# add_precedence()
+#
+# Given a list of precedence rules, add to the precedence table.
+# -----------------------------------------------------------------------------
+
+def add_precedence(plist):
+ plevel = 0
+ error = 0
+ for p in plist:
+ plevel += 1
+ try:
+ prec = p[0]
+ terms = p[1:]
+ if prec != 'left' and prec != 'right' and prec != 'nonassoc':
+ sys.stderr.write("yacc: Invalid precedence '%s'\n" % prec)
+ return -1
+ for t in terms:
+ if Precedence.has_key(t):
+ sys.stderr.write("yacc: Precedence already specified for terminal '%s'\n" % t)
+ error += 1
+ continue
+ Precedence[t] = (prec,plevel)
+ except:
+ sys.stderr.write("yacc: Invalid precedence table.\n")
+ error += 1
+
+ return error
+
+# -----------------------------------------------------------------------------
+# augment_grammar()
+#
+# Compute the augmented grammar. This is just a rule S' -> start where start
+# is the starting symbol.
+# -----------------------------------------------------------------------------
+
+def augment_grammar(start=None):
+ if not start:
+ start = Productions[1].name
+ Productions[0] = Production(name="S'",prod=[start],number=0,len=1,prec=('right',0),func=None)
+ Productions[0].usyms = [ start ]
+ Nonterminals[start].append(0)
+
+
+# -------------------------------------------------------------------------
+# first()
+#
+# Compute the value of FIRST1(beta) where beta is a tuple of symbols.
+#
+# During execution of compute_first1, the result may be incomplete.
+# Afterward (e.g., when called from compute_follow()), it will be complete.
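+#
+# For example (an illustrative grammar), with productions A -> a and
+# B -> <empty>, First['a'] = ['a'] and First['B'] = ['<empty>'], so
+# first(('B','a')) returns ['a'] because B can vanish, exposing 'a'.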
+# -------------------------------------------------------------------------
+def first(beta):
+
+ # We are computing First(x1,x2,x3,...,xn)
+ result = [ ]
+ for x in beta:
+ x_produces_empty = 0
+
+ # Add all the non-<empty> symbols of First[x] to the result.
+ for f in First[x]:
+ if f == '<empty>':
+ x_produces_empty = 1
+ else:
+ if f not in result: result.append(f)
+
+ if x_produces_empty:
+ # We have to consider the next x in beta,
+ # i.e. stay in the loop.
+ pass
+ else:
+ # We don't have to consider any further symbols in beta.
+ break
+ else:
+ # There was no 'break' from the loop,
+ # so x_produces_empty was true for all x in beta,
+ # so beta produces empty as well.
+ result.append('<empty>')
+
+ return result
+
+
+# FOLLOW(x)
+# Given a non-terminal, this function computes the set of all symbols
+# that might follow it. Dragon book, p. 189.
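+#
+# For example (illustrative), in a grammar with the production S -> A b,
+# the terminal b belongs to FOLLOW(A) because it can appear immediately
+# to the right of A.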
+
+def compute_follow(start=None):
+ # Add '$end' to the follow list of the start symbol
+ for k in Nonterminals.keys():
+ Follow[k] = [ ]
+
+ if not start:
+ start = Productions[1].name
+
+ Follow[start] = [ '$end' ]
+
+ while 1:
+ didadd = 0
+ for p in Productions[1:]:
+ # Here is the production set
+ for i in range(len(p.prod)):
+ B = p.prod[i]
+ if Nonterminals.has_key(B):
+ # Okay. We got a non-terminal in a production
+ fst = first(p.prod[i+1:])
+ hasempty = 0
+ for f in fst:
+ if f != '<empty>' and f not in Follow[B]:
+ Follow[B].append(f)
+ didadd = 1
+ if f == '<empty>':
+ hasempty = 1
+ if hasempty or i == (len(p.prod)-1):
+ # Add elements of follow(a) to follow(b)
+ for f in Follow[p.name]:
+ if f not in Follow[B]:
+ Follow[B].append(f)
+ didadd = 1
+ if not didadd: break
+
+ if 0 and yaccdebug:
+ _vf.write('\nFollow:\n')
+ for k in Nonterminals.keys():
+ _vf.write("%-20s : %s\n" % (k, " ".join([str(s) for s in Follow[k]])))
+
+# -------------------------------------------------------------------------
+# compute_first1()
+#
+# Compute the value of FIRST1(X) for all symbols
+# -------------------------------------------------------------------------
+def compute_first1():
+
+ # Terminals:
+ for t in Terminals.keys():
+ First[t] = [t]
+
+ First['$end'] = ['$end']
+ First['#'] = ['#'] # what's this for?
+
+ # Nonterminals:
+
+ # Initialize to the empty set:
+ for n in Nonterminals.keys():
+ First[n] = []
+
+ # Then propagate symbols until no change:
+ while 1:
+ some_change = 0
+ for n in Nonterminals.keys():
+ for p in Prodnames[n]:
+ for f in first(p.prod):
+ if f not in First[n]:
+ First[n].append( f )
+ some_change = 1
+ if not some_change:
+ break
+
+ if 0 and yaccdebug:
+ _vf.write('\nFirst:\n')
+ for k in Nonterminals.keys():
+ _vf.write("%-20s : %s\n" %
+ (k, " ".join([str(s) for s in First[k]])))
+
+# -----------------------------------------------------------------------------
+# === SLR Generation ===
+#
+# The following functions are used to construct SLR (Simple LR) parsing tables
+# as described on p.221-229 of the dragon book.
+# -----------------------------------------------------------------------------
+
+# Global variables for the LR parsing engine
+def lr_init_vars():
+ global _lr_action, _lr_goto, _lr_method
+ global _lr_goto_cache, _lr0_cidhash
+
+ _lr_action = { } # Action table
+ _lr_goto = { } # Goto table
+ _lr_method = "Unknown" # LR method used
+ _lr_goto_cache = { }
+ _lr0_cidhash = { }
+
+
+# Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
+# prodlist is a list of productions.
+
+_add_count = 0 # Counter used to detect cycles
+
+def lr0_closure(I):
+ global _add_count
+
+ _add_count += 1
+ prodlist = Productions
+
+ # Add everything in I to J
+ J = I[:]
+ didadd = 1
+ while didadd:
+ didadd = 0
+ for j in J:
+ for x in j.lrafter:
+ if x.lr0_added == _add_count: continue
+ # Add B --> .G to J
+ J.append(x.lr_next)
+ x.lr0_added = _add_count
+ didadd = 1
+
+ return J
+
+# Compute the LR(0) goto function goto(I,X) where I is a set
+# of LR(0) items and X is a grammar symbol. This function is written
+# in a way that guarantees uniqueness of the generated goto sets
+# (i.e. the same goto set will never be returned as two different Python
+# objects). With uniqueness, we can later do fast set comparisons using
+# id(obj) instead of element-wise comparison.
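+#
+# For example, if lr0_goto(I,'PLUS') is computed twice for the same item
+# set I, the second call returns the identical cached object (the cache is
+# keyed on id(I) and the symbol), so later id()-based comparisons stay valid.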
+
+def lr0_goto(I,x):
+ # First we look for a previously cached entry
+ g = _lr_goto_cache.get((id(I),x),None)
+ if g: return g
+
+ # Now we generate the goto set in a way that guarantees uniqueness
+ # of the result
+
+ s = _lr_goto_cache.get(x,None)
+ if not s:
+ s = { }
+ _lr_goto_cache[x] = s
+
+ gs = [ ]
+ for p in I:
+ n = p.lr_next
+ if n and n.lrbefore == x:
+ s1 = s.get(id(n),None)
+ if not s1:
+ s1 = { }
+ s[id(n)] = s1
+ gs.append(n)
+ s = s1
+ g = s.get('$end',None)
+ if not g:
+ if gs:
+ g = lr0_closure(gs)
+ s['$end'] = g
+ else:
+ s['$end'] = gs
+ _lr_goto_cache[(id(I),x)] = g
+ return g
+
+_lr0_cidhash = { }
+
+# Compute the LR(0) sets of item function
+def lr0_items():
+
+ C = [ lr0_closure([Productions[0].lr_next]) ]
+ i = 0
+ for I in C:
+ _lr0_cidhash[id(I)] = i
+ i += 1
+
+ # Loop over the items in C and each grammar symbols
+ i = 0
+ while i < len(C):
+ I = C[i]
+ i += 1
+
+ # Collect all of the symbols that could possibly be in the goto(I,X) sets
+ asyms = { }
+ for ii in I:
+ for s in ii.usyms:
+ asyms[s] = None
+
+ for x in asyms.keys():
+ g = lr0_goto(I,x)
+ if not g: continue
+ if _lr0_cidhash.has_key(id(g)): continue
+ _lr0_cidhash[id(g)] = len(C)
+ C.append(g)
+
+ return C
+
+# -----------------------------------------------------------------------------
+# ==== LALR(1) Parsing ====
+#
+# LALR(1) parsing is almost exactly the same as SLR except that instead of
+# relying upon Follow() sets when performing reductions, a more selective
+# lookahead set that incorporates the state of the LR(0) machine is utilized.
+# Thus, we mainly just have to focus on calculating the lookahead sets.
+#
+# The method used here is due to DeRemer and Pennello (1982).
+#
+# DeRemer, F. L., and T. J. Pennello: "Efficient Computation of LALR(1)
+# Lookahead Sets", ACM Transactions on Programming Languages and Systems,
+# Vol. 4, No. 4, Oct. 1982, pp. 615-649
+#
+# Further details can also be found in:
+#
+# J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
+# McGraw-Hill Book Company, (1985).
+#
+# Note: This implementation is a complete replacement of the LALR(1)
+# implementation in PLY-1.x releases. That version was based on
+# a less efficient algorithm and it had bugs in its implementation.
+# -----------------------------------------------------------------------------
+
+# -----------------------------------------------------------------------------
+# compute_nullable_nonterminals()
+#
+# Creates a dictionary containing all of the non-terminals that might produce
+# an empty production.
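+#
+# For example (illustrative), given the rules
+#
+#     optsign : PLUS
+#             | MINUS
+#             |
+#
+# the empty third alternative makes 'optsign' nullable, so it appears
+# in the returned dictionary.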
+# -----------------------------------------------------------------------------
+
+def compute_nullable_nonterminals():
+ nullable = {}
+ num_nullable = 0
+ while 1:
+ for p in Productions[1:]:
+ if p.len == 0:
+ nullable[p.name] = 1
+ continue
+ for t in p.prod:
+ if not nullable.has_key(t): break
+ else:
+ nullable[p.name] = 1
+ if len(nullable) == num_nullable: break
+ num_nullable = len(nullable)
+ return nullable
+
+# -----------------------------------------------------------------------------
+# find_nonterminal_transitions(C)
+#
+# Given a set of LR(0) items, this function finds all of the non-terminal
+# transitions. These are transitions in which a dot appears immediately before
+# a non-terminal. Returns a list of tuples of the form (state,N) where state
+# is the state number and N is the nonterminal symbol.
+#
+# The input C is the set of LR(0) items.
+# -----------------------------------------------------------------------------
+
+def find_nonterminal_transitions(C):
+ trans = []
+ for state in range(len(C)):
+ for p in C[state]:
+ if p.lr_index < p.len - 1:
+ t = (state,p.prod[p.lr_index+1])
+ if Nonterminals.has_key(t[1]):
+ if t not in trans: trans.append(t)
+ return trans
+
+# -----------------------------------------------------------------------------
+# dr_relation()
+#
+# Computes the DR(p,A) relationships for non-terminal transitions. The input
+# is a tuple (state,N) where state is a number and N is a nonterminal symbol.
+#
+# Returns a list of terminals.
+# -----------------------------------------------------------------------------
+
+def dr_relation(C,trans,nullable):
+ dr_set = { }
+ state,N = trans
+ terms = []
+
+ g = lr0_goto(C[state],N)
+ for p in g:
+ if p.lr_index < p.len - 1:
+ a = p.prod[p.lr_index+1]
+ if Terminals.has_key(a):
+ if a not in terms: terms.append(a)
+
+ # This extra bit is to handle the start state
+ if state == 0 and N == Productions[0].prod[0]:
+ terms.append('$end')
+
+ return terms
+
+# -----------------------------------------------------------------------------
+# reads_relation()
+#
+# Computes the READS() relation (p,A) READS (t,C).
+# -----------------------------------------------------------------------------
+
+def reads_relation(C, trans, empty):
+ # Look for empty transitions
+ rel = []
+ state, N = trans
+
+ g = lr0_goto(C[state],N)
+ j = _lr0_cidhash.get(id(g),-1)
+ for p in g:
+ if p.lr_index < p.len - 1:
+ a = p.prod[p.lr_index + 1]
+ if empty.has_key(a):
+ rel.append((j,a))
+
+ return rel
+
+# -----------------------------------------------------------------------------
+# compute_lookback_includes()
+#
+# Determines the lookback and includes relations
+#
+# LOOKBACK:
+#
+# This relation is determined by running the LR(0) state machine forward.
+# For example, starting with a production "N : . A B C", we run it forward
+# to obtain "N : A B C ." We then build a relationship between this final
+# state and the starting state. These relationships are stored in a dictionary
+# lookdict.
+#
+# INCLUDES:
+#
+# Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
+#
+# This relation is used to determine non-terminal transitions that occur
+# inside of other non-terminal transition states. (p,A) INCLUDES (p', B)
+# if the following holds:
+#
+# B -> LAT, where T -> epsilon and p' -L-> p
+#
+# L is essentially a prefix (which may be empty), T is a suffix that must be
+# able to derive an empty string. State p' must lead to state p with the string L.
+#
+# -----------------------------------------------------------------------------
+
+def compute_lookback_includes(C,trans,nullable):
+
+ lookdict = {} # Dictionary of lookback relations
+ includedict = {} # Dictionary of include relations
+
+ # Make a dictionary of non-terminal transitions
+ dtrans = {}
+ for t in trans:
+ dtrans[t] = 1
+
+ # Loop over all transitions and compute lookbacks and includes
+ for state,N in trans:
+ lookb = []
+ includes = []
+ for p in C[state]:
+ if p.name != N: continue
+
+ # Okay, we have a name match. We now follow the production all the way
+ # through the state machine until we get the . on the right hand side
+
+ lr_index = p.lr_index
+ j = state
+ while lr_index < p.len - 1:
+ lr_index = lr_index + 1
+ t = p.prod[lr_index]
+
+ # Check to see if this symbol and state are a non-terminal transition
+ if dtrans.has_key((j,t)):
+ # Yes. Okay, there is some chance that this is an includes relation
+ # the only way to know for certain is whether the rest of the
+ # production derives empty
+
+ li = lr_index + 1
+ while li < p.len:
+                    if Terminals.has_key(p.prod[li]): break # No, forget it
+ if not nullable.has_key(p.prod[li]): break
+ li = li + 1
+ else:
+ # Appears to be a relation between (j,t) and (state,N)
+ includes.append((j,t))
+
+ g = lr0_goto(C[j],t) # Go to next set
+ j = _lr0_cidhash.get(id(g),-1) # Go to next state
+
+ # When we get here, j is the final state, now we have to locate the production
+ for r in C[j]:
+ if r.name != p.name: continue
+ if r.len != p.len: continue
+ i = 0
+            # This loop is comparing a production ". A B C" with "A B C ."
+ while i < r.lr_index:
+ if r.prod[i] != p.prod[i+1]: break
+ i = i + 1
+ else:
+ lookb.append((j,r))
+ for i in includes:
+ if not includedict.has_key(i): includedict[i] = []
+ includedict[i].append((state,N))
+ lookdict[(state,N)] = lookb
+
+ return lookdict,includedict
+
+# -----------------------------------------------------------------------------
+# digraph()
+# traverse()
+#
+# The following two functions are used to compute set valued functions
+# of the form:
+#
+# F(x) = F'(x) U U{F(y) | x R y}
+#
+# This is used to compute the values of Read() sets as well as FOLLOW sets
+# in LALR(1) generation.
+#
+# Inputs: X - An input set
+# R - A relation
+# FP - Set-valued function
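+#
+# For example (an illustrative sketch):
+#
+#     X  = ['a','b']
+#     R  = lambda x: ['b'] if x == 'a' else []
+#     FP = lambda x: [x]
+#
+# digraph(X,R,FP) returns {'a': ['a','b'], 'b': ['b']}; because a R b,
+# F('a') absorbs F('b').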
+# ------------------------------------------------------------------------------
+
+def digraph(X,R,FP):
+ N = { }
+ for x in X:
+ N[x] = 0
+ stack = []
+ F = { }
+ for x in X:
+ if N[x] == 0: traverse(x,N,stack,F,X,R,FP)
+ return F
+
+def traverse(x,N,stack,F,X,R,FP):
+ stack.append(x)
+ d = len(stack)
+ N[x] = d
+ F[x] = FP(x) # F(X) <- F'(x)
+
+ rel = R(x) # Get y's related to x
+ for y in rel:
+ if N[y] == 0:
+ traverse(y,N,stack,F,X,R,FP)
+ N[x] = min(N[x],N[y])
+ for a in F.get(y,[]):
+ if a not in F[x]: F[x].append(a)
+ if N[x] == d:
+ N[stack[-1]] = sys.maxint
+ F[stack[-1]] = F[x]
+ element = stack.pop()
+ while element != x:
+ N[stack[-1]] = sys.maxint
+ F[stack[-1]] = F[x]
+ element = stack.pop()
+
+# -----------------------------------------------------------------------------
+# compute_read_sets()
+#
+# Given a set of LR(0) items, this function computes the read sets.
+#
+# Inputs: C = Set of LR(0) items
+# ntrans = Set of nonterminal transitions
+# nullable = Set of empty transitions
+#
+# Returns a set containing the read sets
+# -----------------------------------------------------------------------------
+
+def compute_read_sets(C, ntrans, nullable):
+ FP = lambda x: dr_relation(C,x,nullable)
+ R = lambda x: reads_relation(C,x,nullable)
+ F = digraph(ntrans,R,FP)
+ return F
+
+# -----------------------------------------------------------------------------
+# compute_follow_sets()
+#
+# Given a set of LR(0) items, a set of non-terminal transitions, a readset,
+# and an include set, this function computes the follow sets
+#
+# Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
+#
+# Inputs:
+# ntrans = Set of nonterminal transitions
+# readsets = Readset (previously computed)
+# inclsets = Include sets (previously computed)
+#
+# Returns a set containing the follow sets
+# -----------------------------------------------------------------------------
+
+def compute_follow_sets(ntrans,readsets,inclsets):
+ FP = lambda x: readsets[x]
+ R = lambda x: inclsets.get(x,[])
+ F = digraph(ntrans,R,FP)
+ return F
+
+# -----------------------------------------------------------------------------
+# add_lookaheads()
+#
+# Attaches the lookahead symbols to grammar rules.
+#
+# Inputs: lookbacks - Set of lookback relations
+# followset - Computed follow set
+#
+# This function directly attaches the lookaheads to productions contained
+# in the lookbacks set
+# -----------------------------------------------------------------------------
+
+def add_lookaheads(lookbacks,followset):
+ for trans,lb in lookbacks.items():
+ # Loop over productions in lookback
+ for state,p in lb:
+ if not p.lookaheads.has_key(state):
+ p.lookaheads[state] = []
+ f = followset.get(trans,[])
+ for a in f:
+ if a not in p.lookaheads[state]: p.lookaheads[state].append(a)
+
+# -----------------------------------------------------------------------------
+# add_lalr_lookaheads()
+#
+# This function does all of the work of adding lookahead information for use
+# with LALR parsing
+# -----------------------------------------------------------------------------
+
+def add_lalr_lookaheads(C):
+ # Determine all of the nullable nonterminals
+ nullable = compute_nullable_nonterminals()
+
+ # Find all non-terminal transitions
+ trans = find_nonterminal_transitions(C)
+
+ # Compute read sets
+ readsets = compute_read_sets(C,trans,nullable)
+
+ # Compute lookback/includes relations
+ lookd, included = compute_lookback_includes(C,trans,nullable)
+
+ # Compute LALR FOLLOW sets
+ followsets = compute_follow_sets(trans,readsets,included)
+
+ # Add all of the lookaheads
+ add_lookaheads(lookd,followsets)
+
+# -----------------------------------------------------------------------------
+# lr_parse_table()
+#
+# This function constructs the parse tables for SLR or LALR
+# -----------------------------------------------------------------------------
+def lr_parse_table(method):
+ global _lr_method
+ goto = _lr_goto # Goto array
+ action = _lr_action # Action array
+ actionp = { } # Action production array (temporary)
+
+ _lr_method = method
+
+ n_srconflict = 0
+ n_rrconflict = 0
+
+ if yaccdebug:
+ sys.stderr.write("yacc: Generating %s parsing table...\n" % method)
+ _vf.write("\n\nParsing method: %s\n\n" % method)
+
+ # Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
+ # This determines the number of states
+
+ C = lr0_items()
+
+ if method == 'LALR':
+ add_lalr_lookaheads(C)
+
+ # Build the parser table, state by state
+ st = 0
+ for I in C:
+ # Loop over each production in I
+ actlist = [ ] # List of actions
+
+ if yaccdebug:
+ _vf.write("\nstate %d\n\n" % st)
+ for p in I:
+ _vf.write(" (%d) %s\n" % (p.number, str(p)))
+ _vf.write("\n")
+
+ for p in I:
+ try:
+ if p.prod[-1] == ".":
+ if p.name == "S'":
+ # Start symbol. Accept!
+ action[st,"$end"] = 0
+ actionp[st,"$end"] = p
+ else:
+ # We are at the end of a production. Reduce!
+ if method == 'LALR':
+ laheads = p.lookaheads[st]
+ else:
+ laheads = Follow[p.name]
+ for a in laheads:
+ actlist.append((a,p,"reduce using rule %d (%s)" % (p.number,p)))
+ r = action.get((st,a),None)
+ if r is not None:
+ # Whoa. Have a shift/reduce or reduce/reduce conflict
+ if r > 0:
+ # Need to decide on shift or reduce here
+ # By default we favor shifting. Need to add
+ # some precedence rules here.
+ sprec,slevel = Productions[actionp[st,a].number].prec
+ rprec,rlevel = Precedence.get(a,('right',0))
+ if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
+ # We really need to reduce here.
+ action[st,a] = -p.number
+ actionp[st,a] = p
+ if not slevel and not rlevel:
+ _vfc.write("shift/reduce conflict in state %d resolved as reduce.\n" % st)
+ _vf.write(" ! shift/reduce conflict for %s resolved as reduce.\n" % a)
+ n_srconflict += 1
+ elif (slevel == rlevel) and (rprec == 'nonassoc'):
+ action[st,a] = None
+ else:
+ # Hmmm. Guess we'll keep the shift
+ if not rlevel:
+ _vfc.write("shift/reduce conflict in state %d resolved as shift.\n" % st)
+ _vf.write(" ! shift/reduce conflict for %s resolved as shift.\n" % a)
+ n_srconflict +=1
+ elif r < 0:
+ # Reduce/reduce conflict. In this case, we favor the rule
+ # that was defined first in the grammar file
+ oldp = Productions[-r]
+ pp = Productions[p.number]
+ if oldp.line > pp.line:
+ action[st,a] = -p.number
+ actionp[st,a] = p
+ # sys.stderr.write("Reduce/reduce conflict in state %d\n" % st)
+ n_rrconflict += 1
+ _vfc.write("reduce/reduce conflict in state %d resolved using rule %d (%s).\n" % (st, actionp[st,a].number, actionp[st,a]))
+ _vf.write(" ! reduce/reduce conflict for %s resolved using rule %d (%s).\n" % (a,actionp[st,a].number, actionp[st,a]))
+ else:
+ sys.stderr.write("Unknown conflict in state %d\n" % st)
+ else:
+ action[st,a] = -p.number
+ actionp[st,a] = p
+ else:
+ i = p.lr_index
+ a = p.prod[i+1] # Get symbol right after the "."
+ if Terminals.has_key(a):
+ g = lr0_goto(I,a)
+ j = _lr0_cidhash.get(id(g),-1)
+ if j >= 0:
+ # We are in a shift state
+ actlist.append((a,p,"shift and go to state %d" % j))
+ r = action.get((st,a),None)
+ if r is not None:
+ # Whoa have a shift/reduce or shift/shift conflict
+ if r > 0:
+ if r != j:
+ sys.stderr.write("Shift/shift conflict in state %d\n" % st)
+ elif r < 0:
+ # Do a precedence check.
+ # - if precedence of reduce rule is higher, we reduce.
+ # - if precedence of reduce is same and left assoc, we reduce.
+ # - otherwise we shift
+ rprec,rlevel = Productions[actionp[st,a].number].prec
+ sprec,slevel = Precedence.get(a,('right',0))
+ if (slevel > rlevel) or ((slevel == rlevel) and (rprec != 'left')):
+ # We decide to shift here... highest precedence to shift
+ action[st,a] = j
+ actionp[st,a] = p
+ if not rlevel:
+ n_srconflict += 1
+ _vfc.write("shift/reduce conflict in state %d resolved as shift.\n" % st)
+ _vf.write(" ! shift/reduce conflict for %s resolved as shift.\n" % a)
+ elif (slevel == rlevel) and (rprec == 'nonassoc'):
+ action[st,a] = None
+ else:
+ # Hmmm. Guess we'll keep the reduce
+ if not slevel and not rlevel:
+ n_srconflict +=1
+ _vfc.write("shift/reduce conflict in state %d resolved as reduce.\n" % st)
+ _vf.write(" ! shift/reduce conflict for %s resolved as reduce.\n" % a)
+
+ else:
+ sys.stderr.write("Unknown conflict in state %d\n" % st)
+ else:
+ action[st,a] = j
+ actionp[st,a] = p
+
+ except StandardError,e:
+ raise YaccError, "Hosed in lr_parse_table", e
+
+ # Print the actions associated with each terminal
+ if yaccdebug:
+ _actprint = { }
+ for a,p,m in actlist:
+ if action.has_key((st,a)):
+ if p is actionp[st,a]:
+ _vf.write(" %-15s %s\n" % (a,m))
+ _actprint[(a,m)] = 1
+ _vf.write("\n")
+ for a,p,m in actlist:
+ if action.has_key((st,a)):
+ if p is not actionp[st,a]:
+ if not _actprint.has_key((a,m)):
+ _vf.write(" ! %-15s [ %s ]\n" % (a,m))
+ _actprint[(a,m)] = 1
+
+ # Construct the goto table for this state
+ if yaccdebug:
+ _vf.write("\n")
+ nkeys = { }
+ for ii in I:
+ for s in ii.usyms:
+ if Nonterminals.has_key(s):
+ nkeys[s] = None
+ for n in nkeys.keys():
+ g = lr0_goto(I,n)
+ j = _lr0_cidhash.get(id(g),-1)
+ if j >= 0:
+ goto[st,n] = j
+ if yaccdebug:
+ _vf.write(" %-30s shift and go to state %d\n" % (n,j))
+
+ st += 1
+
+ if yaccdebug:
+ if n_srconflict == 1:
+ sys.stderr.write("yacc: %d shift/reduce conflict\n" % n_srconflict)
+ if n_srconflict > 1:
+ sys.stderr.write("yacc: %d shift/reduce conflicts\n" % n_srconflict)
+ if n_rrconflict == 1:
+ sys.stderr.write("yacc: %d reduce/reduce conflict\n" % n_rrconflict)
+ if n_rrconflict > 1:
+ sys.stderr.write("yacc: %d reduce/reduce conflicts\n" % n_rrconflict)
+
+# -----------------------------------------------------------------------------
+# ==== LR Utility functions ====
+# -----------------------------------------------------------------------------
+
+# -----------------------------------------------------------------------------
+# lr_write_tables()
+#
+# This function writes the LR parsing tables to a file
+# -----------------------------------------------------------------------------
+
+def lr_write_tables(modulename=tab_module,outputdir=''):
+ filename = os.path.join(outputdir,modulename) + ".py"
+ try:
+ f = open(filename,"w")
+
+ f.write("""
+# %s
+# This file is automatically generated. Do not edit.
+
+_lr_method = %s
+
+_lr_signature = %s
+""" % (filename, repr(_lr_method), repr(Signature.digest())))
+
+ # Change smaller to 0 to go back to original tables
+ smaller = 1
+
+ # Factor out names to try and make smaller
+ if smaller:
+ items = { }
+
+ for k,v in _lr_action.items():
+ i = items.get(k[1])
+ if not i:
+ i = ([],[])
+ items[k[1]] = i
+ i[0].append(k[0])
+ i[1].append(v)
+
+ f.write("\n_lr_action_items = {")
+ for k,v in items.items():
+ f.write("%r:([" % k)
+ for i in v[0]:
+ f.write("%r," % i)
+ f.write("],[")
+ for i in v[1]:
+ f.write("%r," % i)
+
+ f.write("]),")
+ f.write("}\n")
+
+ f.write("""
+_lr_action = { }
+for _k, _v in _lr_action_items.items():
+ for _x,_y in zip(_v[0],_v[1]):
+ _lr_action[(_x,_k)] = _y
+del _lr_action_items
+""")
+
+ else:
+ f.write("\n_lr_action = { ");
+ for k,v in _lr_action.items():
+ f.write("(%r,%r):%r," % (k[0],k[1],v))
+ f.write("}\n");
+
+ if smaller:
+ # Factor out names to try and make smaller
+ items = { }
+
+ for k,v in _lr_goto.items():
+ i = items.get(k[1])
+ if not i:
+ i = ([],[])
+ items[k[1]] = i
+ i[0].append(k[0])
+ i[1].append(v)
+
+ f.write("\n_lr_goto_items = {")
+ for k,v in items.items():
+ f.write("%r:([" % k)
+ for i in v[0]:
+ f.write("%r," % i)
+ f.write("],[")
+ for i in v[1]:
+ f.write("%r," % i)
+
+ f.write("]),")
+ f.write("}\n")
+
+ f.write("""
+_lr_goto = { }
+for _k, _v in _lr_goto_items.items():
+ for _x,_y in zip(_v[0],_v[1]):
+ _lr_goto[(_x,_k)] = _y
+del _lr_goto_items
+""")
+ else:
+ f.write("\n_lr_goto = { ");
+ for k,v in _lr_goto.items():
+ f.write("(%r,%r):%r," % (k[0],k[1],v))
+ f.write("}\n");
+
+ # Write production table
+ f.write("_lr_productions = [\n")
+ for p in Productions:
+ if p:
+ if (p.func):
+ f.write(" (%r,%d,%r,%r,%d),\n" % (p.name, p.len, p.func.__name__,p.file,p.line))
+ else:
+ f.write(" (%r,%d,None,None,None),\n" % (p.name, p.len))
+ else:
+ f.write(" None,\n")
+ f.write("]\n")
+
+ f.close()
+
+ except IOError,e:
+ print "Unable to create '%s'" % filename
+ print e
+ return
+
+def lr_read_tables(module=tab_module,optimize=0):
+ global _lr_action, _lr_goto, _lr_productions, _lr_method
+ try:
+ exec "import %s as parsetab" % module
+
+ if (optimize) or (Signature.digest() == parsetab._lr_signature):
+ _lr_action = parsetab._lr_action
+ _lr_goto = parsetab._lr_goto
+ _lr_productions = parsetab._lr_productions
+ _lr_method = parsetab._lr_method
+ return 1
+ else:
+ return 0
+
+ except (ImportError,AttributeError):
+ return 0
+
+
+# Available instance types. This is used when parsers are defined by a class.
+# It's a little funky because I want to preserve backwards compatibility
+# with Python 2.0 where types.ObjectType is undefined.
+
+try:
+ _INSTANCETYPE = (types.InstanceType, types.ObjectType)
+except AttributeError:
+ _INSTANCETYPE = types.InstanceType
+
+# -----------------------------------------------------------------------------
+# yacc(module)
+#
+# Build the parser module
+# -----------------------------------------------------------------------------
+
+def yacc(method=default_lr, debug=yaccdebug, module=None, tabmodule=tab_module, start=None, check_recursion=1, optimize=0,write_tables=1,debugfile=debug_file,outputdir=''):
+ global yaccdebug
+ yaccdebug = debug
+
+ initialize_vars()
+ files = { }
+ error = 0
+
+
+ # Add parsing method to signature
+ Signature.update(method)
+
+ # If a "module" parameter was supplied, extract its dictionary.
+ # Note: a module may in fact be an instance as well.
+
+ if module:
+ # User supplied a module object.
+ if isinstance(module, types.ModuleType):
+ ldict = module.__dict__
+ elif isinstance(module, _INSTANCETYPE):
+ _items = [(k,getattr(module,k)) for k in dir(module)]
+ ldict = { }
+ for i in _items:
+ ldict[i[0]] = i[1]
+ else:
+ raise ValueError,"Expected a module"
+
+ else:
+ # No module given. We might be able to get information from the caller.
+ # Throw an exception and unwind the traceback to get the globals
+
+ try:
+ raise RuntimeError
+ except RuntimeError:
+ e,b,t = sys.exc_info()
+ f = t.tb_frame
+ f = f.f_back # Walk out to our calling function
+ ldict = f.f_globals # Grab its globals dictionary
+
+ # Add starting symbol to signature
+ if not start:
+ start = ldict.get("start",None)
+ if start:
+ Signature.update(start)
+
+    # If running in optimized mode, try to read the pre-generated parser tables directly.
+
+ if (optimize and lr_read_tables(tabmodule,1)):
+ # Read parse table
+ del Productions[:]
+ for p in _lr_productions:
+ if not p:
+ Productions.append(None)
+ else:
+ m = MiniProduction()
+ m.name = p[0]
+ m.len = p[1]
+ m.file = p[3]
+ m.line = p[4]
+ if p[2]:
+ m.func = ldict[p[2]]
+ Productions.append(m)
+
+ else:
+ # Get the tokens map
+ if (module and isinstance(module,_INSTANCETYPE)):
+ tokens = getattr(module,"tokens",None)
+ else:
+ tokens = ldict.get("tokens",None)
+
+ if not tokens:
+ raise YaccError,"module does not define a list 'tokens'"
+ if not (isinstance(tokens,types.ListType) or isinstance(tokens,types.TupleType)):
+ raise YaccError,"tokens must be a list or tuple."
+
+ # Check to see if a requires dictionary is defined.
+ requires = ldict.get("require",None)
+ if requires:
+ if not (isinstance(requires,types.DictType)):
+ raise YaccError,"require must be a dictionary."
+
+ for r,v in requires.items():
+ try:
+ if not (isinstance(v,types.ListType)):
+ raise TypeError
+ v1 = [x.split(".") for x in v]
+ Requires[r] = v1
+ except StandardError:
+ print "Invalid specification for rule '%s' in require. Expected a list of strings" % r
+
+
+        # Build the dictionary of terminals. We record an empty list in the
+        # dictionary for each token to track whether or not it is actually
+        # used in the grammar
+
+ if 'error' in tokens:
+ print "yacc: Illegal token 'error'. Is a reserved word."
+ raise YaccError,"Illegal token name"
+
+ for n in tokens:
+ if Terminals.has_key(n):
+ print "yacc: Warning. Token '%s' multiply defined." % n
+ Terminals[n] = [ ]
+
+ Terminals['error'] = [ ]
+
+ # Get the precedence map (if any)
+ prec = ldict.get("precedence",None)
+ if prec:
+ if not (isinstance(prec,types.ListType) or isinstance(prec,types.TupleType)):
+ raise YaccError,"precedence must be a list or tuple."
+ add_precedence(prec)
+ Signature.update(repr(prec))
+
+ for n in tokens:
+ if not Precedence.has_key(n):
+ Precedence[n] = ('right',0) # Default, right associative, 0 precedence
+
+ # Look for error handler
+ ef = ldict.get('p_error',None)
+ if ef:
+ if isinstance(ef,types.FunctionType):
+ ismethod = 0
+ elif isinstance(ef, types.MethodType):
+ ismethod = 1
+ else:
+ raise YaccError,"'p_error' defined, but is not a function or method."
+ eline = ef.func_code.co_firstlineno
+ efile = ef.func_code.co_filename
+ files[efile] = None
+
+ if (ef.func_code.co_argcount != 1+ismethod):
+ raise YaccError,"%s:%d: p_error() requires 1 argument." % (efile,eline)
+ global Errorfunc
+ Errorfunc = ef
+ else:
+ print "yacc: Warning. no p_error() function is defined."
+
+ # Get the list of built-in functions with p_ prefix
+ symbols = [ldict[f] for f in ldict.keys()
+ if (type(ldict[f]) in (types.FunctionType, types.MethodType) and ldict[f].__name__[:2] == 'p_'
+ and ldict[f].__name__ != 'p_error')]
+
+ # Check for non-empty symbols
+ if len(symbols) == 0:
+ raise YaccError,"no rules of the form p_rulename are defined."
+
+ # Sort the symbols by line number
+ symbols.sort(lambda x,y: cmp(x.func_code.co_firstlineno,y.func_code.co_firstlineno))
+
+ # Add all of the symbols to the grammar
+ for f in symbols:
+ if (add_function(f)) < 0:
+ error += 1
+ else:
+ files[f.func_code.co_filename] = None
+
+ # Make a signature of the docstrings
+ for f in symbols:
+ if f.__doc__:
+ Signature.update(f.__doc__)
+
+ lr_init_vars()
+
+ if error:
+ raise YaccError,"Unable to construct parser."
+
+ if not lr_read_tables(tabmodule):
+
+ # Validate files
+ for filename in files.keys():
+ if not validate_file(filename):
+ error = 1
+
+ # Validate dictionary
+ validate_dict(ldict)
+
+ if start and not Prodnames.has_key(start):
+ raise YaccError,"Bad starting symbol '%s'" % start
+
+ augment_grammar(start)
+ error = verify_productions(cycle_check=check_recursion)
+            otherfunc = [ldict[f] for f in ldict.keys()
+               if (type(ldict[f]) in (types.FunctionType,types.MethodType) and ldict[f].__name__[:2] != 'p_')]
+
+ if error:
+ raise YaccError,"Unable to construct parser."
+
+ build_lritems()
+ compute_first1()
+ compute_follow(start)
+
+ if method in ['SLR','LALR']:
+ lr_parse_table(method)
+ else:
+ raise YaccError, "Unknown parsing method '%s'" % method
+
+ if write_tables:
+ lr_write_tables(tabmodule,outputdir)
+
+ if yaccdebug:
+ try:
+ f = open(os.path.join(outputdir,debugfile),"w")
+ f.write(_vfc.getvalue())
+ f.write("\n\n")
+ f.write(_vf.getvalue())
+ f.close()
+ except IOError,e:
+ print "yacc: can't create '%s'" % debugfile,e
+
+ # Made it here. Create a parser object and set up its internal state.
+ # Set global parse() method to bound method of parser object.
+
+ p = Parser("xyzzy")
+ p.productions = Productions
+ p.errorfunc = Errorfunc
+ p.action = _lr_action
+ p.goto = _lr_goto
+ p.method = _lr_method
+ p.require = Requires
+
+ global parse
+ parse = p.parse
+
+ global parser
+ parser = p
+
+ # Clean up all of the globals we created
+ if (not optimize):
+ yacc_cleanup()
+ return p
+
+# yacc_cleanup function. Delete all of the global variables
+# used during table construction
+
+def yacc_cleanup():
+ global _lr_action, _lr_goto, _lr_method, _lr_goto_cache
+ del _lr_action, _lr_goto, _lr_method, _lr_goto_cache
+
+ global Productions, Prodnames, Prodmap, Terminals
+ global Nonterminals, First, Follow, Precedence, LRitems
+ global Errorfunc, Signature, Requires
+
+ del Productions, Prodnames, Prodmap, Terminals
+ del Nonterminals, First, Follow, Precedence, LRitems
+ del Errorfunc, Signature, Requires
+
+ global _vf, _vfc
+ del _vf, _vfc
+
+
+# Stub that raises an error if parsing is attempted without first calling yacc()
+def parse(*args,**kwargs):
+ raise YaccError, "yacc: No parser built with yacc()"
+
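+# A minimal, hypothetical usage sketch of the yacc() builder above. It follows
+# the conventions this module enforces: a 'tokens' list, p_* rule functions
+# whose docstrings carry the grammar, and a one-argument p_error(). The token
+# names, grammar, and lexer are placeholders, not part of this module.
+#
+#   import yacc                          # however this module is imported
+#
+#   tokens = ('NUMBER', 'PLUS')          # required terminal list
+#
+#   def p_expr_plus(p):
+#       'expr : expr PLUS NUMBER'        # grammar rule in the docstring
+#       p[0] = p[1] + p[3]
+#
+#   def p_expr_number(p):
+#       'expr : NUMBER'
+#       p[0] = p[1]
+#
+#   def p_error(p):                      # validated to take one argument
+#       print "Syntax error at", p
+#
+#   parser = yacc.yacc()                 # build tables, return a Parser
+#   result = parser.parse(data, lexer=mylexer)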
diff --git a/lib/python2.7/site-packages/setoolsgui/sesearch b/lib/python2.7/site-packages/setoolsgui/sesearch
new file mode 100755
index 0000000..e861db6
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/sesearch
@@ -0,0 +1,206 @@
+#!/usr/bin/python
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with SETools. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import print_function
+import setools
+import argparse
+import sys
+import logging
+
+parser = argparse.ArgumentParser(
+ description="SELinux policy rule search tool.",
+ epilog="TE/MLS rule searches cannot be mixed with RBAC rule searches.")
+parser.add_argument("--version", action="version", version=setools.__version__)
+parser.add_argument("policy", help="Path to the SELinux policy to search.", nargs="?")
+parser.add_argument("-v", "--verbose", action="store_true",
+ help="Print extra informational messages")
+parser.add_argument("--debug", action="store_true", dest="debug", help="Enable debugging.")
+
+rtypes = parser.add_argument_group("TE Rule Types")
+rtypes.add_argument("-A", "--allow", action="append_const",
+ const="allow", dest="tertypes",
+ help="Search allow rules.")
+rtypes.add_argument("--auditallow", action="append_const",
+ const="auditallow", dest="tertypes",
+ help="Search auditallow rules.")
+rtypes.add_argument("--dontaudit", action="append_const",
+ const="dontaudit", dest="tertypes",
+ help="Search dontaudit rules.")
+rtypes.add_argument("-T", "--type_trans", action="append_const",
+ const="type_transition", dest="tertypes",
+ help="Search type_transition rules.")
+rtypes.add_argument("--type_change", action="append_const",
+ const="type_change", dest="tertypes",
+ help="Search type_change rules.")
+rtypes.add_argument("--type_member", action="append_const",
+ const="type_member", dest="tertypes",
+ help="Search type_member rules.")
+
+rbacrtypes = parser.add_argument_group("RBAC Rule Types")
+rbacrtypes.add_argument("--role_allow", action="append_const",
+ const="allow", dest="rbacrtypes",
+ help="Search role allow rules.")
+rbacrtypes.add_argument("--role_trans", action="append_const",
+ const="role_transition", dest="rbacrtypes",
+ help="Search role_transition rules.")
+
+mlsrtypes = parser.add_argument_group("MLS Rule Types")
+mlsrtypes.add_argument("--range_trans", action="append_const",
+ const="range_transition", dest="mlsrtypes",
+ help="Search range_transition rules.")
+
+expr = parser.add_argument_group("Expressions")
+expr.add_argument("-s", "--source",
+ help="Source type/role of the TE/RBAC rule.")
+expr.add_argument("-t", "--target",
+ help="Target type/role of the TE/RBAC rule.")
+expr.add_argument("-c", "--class", dest="tclass",
+ help="Comma separated list of object classes")
+expr.add_argument("-p", "--perms", metavar="PERMS",
+ help="Comma separated list of permissions.")
+expr.add_argument("-D", "--default",
+ help="Default of the rule. (type/role/range transition rules)")
+expr.add_argument("-b", "--bool", dest="boolean", metavar="BOOL",
+ help="Comma separated list of Booleans in the conditional expression.")
+
+opts = parser.add_argument_group("Search options")
+opts.add_argument("-eb", action="store_true", dest="boolean_equal",
+ help="Match Boolean list exactly instead of matching any listed Boolean.")
+opts.add_argument("-ep", action="store_true", dest="perms_equal",
+ help="Match permission set exactly instead of matching any listed permission.")
+opts.add_argument("-ds", action="store_false", dest="source_indirect",
+ help="Match source attributes directly instead of matching member types/roles.")
+opts.add_argument("-dt", action="store_false", dest="target_indirect",
+ help="Match target attributes directly instead of matching member types/roles.")
+opts.add_argument("-rs", action="store_true", dest="source_regex",
+ help="Use regular expression matching for the source type/role.")
+opts.add_argument("-rt", action="store_true", dest="target_regex",
+ help="Use regular expression matching for the target type/role.")
+opts.add_argument("-rc", action="store_true", dest="tclass_regex",
+ help="Use regular expression matching for the object class.")
+opts.add_argument("-rd", action="store_true", dest="default_regex",
+ help="Use regular expression matching for the default type/role.")
+opts.add_argument("-rb", action="store_true", dest="boolean_regex",
+ help="Use regular expression matching for Booleans.")
+
+args = parser.parse_args()
+
+if not args.tertypes and not args.mlsrtypes and not args.rbacrtypes:
+ parser.error("At least one rule type must be specified.")
+
+if args.debug:
+ logging.basicConfig(level=logging.DEBUG,
+ format='%(asctime)s|%(levelname)s|%(name)s|%(message)s')
+elif args.verbose:
+ logging.basicConfig(level=logging.INFO, format='%(message)s')
+else:
+ logging.basicConfig(level=logging.WARNING, format='%(message)s')
+
+try:
+ p = setools.SELinuxPolicy(args.policy)
+
+ if args.tertypes:
+ q = setools.TERuleQuery(p,
+ ruletype=args.tertypes,
+ source=args.source,
+ source_indirect=args.source_indirect,
+ source_regex=args.source_regex,
+ target=args.target,
+ target_indirect=args.target_indirect,
+ target_regex=args.target_regex,
+ tclass_regex=args.tclass_regex,
+ perms_equal=args.perms_equal,
+ default=args.default,
+ default_regex=args.default_regex,
+ boolean_regex=args.boolean_regex,
+ boolean_equal=args.boolean_equal)
+
+ # these are broken out from the above statement to prevent making a list
+ # with an empty string in it (split on empty string)
+ if args.tclass:
+ if args.tclass_regex:
+ q.tclass = args.tclass
+ else:
+ q.tclass = args.tclass.split(",")
+
+ if args.perms:
+ q.perms = args.perms.split(",")
+
+ if args.boolean:
+ if args.boolean_regex:
+ q.boolean = args.boolean
+ else:
+ q.boolean = args.boolean.split(",")
+
+ for r in sorted(q.results()):
+ print(r)
+
+ if args.rbacrtypes:
+ q = setools.RBACRuleQuery(p,
+ ruletype=args.rbacrtypes,
+ source=args.source,
+ source_indirect=args.source_indirect,
+ source_regex=args.source_regex,
+ target=args.target,
+ target_indirect=args.target_indirect,
+ target_regex=args.target_regex,
+ default=args.default,
+ default_regex=args.default_regex,
+ tclass_regex=args.tclass_regex)
+
+ # these are broken out from the above statement to prevent making a list
+ # with an empty string in it (split on empty string)
+ if args.tclass:
+ if args.tclass_regex:
+ q.tclass = args.tclass
+ else:
+ q.tclass = args.tclass.split(",")
+
+ for r in sorted(q.results()):
+ print(r)
+
+ if args.mlsrtypes:
+ q = setools.MLSRuleQuery(p,
+ ruletype=args.mlsrtypes,
+ source=args.source,
+ source_regex=args.source_regex,
+ target=args.target,
+ target_regex=args.target_regex,
+ tclass_regex=args.tclass_regex,
+ default=args.default)
+
+ # these are broken out from the above statement to prevent making a list
+ # with an empty string in it (split on empty string)
+ if args.tclass:
+ if args.tclass_regex:
+ q.tclass = args.tclass
+ else:
+ q.tclass = args.tclass.split(",")
+
+ for r in sorted(q.results()):
+ print(r)
+
+except Exception as err:
+ if args.debug:
+ import traceback
+ traceback.print_exc()
+ else:
+ print(err)
+
+ sys.exit(-1)
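+
+# Hypothetical invocation sketches (the type and class names are placeholders,
+# not from this script). At least one rule-type flag is required:
+#
+#   sesearch -A -s httpd_t -t var_log_t -c file -p write /path/to/policy
+#   sesearch --type_trans -s init_t /path/to/policy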
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/__init__.py b/lib/python2.7/site-packages/setoolsgui/setools/__init__.py
new file mode 100644
index 0000000..4d03553
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/__init__.py
@@ -0,0 +1,68 @@
+"""The SETools SELinux policy analysis library."""
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+#try:
+# import pkg_resources
+# # pylint: disable=no-member
+# __version__ = pkg_resources.get_distribution("setools").version
+#except ImportError: # pragma: no cover
+# __version__ = "unknown"
+__version__ = "3.3.8"
+
+# Python classes for policy representation
+from . import policyrep
+from .policyrep import SELinuxPolicy
+
+# Exceptions
+from . import exception
+
+# Component Queries
+from .boolquery import BoolQuery
+from .categoryquery import CategoryQuery
+from .commonquery import CommonQuery
+from .objclassquery import ObjClassQuery
+from .polcapquery import PolCapQuery
+from .rolequery import RoleQuery
+from .sensitivityquery import SensitivityQuery
+from .typequery import TypeQuery
+from .typeattrquery import TypeAttributeQuery
+from .userquery import UserQuery
+
+# Rule Queries
+from .mlsrulequery import MLSRuleQuery
+from .rbacrulequery import RBACRuleQuery
+from .terulequery import TERuleQuery
+
+# Constraint queries
+from .constraintquery import ConstraintQuery
+
+# In-policy Context Queries
+from .fsusequery import FSUseQuery
+from .genfsconquery import GenfsconQuery
+from .initsidquery import InitialSIDQuery
+from .netifconquery import NetifconQuery
+from .nodeconquery import NodeconQuery
+from .portconquery import PortconQuery
+
+# Information Flow Analysis
+from .infoflow import InfoFlowAnalysis
+from .permmap import PermissionMap
+
+# Domain Transition Analysis
+from .dta import DomainTransitionAnalysis
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/boolquery.py b/lib/python2.7/site-packages/setoolsgui/setools/boolquery.py
new file mode 100644
index 0000000..b70b7d5
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/boolquery.py
@@ -0,0 +1,66 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import logging
+
+from . import compquery
+from .descriptors import CriteriaDescriptor
+
+
+class BoolQuery(compquery.ComponentQuery):
+
+ """Query SELinux policy Booleans.
+
+ Parameter:
+ policy The policy to query.
+
+ Keyword Parameters/Class attributes:
+ name The Boolean name to match.
+ name_regex If true, regular expression matching
+ will be used on the Boolean name.
+ default The default state to match. If this
+ is None, the default state will not be matched.
+ """
+
+ _default = None
+
+ @property
+ def default(self):
+ return self._default
+
+ @default.setter
+ def default(self, value):
+ if value is None:
+ self._default = None
+ else:
+ self._default = bool(value)
+
+ def results(self):
+ """Generator which yields all Booleans matching the criteria."""
+ self.log.info("Generating results from {0.policy}".format(self))
+ self.log.debug("Name: {0.name!r}, regex: {0.name_regex}".format(self))
+ self.log.debug("Default: {0.default}".format(self))
+
+ for boolean in self.policy.bools():
+ if not self._match_name(boolean):
+ continue
+
+ if self.default is not None and boolean.state != self.default:
+ continue
+
+ yield boolean
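+
+# A minimal usage sketch, mirroring the query pattern the sesearch script in
+# this change uses ("secure_mode" and the policy path are hypothetical):
+#
+#   import setools
+#   p = setools.SELinuxPolicy("/path/to/policy")
+#   q = setools.BoolQuery(p, name="secure_mode", default=True)
+#   for boolean in q.results():
+#       print(boolean)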
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/categoryquery.py b/lib/python2.7/site-packages/setoolsgui/setools/categoryquery.py
new file mode 100644
index 0000000..d4d7c4c
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/categoryquery.py
@@ -0,0 +1,55 @@
+# Copyright 2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import logging
+
+from . import compquery
+from . import mixins
+
+
+class CategoryQuery(mixins.MatchAlias, compquery.ComponentQuery):
+
+ """
+ Query MLS categories.
+
+ Parameter:
+ policy The policy to query.
+
+ Keyword Parameters/Class attributes:
+ name The name of the category to match.
+ name_regex If true, regular expression matching will
+ be used for matching the name.
+ alias The alias name to match.
+ alias_regex If true, regular expression matching
+ will be used on the alias names.
+ """
+
+ def results(self):
+ """Generator which yields all matching categories."""
+ self.log.info("Generating results from {0.policy}".format(self))
+ self.log.debug("Name: {0.name!r}, regex: {0.name_regex}".format(self))
+ self.log.debug("Alias: {0.alias}, regex: {0.alias_regex}".format(self))
+
+ for cat in self.policy.categories():
+ if not self._match_name(cat):
+ continue
+
+ if not self._match_alias(cat):
+ continue
+
+ yield cat
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/commonquery.py b/lib/python2.7/site-packages/setoolsgui/setools/commonquery.py
new file mode 100644
index 0000000..e105ccb
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/commonquery.py
@@ -0,0 +1,60 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import logging
+import re
+
+from . import compquery, mixins
+
+
+class CommonQuery(mixins.MatchPermission, compquery.ComponentQuery):
+
+ """
+ Query common permission sets.
+
+ Parameter:
+ policy The policy to query.
+
+ Keyword Parameters/Class attributes:
+ name The name of the common to match.
+ name_regex If true, regular expression matching will
+ be used for matching the name.
+ perms The permissions to match.
+ perms_equal If true, only commons with permission sets
+ that are equal to the criteria will
+ match. Otherwise, any intersection
+ will match.
+ perms_regex If true, regular expression matching will be used
+ on the permission names instead of set logic.
+ """
+
+ def results(self):
+ """Generator which yields all matching commons."""
+ self.log.info("Generating results from {0.policy}".format(self))
+ self.log.debug("Name: {0.name!r}, regex: {0.name_regex}".format(self))
+ self.log.debug("Perms: {0.perms!r}, regex: {0.perms_regex}, eq: {0.perms_equal}".
+ format(self))
+
+ for com in self.policy.commons():
+ if not self._match_name(com):
+ continue
+
+ if not self._match_perms(com):
+ continue
+
+ yield com
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/compquery.py b/lib/python2.7/site-packages/setoolsgui/setools/compquery.py
new file mode 100644
index 0000000..3d8851a
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/compquery.py
@@ -0,0 +1,39 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+# pylint: disable=no-member,attribute-defined-outside-init,abstract-method
+import re
+
+from . import query
+from .descriptors import CriteriaDescriptor
+
+
+class ComponentQuery(query.PolicyQuery):
+
+ """Base class for SETools component queries."""
+
+ name = CriteriaDescriptor("name_regex")
+ name_regex = False
+
+ def _match_name(self, obj):
+ """Match the object to the name criteria."""
+ if not self.name:
+ # if there is no criteria, everything matches.
+ return True
+
+ return self._match_regex(obj, self.name, self.name_regex)
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/constraintquery.py b/lib/python2.7/site-packages/setoolsgui/setools/constraintquery.py
new file mode 100644
index 0000000..82a6fc2
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/constraintquery.py
@@ -0,0 +1,142 @@
+# Copyright 2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import logging
+import re
+
+from . import mixins, query
+from .descriptors import CriteriaDescriptor, CriteriaSetDescriptor, RuletypeDescriptor
+from .policyrep.exception import ConstraintUseError
+
+
+class ConstraintQuery(mixins.MatchObjClass, mixins.MatchPermission, query.PolicyQuery):
+
+ """
+ Query constraint rules, (mls)constrain/(mls)validatetrans.
+
+ Parameter:
+ policy The policy to query.
+
+ Keyword Parameters/Class attributes:
+ ruletype The list of rule type(s) to match.
+ tclass The object class(es) to match.
+ tclass_regex If true, use a regular expression for
+ matching the rule's object class.
+ perms The permission(s) to match.
+ perms_equal If true, the permission set of the rule
+ must exactly match the permissions
+ criteria. If false, any set intersection
+ will match.
+ perms_regex If true, regular expression matching will be used
+ on the permission names instead of set logic.
+ role The name of the role to match in the
+ constraint expression.
+ role_indirect If true, members of an attribute will be
+ matched rather than the attribute itself.
+ role_regex If true, regular expression matching will
+ be used on the role.
+ type_ The name of the type/attribute to match in the
+ constraint expression.
+ type_indirect If true, members of an attribute will be
+ matched rather than the attribute itself.
+ type_regex If true, regular expression matching will
+ be used on the type/attribute.
+ user The name of the user to match in the
+ constraint expression.
+ user_regex If true, regular expression matching will
+ be used on the user.
+ """
+
+ ruletype = RuletypeDescriptor("validate_constraint_ruletype")
+ user = CriteriaDescriptor("user_regex", "lookup_user")
+ user_regex = False
+ role = CriteriaDescriptor("role_regex", "lookup_role")
+ role_regex = False
+ role_indirect = True
+ type_ = CriteriaDescriptor("type_regex", "lookup_type_or_attr")
+ type_regex = False
+ type_indirect = True
+
+ def _match_expr(self, expr, criteria, indirect, regex):
+ """
+ Match roles/types/users in a constraint expression,
+ optionally by expanding the contents of attributes.
+
+ Parameters:
+ expr The expression to match.
+ criteria The criteria to match.
+ indirect If attributes in the expression should be expanded.
+ regex If regular expression matching should be used.
+ """
+
+ if indirect:
+ obj = set()
+ for item in expr:
+ obj.update(item.expand())
+ else:
+ obj = expr
+
+ return self._match_in_set(obj, criteria, regex)
+
+ def results(self):
+ """Generator which yields all matching constraints rules."""
+ self.log.info("Generating results from {0.policy}".format(self))
+ self.log.debug("Ruletypes: {0.ruletype}".format(self))
+ self.log.debug("Class: {0.tclass!r}, regex: {0.tclass_regex}".format(self))
+ self.log.debug("Perms: {0.perms!r}, regex: {0.perms_regex}, eq: {0.perms_equal}".
+ format(self))
+ self.log.debug("User: {0.user!r}, regex: {0.user_regex}".format(self))
+ self.log.debug("Role: {0.role!r}, regex: {0.role_regex}".format(self))
+ self.log.debug("Type: {0.type_!r}, regex: {0.type_regex}".format(self))
+
+ for c in self.policy.constraints():
+ if self.ruletype:
+ if c.ruletype not in self.ruletype:
+ continue
+
+ if not self._match_object_class(c):
+ continue
+
+ try:
+ if not self._match_perms(c):
+ continue
+ except ConstraintUseError:
+ continue
+
+ if self.role and not self._match_expr(
+ c.roles,
+ self.role,
+ self.role_indirect,
+ self.role_regex):
+ continue
+
+ if self.type_ and not self._match_expr(
+ c.types,
+ self.type_,
+ self.type_indirect,
+ self.type_regex):
+ continue
+
+ if self.user and not self._match_expr(
+ c.users,
+ self.user,
+ False,
+ self.user_regex):
+ continue
+
+ yield c
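+
+# A usage sketch (criteria are hypothetical; assumes a loaded SELinuxPolicy p):
+#
+#   q = ConstraintQuery(p, ruletype=["constrain"], tclass=["file"])
+#   for constraint in q.results():
+#       print(constraint)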
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/contextquery.py b/lib/python2.7/site-packages/setoolsgui/setools/contextquery.py
new file mode 100644
index 0000000..5ce1632
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/contextquery.py
@@ -0,0 +1,98 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+# pylint: disable=attribute-defined-outside-init,no-member
+import re
+
+from . import query
+from .descriptors import CriteriaDescriptor
+
+
+class ContextQuery(query.PolicyQuery):
+
+ """
+ Base class for SETools in-policy labeling/context queries.
+
+ Parameter:
+ policy The policy to query.
+
+ Keyword Parameters/Class attributes:
+ context The object to match.
+ user The user to match in the context.
+ user_regex If true, regular expression matching
+ will be used on the user.
+ role The role to match in the context.
+ role_regex If true, regular expression matching
+ will be used on the role.
+ type_ The type to match in the context.
+ type_regex If true, regular expression matching
+ will be used on the type.
+ range_ The range to match in the context.
+ range_subset If true, the criteria will match if it
+ is a subset of the context's range.
+ range_overlap If true, the criteria will match if it
+ overlaps any of the context's range.
+ range_superset If true, the criteria will match if it
+ is a superset of the context's range.
+ range_proper If true, use proper superset/subset
+ on range matching operations.
+ No effect if not using set operations.
+ """
+
+ user = CriteriaDescriptor("user_regex", "lookup_user")
+ user_regex = False
+ role = CriteriaDescriptor("role_regex", "lookup_role")
+ role_regex = False
+ type_ = CriteriaDescriptor("type_regex", "lookup_type")
+ type_regex = False
+ range_ = CriteriaDescriptor(lookup_function="lookup_range")
+ range_overlap = False
+ range_subset = False
+ range_superset = False
+ range_proper = False
+
+ def _match_context(self, context):
+
+ if self.user and not query.PolicyQuery._match_regex(
+ context.user,
+ self.user,
+ self.user_regex):
+ return False
+
+ if self.role and not query.PolicyQuery._match_regex(
+ context.role,
+ self.role,
+ self.role_regex):
+ return False
+
+ if self.type_ and not query.PolicyQuery._match_regex(
+ context.type_,
+ self.type_,
+ self.type_regex):
+ return False
+
+ if self.range_ and not query.PolicyQuery._match_range(
+ context.range_,
+ self.range_,
+ self.range_subset,
+ self.range_overlap,
+ self.range_superset,
+ self.range_proper):
+ return False
+
+ return True
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/descriptors.py b/lib/python2.7/site-packages/setoolsgui/setools/descriptors.py
new file mode 100644
index 0000000..eab9210
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/descriptors.py
@@ -0,0 +1,230 @@
+# Copyright 2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+"""
+SETools descriptors.
+
+These classes override how a class's attributes are retrieved, set, and
+deleted. This is the same mechanism the @property decorator uses.
+
+See https://docs.python.org/3/howto/descriptor.html
+for more details.
+"""
+
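+# A minimal, self-contained sketch of that protocol (standard Python, not part
+# of SETools): a descriptor intercepts attribute access on instances of the
+# owning class.
+#
+#   class Upper(object):
+#       """Store per-instance values, reading them back uppercased."""
+#       def __init__(self):
+#           self.values = {}
+#       def __get__(self, obj, objtype=None):
+#           if obj is None:
+#               return self
+#           return self.values.get(obj, "").upper()
+#       def __set__(self, obj, value):
+#           self.values[obj] = value
+#
+#   class Example(object):
+#       name = Upper()     # descriptor declared as a class attribute
+#
+#   e = Example()
+#   e.name = "hello"
+#   print e.name           # "HELLO"
+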
+import re
+from collections import defaultdict
+from weakref import WeakKeyDictionary
+
+#
+# Query criteria descriptors
+#
+# Implementation note: if the name_regex attribute value
+# is changed the criteria must be reset.
+#
+
+
+class CriteriaDescriptor(object):
+
+ """
+ Single item criteria descriptor.
+
+ Parameters:
+ name_regex The name of the instance's regex setting attribute;
+ used as name_regex below. If unset,
+ regular expressions will never be used.
+ lookup_function The name of the SELinuxPolicy lookup function,
+ e.g. lookup_type or lookup_boolean.
+ default_value The default value of the criteria. The default
+ is None.
+
+ Read-only instance attribute use (obj parameter):
+ policy The instance of SELinuxPolicy
+ name_regex This attribute is read to determine if
+ the criteria should be looked up or
+ compiled into a regex. If the attribute
+ does not exist, False is assumed.
+ """
+
+ def __init__(self, name_regex=None, lookup_function=None, default_value=None):
+ assert name_regex or lookup_function, "A simple attribute should be used if there is " \
+ "no regex nor lookup function."
+ self.regex = name_regex
+ self.default_value = default_value
+ self.lookup_function = lookup_function
+
+ # use weak references so instances can be
+ # garbage collected, rather than unnecessarily
+ # kept around due to this descriptor.
+ self.instances = WeakKeyDictionary()
+
+ def __get__(self, obj, objtype=None):
+ if obj is None:
+ return self
+
+ return self.instances.setdefault(obj, self.default_value)
+
+ def __set__(self, obj, value):
+ if not value:
+ self.instances[obj] = None
+ elif self.regex and getattr(obj, self.regex, False):
+ self.instances[obj] = re.compile(value)
+ elif self.lookup_function:
+ lookup = getattr(obj.policy, self.lookup_function)
+ self.instances[obj] = lookup(value)
+ else:
+ self.instances[obj] = value
+
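+# Behavior sketch for a hypothetical query instance q whose class declares
+# name = CriteriaDescriptor("name_regex", "lookup_type"):
+#
+#   q.name_regex = True
+#   q.name = "^httpd"     # compiled with re.compile()
+#   q.name_regex = False
+#   q.name = "httpd_t"    # passed through policy.lookup_type()
+#   q.name = ""           # any falsy value resets the criteria to None
+#
+# Per the implementation note above, changing name_regex does not recompute an
+# already-set value; the criteria must be assigned again.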
+
+class CriteriaSetDescriptor(CriteriaDescriptor):
+
+ """Descriptor for a set of criteria."""
+
+ def __set__(self, obj, value):
+ if not value:
+ self.instances[obj] = None
+ elif self.regex and getattr(obj, self.regex, False):
+ self.instances[obj] = re.compile(value)
+ elif self.lookup_function:
+ lookup = getattr(obj.policy, self.lookup_function)
+ self.instances[obj] = set(lookup(v) for v in value)
+ else:
+ self.instances[obj] = set(value)
+
+
+class RuletypeDescriptor(object):
+
+ """
+ Descriptor for a list of rule types.
+
+ Parameters:
+ validator The name of the SELinuxPolicy ruletype
+ validator function, e.g. validate_te_ruletype.
+ The default value of the criteria is None.
+
+ Read-only instance attribute use (obj parameter):
+ policy The instance of SELinuxPolicy
+ """
+
+ def __init__(self, validator):
+ self.validator = validator
+
+ # use weak references so instances can be
+ # garbage collected, rather than unnecessarily
+ # kept around due to this descriptor.
+ self.instances = WeakKeyDictionary()
+
+ def __get__(self, obj, objtype=None):
+ if obj is None:
+ return self
+
+ return self.instances.setdefault(obj, None)
+
+ def __set__(self, obj, value):
+ if value:
+ validate = getattr(obj.policy, self.validator)
+ validate(value)
+ self.instances[obj] = value
+ else:
+ self.instances[obj] = None
+
+
+#
+# NetworkX Graph Descriptors
+#
+# These descriptors are used to simplify all
+# of the dictionary use in the NetworkX graph.
+#
+
+
+class NetworkXGraphEdgeDescriptor(object):
+
+ """
+ Descriptor base class for NetworkX graph edge attributes.
+
+ Parameter:
+ propname The edge property name.
+
+ Instance class attribute use (obj parameter):
+ G The NetworkX graph
+ source The edge's source node
+ target The edge's target node
+ """
+
+ def __init__(self, propname):
+ self.name = propname
+
+ def __get__(self, obj, objtype=None):
+ if obj is None:
+ return self
+
+ return obj.G[obj.source][obj.target][self.name]
+
+ def __set__(self, obj, value):
+ raise NotImplementedError
+
+ def __delete__(self, obj):
+ raise NotImplementedError
+
+
+class EdgeAttrDict(NetworkXGraphEdgeDescriptor):
+
+ """A descriptor for edge attributes that are dictionaries."""
+
+ def __set__(self, obj, value):
+ # None is a special value to initialize the attribute
+ if value is None:
+ obj.G[obj.source][obj.target][self.name] = defaultdict(list)
+ else:
+ raise ValueError("{0} dictionaries should not be assigned directly".format(self.name))
+
+ def __delete__(self, obj):
+ obj.G[obj.source][obj.target][self.name].clear()
+
+
+class EdgeAttrIntMax(NetworkXGraphEdgeDescriptor):
+
+ """
+ A descriptor for edge attributes that are non-negative integers that always
+ keep the max assigned value until re-initialized.
+ """
+
+ def __set__(self, obj, value):
+ # None is a special value to initialize
+ if value is None:
+ obj.G[obj.source][obj.target][self.name] = 0
+ else:
+ current_value = obj.G[obj.source][obj.target][self.name]
+ obj.G[obj.source][obj.target][self.name] = max(current_value, value)
+
+
+class EdgeAttrList(NetworkXGraphEdgeDescriptor):
+
+ """A descriptor for edge attributes that are lists."""
+
+ def __set__(self, obj, value):
+ # None is a special value to initialize
+ if value is None:
+ obj.G[obj.source][obj.target][self.name] = []
+ else:
+ raise ValueError("{0} lists should not be assigned directly".format(self.name))
+
+ def __delete__(self, obj):
+ # in Python3 a .clear() function was added for lists
+ # keep this implementation for Python 2 compat
+ del obj.G[obj.source][obj.target][self.name][:]
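+
+# A toy sketch of the edge-attribute descriptors above (the DemoEdge class is
+# hypothetical; requires networkx):
+#
+#   import networkx as nx
+#
+#   class DemoEdge(object):
+#       rules = EdgeAttrList('rules')
+#       def __init__(self, G, source, target):
+#           self.G, self.source, self.target = G, source, target
+#
+#   G = nx.DiGraph()
+#   G.add_edge("a", "b")
+#   e = DemoEdge(G, "a", "b")
+#   e.rules = None                       # None initializes the empty list
+#   G["a"]["b"]["rules"].append("rule")  # the data lives in the graph itself
+#   del e.rules                          # clears the list in place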
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/dta.py b/lib/python2.7/site-packages/setoolsgui/setools/dta.py
new file mode 100644
index 0000000..271efc4
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/dta.py
@@ -0,0 +1,603 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import itertools
+import logging
+from collections import defaultdict, namedtuple
+
+import networkx as nx
+from networkx.exception import NetworkXError, NetworkXNoPath
+
+from .descriptors import EdgeAttrDict, EdgeAttrList
+
+__all__ = ['DomainTransitionAnalysis']
+
+# Return values for the analysis
+# are in the following tuple formats:
+step_output = namedtuple("step", ["source",
+ "target",
+ "transition",
+ "entrypoints",
+ "setexec",
+ "dyntransition",
+ "setcurrent"])
+
+entrypoint_output = namedtuple("entrypoints", ["name",
+ "entrypoint",
+ "execute",
+ "type_transition"])
+
+
+class DomainTransitionAnalysis(object):
+
+ """Domain transition analysis."""
+
+ def __init__(self, policy, reverse=False, exclude=None):
+ """
+ Parameter:
+ policy The policy to analyze.
+ """
+ self.log = logging.getLogger(self.__class__.__name__)
+
+ self.policy = policy
+ self.exclude = exclude
+ self.reverse = reverse
+ self.rebuildgraph = True
+ self.rebuildsubgraph = True
+ self.G = nx.DiGraph()
+ self.subG = None
+
+ @property
+ def reverse(self):
+ return self._reverse
+
+ @reverse.setter
+ def reverse(self, direction):
+ self._reverse = bool(direction)
+ self.rebuildsubgraph = True
+
+ @property
+ def exclude(self):
+ return self._exclude
+
+ @exclude.setter
+ def exclude(self, types):
+ if types:
+ self._exclude = [self.policy.lookup_type(t) for t in types]
+ else:
+ self._exclude = None
+
+ self.rebuildsubgraph = True
+
+ def shortest_path(self, source, target):
+ """
+ Generator which yields one shortest domain transition path
+ between the source and target types (there may be more).
+
+ Parameters:
+ source The source type.
+ target The target type.
+
+ Yield: generator(steps)
+
+ steps A generator that returns the tuple of
+ source, target, and rules for each
+ domain transition.
+ """
+ s = self.policy.lookup_type(source)
+ t = self.policy.lookup_type(target)
+
+ if self.rebuildsubgraph:
+ self._build_subgraph()
+
+ self.log.info("Generating one shortest path from {0} to {1}...".format(s, t))
+
+ try:
+ yield self.__generate_steps(nx.shortest_path(self.subG, s, t))
+ except (NetworkXNoPath, NetworkXError):
+ # NetworkXError: the type is valid but not in graph, e.g. excluded
+ # NetworkXNoPath: no paths or the target type is
+ # not in the graph
+ pass
+
+ def all_paths(self, source, target, maxlen=2):
+ """
+ Generator which yields all domain transition paths between
+ the source and target up to the specified maximum path
+ length.
+
+ Parameters:
+ source The source type.
+ target The target type.
+ maxlen Maximum length of paths.
+
+ Yield: generator(steps)
+
+ steps A generator that returns the tuple of
+ source, target, and rules for each
+ domain transition.
+ """
+ if maxlen < 1:
+ raise ValueError("Maximum path length must be positive.")
+
+ s = self.policy.lookup_type(source)
+ t = self.policy.lookup_type(target)
+
+ if self.rebuildsubgraph:
+ self._build_subgraph()
+
+ self.log.info("Generating all paths from {0} to {1}, max len {2}...".format(s, t, maxlen))
+
+ try:
+ for path in nx.all_simple_paths(self.subG, s, t, maxlen):
+ yield self.__generate_steps(path)
+ except (NetworkXNoPath, NetworkXError):
+ # NetworkXError: the type is valid but not in graph, e.g. excluded
+ # NetworkXNoPath: no paths or the target type is
+ # not in the graph
+ pass
+
+ def all_shortest_paths(self, source, target):
+ """
+ Generator which yields all shortest domain transition paths
+ between the source and target types.
+
+ Parameters:
+ source The source type.
+ target The target type.
+
+ Yield: generator(steps)
+
+ steps A generator that returns the tuple of
+ source, target, and rules for each
+ domain transition.
+ """
+ s = self.policy.lookup_type(source)
+ t = self.policy.lookup_type(target)
+
+ if self.rebuildsubgraph:
+ self._build_subgraph()
+
+ self.log.info("Generating all shortest paths from {0} to {1}...".format(s, t))
+
+ try:
+ for path in nx.all_shortest_paths(self.subG, s, t):
+ yield self.__generate_steps(path)
+ except (NetworkXNoPath, NetworkXError, KeyError):
+ # NetworkXError: the type is valid but not in graph, e.g. excluded
+ # NetworkXNoPath: no paths or the target type is
+ # not in the graph
+ # KeyError: work around NetworkX bug
+ # when the source node is not in the graph
+ pass
+
+ def transitions(self, type_):
+ """
+ Generator which yields all domain transitions out of a
+ specified source type.
+
+ Parameters:
+ type_ The starting type.
+
+ Yield: generator(steps)
+
+ steps A generator that returns the tuple of
+ source, target, and rules for each
+ domain transition.
+ """
+ s = self.policy.lookup_type(type_)
+
+ if self.rebuildsubgraph:
+ self._build_subgraph()
+
+ self.log.info("Generating all transitions {1} {0}".
+ format(s, "in to" if self.reverse else "out from"))
+
+ try:
+ for source, target in self.subG.out_edges_iter(s):
+ edge = Edge(self.subG, source, target)
+
+ if self.reverse:
+ real_source, real_target = target, source
+ else:
+ real_source, real_target = source, target
+
+ yield step_output(real_source, real_target,
+ edge.transition,
+ self.__generate_entrypoints(edge),
+ edge.setexec,
+ edge.dyntransition,
+ edge.setcurrent)
+
+ except NetworkXError:
+ # NetworkXError: the type is valid but not in graph, e.g. excluded
+ pass
+
+ def get_stats(self): # pragma: no cover
+ """
+ Get the domain transition graph statistics.
+
+ Return: tuple(nodes, edges)
+
+ nodes The number of nodes (types) in the graph.
+ edges The number of edges (domain transitions) in the graph.
+ """
+ return (self.G.number_of_nodes(), self.G.number_of_edges())
+
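+ # A usage sketch of the public API above (the type names are hypothetical):
+ #
+ #   dta = DomainTransitionAnalysis(p)
+ #   for path in dta.shortest_path("init_t", "httpd_t"):
+ #       for step in path:
+ #           print step.source, "->", step.target
+ #           for rule in step.transition:
+ #               print rule
+ #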
+ #
+ # Internal functions follow
+ #
+ @staticmethod
+ def __generate_entrypoints(edge):
+ """
+ Generator which yields the entrypoint, execute, and
+ type_transition rules for each entrypoint.
+
+ Parameter:
+ edge The edge whose entrypoint, execute, and
+ type_transition dictionaries are read.
+
+ Yield: tuple(type, entry, exec, trans)
+
+ type The entrypoint type.
+ entry The list of entrypoint rules.
+ exec The list of execute rules.
+ trans The list of type_transition rules.
+ """
+ for e in edge.entrypoint:
+ yield entrypoint_output(e, edge.entrypoint[e], edge.execute[e], edge.type_transition[e])
+
+ def __generate_steps(self, path):
+ """
+ Generator which yields the source, target, and associated rules
+ for each domain transition.
+
+ Parameter:
+ path A list of graph node names representing an information flow path.
+
+ Yield: tuple(source, target, transition, entrypoints,
+ setexec, dyntransition, setcurrent)
+
+ source The source type for this step of the domain transition.
+ target The target type for this step of the domain transition.
+ transition The list of transition rules.
+ entrypoints Generator which yields entrypoint-related rules.
+ setexec The list of setexec rules.
+ dyntransition The list of dynamic transition rules.
+ setcurrent The list of setcurrent rules.
+ """
+
+ for s in range(1, len(path)):
+ source = path[s - 1]
+ target = path[s]
+ edge = Edge(self.subG, source, target)
+
+ # Yield the actual source and target.
+ # The above perspective is reversed
+ # if the graph has been reversed.
+ if self.reverse:
+ real_source, real_target = target, source
+ else:
+ real_source, real_target = source, target
+
+ yield step_output(real_source, real_target,
+ edge.transition,
+ self.__generate_entrypoints(edge),
+ edge.setexec,
+ edge.dyntransition,
+ edge.setcurrent)
+
+ #
+ # Graph building functions
+ #
+
+ # Domain transition requirements:
+ #
+ # Standard transitions a->b:
+ # allow a b:process transition;
+ # allow a b_exec:file execute;
+ # allow b b_exec:file entrypoint;
+ #
+ # and at least one of:
+ # allow a self:process setexec;
+ # type_transition a b_exec:process b;
+ #
+ # Dynamic transition x->y:
+ # allow x y:process dyntransition;
+ # allow x self:process setcurrent;
+ #
+ # Algorithm summary:
+ # 1. iterate over all rules
+ # 1. skip non allow/type_transition rules
+ # 2. if process transition or dyntransition, create edge,
+ # initialize rule lists, add the (dyn)transition rule
+ # 3. if process setexec or setcurrent, add to appropriate dict
+ # keyed on the subject
+ # 4. if file exec, entrypoint, or type_transition:process,
+ # add to appropriate dict keyed on subject,object.
+ # 2. Iterate over all graph edges:
+ # 1. if there is a transition rule (else add to invalid
+ # transition list):
+ # 1. use set intersection to find matching exec
+ # and entrypoint rules. If none, add to invalid
+ # transition list.
+ # 2. for each valid entrypoint, add rules to the
+ # edge's lists if there is either a
+ # type_transition for it or the source process
+ # has setexec permissions.
+ # 3. If there are neither type_transitions nor
+ # setexec permissions, add to the invalid
+ # transition list
+ # 2. if there is a dyntransition rule (else add to invalid
+ # dyntrans list):
+ # 1. If the source has a setcurrent rule, add it
+ # to the edge's list, else add to invalid
+ # dyntransition list.
+ # 3. Iterate over all graph edges:
+ # 1. if the edge has an invalid trans and dyntrans, delete
+ # the edge.
+ # 2. if the edge has an invalid trans, clear the related
+ # lists on the edge.
+ # 3. if the edge has an invalid dyntrans, clear the related
+ # lists on the edge.
+ #
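+ # A worked example under hypothetical types: init_t transitions to httpd_t
+ # through the entrypoint type httpd_exec_t. Per the requirements above, the
+ # type_transition rule could instead be replaced by
+ # "allow init_t self:process setexec;":
+ #
+ # allow init_t httpd_t:process transition;
+ # allow init_t httpd_exec_t:file execute;
+ # allow httpd_t httpd_exec_t:file entrypoint;
+ # type_transition init_t httpd_exec_t:process httpd_t;
+ #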
+ def _build_graph(self):
+ self.G.clear()
+
+ self.log.info("Building graph from {0}...".format(self.policy))
+
+ # hash tables keyed on domain type
+ setexec = defaultdict(list)
+ setcurrent = defaultdict(list)
+
+ # hash tables keyed on (domain, entrypoint file type)
+ # the parameter for defaultdict has to be callable
+ # hence the lambda for the nested defaultdict
+ execute = defaultdict(lambda: defaultdict(list))
+ entrypoint = defaultdict(lambda: defaultdict(list))
+
+ # hash table keyed on (domain, entrypoint, target domain)
+ type_trans = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
+
+ for rule in self.policy.terules():
+ if rule.ruletype == "allow":
+ if rule.tclass not in ["process", "file"]:
+ continue
+
+ perms = rule.perms
+
+ if rule.tclass == "process":
+ if "transition" in perms:
+ for s, t in itertools.product(rule.source.expand(), rule.target.expand()):
+ # only add edges if they actually
+ # transition to a new type
+ if s != t:
+ edge = Edge(self.G, s, t, create=True)
+ edge.transition.append(rule)
+
+ if "dyntransition" in perms:
+ for s, t in itertools.product(rule.source.expand(), rule.target.expand()):
+ # only add edges if they actually
+ # transition to a new type
+ if s != t:
+ e = Edge(self.G, s, t, create=True)
+ e.dyntransition.append(rule)
+
+ if "setexec" in perms:
+ for s in rule.source.expand():
+ setexec[s].append(rule)
+
+ if "setcurrent" in perms:
+ for s in rule.source.expand():
+ setcurrent[s].append(rule)
+
+ else:
+ if "execute" in perms:
+ for s, t in itertools.product(
+ rule.source.expand(),
+ rule.target.expand()):
+ execute[s][t].append(rule)
+
+ if "entrypoint" in perms:
+ for s, t in itertools.product(rule.source.expand(), rule.target.expand()):
+ entrypoint[s][t].append(rule)
+
+ elif rule.ruletype == "type_transition":
+ if rule.tclass != "process":
+ continue
+
+ d = rule.default
+ for s, t in itertools.product(rule.source.expand(), rule.target.expand()):
+ type_trans[s][t][d].append(rule)
+
+ invalid_edge = []
+ clear_transition = []
+ clear_dyntransition = []
+
+ for s, t in self.G.edges_iter():
+ edge = Edge(self.G, s, t)
+ invalid_trans = False
+ invalid_dyntrans = False
+
+ if edge.transition:
+ # get matching domain exec w/entrypoint type
+ entry = set(entrypoint[t].keys())
+ exe = set(execute[s].keys())
+ match = entry.intersection(exe)
+
+ if not match:
+ # there are no valid entrypoints
+ invalid_trans = True
+ else:
+ # TODO try to improve the
+ # efficiency in this loop
+ for m in match:
+ if s in setexec or type_trans[s][m]:
+ # add key for each entrypoint
+ edge.entrypoint[m] += entrypoint[t][m]
+ edge.execute[m] += execute[s][m]
+
+ if type_trans[s][m][t]:
+ edge.type_transition[m] += type_trans[s][m][t]
+
+ if s in setexec:
+ edge.setexec.extend(setexec[s])
+
+ if not edge.setexec and not edge.type_transition:
+ invalid_trans = True
+ else:
+ invalid_trans = True
+
+ if edge.dyntransition:
+ if s in setcurrent:
+ edge.setcurrent.extend(setcurrent[s])
+ else:
+ invalid_dyntrans = True
+ else:
+ invalid_dyntrans = True
+
+ # cannot change the edges while iterating over them,
+ # so keep appropriate lists
+ if invalid_trans and invalid_dyntrans:
+ invalid_edge.append(edge)
+ elif invalid_trans:
+ clear_transition.append(edge)
+ elif invalid_dyntrans:
+ clear_dyntransition.append(edge)
+
+ # Remove invalid transitions
+ self.G.remove_edges_from(invalid_edge)
+ for edge in clear_transition:
+ # if only the regular transition is invalid,
+ # clear the relevant lists
+ del edge.transition
+ del edge.execute
+ del edge.entrypoint
+ del edge.type_transition
+ del edge.setexec
+ for edge in clear_dyntransition:
+ # if only the dynamic transition is invalid,
+ # clear the relevant lists
+ del edge.dyntransition
+ del edge.setcurrent
+
+ self.rebuildgraph = False
+ self.rebuildsubgraph = True
+ self.log.info("Completed building graph.")
+
+ def __remove_excluded_entrypoints(self):
+ invalid_edges = []
+ for source, target in self.subG.edges_iter():
+ edge = Edge(self.subG, source, target)
+ entrypoints = set(edge.entrypoint)
+ entrypoints.intersection_update(self.exclude)
+
+ if not entrypoints:
+ # short circuit if there are no
+ # excluded entrypoint types on
+ # this edge.
+ continue
+
+ for e in entrypoints:
+ # clear the entrypoint data
+ del edge.entrypoint[e]
+ del edge.execute[e]
+
+ try:
+ del edge.type_transition[e]
+ except KeyError: # setexec
+ pass
+
+ # cannot delete the edges while iterating over them
+ if not edge.entrypoint and not edge.dyntransition:
+ invalid_edges.append(edge)
+
+ self.subG.remove_edges_from(invalid_edges)
+
+ def _build_subgraph(self):
+ if self.rebuildgraph:
+ self._build_graph()
+
+ self.log.info("Building subgraph.")
+ self.log.debug("Excluding {0}".format(self.exclude))
+ self.log.debug("Reverse {0}".format(self.reverse))
+
+ # reverse graph for reverse DTA
+ if self.reverse:
+ self.subG = self.G.reverse(copy=True)
+ else:
+ self.subG = self.G.copy()
+
+ if self.exclude:
+ # delete excluded domains from subgraph
+ self.subG.remove_nodes_from(self.exclude)
+
+ # delete excluded entrypoints from subgraph
+ self.__remove_excluded_entrypoints()
+
+ self.rebuildsubgraph = False
+ self.log.info("Completed building subgraph.")
+
+
+class Edge(object):
+
+ """
+ A graph edge. Also used for returning domain transition steps.
+
+ Parameters:
+ source The source type of the edge.
+ target The target type of the edge.
+
+ Keyword Parameters:
+ create (T/F) create the edge if it does not exist.
+ The default is False.
+ """
+
+ transition = EdgeAttrList('transition')
+ setexec = EdgeAttrList('setexec')
+ dyntransition = EdgeAttrList('dyntransition')
+ setcurrent = EdgeAttrList('setcurrent')
+ entrypoint = EdgeAttrDict('entrypoint')
+ execute = EdgeAttrDict('execute')
+ type_transition = EdgeAttrDict('type_transition')
+
+ def __init__(self, graph, source, target, create=False):
+ self.G = graph
+ self.source = source
+ self.target = target
+
+ # a bit of a hack to make Edges work
+ # in NetworkX functions that work on
+ # 2-tuples of (source, target)
+ # (see __getitem__ below)
+ self.st_tuple = (source, target)
+
+ if not self.G.has_edge(source, target):
+ if not create:
+ raise ValueError("Edge does not exist in graph")
+ else:
+ self.G.add_edge(source, target)
+ self.transition = None
+ self.entrypoint = None
+ self.execute = None
+ self.type_transition = None
+ self.setexec = None
+ self.dyntransition = None
+ self.setcurrent = None
+
+ def __getitem__(self, key):
+ return self.st_tuple[key]
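+
+# Because __getitem__ exposes the (source, target) tuple, an Edge can stand in
+# for a 2-tuple in NetworkX calls (sketch):
+#
+#   edge = Edge(G, source, target, create=True)
+#   s, t = edge[0], edge[1]
+#   G.remove_edges_from([edge])   # as _build_graph() does with invalid edges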
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/exception.py b/lib/python2.7/site-packages/setoolsgui/setools/exception.py
new file mode 100644
index 0000000..c3505cd
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/exception.py
@@ -0,0 +1,62 @@
+# Copyright 2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+
+#
+# Base class for exceptions
+#
+
+
+class SEToolsException(Exception):
+
+ """Base class for all SETools exceptions."""
+ pass
+
+#
+# Permission map exceptions
+#
+
+
+class PermissionMapException(SEToolsException):
+
+ """Base class for all permission map exceptions."""
+ pass
+
+
+class PermissionMapParseError(PermissionMapException):
+
+ """Exception for parse errors while reading permission map files."""
+ pass
+
+
+class RuleTypeError(PermissionMapException):
+
+ """Exception for using rules with incorrect rule type."""
+ pass
+
+
+class UnmappedClass(PermissionMapException):
+
+ """Exception for classes that are unmapped"""
+ pass
+
+
+class UnmappedPermission(PermissionMapException):
+
+ """Exception for permissions that are unmapped"""
+ pass
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/fsusequery.py b/lib/python2.7/site-packages/setoolsgui/setools/fsusequery.py
new file mode 100644
index 0000000..6825a45
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/fsusequery.py
@@ -0,0 +1,87 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import logging
+import re
+
+from . import contextquery
+from .descriptors import CriteriaDescriptor, CriteriaSetDescriptor
+
+
+class FSUseQuery(contextquery.ContextQuery):
+
+ """
+ Query fs_use_* statements.
+
+ Parameter:
+ policy The policy to query.
+
+ Keyword Parameters/Class attributes:
+ ruletype The rule type(s) to match.
+ fs The criteria to match the file system type.
+ fs_regex If true, regular expression matching
+ will be used on the file system type.
+ user The criteria to match the context's user.
+ user_regex If true, regular expression matching
+ will be used on the user.
+ role The criteria to match the context's role.
+ role_regex If true, regular expression matching
+ will be used on the role.
+ type_ The criteria to match the context's type.
+ type_regex If true, regular expression matching
+ will be used on the type.
+ range_ The criteria to match the context's range.
+ range_subset If true, the criteria will match if it is a subset
+ of the context's range.
+ range_overlap If true, the criteria will match if it overlaps
+ any of the context's range.
+ range_superset If true, the criteria will match if it is a superset
+ of the context's range.
+ range_proper If true, use proper superset/subset operations.
+ No effect if not using set operations.
+ """
+
+ ruletype = None
+ fs = CriteriaDescriptor("fs_regex")
+ fs_regex = False
+
+ def results(self):
+ """Generator which yields all matching fs_use_* statements."""
+ self.log.info("Generating results from {0.policy}".format(self))
+ self.log.debug("Ruletypes: {0.ruletype}".format(self))
+ self.log.debug("FS: {0.fs!r}, regex: {0.fs_regex}".format(self))
+ self.log.debug("User: {0.user!r}, regex: {0.user_regex}".format(self))
+ self.log.debug("Role: {0.role!r}, regex: {0.role_regex}".format(self))
+ self.log.debug("Type: {0.type_!r}, regex: {0.type_regex}".format(self))
+ self.log.debug("Range: {0.range_!r}, subset: {0.range_subset}, overlap: {0.range_overlap}, "
+ "superset: {0.range_superset}, proper: {0.range_proper}".format(self))
+
+ for fsu in self.policy.fs_uses():
+ if self.ruletype and fsu.ruletype not in self.ruletype:
+ continue
+
+ if self.fs and not self._match_regex(
+ fsu.fs,
+ self.fs,
+ self.fs_regex):
+ continue
+
+ if not self._match_context(fsu.context):
+ continue
+
+ yield fsu
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/genfsconquery.py b/lib/python2.7/site-packages/setoolsgui/setools/genfsconquery.py
new file mode 100644
index 0000000..c67dfd6
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/genfsconquery.py
@@ -0,0 +1,98 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import logging
+import re
+
+from . import contextquery
+from .descriptors import CriteriaDescriptor
+
+
+class GenfsconQuery(contextquery.ContextQuery):
+
+ """
+ Query genfscon statements.
+
+ Parameter:
+ policy The policy to query.
+
+ Keyword Parameters/Class attributes:
+ fs The criteria to match the file system type.
+ fs_regex If true, regular expression matching
+ will be used on the file system type.
+    path            The criteria to match the path.
+    path_regex      If true, regular expression matching
+                    will be used on the path.
+    filetype        The file type of the genfscon to match.
+ user The criteria to match the context's user.
+ user_regex If true, regular expression matching
+ will be used on the user.
+ role The criteria to match the context's role.
+ role_regex If true, regular expression matching
+ will be used on the role.
+ type_ The criteria to match the context's type.
+ type_regex If true, regular expression matching
+ will be used on the type.
+ range_ The criteria to match the context's range.
+ range_subset If true, the criteria will match if it is a subset
+ of the context's range.
+ range_overlap If true, the criteria will match if it overlaps
+ any of the context's range.
+ range_superset If true, the criteria will match if it is a superset
+ of the context's range.
+ range_proper If true, use proper superset/subset operations.
+ No effect if not using set operations.
+ """
+
+ filetype = None
+ fs = CriteriaDescriptor("fs_regex")
+ fs_regex = False
+ path = CriteriaDescriptor("path_regex")
+ path_regex = False
+
+ def results(self):
+ """Generator which yields all matching genfscons."""
+ self.log.info("Generating results from {0.policy}".format(self))
+ self.log.debug("FS: {0.fs!r}, regex: {0.fs_regex}".format(self))
+ self.log.debug("Path: {0.path!r}, regex: {0.path_regex}".format(self))
+ self.log.debug("Filetype: {0.filetype!r}".format(self))
+ self.log.debug("User: {0.user!r}, regex: {0.user_regex}".format(self))
+ self.log.debug("Role: {0.role!r}, regex: {0.role_regex}".format(self))
+ self.log.debug("Type: {0.type_!r}, regex: {0.type_regex}".format(self))
+ self.log.debug("Range: {0.range_!r}, subset: {0.range_subset}, overlap: {0.range_overlap}, "
+ "superset: {0.range_superset}, proper: {0.range_proper}".format(self))
+
+ for genfs in self.policy.genfscons():
+ if self.fs and not self._match_regex(
+ genfs.fs,
+ self.fs,
+ self.fs_regex):
+ continue
+
+ if self.path and not self._match_regex(
+ genfs.path,
+ self.path,
+ self.path_regex):
+ continue
+
+ if self.filetype and not self.filetype == genfs.filetype:
+ continue
+
+ if not self._match_context(genfs.context):
+ continue
+
+ yield genfs
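+
+# Illustrative usage sketch (not part of the upstream file), assuming "p" is
+# a loaded SELinuxPolicy; the fs name and path pattern are hypothetical:
+#
+#   q = GenfsconQuery(p)
+#   q.fs = "proc"
+#   q.path = "^/net"
+#   q.path_regex = True
+#   for genfs in q.results():
+#       print(genfs)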
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/infoflow.py b/lib/python2.7/site-packages/setoolsgui/setools/infoflow.py
new file mode 100644
index 0000000..ea3ec32
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/infoflow.py
@@ -0,0 +1,403 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import itertools
+import logging
+from collections import namedtuple
+
+import networkx as nx
+from networkx.exception import NetworkXError, NetworkXNoPath
+
+from .descriptors import EdgeAttrIntMax, EdgeAttrList
+
+__all__ = ['InfoFlowAnalysis']
+
+# Return values for the analysis
+# are in the following tuple format:
+step_output = namedtuple("step", ["source",
+ "target",
+ "rules"])
+
+
+class InfoFlowAnalysis(object):
+
+ """Information flow analysis."""
+
+ def __init__(self, policy, perm_map, min_weight=1, exclude=None):
+ """
+ Parameters:
+ policy The policy to analyze.
+ perm_map The permission map or path to the permission map file.
+        min_weight   The minimum permission weight to include in the analysis.
+ (default is 1)
+ exclude The types excluded from the information flow analysis.
+ (default is none)
+ """
+ self.log = logging.getLogger(self.__class__.__name__)
+
+ self.policy = policy
+
+ self.min_weight = min_weight
+ self.perm_map = perm_map
+ self.exclude = exclude
+ self.rebuildgraph = True
+ self.rebuildsubgraph = True
+
+ self.G = nx.DiGraph()
+ self.subG = None
+
+ @property
+ def min_weight(self):
+ return self._min_weight
+
+ @min_weight.setter
+ def min_weight(self, weight):
+ if not 1 <= weight <= 10:
+ raise ValueError(
+ "Min information flow weight must be an integer 1-10.")
+
+ self._min_weight = weight
+ self.rebuildsubgraph = True
+
+ @property
+ def perm_map(self):
+ return self._perm_map
+
+ @perm_map.setter
+ def perm_map(self, perm_map):
+ self._perm_map = perm_map
+ self.rebuildgraph = True
+ self.rebuildsubgraph = True
+
+ @property
+ def exclude(self):
+ return self._exclude
+
+ @exclude.setter
+ def exclude(self, types):
+ if types:
+ self._exclude = [self.policy.lookup_type(t) for t in types]
+ else:
+ self._exclude = []
+
+ self.rebuildsubgraph = True
+
+ def shortest_path(self, source, target):
+ """
+ Generator which yields one shortest path between the source
+ and target types (there may be more).
+
+ Parameters:
+ source The source type.
+ target The target type.
+
+ Yield: generator(steps)
+
+ steps Yield: tuple(source, target, rules)
+
+ source The source type for this step of the information flow.
+ target The target type for this step of the information flow.
+ rules The list of rules creating this information flow step.
+ """
+ s = self.policy.lookup_type(source)
+ t = self.policy.lookup_type(target)
+
+ if self.rebuildsubgraph:
+ self._build_subgraph()
+
+ self.log.info("Generating one shortest path from {0} to {1}...".format(s, t))
+
+ try:
+ yield self.__generate_steps(nx.shortest_path(self.subG, s, t))
+ except (NetworkXNoPath, NetworkXError):
+ # NetworkXError: the type is valid but not in graph, e.g.
+ # excluded or disconnected due to min weight
+ # NetworkXNoPath: no paths or the target type is
+ # not in the graph
+ pass
+
+ def all_paths(self, source, target, maxlen=2):
+ """
+ Generator which yields all paths between the source and target
+ up to the specified maximum path length. This algorithm
+ tends to get very expensive above 3-5 steps, depending
+ on the policy complexity.
+
+ Parameters:
+ source The source type.
+ target The target type.
+ maxlen Maximum length of paths.
+
+ Yield: generator(steps)
+
+ steps Yield: tuple(source, target, rules)
+
+ source The source type for this step of the information flow.
+ target The target type for this step of the information flow.
+ rules The list of rules creating this information flow step.
+ """
+ if maxlen < 1:
+ raise ValueError("Maximum path length must be positive.")
+
+ s = self.policy.lookup_type(source)
+ t = self.policy.lookup_type(target)
+
+ if self.rebuildsubgraph:
+ self._build_subgraph()
+
+ self.log.info("Generating all paths from {0} to {1}, max len {2}...".format(s, t, maxlen))
+
+ try:
+ for path in nx.all_simple_paths(self.subG, s, t, maxlen):
+ yield self.__generate_steps(path)
+ except (NetworkXNoPath, NetworkXError):
+ # NetworkXError: the type is valid but not in graph, e.g.
+ # excluded or disconnected due to min weight
+ # NetworkXNoPath: no paths or the target type is
+ # not in the graph
+ pass
+
+ def all_shortest_paths(self, source, target):
+ """
+ Generator which yields all shortest paths between the source
+ and target types.
+
+ Parameters:
+ source The source type.
+ target The target type.
+
+ Yield: generator(steps)
+
+ steps Yield: tuple(source, target, rules)
+
+ source The source type for this step of the information flow.
+ target The target type for this step of the information flow.
+ rules The list of rules creating this information flow step.
+ """
+ s = self.policy.lookup_type(source)
+ t = self.policy.lookup_type(target)
+
+ if self.rebuildsubgraph:
+ self._build_subgraph()
+
+ self.log.info("Generating all shortest paths from {0} to {1}...".format(s, t))
+
+ try:
+ for path in nx.all_shortest_paths(self.subG, s, t):
+ yield self.__generate_steps(path)
+ except (NetworkXNoPath, NetworkXError, KeyError):
+ # NetworkXError: the type is valid but not in graph, e.g.
+ # excluded or disconnected due to min weight
+ # NetworkXNoPath: no paths or the target type is
+ # not in the graph
+ # KeyError: work around NetworkX bug
+ # when the source node is not in the graph
+ pass
+
+ def infoflows(self, type_, out=True):
+ """
+ Generator which yields all information flows in/out of a
+ specified source type.
+
+ Parameters:
+        type_      The starting type.
+
+ Keyword Parameters:
+        out         If true, information flows out of the type will
+                    be returned. If false, information flows into the
+                    type will be returned. Default is true.
+
+        Yield: tuple(source, target, rules)
+
+        source     The source type of the information flow.
+        target     The target type of the information flow.
+        rules      The list of rules creating the information flow.
+ """
+ s = self.policy.lookup_type(type_)
+
+ if self.rebuildsubgraph:
+ self._build_subgraph()
+
+ self.log.info("Generating all infoflows out of {0}...".format(s))
+
+ if out:
+ flows = self.subG.out_edges_iter(s)
+ else:
+ flows = self.subG.in_edges_iter(s)
+
+ try:
+ for source, target in flows:
+ edge = Edge(self.subG, source, target)
+ yield step_output(source, target, edge.rules)
+ except NetworkXError:
+ # NetworkXError: the type is valid but not in graph, e.g.
+ # excluded or disconnected due to min weight
+ pass
+
+ def get_stats(self): # pragma: no cover
+ """
+ Get the information flow graph statistics.
+
+ Return: tuple(nodes, edges)
+
+ nodes The number of nodes (types) in the graph.
+ edges The number of edges (information flows between types)
+ in the graph.
+ """
+ return (self.G.number_of_nodes(), self.G.number_of_edges())
+
+ #
+ # Internal functions follow
+ #
+
+ def __generate_steps(self, path):
+ """
+ Generator which returns the source, target, and associated rules
+ for each information flow step.
+
+ Parameter:
+ path A list of graph node names representing an information flow path.
+
+ Yield: tuple(source, target, rules)
+
+ source The source type for this step of the information flow.
+ target The target type for this step of the information flow.
+ rules The list of rules creating this information flow step.
+ """
+ for s in range(1, len(path)):
+ edge = Edge(self.subG, path[s - 1], path[s])
+ yield step_output(edge.source, edge.target, edge.rules)
+
+ #
+ #
+ # Graph building functions
+ #
+ #
+ # 1. _build_graph determines the flow in each direction for each TE
+ # rule and then expands the rule. All information flows are
+ # included in this main graph: memory is traded off for efficiency
+ # as the main graph should only need to be rebuilt if permission
+ # weights change.
+ # 2. _build_subgraph derives a subgraph which removes all excluded
+ # types (nodes) and edges (information flows) which are below the
+ # minimum weight. This subgraph is rebuilt only if the main graph
+ # is rebuilt or the minimum weight or excluded types change.
+
+ def _build_graph(self):
+ self.G.clear()
+
+ self.perm_map.map_policy(self.policy)
+
+ self.log.info("Building graph from {0}...".format(self.policy))
+
+ for rule in self.policy.terules():
+ if rule.ruletype != "allow":
+ continue
+
+ (rweight, wweight) = self.perm_map.rule_weight(rule)
+
+ for s, t in itertools.product(rule.source.expand(), rule.target.expand()):
+ # only add flows if they actually flow
+            # in or out of the source type
+ if s != t:
+ if wweight:
+ edge = Edge(self.G, s, t, create=True)
+ edge.rules.append(rule)
+ edge.weight = wweight
+
+ if rweight:
+ edge = Edge(self.G, t, s, create=True)
+ edge.rules.append(rule)
+ edge.weight = rweight
+
+ self.rebuildgraph = False
+ self.rebuildsubgraph = True
+ self.log.info("Completed building graph.")
+
+ def _build_subgraph(self):
+ if self.rebuildgraph:
+ self._build_graph()
+
+ self.log.info("Building subgraph...")
+ self.log.debug("Excluding {0!r}".format(self.exclude))
+ self.log.debug("Min weight {0}".format(self.min_weight))
+
+ # delete excluded types from subgraph
+ nodes = [n for n in self.G.nodes() if n not in self.exclude]
+ self.subG = self.G.subgraph(nodes)
+
+ # delete edges below minimum weight.
+ # no need if weight is 1, since that
+ # does not exclude any edges.
+ if self.min_weight > 1:
+ delete_list = []
+ for s, t in self.subG.edges_iter():
+ edge = Edge(self.subG, s, t)
+ if edge.weight < self.min_weight:
+ delete_list.append(edge)
+
+ self.subG.remove_edges_from(delete_list)
+
+ self.rebuildsubgraph = False
+ self.log.info("Completed building subgraph.")
+
+
+class Edge(object):
+
+ """
+ A graph edge. Also used for returning information flow steps.
+
+ Parameters:
+ source The source type of the edge.
+ target The target type of the edge.
+
+ Keyword Parameters:
+ create (T/F) create the edge if it does not exist.
+ The default is False.
+ """
+
+ rules = EdgeAttrList('rules')
+
+ # use capacity to store the info flow weight so
+ # we can use network flow algorithms naturally.
+ # The weight for each edge is 1 since each info
+ # flow step is no more costly than another
+    # (see the add_edge() call below)
+ weight = EdgeAttrIntMax('capacity')
+
+ def __init__(self, graph, source, target, create=False):
+ self.G = graph
+ self.source = source
+ self.target = target
+
+ # a bit of a hack to make edges work
+ # in NetworkX functions that work on
+ # 2-tuples of (source, target)
+ # (see __getitem__ below)
+ self.st_tuple = (source, target)
+
+ if not self.G.has_edge(source, target):
+ if create:
+ self.G.add_edge(source, target, weight=1)
+ self.rules = None
+ self.weight = None
+ else:
+ raise ValueError("Edge does not exist in graph")
+
+ def __getitem__(self, key):
+ return self.st_tuple[key]
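+
+# Illustrative usage sketch (not part of the upstream file), assuming "p" is
+# a loaded SELinuxPolicy, "m" is a loaded PermissionMap, and the type names
+# are hypothetical. shortest_path() yields paths; each path is a generator
+# of (source, target, rules) steps:
+#
+#   a = InfoFlowAnalysis(p, m, min_weight=3)
+#   for path in a.shortest_path("init_t", "user_home_t"):
+#       for source, target, rules in path:
+#           print(source, "->", target)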
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/initsidquery.py b/lib/python2.7/site-packages/setoolsgui/setools/initsidquery.py
new file mode 100644
index 0000000..1eb3790
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/initsidquery.py
@@ -0,0 +1,74 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import logging
+
+from . import compquery
+from . import contextquery
+
+
+class InitialSIDQuery(compquery.ComponentQuery, contextquery.ContextQuery):
+
+ """
+ Initial SID (Initial context) query.
+
+ Parameter:
+ policy The policy to query.
+
+ Keyword Parameters/Class attributes:
+ name The Initial SID name to match.
+ name_regex If true, regular expression matching
+ will be used on the Initial SID name.
+ user The criteria to match the context's user.
+ user_regex If true, regular expression matching
+ will be used on the user.
+ role The criteria to match the context's role.
+ role_regex If true, regular expression matching
+ will be used on the role.
+ type_ The criteria to match the context's type.
+ type_regex If true, regular expression matching
+ will be used on the type.
+ range_ The criteria to match the context's range.
+ range_subset If true, the criteria will match if it is a subset
+ of the context's range.
+ range_overlap If true, the criteria will match if it overlaps
+ any of the context's range.
+ range_superset If true, the criteria will match if it is a superset
+ of the context's range.
+ range_proper If true, use proper superset/subset operations.
+ No effect if not using set operations.
+ """
+
+ def results(self):
+ """Generator which yields all matching initial SIDs."""
+ self.log.info("Generating results from {0.policy}".format(self))
+ self.log.debug("Name: {0.name!r}, regex: {0.name_regex}".format(self))
+ self.log.debug("User: {0.user!r}, regex: {0.user_regex}".format(self))
+ self.log.debug("Role: {0.role!r}, regex: {0.role_regex}".format(self))
+ self.log.debug("Type: {0.type_!r}, regex: {0.type_regex}".format(self))
+ self.log.debug("Range: {0.range_!r}, subset: {0.range_subset}, overlap: {0.range_overlap}, "
+ "superset: {0.range_superset}, proper: {0.range_proper}".format(self))
+
+ for i in self.policy.initialsids():
+ if not self._match_name(i):
+ continue
+
+ if not self._match_context(i.context):
+ continue
+
+ yield i
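+
+# Illustrative usage sketch (not part of the upstream file), assuming "p" is
+# a loaded SELinuxPolicy ("kernel" is a standard initial SID name):
+#
+#   q = InitialSIDQuery(p)
+#   q.name = "kernel"
+#   for sid in q.results():
+#       print(sid, sid.context)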
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/mixins.py b/lib/python2.7/site-packages/setoolsgui/setools/mixins.py
new file mode 100644
index 0000000..a31d420
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/mixins.py
@@ -0,0 +1,91 @@
+# Copyright 2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+# pylint: disable=attribute-defined-outside-init,no-member
+import re
+
+from .descriptors import CriteriaDescriptor, CriteriaSetDescriptor
+
+
+class MatchAlias(object):
+
+ """Mixin for matching an object's aliases."""
+
+ alias = CriteriaDescriptor("alias_regex")
+ alias_regex = False
+
+ def _match_alias(self, obj):
+ """
+ Match the alias criteria
+
+ Parameter:
+ obj An object with an alias generator method named "aliases"
+ """
+
+ if not self.alias:
+ # if there is no criteria, everything matches.
+ return True
+
+ return self._match_in_set(obj.aliases(), self.alias, self.alias_regex)
+
+
+class MatchObjClass(object):
+
+ """Mixin for matching an object's class."""
+
+ tclass = CriteriaSetDescriptor("tclass_regex", "lookup_class")
+ tclass_regex = False
+
+ def _match_object_class(self, obj):
+ """
+ Match the object class criteria
+
+ Parameter:
+ obj An object with an object class attribute named "tclass"
+ """
+
+ if not self.tclass:
+ # if there is no criteria, everything matches.
+ return True
+ elif self.tclass_regex:
+ return bool(self.tclass.search(str(obj.tclass)))
+ else:
+ return obj.tclass in self.tclass
+
+
+class MatchPermission(object):
+
+ """Mixin for matching an object's permissions."""
+
+ perms = CriteriaSetDescriptor("perms_regex")
+ perms_equal = False
+ perms_regex = False
+
+ def _match_perms(self, obj):
+ """
+ Match the permission criteria
+
+ Parameter:
+ obj An object with a permission set class attribute named "perms"
+ """
+
+ if not self.perms:
+ # if there is no criteria, everything matches.
+ return True
+
+ return self._match_regex_or_set(obj.perms, self.perms, self.perms_equal, self.perms_regex)
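+
+# Illustrative sketch (not part of the upstream file) of how a hypothetical
+# query class could consume these mixins, assuming the sibling "query"
+# module and a loaded SELinuxPolicy "p":
+#
+#   class MyPermQuery(MatchPermission, query.PolicyQuery):
+#       def results(self):
+#           for rule in self.policy.terules():
+#               if self._match_perms(rule):
+#                   yield rule
+#
+#   q = MyPermQuery(p)
+#   q.perms = set(["ptrace"])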
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/mlsrulequery.py b/lib/python2.7/site-packages/setoolsgui/setools/mlsrulequery.py
new file mode 100644
index 0000000..3a9e1bf
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/mlsrulequery.py
@@ -0,0 +1,115 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import logging
+
+from . import mixins, query
+from .descriptors import CriteriaDescriptor, CriteriaSetDescriptor, RuletypeDescriptor
+
+
+class MLSRuleQuery(mixins.MatchObjClass, query.PolicyQuery):
+
+ """
+ Query MLS rules.
+
+ Parameter:
+ policy The policy to query.
+
+ Keyword Parameters/Class attributes:
+ ruletype The list of rule type(s) to match.
+ source The name of the source type/attribute to match.
+ source_regex If true, regular expression matching will
+ be used on the source type/attribute.
+ target The name of the target type/attribute to match.
+ target_regex If true, regular expression matching will
+ be used on the target type/attribute.
+    tclass          The object class(es) to match.
+    tclass_regex    If true, use a regular expression for
+                    matching the rule's object class.
+    default         The criteria to match the rule's default range.
+    default_subset  If true, the criteria will match if it is a subset
+                    of the rule's default range.
+    default_overlap If true, the criteria will match if it overlaps
+                    any of the rule's default range.
+    default_superset If true, the criteria will match if it is a superset
+                    of the rule's default range.
+    default_proper  If true, use proper superset/subset operations.
+                    No effect if not using set operations.
+ """
+
+ ruletype = RuletypeDescriptor("validate_mls_ruletype")
+ source = CriteriaDescriptor("source_regex", "lookup_type_or_attr")
+ source_regex = False
+ target = CriteriaDescriptor("target_regex", "lookup_type_or_attr")
+ target_regex = False
+ tclass = CriteriaSetDescriptor("tclass_regex", "lookup_class")
+ tclass_regex = False
+ default = CriteriaDescriptor(lookup_function="lookup_range")
+ default_overlap = False
+ default_subset = False
+ default_superset = False
+ default_proper = False
+
+ def results(self):
+ """Generator which yields all matching MLS rules."""
+ self.log.info("Generating results from {0.policy}".format(self))
+ self.log.debug("Ruletypes: {0.ruletype}".format(self))
+ self.log.debug("Source: {0.source!r}, regex: {0.source_regex}".format(self))
+ self.log.debug("Target: {0.target!r}, regex: {0.target_regex}".format(self))
+ self.log.debug("Class: {0.tclass!r}, regex: {0.tclass_regex}".format(self))
+ self.log.debug("Default: {0.default!r}, overlap: {0.default_overlap}, "
+ "subset: {0.default_subset}, superset: {0.default_superset}, "
+ "proper: {0.default_proper}".format(self))
+
+ for rule in self.policy.mlsrules():
+ #
+ # Matching on rule type
+ #
+ if self.ruletype:
+ if rule.ruletype not in self.ruletype:
+ continue
+
+ #
+ # Matching on source type
+ #
+ if self.source and not self._match_regex(
+ rule.source,
+ self.source,
+ self.source_regex):
+ continue
+
+ #
+ # Matching on target type
+ #
+ if self.target and not self._match_regex(
+ rule.target,
+ self.target,
+ self.target_regex):
+ continue
+
+ #
+ # Matching on object class
+ #
+ if not self._match_object_class(rule):
+ continue
+
+ #
+ # Matching on range
+ #
+ if self.default and not self._match_range(
+ rule.default,
+ self.default,
+ self.default_subset,
+ self.default_overlap,
+ self.default_superset,
+ self.default_proper):
+ continue
+
+ # if we get here, we have matched all available criteria
+ yield rule
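+
+# Illustrative usage sketch (not part of the upstream file), assuming "p" is
+# a loaded MLS policy and "init_t" is a hypothetical type:
+#
+#   q = MLSRuleQuery(p)
+#   q.ruletype = ["range_transition"]
+#   q.source = "init_t"
+#   for rule in q.results():
+#       print(rule)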
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/netifconquery.py b/lib/python2.7/site-packages/setoolsgui/setools/netifconquery.py
new file mode 100644
index 0000000..30db977
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/netifconquery.py
@@ -0,0 +1,77 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import logging
+
+from . import compquery
+from . import contextquery
+
+
+class NetifconQuery(compquery.ComponentQuery, contextquery.ContextQuery):
+
+ """
+ Network interface context query.
+
+ Parameter:
+ policy The policy to query.
+
+ Keyword Parameters/Class attributes:
+ name The name of the network interface to match.
+ name_regex If true, regular expression matching will
+ be used for matching the name.
+ user The criteria to match the context's user.
+ user_regex If true, regular expression matching
+ will be used on the user.
+ role The criteria to match the context's role.
+ role_regex If true, regular expression matching
+ will be used on the role.
+ type_ The criteria to match the context's type.
+ type_regex If true, regular expression matching
+ will be used on the type.
+ range_ The criteria to match the context's range.
+ range_subset If true, the criteria will match if it is a subset
+ of the context's range.
+ range_overlap If true, the criteria will match if it overlaps
+ any of the context's range.
+ range_superset If true, the criteria will match if it is a superset
+ of the context's range.
+ range_proper If true, use proper superset/subset operations.
+ No effect if not using set operations.
+ """
+
+ def results(self):
+ """Generator which yields all matching netifcons."""
+ self.log.info("Generating results from {0.policy}".format(self))
+ self.log.debug("Name: {0.name!r}, regex: {0.name_regex}".format(self))
+ self.log.debug("User: {0.user!r}, regex: {0.user_regex}".format(self))
+ self.log.debug("Role: {0.role!r}, regex: {0.role_regex}".format(self))
+ self.log.debug("Type: {0.type_!r}, regex: {0.type_regex}".format(self))
+ self.log.debug("Range: {0.range_!r}, subset: {0.range_subset}, overlap: {0.range_overlap}, "
+ "superset: {0.range_superset}, proper: {0.range_proper}".format(self))
+
+ for netif in self.policy.netifcons():
+ if self.name and not self._match_regex(
+ netif.netif,
+ self.name,
+ self.name_regex):
+ continue
+
+ if not self._match_context(netif.context):
+ continue
+
+ yield netif
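+
+# Illustrative usage sketch (not part of the upstream file), assuming "p" is
+# a loaded SELinuxPolicy and "eth0" is a hypothetical interface name:
+#
+#   q = NetifconQuery(p)
+#   q.name = "eth0"
+#   for netif in q.results():
+#       print(netif.context)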
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/nodeconquery.py b/lib/python2.7/site-packages/setoolsgui/setools/nodeconquery.py
new file mode 100644
index 0000000..eb21d81
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/nodeconquery.py
@@ -0,0 +1,148 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+try:
+ import ipaddress
+except ImportError: # pragma: no cover
+ pass
+
+import logging
+from socket import AF_INET, AF_INET6
+
+from . import contextquery
+
+
+class NodeconQuery(contextquery.ContextQuery):
+
+ """
+ Query nodecon statements.
+
+ Parameter:
+ policy The policy to query.
+
+ Keyword Parameters/Class attributes:
+    network         The IPv4/IPv6 address or IPv4/IPv6 network address
+                    with netmask, e.g. 192.168.1.0/255.255.255.0 or
+                    192.168.1.0/24.
+    network_overlap If true, the criteria will match if it overlaps
+                    the nodecon's network, instead of requiring equality.
+ ip_version The IP version of the nodecon to match. (socket.AF_INET
+ for IPv4 or socket.AF_INET6 for IPv6)
+ user The criteria to match the context's user.
+ user_regex If true, regular expression matching
+ will be used on the user.
+ role The criteria to match the context's role.
+ role_regex If true, regular expression matching
+ will be used on the role.
+ type_ The criteria to match the context's type.
+ type_regex If true, regular expression matching
+ will be used on the type.
+ range_ The criteria to match the context's range.
+ range_subset If true, the criteria will match if it is a subset
+ of the context's range.
+ range_overlap If true, the criteria will match if it overlaps
+ any of the context's range.
+ range_superset If true, the criteria will match if it is a superset
+ of the context's range.
+ range_proper If true, use proper superset/subset operations.
+ No effect if not using set operations.
+ """
+
+ _network = None
+ network_overlap = False
+ _ip_version = None
+
+ @property
+ def ip_version(self):
+ return self._ip_version
+
+ @ip_version.setter
+ def ip_version(self, value):
+ if value:
+ if not (value == AF_INET or value == AF_INET6):
+ raise ValueError(
+ "The address family must be {0} for IPv4 or {1} for IPv6.".
+ format(AF_INET, AF_INET6))
+
+ self._ip_version = value
+ else:
+ self._ip_version = None
+
+ @property
+ def network(self):
+ return self._network
+
+ @network.setter
+ def network(self, value):
+ if value:
+ try:
+ self._network = ipaddress.ip_network(value)
+ except NameError: # pragma: no cover
+ raise RuntimeError("Nodecon IP address/network functions require Python 3.3+.")
+ else:
+ self._network = None
+
+ def results(self):
+ """Generator which yields all matching nodecons."""
+ self.log.info("Generating results from {0.policy}".format(self))
+ self.log.debug("Network: {0.network!r}, overlap: {0.network_overlap}".format(self))
+ self.log.debug("IP Version: {0.ip_version}".format(self))
+ self.log.debug("User: {0.user!r}, regex: {0.user_regex}".format(self))
+ self.log.debug("Role: {0.role!r}, regex: {0.role_regex}".format(self))
+ self.log.debug("Type: {0.type_!r}, regex: {0.type_regex}".format(self))
+ self.log.debug("Range: {0.range_!r}, subset: {0.range_subset}, overlap: {0.range_overlap}, "
+ "superset: {0.range_superset}, proper: {0.range_proper}".format(self))
+
+ for nodecon in self.policy.nodecons():
+
+ if self.network:
+ try:
+ netmask = ipaddress.ip_address(nodecon.netmask)
+ except NameError: # pragma: no cover
+ # Should never actually hit this since the self.network
+ # setter raises the same exception.
+ raise RuntimeError("Nodecon IP address/network functions require Python 3.3+.")
+
+ # Python 3.3's IPv6Network constructor does not support
+ # expanded netmasks, only CIDR numbers. Convert netmask
+ # into CIDR.
+ # This is Brian Kernighan's method for counting set bits.
+ # If the netmask happens to be invalid, this will
+ # not detect it.
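+                # For example, 255.255.255.0 has 24 set bits, so the
+                # loop below yields a /24 prefix.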
+ CIDR = 0
+ int_netmask = int(netmask)
+ while int_netmask:
+ int_netmask &= int_netmask - 1
+ CIDR += 1
+
+ net = ipaddress.ip_network('{0}/{1}'.format(nodecon.address, CIDR))
+
+ if self.network_overlap:
+ if not self.network.overlaps(net):
+ continue
+ else:
+ if not net == self.network:
+ continue
+
+ if self.ip_version and self.ip_version != nodecon.ip_version:
+ continue
+
+ if not self._match_context(nodecon.context):
+ continue
+
+ yield nodecon
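+
+# Illustrative usage sketch (not part of the upstream file; needs the
+# Python 3.3+ ipaddress module), assuming "p" is a loaded SELinuxPolicy:
+#
+#   q = NodeconQuery(p)
+#   q.network = "192.168.1.0/24"
+#   q.network_overlap = True
+#   for nodecon in q.results():
+#       print(nodecon)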
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/objclassquery.py b/lib/python2.7/site-packages/setoolsgui/setools/objclassquery.py
new file mode 100644
index 0000000..8f40df8
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/objclassquery.py
@@ -0,0 +1,101 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import logging
+import re
+
+from . import compquery
+from .descriptors import CriteriaDescriptor, CriteriaSetDescriptor
+from .policyrep.exception import NoCommon
+
+
+class ObjClassQuery(compquery.ComponentQuery):
+
+ """
+ Query object classes.
+
+ Parameter:
+ policy The policy to query.
+
+ Keyword Parameters/Class attributes:
+    name            The name of the object class to match.
+ name_regex If true, regular expression matching will
+ be used for matching the name.
+ common The name of the inherited common to match.
+ common_regex If true, regular expression matching will
+ be used for matching the common name.
+ perms The permissions to match.
+ perms_equal If true, only commons with permission sets
+ that are equal to the criteria will
+ match. Otherwise, any intersection
+ will match.
+    perms_regex     If true, regular expression matching
+                    will be used on the permission names instead
+                    of set logic.
+    perms_indirect  If false, permissions inherited from a common
+                    permission set will not be evaluated. Default
+                    is true.
+ """
+
+ common = CriteriaDescriptor("common_regex", "lookup_common")
+ common_regex = False
+ perms = CriteriaSetDescriptor("perms_regex")
+ perms_equal = False
+ perms_indirect = True
+ perms_regex = False
+
+ def results(self):
+ """Generator which yields all matching object classes."""
+ self.log.info("Generating results from {0.policy}".format(self))
+ self.log.debug("Name: {0.name!r}, regex: {0.name_regex}".format(self))
+ self.log.debug("Common: {0.common!r}, regex: {0.common_regex}".format(self))
+ self.log.debug("Perms: {0.perms}, regex: {0.perms_regex}, "
+ "eq: {0.perms_equal}, indirect: {0.perms_indirect}".format(self))
+
+ for class_ in self.policy.classes():
+ if not self._match_name(class_):
+ continue
+
+ if self.common:
+ try:
+ if not self._match_regex(
+ class_.common,
+ self.common,
+ self.common_regex):
+ continue
+ except NoCommon:
+ continue
+
+ if self.perms:
+ perms = class_.perms
+
+ if self.perms_indirect:
+ try:
+ perms |= class_.common.perms
+ except NoCommon:
+ pass
+
+ if not self._match_regex_or_set(
+ perms,
+ self.perms,
+ self.perms_equal,
+ self.perms_regex):
+ continue
+
+ yield class_
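+
+# Illustrative usage sketch (not part of the upstream file), assuming "p" is
+# a loaded SELinuxPolicy ("entrypoint" is a standard file permission):
+#
+#   q = ObjClassQuery(p)
+#   q.perms = ["entrypoint"]
+#   for class_ in q.results():
+#       print(class_)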
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/permmap.py b/lib/python2.7/site-packages/setoolsgui/setools/permmap.py
new file mode 100644
index 0000000..54cd9f9
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/permmap.py
@@ -0,0 +1,363 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import sys
+import logging
+from errno import ENOENT
+
+from . import exception
+from . import policyrep
+
+
+class PermissionMap(object):
+
+ """Permission Map for information flow analysis."""
+
+ valid_infoflow_directions = ["r", "w", "b", "n", "u"]
+ min_weight = 1
+ max_weight = 10
+
+ def __init__(self, permmapfile=None):
+ """
+ Parameter:
+ permmapfile The path to the permission map to load.
+ """
+ self.log = logging.getLogger(self.__class__.__name__)
+
+ if permmapfile:
+ self.load(permmapfile)
+ else:
+ for path in ["data/", sys.prefix + "/share/setools/"]:
+ try:
+ self.load(path + "perm_map")
+ break
+ except (IOError, OSError) as err:
+ if err.errno != ENOENT:
+ raise
+ else:
+ raise RuntimeError("Unable to load default permission map.")
+
+ def load(self, permmapfile):
+ """
+ Parameter:
+ permmapfile The path to the permission map to load.
+ """
+ self.log.info("Opening permission map \"{0}\"".format(permmapfile))
+
+ # state machine
+ # 1 = read number of classes
+ # 2 = read class name and number of perms
+ # 3 = read perms
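+        #
+        # For reference, a hypothetical map fragment in this format
+        # (class count, then a class header, then one
+        # "<perm> <direction> <weight>" line per permission):
+        #   1
+        #   class file 2
+        #   read   r  10
+        #   write  w  10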
+ with open(permmapfile, "r") as mapfile:
+ class_count = 0
+ num_classes = 0
+ state = 1
+
+ self.permmap = dict()
+
+ for line_num, line in enumerate(mapfile, start=1):
+ entry = line.split()
+
+ if len(entry) == 0 or entry[0][0] == '#':
+ continue
+
+ if state == 1:
+ try:
+ num_classes = int(entry[0])
+ except ValueError:
+ raise exception.PermissionMapParseError(
+ "{0}:{1}:Invalid number of classes: {2}".
+ format(permmapfile, line_num, entry[0]))
+
+ if num_classes < 1:
+ raise exception.PermissionMapParseError(
+ "{0}:{1}:Number of classes must be positive: {2}".
+ format(permmapfile, line_num, entry[0]))
+
+ state = 2
+
+ elif state == 2:
+ if len(entry) != 3 or entry[0] != "class":
+ raise exception.PermissionMapParseError(
+ "{0}:{1}:Invalid class declaration: {2}".
+ format(permmapfile, line_num, entry))
+
+ class_name = str(entry[1])
+
+ try:
+ num_perms = int(entry[2])
+ except ValueError:
+ raise exception.PermissionMapParseError(
+ "{0}:{1}:Invalid number of permissions: {2}".
+ format(permmapfile, line_num, entry[2]))
+
+ if num_perms < 1:
+ raise exception.PermissionMapParseError(
+ "{0}:{1}:Number of permissions must be positive: {2}".
+ format(permmapfile, line_num, entry[2]))
+
+ class_count += 1
+ if class_count > num_classes:
+ raise exception.PermissionMapParseError(
+ "{0}:{1}:Extra class found: {2}".
+ format(permmapfile, line_num, class_name))
+
+ self.permmap[class_name] = dict()
+ perm_count = 0
+ state = 3
+
+ elif state == 3:
+ perm_name = str(entry[0])
+
+ flow_direction = str(entry[1])
+ if flow_direction not in self.valid_infoflow_directions:
+ raise exception.PermissionMapParseError(
+ "{0}:{1}:Invalid information flow direction: {2}".
+ format(permmapfile, line_num, entry[1]))
+
+ try:
+ weight = int(entry[2])
+ except ValueError:
+ raise exception.PermissionMapParseError(
+ "{0}:{1}:Invalid permission weight: {2}".
+ format(permmapfile, line_num, entry[2]))
+
+ if not self.min_weight <= weight <= self.max_weight:
+ raise exception.PermissionMapParseError(
+ "{0}:{1}:Permission weight must be {3}-{4}: {2}".
+ format(permmapfile, line_num, entry[2],
+ self.min_weight, self.max_weight))
+
+ self.permmap[class_name][perm_name] = {'direction': flow_direction,
+ 'weight': weight,
+ 'enabled': True}
+
+ perm_count += 1
+ if perm_count >= num_perms:
+ state = 2
+
+ def exclude_class(self, class_):
+ """
+ Exclude all permissions in an object class for calculating rule weights.
+
+ Parameter:
+ class_ The object class to exclude.
+
+ Exceptions:
+ UnmappedClass The specified object class is not mapped.
+ """
+
+ classname = str(class_)
+
+ try:
+ for perm in self.permmap[classname]:
+ self.permmap[classname][perm]['enabled'] = False
+ except KeyError:
+ raise exception.UnmappedClass("{0} is not mapped.".format(classname))
+
+ def exclude_permission(self, class_, permission):
+ """
+ Exclude a permission for calculating rule weights.
+
+ Parameter:
+ class_ The object class of the permission.
+ permission The permission name to exclude.
+
+ Exceptions:
+ UnmappedClass The specified object class is not mapped.
+ UnmappedPermission The specified permission is not mapped for the object class.
+ """
+ classname = str(class_)
+
+ if classname not in self.permmap:
+ raise exception.UnmappedClass("{0} is not mapped.".format(classname))
+
+ try:
+ self.permmap[classname][permission]['enabled'] = False
+ except KeyError:
+ raise exception.UnmappedPermission("{0}:{1} is not mapped.".
+ format(classname, permission))
+
+ def include_class(self, class_):
+ """
+ Include all permissions in an object class for calculating rule weights.
+
+ Parameter:
+ class_ The object class to include.
+
+ Exceptions:
+ UnmappedClass The specified object class is not mapped.
+ """
+
+ classname = str(class_)
+
+ try:
+ for perm in self.permmap[classname]:
+ self.permmap[classname][perm]['enabled'] = True
+ except KeyError:
+ raise exception.UnmappedClass("{0} is not mapped.".format(classname))
+
+ def include_permission(self, class_, permission):
+ """
+ Include a permission for calculating rule weights.
+
+ Parameter:
+ class_ The object class of the permission.
+ permission The permission name to include.
+
+ Exceptions:
+ UnmappedClass The specified object class is not mapped.
+ UnmappedPermission The specified permission is not mapped for the object class.
+ """
+
+ classname = str(class_)
+
+ if classname not in self.permmap:
+ raise exception.UnmappedClass("{0} is not mapped.".format(classname))
+
+ try:
+ self.permmap[classname][permission]['enabled'] = True
+ except KeyError:
+ raise exception.UnmappedPermission("{0}:{1} is not mapped.".
+ format(classname, permission))
+
+ def map_policy(self, policy):
+ """Create mappings for all classes and permissions in the specified policy."""
+ for class_ in policy.classes():
+ class_name = str(class_)
+
+ if class_name not in self.permmap:
+ self.log.info("Adding unmapped class {0} from {1}".format(class_name, policy))
+ self.permmap[class_name] = dict()
+
+ perms = class_.perms
+
+ try:
+ perms |= class_.common.perms
+ except policyrep.exception.NoCommon:
+ pass
+
+ for perm_name in perms:
+ if perm_name not in self.permmap[class_name]:
+ self.log.info("Adding unmapped permission {0} in {1} from {2}".
+ format(perm_name, class_name, policy))
+ self.permmap[class_name][perm_name] = {'direction': 'u',
+ 'weight': 1,
+ 'enabled': True}
+
+ def rule_weight(self, rule):
+ """
+ Get the type enforcement rule's information flow read and write weights.
+
+ Parameter:
+ rule A type enforcement rule.
+
+ Return: Tuple(read_weight, write_weight)
+ read_weight The type enforcement rule's read weight.
+ write_weight The type enforcement rule's write weight.
+ """
+
+ write_weight = 0
+ read_weight = 0
+ class_name = str(rule.tclass)
+
+ if rule.ruletype != 'allow':
+ raise exception.RuleTypeError("{0} rules cannot be used for calculating a weight".
+ format(rule.ruletype))
+
+ if class_name not in self.permmap:
+ raise exception.UnmappedClass("{0} is not mapped.".format(class_name))
+
+ # iterate over the permissions and determine the
+ # weight of the rule in each direction. The result
+ # is the largest-weight permission in each direction
+ for perm_name in rule.perms:
+ try:
+ mapping = self.permmap[class_name][perm_name]
+ except KeyError:
+ raise exception.UnmappedPermission("{0}:{1} is not mapped.".
+ format(class_name, perm_name))
+
+ if not mapping['enabled']:
+ continue
+
+ if mapping['direction'] == "r":
+ read_weight = max(read_weight, mapping['weight'])
+ elif mapping['direction'] == "w":
+ write_weight = max(write_weight, mapping['weight'])
+ elif mapping['direction'] == "b":
+ read_weight = max(read_weight, mapping['weight'])
+ write_weight = max(write_weight, mapping['weight'])
+
+ return (read_weight, write_weight)
+
+ def set_direction(self, class_, permission, direction):
+ """
+ Set the information flow direction of a permission.
+
+ Parameter:
+ class_ The object class of the permission.
+ permission The permission name.
+        direction   The information flow direction of the permission (r/w/b/n).
+
+ Exceptions:
+ UnmappedClass The specified object class is not mapped.
+ UnmappedPermission The specified permission is not mapped for the object class.
+ """
+
+ if direction not in self.valid_infoflow_directions:
+ raise ValueError("Invalid information flow direction: {0}".format(direction))
+
+ classname = str(class_)
+
+ if classname not in self.permmap:
+ raise exception.UnmappedClass("{0} is not mapped.".format(classname))
+
+ try:
+ self.permmap[classname][permission]['direction'] = direction
+ except KeyError:
+ raise exception.UnmappedPermission("{0}:{1} is not mapped.".
+ format(classname, permission))
+
+ def set_weight(self, class_, permission, weight):
+ """
+ Set the weight of a permission.
+
+ Parameter:
+ class_ The object class of the permission.
+ permission The permission name.
+ weight The weight of the permission (1-10).
+
+ Exceptions:
+ UnmappedClass The specified object class is not mapped.
+ UnmappedPermission The specified permission is not mapped for the object class.
+ """
+
+ if not self.min_weight <= weight <= self.max_weight:
+ raise ValueError("Permission weights must be 1-10: {0}".format(weight))
+
+ classname = str(class_)
+
+ if classname not in self.permmap:
+ raise exception.UnmappedClass("{0} is not mapped.".format(classname))
+
+ try:
+ self.permmap[classname][permission]['weight'] = weight
+ except KeyError:
+ raise exception.UnmappedPermission("{0}:{1} is not mapped.".
+ format(classname, permission))
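+
+# Illustrative usage sketch (not part of the upstream file); "rule" stands in
+# for an allow rule obtained elsewhere, e.g. from SELinuxPolicy.terules():
+#
+#   m = PermissionMap()                  # load the default map
+#   m.exclude_permission("file", "getattr")
+#   read_weight, write_weight = m.rule_weight(rule)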
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/polcapquery.py b/lib/python2.7/site-packages/setoolsgui/setools/polcapquery.py
new file mode 100644
index 0000000..e024b05
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/polcapquery.py
@@ -0,0 +1,47 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import logging
+
+from . import compquery
+
+
+class PolCapQuery(compquery.ComponentQuery):
+
+ """
+ Query SELinux policy capabilities
+
+ Parameter:
+ policy The policy to query.
+
+ Keyword Parameters/Class attributes:
+ name The name of the policy capability to match.
+ name_regex If true, regular expression matching will
+ be used for matching the name.
+ """
+
+ def results(self):
+ """Generator which yields all matching policy capabilities."""
+ self.log.info("Generating results from {0.policy}".format(self))
+ self.log.debug("Name: {0.name!r}, regex: {0.name_regex}".format(self))
+
+ for cap in self.policy.polcaps():
+ if not self._match_name(cap):
+ continue
+
+ yield cap
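+
+# Illustrative usage sketch (not part of the upstream file), assuming "p" is
+# a loaded SELinuxPolicy ("network_peer_controls" is an example capability):
+#
+#   q = PolCapQuery(p)
+#   q.name = "network_peer_controls"
+#   for cap in q.results():
+#       print(cap)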
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/policyrep/__init__.py b/lib/python2.7/site-packages/setoolsgui/setools/policyrep/__init__.py
new file mode 100644
index 0000000..b03e524
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/policyrep/__init__.py
@@ -0,0 +1,568 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+# pylint: disable=too-many-public-methods
+#
+# Create a Python representation of the policy.
+# The idea is that this module provides convenient
+# abstractions and methods for accessing the policy
+# structures.
+import logging
+from itertools import chain
+from errno import ENOENT
+
+try:
+ import selinux
+except ImportError:
+ pass
+
+from . import qpol
+
+# The libqpol SWIG class is not quite natural for
+# Python: the policy is repeatedly referenced in the
+# function calls, which makes sense for C code
+# but not for Python code, so each object keeps
+# a reference to the policy for internal use.
+# This also makes sense since an object would only
+# be valid for the policy it comes from.
+
+# Exceptions
+from . import exception
+
+# Components
+from . import boolcond
+from . import default
+from . import mls
+from . import objclass
+from . import polcap
+from . import role
+from . import typeattr
+from . import user
+
+# Rules
+from . import mlsrule
+from . import rbacrule
+from . import terule
+
+# Constraints
+from . import constraint
+
+# In-policy Labeling
+from . import fscontext
+from . import initsid
+from . import netcontext
+
+
+class SELinuxPolicy(object):
+
+ """The complete SELinux policy."""
+
+ def __init__(self, policyfile=None):
+ """
+ Parameter:
+ policyfile Path to a policy to open.
+ """
+
+ self.log = logging.getLogger(self.__class__.__name__)
+ self.policy = None
+ self.filename = None
+
+ if policyfile:
+ self._load_policy(policyfile)
+ else:
+ try:
+ self._load_running_policy()
+ except NameError:
+ raise RuntimeError("Loading the running policy requires libselinux Python bindings")
+
+ def __repr__(self):
+ return "<SELinuxPolicy(\"{0}\")>".format(self.filename)
+
+ def __str__(self):
+ return self.filename
+
+ def __deepcopy__(self, memo):
+ # shallow copy as all of the members are immutable
+ newobj = SELinuxPolicy.__new__(SELinuxPolicy)
+ newobj.policy = self.policy
+ newobj.filename = self.filename
+ memo[id(self)] = newobj
+ return newobj
+
+ #
+ # Policy loading functions
+ #
+
+ def _load_policy(self, filename):
+ """Load the specified policy."""
+ self.log.info("Opening SELinux policy \"{0}\"".format(filename))
+
+ try:
+ self.policy = qpol.qpol_policy_factory(str(filename))
+ except SyntaxError as err:
+ raise exception.InvalidPolicy("Error opening policy file \"{0}\": {1}".
+ format(filename, err))
+
+ self.log.info("Successfully opened SELinux policy \"{0}\"".format(filename))
+ self.filename = filename
+
+ @staticmethod
+ def _potential_policies():
+ """Generate a list of potential policies to use."""
+ # Start with binary policies in the standard location
+ base_policy_path = selinux.selinux_binary_policy_path()
+ for version in range(qpol.QPOL_POLICY_MAX_VERSION, qpol.QPOL_POLICY_MIN_VERSION-1, -1):
+ yield "{0}.{1}".format(base_policy_path, version)
+
+        # Last chance: try selinuxfs. This is not tried first, to avoid
+        # holding kernel memory for a long time.
+ if selinux.selinuxfs_exists():
+ yield selinux.selinux_current_policy_path()
+
+ def _load_running_policy(self):
+ """Try to load the current running policy."""
+ self.log.info("Attempting to locate current running policy.")
+
+ for filename in self._potential_policies():
+ try:
+ self._load_policy(filename)
+ except OSError as err:
+ if err.errno != ENOENT:
+ raise
+ else:
+ break
+ else:
+ raise RuntimeError("Unable to locate an SELinux policy to load.")
+
+ #
+ # Policy properties
+ #
+ @property
+ def handle_unknown(self):
+ """The handle unknown permissions setting (allow,deny,reject)"""
+ return self.policy.handle_unknown()
+
+ @property
+ def mls(self):
+ """(T/F) The policy has MLS enabled."""
+ return mls.enabled(self.policy)
+
+ @property
+ def version(self):
+ """The policy database version (e.g. v29)"""
+ return self.policy.version()
+
+ #
+ # Policy statistics
+ #
+
+ @property
+ def allow_count(self):
+ """The number of (type) allow rules."""
+ return self.policy.avrule_allow_count()
+
+ @property
+ def auditallow_count(self):
+ """The number of auditallow rules."""
+ return self.policy.avrule_auditallow_count()
+
+ @property
+ def boolean_count(self):
+ """The number of Booleans."""
+ return self.policy.bool_count()
+
+ @property
+ def category_count(self):
+ """The number of categories."""
+ return sum(1 for _ in self.categories())
+
+ @property
+ def class_count(self):
+ """The number of object classes."""
+ return self.policy.class_count()
+
+ @property
+ def common_count(self):
+ """The number of common permission sets."""
+ return self.policy.common_count()
+
+ @property
+ def conditional_count(self):
+ """The number of conditionals."""
+ return self.policy.cond_count()
+
+ @property
+ def constraint_count(self):
+ """The number of standard constraints."""
+ return sum(1 for c in self.constraints() if c.ruletype == "constrain")
+
+ @property
+ def dontaudit_count(self):
+ """The number of dontaudit rules."""
+ return self.policy.avrule_dontaudit_count()
+
+ @property
+ def fs_use_count(self):
+ """fs_use_* statements."""
+ return self.policy.fs_use_count()
+
+ @property
+ def genfscon_count(self):
+ """The number of genfscon statements."""
+ return self.policy.genfscon_count()
+
+ @property
+ def initialsids_count(self):
+ """The number of initial sid statements."""
+ return self.policy.isid_count()
+
+ @property
+ def level_count(self):
+ """The number of levels."""
+ return sum(1 for _ in self.levels())
+
+ @property
+ def mlsconstraint_count(self):
+ """The number of MLS constraints."""
+ return sum(1 for c in self.constraints() if c.ruletype == "mlsconstrain")
+
+ @property
+ def mlsvalidatetrans_count(self):
+ """The number of MLS validatetrans."""
+ return sum(1 for v in self.constraints() if v.ruletype == "mlsvalidatetrans")
+
+ @property
+ def netifcon_count(self):
+ """The number of netifcon statements."""
+ return self.policy.netifcon_count()
+
+ @property
+ def neverallow_count(self):
+ """The number of neverallow rules."""
+ return self.policy.avrule_neverallow_count()
+
+ @property
+ def nodecon_count(self):
+ """The number of nodecon statements."""
+ return self.policy.nodecon_count()
+
+ @property
+ def permission_count(self):
+ """The number of permissions."""
+ return sum(len(c.perms) for c in chain(self.commons(), self.classes()))
+
+ @property
+ def permissives_count(self):
+ """The number of permissive types."""
+ return self.policy.permissive_count()
+
+ @property
+ def polcap_count(self):
+ """The number of policy capabilities."""
+ return self.policy.polcap_count()
+
+ @property
+ def portcon_count(self):
+ """The number of portcon statements."""
+ return self.policy.portcon_count()
+
+ @property
+ def range_transition_count(self):
+ """The number of range_transition rules."""
+ return self.policy.range_trans_count()
+
+ @property
+ def role_count(self):
+ """The number of roles."""
+ return self.policy.role_count()
+
+ @property
+ def role_allow_count(self):
+ """The number of (role) allow rules."""
+ return self.policy.role_allow_count()
+
+ @property
+ def role_transition_count(self):
+ """The number of role_transition rules."""
+ return self.policy.role_trans_count()
+
+ @property
+ def type_attribute_count(self):
+ """The number of (type) attributes."""
+ return sum(1 for _ in self.typeattributes())
+
+ @property
+ def type_count(self):
+ """The number of types."""
+ return sum(1 for _ in self.types())
+
+ @property
+ def type_change_count(self):
+ """The number of type_change rules."""
+ return self.policy.terule_change_count()
+
+ @property
+ def type_member_count(self):
+ """The number of type_member rules."""
+ return self.policy.terule_member_count()
+
+ @property
+ def type_transition_count(self):
+ """The number of type_transition rules."""
+ return self.policy.terule_trans_count() + self.policy.filename_trans_count()
+
+ @property
+ def user_count(self):
+ """The number of users."""
+ return self.policy.user_count()
+
+ @property
+ def validatetrans_count(self):
+ """The number of validatetrans."""
+ return sum(1 for v in self.constraints() if v.ruletype == "validatetrans")
+
+ #
+ # Policy components lookup functions
+ #
+ def lookup_boolean(self, name):
+ """Look up a Boolean."""
+ return boolcond.boolean_factory(self.policy, name)
+
+ def lookup_class(self, name):
+ """Look up an object class."""
+ return objclass.class_factory(self.policy, name)
+
+ def lookup_common(self, name):
+ """Look up a common permission set."""
+ return objclass.common_factory(self.policy, name)
+
+ def lookup_initialsid(self, name):
+ """Look up an initial sid."""
+ return initsid.initialsid_factory(self.policy, name)
+
+ def lookup_level(self, level):
+ """Look up a MLS level."""
+ return mls.level_factory(self.policy, level)
+
+ def lookup_sensitivity(self, name):
+ """Look up a MLS sensitivity by name."""
+ return mls.sensitivity_factory(self.policy, name)
+
+ def lookup_range(self, range_):
+ """Look up a MLS range."""
+ return mls.range_factory(self.policy, range_)
+
+ def lookup_role(self, name):
+ """Look up a role by name."""
+ return role.role_factory(self.policy, name)
+
+ def lookup_type(self, name):
+ """Look up a type by name."""
+ return typeattr.type_factory(self.policy, name, deref=True)
+
+ def lookup_type_or_attr(self, name):
+ """Look up a type or type attribute by name."""
+ return typeattr.type_or_attr_factory(self.policy, name, deref=True)
+
+ def lookup_typeattr(self, name):
+ """Look up a type attribute by name."""
+ return typeattr.attribute_factory(self.policy, name)
+
+ def lookup_user(self, name):
+ """Look up a user by name."""
+ return user.user_factory(self.policy, name)
+
+ #
+ # Policy components generators
+ #
+
+ def bools(self):
+ """Generator which yields all Booleans."""
+ for bool_ in self.policy.bool_iter():
+ yield boolcond.boolean_factory(self.policy, bool_)
+
+ def categories(self):
+ """Generator which yields all MLS categories."""
+ for cat in self.policy.cat_iter():
+ try:
+ yield mls.category_factory(self.policy, cat)
+ except TypeError:
+ # libqpol unfortunately iterates over aliases too
+ pass
+
+ def classes(self):
+ """Generator which yields all object classes."""
+ for class_ in self.policy.class_iter():
+ yield objclass.class_factory(self.policy, class_)
+
+ def commons(self):
+ """Generator which yields all commons."""
+ for common in self.policy.common_iter():
+ yield objclass.common_factory(self.policy, common)
+
+ def defaults(self):
+ """Generator which yields all default_* statements."""
+ for default_ in self.policy.default_iter():
+ try:
+ for default_obj in default.default_factory(self.policy, default_):
+ yield default_obj
+ except exception.NoDefaults:
+ # qpol iterates over all classes. Handle case
+ # where a class has no default_* settings.
+ pass
+
+ def levels(self):
+ """Generator which yields all level declarations."""
+ for level in self.policy.level_iter():
+
+ try:
+ yield mls.level_decl_factory(self.policy, level)
+ except TypeError:
+ # libqpol unfortunately iterates over levels and sens aliases
+ pass
+
+ def polcaps(self):
+ """Generator which yields all policy capabilities."""
+ for cap in self.policy.polcap_iter():
+ yield polcap.polcap_factory(self.policy, cap)
+
+ def roles(self):
+ """Generator which yields all roles."""
+ for role_ in self.policy.role_iter():
+ yield role.role_factory(self.policy, role_)
+
+ def sensitivities(self):
+ """Generator which yields all sensitivities."""
+ # see mls.py for more info on why level_iter is used here.
+ for sens in self.policy.level_iter():
+ try:
+ yield mls.sensitivity_factory(self.policy, sens)
+ except TypeError:
+ # libqpol unfortunately iterates over sens and aliases
+ pass
+
+ def types(self):
+ """Generator which yields all types."""
+ for type_ in self.policy.type_iter():
+ try:
+ yield typeattr.type_factory(self.policy, type_)
+ except TypeError:
+ # libqpol unfortunately iterates over attributes and aliases
+ pass
+
+ def typeattributes(self):
+ """Generator which yields all (type) attributes."""
+ for type_ in self.policy.type_iter():
+ try:
+ yield typeattr.attribute_factory(self.policy, type_)
+ except TypeError:
+ # libqpol unfortunately iterates over attributes and aliases
+ pass
+
+ def users(self):
+ """Generator which yields all users."""
+ for user_ in self.policy.user_iter():
+ yield user.user_factory(self.policy, user_)
+
+ #
+ # Policy rules generators
+ #
+ def mlsrules(self):
+ """Generator which yields all MLS rules."""
+ for rule in self.policy.range_trans_iter():
+ yield mlsrule.mls_rule_factory(self.policy, rule)
+
+ def rbacrules(self):
+ """Generator which yields all RBAC rules."""
+ for rule in chain(self.policy.role_allow_iter(),
+ self.policy.role_trans_iter()):
+ yield rbacrule.rbac_rule_factory(self.policy, rule)
+
+ def terules(self):
+ """Generator which yields all type enforcement rules."""
+ for rule in chain(self.policy.avrule_iter(),
+ self.policy.terule_iter(),
+ self.policy.filename_trans_iter()):
+ yield terule.te_rule_factory(self.policy, rule)
+
+ #
+ # Policy rule type validators
+ #
+ @staticmethod
+ def validate_constraint_ruletype(types):
+ """Validate constraint types."""
+ constraint.validate_ruletype(types)
+
+ @staticmethod
+ def validate_mls_ruletype(types):
+ """Validate MLS rule types."""
+ mlsrule.validate_ruletype(types)
+
+ @staticmethod
+ def validate_rbac_ruletype(types):
+ """Validate RBAC rule types."""
+ rbacrule.validate_ruletype(types)
+
+ @staticmethod
+ def validate_te_ruletype(types):
+ """Validate type enforcement rule types."""
+ terule.validate_ruletype(types)
+
+ #
+ # Constraints generators
+ #
+
+ def constraints(self):
+ """Generator which yields all constraints (regular and MLS)."""
+ for constraint_ in chain(self.policy.constraint_iter(),
+ self.policy.validatetrans_iter()):
+
+ yield constraint.constraint_factory(self.policy, constraint_)
+
+ #
+ # In-policy Labeling statement generators
+ #
+ def fs_uses(self):
+ """Generator which yields all fs_use_* statements."""
+ for fs_use in self.policy.fs_use_iter():
+ yield fscontext.fs_use_factory(self.policy, fs_use)
+
+ def genfscons(self):
+ """Generator which yields all genfscon statements."""
+ for fscon in self.policy.genfscon_iter():
+ yield fscontext.genfscon_factory(self.policy, fscon)
+
+ def initialsids(self):
+ """Generator which yields all initial SID statements."""
+ for sid in self.policy.isid_iter():
+ yield initsid.initialsid_factory(self.policy, sid)
+
+ def netifcons(self):
+ """Generator which yields all netifcon statements."""
+ for ifcon in self.policy.netifcon_iter():
+ yield netcontext.netifcon_factory(self.policy, ifcon)
+
+ def nodecons(self):
+ """Generator which yields all nodecon statements."""
+ for node in self.policy.nodecon_iter():
+ yield netcontext.nodecon_factory(self.policy, node)
+
+ def portcons(self):
+ """Generator which yields all portcon statements."""
+ for port in self.policy.portcon_iter():
+ yield netcontext.portcon_factory(self.policy, port)
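
The methods above form the public query surface of the policy object: counts
are plain properties, lookup_*() returns a single wrapped symbol, and the
plural methods are generators. A minimal usage sketch, assuming the enclosing
class is exposed as setools.SELinuxPolicy (as in upstream SETools) and a
binary policy file is available; the path and type name are hypothetical:

    import setools

    p = setools.SELinuxPolicy("policy.29")    # hypothetical policy file

    # summary counts are cheap properties
    print("{0} types, {1} dontaudit rules".format(
        p.type_count, p.dontaudit_count))

    # generators yield policyrep objects that render themselves
    for bool_ in p.bools():
        print(bool_.statement())              # e.g. "bool secure_mode false;"

    # lookups raise InvalidSymbol subclasses for unknown names
    init = p.lookup_type("init_t")            # hypothetical type name
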
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/policyrep/_qpol.so b/lib/python2.7/site-packages/setoolsgui/setools/policyrep/_qpol.so
new file mode 100755
index 0000000..aaccf28
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/policyrep/_qpol.so
Binary files differ
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/policyrep/boolcond.py b/lib/python2.7/site-packages/setoolsgui/setools/policyrep/boolcond.py
new file mode 100644
index 0000000..c3c0608
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/policyrep/boolcond.py
@@ -0,0 +1,167 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+from . import exception
+from . import qpol
+from . import symbol
+
+
+def boolean_factory(policy, name):
+ """Factory function for creating Boolean statement objects."""
+
+ if isinstance(name, Boolean):
+ assert name.policy == policy
+ return name
+ elif isinstance(name, qpol.qpol_bool_t):
+ return Boolean(policy, name)
+
+ try:
+ return Boolean(policy, qpol.qpol_bool_t(policy, str(name)))
+ except ValueError:
+ raise exception.InvalidBoolean("{0} is not a valid Boolean".format(name))
+
+
+def condexpr_factory(policy, name):
+ """Factory function for creating conditional expression objects."""
+
+ if not isinstance(name, qpol.qpol_cond_t):
+ raise TypeError("Conditional expressions cannot be looked up.")
+
+ return ConditionalExpr(policy, name)
+
+
+class Boolean(symbol.PolicySymbol):
+
+ """A Boolean."""
+
+ @property
+ def state(self):
+ """The default state of the Boolean."""
+ return bool(self.qpol_symbol.state(self.policy))
+
+ def statement(self):
+ """The policy statement."""
+ return "bool {0} {1};".format(self, str(self.state).lower())
+
+
+class ConditionalExpr(symbol.PolicySymbol):
+
+ """A conditional policy expression."""
+
+ _cond_expr_val_to_text = {
+ qpol.QPOL_COND_EXPR_NOT: "!",
+ qpol.QPOL_COND_EXPR_OR: "||",
+ qpol.QPOL_COND_EXPR_AND: "&&",
+ qpol.QPOL_COND_EXPR_XOR: "^",
+ qpol.QPOL_COND_EXPR_EQ: "==",
+ qpol.QPOL_COND_EXPR_NEQ: "!="}
+
+ _cond_expr_val_to_precedence = {
+ qpol.QPOL_COND_EXPR_NOT: 5,
+ qpol.QPOL_COND_EXPR_OR: 1,
+ qpol.QPOL_COND_EXPR_AND: 3,
+ qpol.QPOL_COND_EXPR_XOR: 2,
+ qpol.QPOL_COND_EXPR_EQ: 4,
+ qpol.QPOL_COND_EXPR_NEQ: 4}
+
+ def __contains__(self, other):
+ for expr_node in self.qpol_symbol.expr_node_iter(self.policy):
+ expr_node_type = expr_node.expr_type(self.policy)
+
+ if expr_node_type == qpol.QPOL_COND_EXPR_BOOL and other == \
+ boolean_factory(self.policy, expr_node.get_boolean(self.policy)):
+ return True
+
+ return False
+
+ def __str__(self):
+ # qpol representation is in postfix notation. This code
+ # converts it to infix notation. Parentheses are added
+ # to ensure correct expressions, though they may end up
+ # being overused. Set the previous operator at the start to
+ # the highest precedence (NOT) so a single binary operator
+ # produces no parentheses.
+ stack = []
+ prev_op_precedence = self._cond_expr_val_to_precedence[qpol.QPOL_COND_EXPR_NOT]
+ for expr_node in self.qpol_symbol.expr_node_iter(self.policy):
+ expr_node_type = expr_node.expr_type(self.policy)
+
+ if expr_node_type == qpol.QPOL_COND_EXPR_BOOL:
+ # append the boolean name
+ nodebool = boolean_factory(
+ self.policy, expr_node.get_boolean(self.policy))
+ stack.append(str(nodebool))
+ elif expr_node_type == qpol.QPOL_COND_EXPR_NOT: # unary operator
+ operand = stack.pop()
+ operator = self._cond_expr_val_to_text[expr_node_type]
+ op_precedence = self._cond_expr_val_to_precedence[expr_node_type]
+
+ # NOT is the highest precedence, so only need
+ # parentheses if the operand is a subexpression
+ if isinstance(operand, list):
+ subexpr = [operator, "(", operand, ")"]
+ else:
+ subexpr = [operator, operand]
+
+ stack.append(subexpr)
+ prev_op_precedence = op_precedence
+ else:
+ operand1 = stack.pop()
+ operand2 = stack.pop()
+ operator = self._cond_expr_val_to_text[expr_node_type]
+ op_precedence = self._cond_expr_val_to_precedence[expr_node_type]
+
+ if prev_op_precedence > op_precedence:
+ # if previous operator is of higher precedence
+ # no parentheses are needed.
+ subexpr = [operand1, operator, operand2]
+ else:
+ subexpr = ["(", operand1, operator, operand2, ")"]
+
+ stack.append(subexpr)
+ prev_op_precedence = op_precedence
+
+ return self.__unwind_subexpression(stack)
+
+ def __unwind_subexpression(self, expr):
+ ret = []
+
+ # do a string.join on sublists (subexpressions)
+ for i in expr:
+ if isinstance(i, list):
+ ret.append(self.__unwind_subexpression(i))
+ else:
+ ret.append(i)
+
+ return ' '.join(ret)
+
+ @property
+ def booleans(self):
+ """The set of Booleans in the expression."""
+ bools = set()
+
+ for expr_node in self.qpol_symbol.expr_node_iter(self.policy):
+ expr_node_type = expr_node.expr_type(self.policy)
+
+ if expr_node_type == qpol.QPOL_COND_EXPR_BOOL:
+ bools.add(boolean_factory(self.policy, expr_node.get_boolean(self.policy)))
+
+ return bools
+
+ def statement(self):
+ raise exception.NoStatement
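
The __str__ method above rebuilds infix text from qpol's postfix expression
with a stack. A stripped-down, standalone sketch of the same walk (precedence
tracking omitted, every binary node parenthesized) makes the stack discipline
easy to verify by hand:

    def postfix_to_infix(tokens):
        """Render a postfix token list ('!' unary, the rest binary) as infix."""
        stack = []
        for tok in tokens:
            if tok == "!":                        # unary: wrap the top operand
                stack.append("! ( {0} )".format(stack.pop()))
            elif tok in ("&&", "||", "^", "==", "!="):
                right, left = stack.pop(), stack.pop()
                stack.append("( {0} {1} {2} )".format(left, tok, right))
            else:                                 # a Boolean name
                stack.append(tok)
        return stack.pop()

    print(postfix_to_infix(["a", "b", "&&", "!"]))   # ! ( ( a && b ) )
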
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/policyrep/constraint.py b/lib/python2.7/site-packages/setoolsgui/setools/policyrep/constraint.py
new file mode 100644
index 0000000..9994c5b
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/policyrep/constraint.py
@@ -0,0 +1,297 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+from . import exception
+from . import qpol
+from . import role
+from . import symbol
+from . import objclass
+from . import typeattr
+from . import user
+
+
+def _is_mls(policy, sym):
+ """Determine if this is a regular or MLS constraint/validatetrans."""
+ # this can only be determined by inspecting the expression.
+ for expr_node in sym.expr_iter(policy):
+ sym_type = expr_node.sym_type(policy)
+ expr_type = expr_node.expr_type(policy)
+
+ if expr_type == qpol.QPOL_CEXPR_TYPE_ATTR and sym_type >= qpol.QPOL_CEXPR_SYM_L1L2:
+ return True
+
+ return False
+
+
+def validate_ruletype(types):
+ """Validate constraint rule types."""
+ for t in types:
+ if t not in ["constrain", "mlsconstrain", "validatetrans", "mlsvalidatetrans"]:
+ raise exception.InvalidConstraintType("{0} is not a valid constraint type.".format(t))
+
+
+def constraint_factory(policy, sym):
+ """Factory function for creating constraint objects."""
+
+ try:
+ if _is_mls(policy, sym):
+ if isinstance(sym, qpol.qpol_constraint_t):
+ return Constraint(policy, sym, "mlsconstrain")
+ else:
+ return Validatetrans(policy, sym, "mlsvalidatetrans")
+ else:
+ if isinstance(sym, qpol.qpol_constraint_t):
+ return Constraint(policy, sym, "constrain")
+ else:
+ return Validatetrans(policy, sym, "validatetrans")
+
+ except AttributeError:
+ raise TypeError("Constraints cannot be looked-up.")
+
+
+class BaseConstraint(symbol.PolicySymbol):
+
+ """Base class for constraint rules."""
+
+ _expr_type_to_text = {
+ qpol.QPOL_CEXPR_TYPE_NOT: "not",
+ qpol.QPOL_CEXPR_TYPE_AND: "and",
+ qpol.QPOL_CEXPR_TYPE_OR: "\n\tor"}
+
+ _expr_op_to_text = {
+ qpol.QPOL_CEXPR_OP_EQ: "==",
+ qpol.QPOL_CEXPR_OP_NEQ: "!=",
+ qpol.QPOL_CEXPR_OP_DOM: "dom",
+ qpol.QPOL_CEXPR_OP_DOMBY: "domby",
+ qpol.QPOL_CEXPR_OP_INCOMP: "incomp"}
+
+ _sym_to_text = {
+ qpol.QPOL_CEXPR_SYM_USER: "u1",
+ qpol.QPOL_CEXPR_SYM_ROLE: "r1",
+ qpol.QPOL_CEXPR_SYM_TYPE: "t1",
+ qpol.QPOL_CEXPR_SYM_USER + qpol.QPOL_CEXPR_SYM_TARGET: "u2",
+ qpol.QPOL_CEXPR_SYM_ROLE + qpol.QPOL_CEXPR_SYM_TARGET: "r2",
+ qpol.QPOL_CEXPR_SYM_TYPE + qpol.QPOL_CEXPR_SYM_TARGET: "t2",
+ qpol.QPOL_CEXPR_SYM_USER + qpol.QPOL_CEXPR_SYM_XTARGET: "u3",
+ qpol.QPOL_CEXPR_SYM_ROLE + qpol.QPOL_CEXPR_SYM_XTARGET: "r3",
+ qpol.QPOL_CEXPR_SYM_TYPE + qpol.QPOL_CEXPR_SYM_XTARGET: "t3",
+ qpol.QPOL_CEXPR_SYM_L1L2: "l1",
+ qpol.QPOL_CEXPR_SYM_L1H2: "l1",
+ qpol.QPOL_CEXPR_SYM_H1L2: "h1",
+ qpol.QPOL_CEXPR_SYM_H1H2: "h1",
+ qpol.QPOL_CEXPR_SYM_L1H1: "l1",
+ qpol.QPOL_CEXPR_SYM_L2H2: "l2",
+ qpol.QPOL_CEXPR_SYM_L1L2 + qpol.QPOL_CEXPR_SYM_TARGET: "l2",
+ qpol.QPOL_CEXPR_SYM_L1H2 + qpol.QPOL_CEXPR_SYM_TARGET: "h2",
+ qpol.QPOL_CEXPR_SYM_H1L2 + qpol.QPOL_CEXPR_SYM_TARGET: "l2",
+ qpol.QPOL_CEXPR_SYM_H1H2 + qpol.QPOL_CEXPR_SYM_TARGET: "h2",
+ qpol.QPOL_CEXPR_SYM_L1H1 + qpol.QPOL_CEXPR_SYM_TARGET: "h1",
+ qpol.QPOL_CEXPR_SYM_L2H2 + qpol.QPOL_CEXPR_SYM_TARGET: "h2"}
+
+ # Boolean operators
+ _expr_type_to_precedence = {
+ qpol.QPOL_CEXPR_TYPE_NOT: 3,
+ qpol.QPOL_CEXPR_TYPE_AND: 2,
+ qpol.QPOL_CEXPR_TYPE_OR: 1}
+
+ # Logical operators have the same precedence
+ _logical_op_precedence = 4
+
+ def __init__(self, policy, qpol_symbol, ruletype):
+ symbol.PolicySymbol.__init__(self, policy, qpol_symbol)
+ self.ruletype = ruletype
+
+ def __str__(self):
+ raise NotImplementedError
+
+ def _build_expression(self):
+ # qpol representation is in postfix notation. This code
+ # converts it to infix notation. Parentheses are added
+ # to ensure correct expressions, though they may end up
+ # being overused. Set the previous operator at the start to
+ # the highest precedence (a logical operator) so a single
+ # binary operator produces no parentheses.
+
+ stack = []
+ prev_op_precedence = self._logical_op_precedence
+ for expr_node in self.qpol_symbol.expr_iter(self.policy):
+ op = expr_node.op(self.policy)
+ sym_type = expr_node.sym_type(self.policy)
+ expr_type = expr_node.expr_type(self.policy)
+
+ if expr_type == qpol.QPOL_CEXPR_TYPE_ATTR:
+ # logical operator with symbol (e.g. u1 == u2)
+ operand1 = self._sym_to_text[sym_type]
+ operand2 = self._sym_to_text[sym_type + qpol.QPOL_CEXPR_SYM_TARGET]
+ operator = self._expr_op_to_text[op]
+
+ stack.append([operand1, operator, operand2])
+
+ prev_op_precedence = self._logical_op_precedence
+ elif expr_type == qpol.QPOL_CEXPR_TYPE_NAMES:
+ # logical operator with type or attribute list (e.g. t1 == { spam_t eggs_t })
+ operand1 = self._sym_to_text[sym_type]
+ operator = self._expr_op_to_text[op]
+
+ names = list(expr_node.names_iter(self.policy))
+
+ if not names:
+ operand2 = "<empty set>"
+ elif len(names) == 1:
+ operand2 = names[0]
+ else:
+ operand2 = "{{ {0} }}".format(' '.join(names))
+
+ stack.append([operand1, operator, operand2])
+
+ prev_op_precedence = self._logical_op_precedence
+ elif expr_type == qpol.QPOL_CEXPR_TYPE_NOT:
+ # unary operator (not)
+ operand = stack.pop()
+ operator = self._expr_type_to_text[expr_type]
+
+ stack.append([operator, "(", operand, ")"])
+
+ prev_op_precedence = self._expr_type_to_precedence[expr_type]
+ else:
+ # binary operator (and/or)
+ operand1 = stack.pop()
+ operand2 = stack.pop()
+ operator = self._expr_type_to_text[expr_type]
+ op_precedence = self._expr_type_to_precedence[expr_type]
+
+ # if previous operator is of higher precedence
+ # no parentheses are needed.
+ if op_precedence < prev_op_precedence:
+ stack.append([operand1, operator, operand2])
+ else:
+ stack.append(["(", operand1, operator, operand2, ")"])
+
+ prev_op_precedence = op_precedence
+
+ return self.__unwind_subexpression(stack)
+
+ def _get_symbols(self, syms, factory):
+ """
+ Internal generator for getting users/roles/types in a constraint
+ expression. Symbols will be yielded multiple times if they appear
+ in the expression multiple times.
+
+ Parameters:
+ syms List of qpol symbol types.
+ factory The factory function related to these symbols.
+ """
+ for expr_node in self.qpol_symbol.expr_iter(self.policy):
+ sym_type = expr_node.sym_type(self.policy)
+ expr_type = expr_node.expr_type(self.policy)
+
+ if expr_type == qpol.QPOL_CEXPR_TYPE_NAMES and sym_type in syms:
+ for s in expr_node.names_iter(self.policy):
+ yield factory(self.policy, s)
+
+ def __unwind_subexpression(self, expr):
+ ret = []
+
+ # do a string.join on sublists (subexpressions)
+ for i in expr:
+ if isinstance(i, list):
+ ret.append(self.__unwind_subexpression(i))
+ else:
+ ret.append(i)
+
+ return ' '.join(ret)
+
+ # There is no levels property: specific levels cannot be
+ # used in constraint expressions, only the symbolic l1,
+ # h1, etc. references.
+
+ @property
+ def roles(self):
+ """The roles used in the expression."""
+ role_syms = [qpol.QPOL_CEXPR_SYM_ROLE,
+ qpol.QPOL_CEXPR_SYM_ROLE + qpol.QPOL_CEXPR_SYM_TARGET,
+ qpol.QPOL_CEXPR_SYM_ROLE + qpol.QPOL_CEXPR_SYM_XTARGET]
+
+ return set(self._get_symbols(role_syms, role.role_factory))
+
+ @property
+ def perms(self):
+ raise NotImplementedError
+
+ def statement(self):
+ return str(self)
+
+ @property
+ def tclass(self):
+ """Object class for this constraint."""
+ return objclass.class_factory(self.policy, self.qpol_symbol.object_class(self.policy))
+
+ @property
+ def types(self):
+ """The types and type attributes used in the expression."""
+ type_syms = [qpol.QPOL_CEXPR_SYM_TYPE,
+ qpol.QPOL_CEXPR_SYM_TYPE + qpol.QPOL_CEXPR_SYM_TARGET,
+ qpol.QPOL_CEXPR_SYM_TYPE + qpol.QPOL_CEXPR_SYM_XTARGET]
+
+ return set(self._get_symbols(type_syms, typeattr.type_or_attr_factory))
+
+ @property
+ def users(self):
+ """The users used in the expression."""
+ user_syms = [qpol.QPOL_CEXPR_SYM_USER,
+ qpol.QPOL_CEXPR_SYM_USER + qpol.QPOL_CEXPR_SYM_TARGET,
+ qpol.QPOL_CEXPR_SYM_USER + qpol.QPOL_CEXPR_SYM_XTARGET]
+
+ return set(self._get_symbols(user_syms, user.user_factory))
+
+
+class Constraint(BaseConstraint):
+
+ """A constraint rule (constrain/mlsconstrain)."""
+
+ def __str__(self):
+ rule_string = "{0.ruletype} {0.tclass} ".format(self)
+
+ perms = self.perms
+ if len(perms) > 1:
+ rule_string += "{{ {0} }} (\n".format(' '.join(perms))
+ else:
+ # convert to list since sets cannot be indexed
+ rule_string += "{0} (\n".format(list(perms)[0])
+
+ rule_string += "\t{0}\n);".format(self._build_expression())
+
+ return rule_string
+
+ @property
+ def perms(self):
+ """The constraint's permission set."""
+ return set(self.qpol_symbol.perm_iter(self.policy))
+
+
+class Validatetrans(BaseConstraint):
+
+ """A validatetrans rule (validatetrans/mlsvalidatetrans)."""
+
+ def __str__(self):
+ return "{0.ruletype} {0.tclass}\n\t{1}\n);".format(self, self._build_expression())
+
+ @property
+ def perms(self):
+ raise exception.ConstraintUseError("{0} rules do not have permissions.".
+ format(self.ruletype))
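
Constraint objects render back to policy syntax through statement(). A short
sketch, reusing the hypothetical loaded policy p from the earlier example:

    # print only the MLS constraints (ruletype is set by the factory)
    for c in p.constraints():
        if c.ruletype == "mlsconstrain":
            print(c.statement())

A rendered constraint looks roughly like:

    mlsconstrain file { read write } (
        l1 dom l2
    );
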
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/policyrep/context.py b/lib/python2.7/site-packages/setoolsgui/setools/policyrep/context.py
new file mode 100644
index 0000000..f2f3fc7
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/policyrep/context.py
@@ -0,0 +1,68 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+from . import exception
+from . import qpol
+from . import symbol
+from . import user
+from . import role
+from . import typeattr
+from . import mls
+
+
+def context_factory(policy, name):
+ """Factory function for creating context objects."""
+
+ if not isinstance(name, qpol.qpol_context_t):
+ raise TypeError("Contexts cannot be looked-up.")
+
+ return Context(policy, name)
+
+
+class Context(symbol.PolicySymbol):
+
+ """A SELinux security context/security attribute."""
+
+ def __str__(self):
+ try:
+ return "{0.user}:{0.role}:{0.type_}:{0.range_}".format(self)
+ except exception.MLSDisabled:
+ return "{0.user}:{0.role}:{0.type_}".format(self)
+
+ @property
+ def user(self):
+ """The user portion of the context."""
+ return user.user_factory(self.policy, self.qpol_symbol.user(self.policy))
+
+ @property
+ def role(self):
+ """The role portion of the context."""
+ return role.role_factory(self.policy, self.qpol_symbol.role(self.policy))
+
+ @property
+ def type_(self):
+ """The type portion of the context."""
+ return typeattr.type_factory(self.policy, self.qpol_symbol.type_(self.policy))
+
+ @property
+ def range_(self):
+ """The MLS range of the context."""
+ return mls.range_factory(self.policy, self.qpol_symbol.range(self.policy))
+
+ def statement(self):
+ raise exception.NoStatement
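
Because context_factory rejects strings, Context objects are only reached
through the statements that carry them. A sketch, with the hypothetical
policy p as before:

    for sid in p.initialsids():
        ctx = sid.context
        print("{0}: user={1} role={2} type={3}".format(
            sid, ctx.user, ctx.role, ctx.type_))
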
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/policyrep/default.py b/lib/python2.7/site-packages/setoolsgui/setools/policyrep/default.py
new file mode 100644
index 0000000..175b709
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/policyrep/default.py
@@ -0,0 +1,128 @@
+# Copyright 2014, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+from . import exception
+from . import symbol
+from . import objclass
+from . import qpol
+
+
+def default_factory(policy, sym):
+ """Factory generator for creating default_* statement objects."""
+
+ # The low level policy groups default_* settings by object class.
+ # Since each class can have up to four default_* statements,
+ # this factory function is a generator which yields up to
+ # four Default objects.
+
+ if not isinstance(sym, qpol.qpol_default_object_t):
+ raise NotImplementedError
+
+ # qpol will essentially iterate over all classes
+ # and emit None for classes that don't set a default
+ if not sym.object_class(policy):
+ raise exception.NoDefaults
+
+ if sym.user_default(policy):
+ yield UserDefault(policy, sym)
+
+ if sym.role_default(policy):
+ yield RoleDefault(policy, sym)
+
+ if sym.type_default(policy):
+ yield TypeDefault(policy, sym)
+
+ if sym.range_default(policy):
+ yield RangeDefault(policy, sym)
+
+
+class Default(symbol.PolicySymbol):
+
+ """Base class for default_* statements."""
+
+ def __str__(self):
+ raise NotImplementedError
+
+ @property
+ def object_class(self):
+ """The object class."""
+ return objclass.class_factory(self.policy, self.qpol_symbol.object_class(self.policy))
+
+ @property
+ def default(self):
+ raise NotImplementedError
+
+ def statement(self):
+ return str(self)
+
+
+class UserDefault(Default):
+
+ """A default_user statement."""
+
+ def __str__(self):
+ return "default_user {0.object_class} {0.default};".format(self)
+
+ @property
+ def default(self):
+ """The default user location (source/target)."""
+ return self.qpol_symbol.user_default(self.policy)
+
+
+class RoleDefault(Default):
+
+ """A default_role statement."""
+
+ def __str__(self):
+ return "default_role {0.object_class} {0.default};".format(self)
+
+ @property
+ def default(self):
+ """The default role location (source/target)."""
+ return self.qpol_symbol.role_default(self.policy)
+
+
+class TypeDefault(Default):
+
+ """A default_type statement."""
+
+ def __str__(self):
+ return "default_type {0.object_class} {0.default};".format(self)
+
+ @property
+ def default(self):
+ """The default type location (source/target)."""
+ return self.qpol_symbol.type_default(self.policy)
+
+
+class RangeDefault(Default):
+
+ """A default_range statement."""
+
+ def __str__(self):
+ return "default_range {0.object_class} {0.default} {0.default_range};".format(self)
+
+ @property
+ def default(self):
+ """The default range location (source/target)."""
+ return self.qpol_symbol.range_default(self.policy).split()[0]
+
+ @property
+ def default_range(self):
+ """The default range setting (low/high/low_high)."""
+ return self.qpol_symbol.range_default(self.policy).split()[1]
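
Since default_factory is a generator, a single qpol symbol can yield up to
four statement objects; the defaults() method shown earlier flattens that
across the whole policy. A sketch, with p as before (the printed statement
is illustrative):

    for d in p.defaults():
        print(d)    # e.g. "default_user file source;"
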
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/policyrep/exception.py b/lib/python2.7/site-packages/setoolsgui/setools/policyrep/exception.py
new file mode 100644
index 0000000..ce367c0
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/policyrep/exception.py
@@ -0,0 +1,248 @@
+# Copyright 2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+from ..exception import SEToolsException
+
+#
+# Policyrep base exception
+#
+
+
+class PolicyrepException(SEToolsException):
+
+ """Base class for all policyrep exceptions."""
+ pass
+
+
+#
+# General Policyrep exceptions
+#
+
+
+class InvalidPolicy(SyntaxError, PolicyrepException):
+
+ """Exception for invalid policy."""
+ pass
+
+
+class MLSDisabled(PolicyrepException):
+
+ """
+ Exception when MLS is disabled.
+ """
+ pass
+
+
+#
+# Invalid component exceptions
+#
+class InvalidSymbol(ValueError, PolicyrepException):
+
+ """
+ Base class for invalid symbols. Typically this is raised when
+ looking up an object that does not exist in the policy.
+ """
+ pass
+
+
+class InvalidBoolean(InvalidSymbol):
+
+ """Exception for invalid Booleans."""
+ pass
+
+
+class InvalidCategory(InvalidSymbol):
+
+ """Exception for invalid MLS categories."""
+ pass
+
+
+class InvalidClass(InvalidSymbol):
+
+ """Exception for invalid object classes."""
+ pass
+
+
+class InvalidCommon(InvalidSymbol):
+
+ """Exception for invalid common permission sets."""
+ pass
+
+
+class InvalidInitialSid(InvalidSymbol):
+
+ """Exception for invalid initial sids."""
+ pass
+
+
+class InvalidLevel(InvalidSymbol):
+
+ """
+ Exception for an invalid level.
+ """
+ pass
+
+
+class InvalidLevelDecl(InvalidSymbol):
+
+ """
+ Exception for an invalid level declaration.
+ """
+ pass
+
+
+class InvalidRange(InvalidSymbol):
+
+ """
+ Exception for an invalid range.
+ """
+ pass
+
+
+class InvalidRole(InvalidSymbol):
+
+ """Exception for invalid roles."""
+ pass
+
+
+class InvalidSensitivity(InvalidSymbol):
+
+ """
+ Exception for an invalid sensitivity.
+ """
+ pass
+
+
+class InvalidType(InvalidSymbol):
+
+ """Exception for invalid types and attributes."""
+ pass
+
+
+class InvalidUser(InvalidSymbol):
+
+ """Exception for invalid users."""
+ pass
+
+#
+# Rule type exceptions
+#
+
+
+class InvalidRuleType(InvalidSymbol):
+
+ """Exception for invalid rule types."""
+ pass
+
+
+class InvalidConstraintType(InvalidSymbol):
+
+ """Exception for invalid constraint types."""
+ # This is not a rule but is similar.
+ pass
+
+
+class InvalidMLSRuleType(InvalidRuleType):
+
+ """Exception for invalid MLS rule types."""
+ pass
+
+
+class InvalidRBACRuleType(InvalidRuleType):
+
+ """Exception for invalid RBAC rule types."""
+ pass
+
+
+class InvalidTERuleType(InvalidRuleType):
+
+ """Exception for invalid TE rule types."""
+ pass
+
+
+#
+# Object use errors
+#
+class SymbolUseError(PolicyrepException):
+
+ """
+ Base class for incorrectly using an object. Typically this is
+ for classes with strong similarities but slight variations in
+ functionality, e.g. allow vs. type_transition rules.
+ """
+ pass
+
+
+class RuleUseError(SymbolUseError):
+
+ """
+ Base class for incorrect parameters for a rule. For
+ example, trying to get the permissions of a rule that has no
+ permissions.
+ """
+ pass
+
+
+class ConstraintUseError(SymbolUseError):
+
+ """Exception when getting permissions from a validatetrans."""
+ pass
+
+
+class NoStatement(SymbolUseError):
+
+ """
+ Exception for objects that have no inherent statement, such
+ as conditional expressions and MLS ranges.
+ """
+ pass
+
+
+#
+# Other exceptions
+#
+class NoCommon(PolicyrepException):
+
+ """
+ Exception when a class does not inherit a common permission set.
+ """
+ pass
+
+
+class NoDefaults(InvalidSymbol):
+
+ """Exception for classes that have no default_* statements."""
+ pass
+
+
+class RuleNotConditional(PolicyrepException):
+
+ """
+ Exception when getting the conditional expression for rules
+ that are unconditional.
+ """
+ pass
+
+
+class TERuleNoFilename(PolicyrepException):
+
+ """
+ Exception when getting the file name of a
+ type_transition rule that has no file name.
+ """
+ pass
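
The hierarchy lets callers catch broadly or narrowly: every lookup failure is
an InvalidSymbol (which is also a ValueError), while misusing an object
raises a SymbolUseError subclass. A sketch, with p as before and a
deliberately bogus type name:

    from setools.policyrep import exception

    try:
        p.lookup_type("no_such_type_t")
    except exception.InvalidSymbol as err:
        print(err)    # e.g. "no_such_type_t is not a valid type"
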
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/policyrep/fscontext.py b/lib/python2.7/site-packages/setoolsgui/setools/policyrep/fscontext.py
new file mode 100644
index 0000000..a17b0bc
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/policyrep/fscontext.py
@@ -0,0 +1,123 @@
+# Copyright 2014, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import stat
+
+from . import qpol
+from . import symbol
+from . import context
+
+
+def fs_use_factory(policy, name):
+ """Factory function for creating fs_use_* objects."""
+
+ if not isinstance(name, qpol.qpol_fs_use_t):
+ raise TypeError("fs_use_* cannot be looked-up.")
+
+ return FSUse(policy, name)
+
+
+def genfscon_factory(policy, name):
+ """Factory function for creating genfscon objects."""
+
+ if not isinstance(name, qpol.qpol_genfscon_t):
+ raise TypeError("Genfscons cannot be looked-up.")
+
+ return Genfscon(policy, name)
+
+
+class FSContext(symbol.PolicySymbol):
+
+ """Base class for in-policy labeling rules."""
+
+ def __str__(self):
+ raise NotImplementedError
+
+ @property
+ def fs(self):
+ """The filesystem type for this statement."""
+ return self.qpol_symbol.name(self.policy)
+
+ @property
+ def context(self):
+ """The context for this statement."""
+ return context.context_factory(self.policy, self.qpol_symbol.context(self.policy))
+
+ def statement(self):
+ return str(self)
+
+
+class Genfscon(FSContext):
+
+ """A genfscon statement."""
+
+ _filetype_to_text = {
+ 0: "",
+ stat.S_IFBLK: "-b",
+ stat.S_IFCHR: "-c",
+ stat.S_IFDIR: "-d",
+ stat.S_IFIFO: "-p",
+ stat.S_IFREG: "--",
+ stat.S_IFLNK: "-l",
+ stat.S_IFSOCK: "-s"}
+
+ def __str__(self):
+ return "genfscon {0.fs} {0.path} {1} {0.context}".format(
+ self, self._filetype_to_text[self.filetype])
+
+ def __eq__(self, other):
+ # Libqpol allocates new C objects in the
+ # genfscons iterator, so pointer comparison
+ # in the PolicySymbol object doesn't work.
+ try:
+ return (self.fs == other.fs and
+ self.path == other.path and
+ self.filetype == other.filetype and
+ self.context == other.context)
+ except AttributeError:
+ return str(self) == str(other)
+
+ @property
+ def filetype(self):
+ """The file type (e.g. stat.S_IFBLK) for this genfscon statement."""
+ return self.qpol_symbol.object_class(self.policy)
+
+ @property
+ def path(self):
+ """The path for this genfscon statement."""
+ return self.qpol_symbol.path(self.policy)
+
+
+class FSUse(FSContext):
+
+ """A fs_use_* statement."""
+
+ # there are more rule types, but modern SELinux
+ # only supports these three.
+ _ruletype_to_text = {
+ qpol.QPOL_FS_USE_XATTR: 'fs_use_xattr',
+ qpol.QPOL_FS_USE_TRANS: 'fs_use_trans',
+ qpol.QPOL_FS_USE_TASK: 'fs_use_task'}
+
+ def __str__(self):
+ return "{0.ruletype} {0.fs} {0.context};".format(self)
+
+ @property
+ def ruletype(self):
+ """The rule type for this fs_use_* statement."""
+ return self._ruletype_to_text[self.qpol_symbol.behavior(self.policy)]
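
Both statement types render through str(). A sketch, with p as before (the
printed contexts are illustrative):

    for fsu in p.fs_uses():
        print(fsu)    # e.g. "fs_use_xattr ext3 system_u:object_r:fs_t;"

    for gen in p.genfscons():
        print(gen)    # e.g. "genfscon proc / system_u:object_r:proc_t"
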
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/policyrep/initsid.py b/lib/python2.7/site-packages/setoolsgui/setools/policyrep/initsid.py
new file mode 100644
index 0000000..0197c74
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/policyrep/initsid.py
@@ -0,0 +1,50 @@
+# Copyright 2014, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+from . import exception
+from . import qpol
+from . import symbol
+from . import context
+
+
+def initialsid_factory(policy, name):
+ """Factory function for creating initial sid objects."""
+
+ if isinstance(name, InitialSID):
+ assert name.policy == policy
+ return name
+ elif isinstance(name, qpol.qpol_isid_t):
+ return InitialSID(policy, name)
+
+ try:
+ return InitialSID(policy, qpol.qpol_isid_t(policy, name))
+ except ValueError:
+ raise exception.InvalidInitialSid("{0} is not a valid initial sid".format(name))
+
+
+class InitialSID(symbol.PolicySymbol):
+
+ """An initial SID statement."""
+
+ @property
+ def context(self):
+ """The context for this initial SID."""
+ return context.context_factory(self.policy, self.qpol_symbol.context(self.policy))
+
+ def statement(self):
+ return "sid {0} {0.context}".format(self)
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/policyrep/mls.py b/lib/python2.7/site-packages/setoolsgui/setools/policyrep/mls.py
new file mode 100644
index 0000000..2541704
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/policyrep/mls.py
@@ -0,0 +1,463 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+# pylint: disable=protected-access
+import itertools
+
+from . import exception
+from . import qpol
+from . import symbol
+
+# qpol does not expose an equivalent of a sensitivity declaration.
+# qpol_level_t is equivalent to the level declaration:
+# level s0:c0.c1023;
+
+# qpol_mls_level_t represents a level as used in contexts,
+# such as range_transitions or labeling statements such as
+# portcon and nodecon.
+
+# Here qpol_level_t is also used for MLSSensitivity
+# since it has the sensitivity name, dominance, and there
+# is a 1:1 correspondence between the sensitivity declarations
+# and level declarations.
+
+# Hashing has to be handled below because the qpol references,
+# normally used for a hash key, are not the same for multiple
+# instances of the same object (except for level decl).
+
+
+def enabled(policy):
+ """Determine if MLS is enabled."""
+ return policy.capability(qpol.QPOL_CAP_MLS)
+
+
+def category_factory(policy, sym):
+ """Factory function for creating MLS category objects."""
+
+ if not enabled(policy):
+ raise exception.MLSDisabled
+
+ if isinstance(sym, Category):
+ assert sym.policy == policy
+ return sym
+ elif isinstance(sym, qpol.qpol_cat_t):
+ if sym.isalias(policy):
+ raise TypeError("{0} is an alias".format(sym.name(policy)))
+
+ return Category(policy, sym)
+
+ try:
+ return Category(policy, qpol.qpol_cat_t(policy, str(sym)))
+ except ValueError:
+ raise exception.InvalidCategory("{0} is not a valid category".format(sym))
+
+
+def sensitivity_factory(policy, sym):
+ """Factory function for creating MLS sensitivity objects."""
+
+ if not enabled(policy):
+ raise exception.MLSDisabled
+
+ if isinstance(sym, Sensitivity):
+ assert sym.policy == policy
+ return sym
+ elif isinstance(sym, qpol.qpol_level_t):
+ if sym.isalias(policy):
+ raise TypeError("{0} is an alias".format(sym.name(policy)))
+
+ return Sensitivity(policy, sym)
+
+ try:
+ return Sensitivity(policy, qpol.qpol_level_t(policy, str(sym)))
+ except ValueError:
+ raise exception.InvalidSensitivity("{0} is not a valid sensitivity".format(sym))
+
+
+def level_factory(policy, sym):
+ """
+ Factory function for creating MLS level objects (e.g. levels used
+ in contexts of labeling statements).
+ """
+
+ if not enabled(policy):
+ raise exception.MLSDisabled
+
+ if isinstance(sym, Level):
+ assert sym.policy == policy
+ return sym
+ elif isinstance(sym, qpol.qpol_mls_level_t):
+ return Level(policy, sym)
+
+ sens_split = str(sym).split(":")
+
+ sens = sens_split[0]
+ try:
+ semantic_level = qpol.qpol_semantic_level_t(policy, sens)
+ except ValueError:
+ raise exception.InvalidLevel("{0} is invalid ({1} is not a valid sensitivity)".
+ format(sym, sens))
+
+ try:
+ cats = sens_split[1]
+ except IndexError:
+ pass
+ else:
+ for group in cats.split(","):
+ catrange = group.split(".")
+
+ if len(catrange) == 2:
+ try:
+ semantic_level.add_cats(policy, catrange[0], catrange[1])
+ except ValueError:
+ raise exception.InvalidLevel(
+ "{0} is invalid ({1} is not a valid category range)".format(sym, group))
+ elif len(catrange) == 1:
+ try:
+ semantic_level.add_cats(policy, catrange[0], catrange[0])
+ except ValueError:
+ raise exception.InvalidLevel("{0} is invalid ({1} is not a valid category)".
+ format(sym, group))
+ else:
+ raise exception.InvalidLevel("{0} is invalid (level parsing error)".format(sym))
+
+ # convert to level object
+ try:
+ policy_level = qpol.qpol_mls_level_t(policy, semantic_level)
+ except ValueError:
+ raise exception.InvalidLevel(
+ "{0} is invalid (one or more categories are not associated with the sensitivity)".
+ format(sym))
+
+ return Level(policy, policy_level)
+
+
+def level_decl_factory(policy, sym):
+ """
+ Factory function for creating MLS level declaration objects
+ (level statements). Lookups are only by sensitivity name.
+ """
+
+ if not enabled(policy):
+ raise exception.MLSDisabled
+
+ if isinstance(sym, LevelDecl):
+ assert sym.policy == policy
+ return sym
+ elif isinstance(sym, qpol.qpol_level_t):
+ if sym.isalias(policy):
+ raise TypeError("{0} is an alias".format(sym.name(policy)))
+
+ return LevelDecl(policy, sym)
+
+ try:
+ return LevelDecl(policy, qpol.qpol_level_t(policy, str(sym)))
+ except ValueError:
+ raise exception.InvalidLevelDecl("{0} is not a valid sensitivity".format(sym))
+
+
+def range_factory(policy, sym):
+ """Factory function for creating MLS range objects."""
+
+ if not enabled(policy):
+ raise exception.MLSDisabled
+
+ if isinstance(sym, Range):
+ assert sym.policy == policy
+ return sym
+ elif isinstance(sym, qpol.qpol_mls_range_t):
+ return Range(policy, sym)
+
+ # build range:
+ levels = str(sym).split("-")
+
+ # strip() levels to handle ranges with spaces in them,
+ # e.g. s0:c1 - s0:c0.c255
+ try:
+ low = level_factory(policy, levels[0].strip())
+ except exception.InvalidLevel as ex:
+ raise exception.InvalidRange("{0} is not a valid range ({1}).".format(sym, ex))
+
+ try:
+ high = level_factory(policy, levels[1].strip())
+ except exception.InvalidLevel as ex:
+ raise exception.InvalidRange("{0} is not a valid range ({1}).".format(sym, ex))
+ except IndexError:
+ high = low
+
+ # convert to range object
+ try:
+ policy_range = qpol.qpol_mls_range_t(policy, low.qpol_symbol, high.qpol_symbol)
+ except ValueError:
+ raise exception.InvalidRange("{0} is not a valid range ({1} is not dominated by {2})".
+ format(sym, low, high))
+
+ return Range(policy, policy_range)
+
+
+class BaseMLSComponent(symbol.PolicySymbol):
+
+ """Base class for sensitivities and categories."""
+
+ @property
+ def _value(self):
+ """
+ The value of the component.
+
+ This is a low-level policy detail exposed for internal use only.
+ """
+ return self.qpol_symbol.value(self.policy)
+
+ def aliases(self):
+ """Generator that yields all aliases for this category."""
+
+ for alias in self.qpol_symbol.alias_iter(self.policy):
+ yield alias
+
+
+class Category(BaseMLSComponent):
+
+ """An MLS category."""
+
+ def statement(self):
+ aliases = list(self.aliases())
+ stmt = "category {0}".format(self)
+ if aliases:
+ if len(aliases) > 1:
+ stmt += " alias {{ {0} }}".format(' '.join(aliases))
+ else:
+ stmt += " alias {0}".format(aliases[0])
+ stmt += ";"
+ return stmt
+
+
+class Sensitivity(BaseMLSComponent):
+
+ """An MLS sensitivity"""
+
+ def __eq__(self, other):
+ try:
+ return self._value == other._value
+ except AttributeError:
+ return str(self) == str(other)
+
+ def __ge__(self, other):
+ return self._value >= other._value
+
+ def __gt__(self, other):
+ return self._value > other._value
+
+ def __le__(self, other):
+ return self._value <= other._value
+
+ def __lt__(self, other):
+ return self._value < other._value
+
+ def statement(self):
+ aliases = list(self.aliases())
+ stmt = "sensitivity {0}".format(self)
+ if aliases:
+ if len(aliases) > 1:
+ stmt += " alias {{ {0} }}".format(' '.join(aliases))
+ else:
+ stmt += " alias {0}".format(aliases[0])
+ stmt += ";"
+ return stmt
+
+
+class BaseMLSLevel(symbol.PolicySymbol):
+
+ """Base class for MLS levels."""
+
+ def __str__(self):
+ lvl = str(self.sensitivity)
+
+ # sort by policy declaration order
+ cats = sorted(self.categories(), key=lambda k: k._value)
+
+ if cats:
+ # generate short category notation: with cats sorted by
+ # value, subtracting a running counter from each value gives
+ # a key that is constant across a run of consecutive
+ # categories, so groupby() collects each run (a run longer
+ # than one is rendered as e.g. c0.c3)
+ shortlist = []
+ for _, i in itertools.groupby(
+ cats, key=lambda k, c=itertools.count(): k._value - next(c)):
+ group = list(i)
+ if len(group) > 1:
+ shortlist.append("{0}.{1}".format(group[0], group[-1]))
+ else:
+ shortlist.append(str(group[0]))
+
+ lvl += ":" + ','.join(shortlist)
+
+ return lvl
+
+ @property
+ def sensitivity(self):
+ raise NotImplementedError
+
+ def categories(self):
+ """
+ Generator that yields all individual categories for this level.
+ All categories are yielded, not a compact notation such as
+ c0.c255
+ """
+
+ for cat in self.qpol_symbol.cat_iter(self.policy):
+ yield category_factory(self.policy, cat)
+
+
+class LevelDecl(BaseMLSLevel):
+
+ """
+ The declaration statement for MLS levels, e.g:
+
+ level s7:c0.c1023;
+ """
+ # below comparisons are only based on sensitivity
+ # dominance since, in this context, the allowable
+ # category set is being defined for the level.
+ # object type is asserted here because this cannot
+ # be compared to a Level instance.
+
+ def __eq__(self, other):
+ assert not isinstance(other, Level), "Levels cannot be compared to level declarations"
+
+ try:
+ return self.sensitivity == other.sensitivity
+ except AttributeError:
+ return str(self) == str(other)
+
+ def __ge__(self, other):
+ assert not isinstance(other, Level), "Levels cannot be compared to level declarations"
+ return self.sensitivity >= other.sensitivity
+
+ def __gt__(self, other):
+ assert not isinstance(other, Level), "Levels cannot be compared to level declarations"
+ return self.sensitivity > other.sensitivity
+
+ def __le__(self, other):
+ assert not isinstance(other, Level), "Levels cannot be compared to level declarations"
+ return self.sensitivity <= other.sensitivity
+
+ def __lt__(self, other):
+ assert not isinstance(other, Level), "Levels cannot be compared to level declarations"
+ return self.sensitivity < other.sensitivity
+
+ @property
+ def sensitivity(self):
+ """The sensitivity of the level."""
+ # since the qpol symbol for levels is also used for
+ # MLSSensitivity objects, use self's qpol symbol
+ return sensitivity_factory(self.policy, self.qpol_symbol)
+
+ def statement(self):
+ return "level {0};".format(self)
+
+
+class Level(BaseMLSLevel):
+
+ """An MLS level used in contexts."""
+
+ def __hash__(self):
+ return hash(str(self))
+
+ def __eq__(self, other):
+ try:
+ othercats = set(other.categories())
+ except AttributeError:
+ return str(self) == str(other)
+ else:
+ selfcats = set(self.categories())
+ return self.sensitivity == other.sensitivity and selfcats == othercats
+
+ def __ge__(self, other):
+ """Dom operator."""
+ selfcats = set(self.categories())
+ othercats = set(other.categories())
+ return self.sensitivity >= other.sensitivity and selfcats >= othercats
+
+ def __gt__(self, other):
+ selfcats = set(self.categories())
+ othercats = set(other.categories())
+ return ((self.sensitivity > other.sensitivity and selfcats >= othercats) or
+ (self.sensitivity >= other.sensitivity and selfcats > othercats))
+
+ def __le__(self, other):
+ """Domby operator."""
+ selfcats = set(self.categories())
+ othercats = set(other.categories())
+ return self.sensitivity <= other.sensitivity and selfcats <= othercats
+
+ def __lt__(self, other):
+ selfcats = set(self.categories())
+ othercats = set(other.categories())
+ return ((self.sensitivity < other.sensitivity and selfcats <= othercats) or
+ (self.sensitivity <= other.sensitivity and selfcats < othercats))
+
+ def __xor__(self, other):
+ """Incomp operator."""
+ return not (self >= other or self <= other)
+
+ @property
+ def sensitivity(self):
+ """The sensitivity of the level."""
+ return sensitivity_factory(self.policy, self.qpol_symbol.sens_name(self.policy))
+
+ def statement(self):
+ raise exception.NoStatement
+
+
+class Range(symbol.PolicySymbol):
+
+ """An MLS range"""
+
+ def __str__(self):
+ high = self.high
+ low = self.low
+ if high == low:
+ return str(low)
+
+ return "{0} - {1}".format(low, high)
+
+ def __hash__(self):
+ return hash(str(self))
+
+ def __eq__(self, other):
+ try:
+ return self.low == other.low and self.high == other.high
+ except AttributeError:
+ # remove all spaces in the string representations
+ # to handle cases where the other object does not
+ # have spaces around the '-'
+ other_str = str(other).replace(" ", "")
+ self_str = str(self).replace(" ", "")
+ return self_str == other_str
+
+ def __contains__(self, other):
+ return self.low <= other <= self.high
+
+ @property
+ def high(self):
+ """The high end/clearance level of this range."""
+ return level_factory(self.policy, self.qpol_symbol.high_level(self.policy))
+
+ @property
+ def low(self):
+ """The low end/current level of this range."""
+ return level_factory(self.policy, self.qpol_symbol.low_level(self.policy))
+
+ def statement(self):
+ raise exception.NoStatement
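
The factories accept the usual string notations, and the rich comparisons map
onto the policy operators (>= is dom, <= is domby, ^ is incomp). A sketch,
with p as before; it assumes MLS is enabled and that categories c0-c3 are
associated with s0 in the loaded policy:

    low = p.lookup_level("s0")
    high = p.lookup_level("s0:c0.c3")
    print(high >= low)                  # True: high dominates low

    rng = p.lookup_range("s0 - s0:c0.c3")
    print(low in rng)                   # True: rng.low <= low <= rng.high
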
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/policyrep/mlsrule.py b/lib/python2.7/site-packages/setoolsgui/setools/policyrep/mlsrule.py
new file mode 100644
index 0000000..5c91c59
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/policyrep/mlsrule.py
@@ -0,0 +1,62 @@
+# Copyright 2014, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+from . import exception
+from . import qpol
+from . import rule
+from . import typeattr
+from . import mls
+
+
+def mls_rule_factory(policy, symbol):
+ """Factory function for creating MLS rule objects."""
+ if not isinstance(symbol, qpol.qpol_range_trans_t):
+ raise TypeError("MLS rules cannot be looked-up.")
+
+ return MLSRule(policy, symbol)
+
+
+def validate_ruletype(types):
+ """Validate MLS rule types."""
+ for t in types:
+ if t not in ["range_transition"]:
+ raise exception.InvalidMLSRuleType("{0} is not a valid MLS rule type.".format(t))
+
+
+class MLSRule(rule.PolicyRule):
+
+ """An MLS rule."""
+
+ def __str__(self):
+ # TODO: If we ever get more MLS rules, fix this format.
+ return "range_transition {0.source} {0.target}:{0.tclass} {0.default};".format(self)
+
+ @property
+ def source(self):
+ """The rule's source type/attribute."""
+ return typeattr.type_or_attr_factory(self.policy, self.qpol_symbol.source_type(self.policy))
+
+ @property
+ def target(self):
+ """The rule's target type/attribute."""
+ return typeattr.type_or_attr_factory(self.policy, self.qpol_symbol.target_type(self.policy))
+
+ @property
+ def default(self):
+ """The rule's default range."""
+ return mls.range_factory(self.policy, self.qpol_symbol.range(self.policy))
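
range_transition is currently the only MLS rule type, so the mlsrules()
generator shown earlier yields only MLSRule objects. A sketch, with p as
before:

    for r in p.mlsrules():
        print("{0.source} -> {0.default}".format(r))
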
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/policyrep/netcontext.py b/lib/python2.7/site-packages/setoolsgui/setools/policyrep/netcontext.py
new file mode 100644
index 0000000..5aeed5c
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/policyrep/netcontext.py
@@ -0,0 +1,167 @@
+# Copyright 2014, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import socket
+from collections import namedtuple
+
+from . import qpol
+from . import symbol
+from . import context
+
+port_range = namedtuple("port_range", ["low", "high"])
+
+
+def netifcon_factory(policy, name):
+ """Factory function for creating netifcon objects."""
+
+ if not isinstance(name, qpol.qpol_netifcon_t):
+ raise NotImplementedError
+
+ return Netifcon(policy, name)
+
+
+def nodecon_factory(policy, name):
+ """Factory function for creating nodecon objects."""
+
+ if not isinstance(name, qpol.qpol_nodecon_t):
+ raise NotImplementedError
+
+ return Nodecon(policy, name)
+
+
+def portcon_factory(policy, name):
+ """Factory function for creating portcon objects."""
+
+ if not isinstance(name, qpol.qpol_portcon_t):
+ raise NotImplementedError
+
+ return Portcon(policy, name)
+
+
+class NetContext(symbol.PolicySymbol):
+
+ """Base class for in-policy network labeling rules."""
+
+ def __str__(self):
+ raise NotImplementedError
+
+ @property
+ def context(self):
+ """The context for this statement."""
+ return context.context_factory(self.policy, self.qpol_symbol.context(self.policy))
+
+ def statement(self):
+ return str(self)
+
+
+class Netifcon(NetContext):
+
+ """A netifcon statement."""
+
+ def __str__(self):
+ return "netifcon {0.netif} {0.context} {0.packet}".format(self)
+
+ @property
+ def netif(self):
+ """The network interface name."""
+ return self.qpol_symbol.name(self.policy)
+
+ @property
+ def context(self):
+ """The context for the interface."""
+ return context.context_factory(self.policy, self.qpol_symbol.if_con(self.policy))
+
+ @property
+ def packet(self):
+ """The context for the packets."""
+ return context.context_factory(self.policy, self.qpol_symbol.msg_con(self.policy))
+
+
+class Nodecon(NetContext):
+
+ """A nodecon statement."""
+
+ def __str__(self):
+ return "nodecon {0.address} {0.netmask} {0.context}".format(self)
+
+ def __eq__(self, other):
+ # Libqpol allocates new C objects in the
+ # nodecons iterator, so pointer comparison
+ # in the PolicySymbol object doesn't work.
+ try:
+ return (self.address == other.address and
+ self.netmask == other.netmask and
+ self.context == other.context)
+        except AttributeError:
+            return (str(self) == str(other))
+
+    def __ne__(self, other):
+        # Python 2 does not derive __ne__ from __eq__; define it so that
+        # inequality checks stay consistent with the comparison above.
+        return not self == other
+
+ @property
+ def ip_version(self):
+ """
+ The IP version for the nodecon (socket.AF_INET or
+ socket.AF_INET6).
+ """
+ return self.qpol_symbol.protocol(self.policy)
+
+ @property
+ def address(self):
+ """The network address for the nodecon."""
+ return self.qpol_symbol.addr(self.policy)
+
+ @property
+ def netmask(self):
+ """The network mask for the nodecon."""
+ return self.qpol_symbol.mask(self.policy)
+
+
+class Portcon(NetContext):
+
+ """A portcon statement."""
+
+ _proto_to_text = {socket.IPPROTO_TCP: 'tcp',
+ socket.IPPROTO_UDP: 'udp'}
+
+ def __str__(self):
+ low, high = self.ports
+ proto = self._proto_to_text[self.protocol]
+
+ if low == high:
+ return "portcon {0} {1} {2}".format(proto, low, self.context)
+ else:
+ return "portcon {0} {1}-{2} {3}".format(proto, low, high, self.context)
+
+ @property
+ def protocol(self):
+ """
+ The protocol number for the portcon (socket.IPPROTO_TCP
+ or socket.IPPROTO_UDP).
+ """
+ return self.qpol_symbol.protocol(self.policy)
+
+ @property
+ def ports(self):
+ """
+ The port range for this portcon.
+
+        Return: port_range(low, high) namedtuple
+ low The low port of the range.
+ high The high port of the range.
+ """
+ low = self.qpol_symbol.low_port(self.policy)
+ high = self.qpol_symbol.high_port(self.policy)
+ return port_range(low, high)
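+
+# Rough usage sketch (assumes "p" is a loaded SELinuxPolicy whose portcons()
+# iterator yields Portcon objects, as wired up elsewhere in policyrep):
+#
+#   for pc in p.portcons():
+#       low, high = pc.ports
+#       if pc.protocol == socket.IPPROTO_TCP and low <= 443 <= high:
+#           print(pc)   # e.g. "portcon tcp 443 system_u:object_r:http_port_t:s0"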
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/policyrep/objclass.py b/lib/python2.7/site-packages/setoolsgui/setools/policyrep/objclass.py
new file mode 100644
index 0000000..bf9a553
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/policyrep/objclass.py
@@ -0,0 +1,110 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+from . import exception
+from . import symbol
+from . import qpol
+
+
+def common_factory(policy, name):
+ """Factory function for creating common permission set objects."""
+
+ if isinstance(name, Common):
+ assert name.policy == policy
+ return name
+ elif isinstance(name, qpol.qpol_common_t):
+ return Common(policy, name)
+
+ try:
+ return Common(policy, qpol.qpol_common_t(policy, str(name)))
+ except ValueError:
+ raise exception.InvalidCommon("{0} is not a valid common".format(name))
+
+
+def class_factory(policy, name):
+ """Factory function for creating object class objects."""
+
+ if isinstance(name, ObjClass):
+ assert name.policy == policy
+ return name
+ elif isinstance(name, qpol.qpol_class_t):
+ return ObjClass(policy, name)
+
+ try:
+ return ObjClass(policy, qpol.qpol_class_t(policy, str(name)))
+ except ValueError:
+ raise exception.InvalidClass("{0} is not a valid object class".format(name))
+
+
+class Common(symbol.PolicySymbol):
+
+ """A common permission set."""
+
+ def __contains__(self, other):
+ return other in self.perms
+
+ @property
+ def perms(self):
+ """The list of the common's permissions."""
+ return set(self.qpol_symbol.perm_iter(self.policy))
+
+ def statement(self):
+ return "common {0}\n{{\n\t{1}\n}}".format(self, '\n\t'.join(self.perms))
+
+
+class ObjClass(Common):
+
+ """An object class."""
+
+ def __contains__(self, other):
+ try:
+ if other in self.common.perms:
+ return True
+ except exception.NoCommon:
+ pass
+
+ return other in self.perms
+
+ @property
+ def common(self):
+ """
+ The common that the object class inherits.
+
+ Exceptions:
+ NoCommon The object class does not inherit a common.
+ """
+
+ try:
+ return common_factory(self.policy, self.qpol_symbol.common(self.policy))
+ except ValueError:
+ raise exception.NoCommon("{0} does not inherit a common.".format(self))
+
+ def statement(self):
+ stmt = "class {0}\n".format(self)
+
+ try:
+ stmt += "inherits {0}\n".format(self.common)
+ except exception.NoCommon:
+ pass
+
+        # a class that inherits a common may have no additional permissions of its own
+ perms = self.perms
+ if len(perms) > 0:
+ stmt += "{{\n\t{0}\n}}".format('\n\t'.join(perms))
+
+ return stmt
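+
+# Rough usage sketch (hypothetical class name; "p" is a policy handle as used
+# by the factories above):
+#
+#   cls = class_factory(p, "file")
+#   "read" in cls            # True if "file" or its inherited common has it
+#   print(cls.statement())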
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/policyrep/polcap.py b/lib/python2.7/site-packages/setoolsgui/setools/policyrep/polcap.py
new file mode 100644
index 0000000..8ab164d
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/policyrep/polcap.py
@@ -0,0 +1,40 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+from . import qpol
+from . import symbol
+
+
+def polcap_factory(policy, name):
+ """Factory function for creating policy capability objects."""
+
+ if isinstance(name, PolicyCapability):
+ assert name.policy == policy
+ return name
+ elif isinstance(name, qpol.qpol_polcap_t):
+ return PolicyCapability(policy, name)
+ else:
+ raise TypeError("Policy capabilities cannot be looked up.")
+
+
+class PolicyCapability(symbol.PolicySymbol):
+
+ """A policy capability."""
+
+ def statement(self):
+ return "policycap {0};".format(self)
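+
+# Rough usage sketch (assumes "p" is a loaded SELinuxPolicy with a polcaps()
+# iterator yielding PolicyCapability objects):
+#
+#   for cap in p.polcaps():
+#       print(cap.statement())   # e.g. "policycap network_peer_controls;"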
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/policyrep/qpol.py b/lib/python2.7/site-packages/setoolsgui/setools/policyrep/qpol.py
new file mode 100644
index 0000000..97e602b
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/policyrep/qpol.py
@@ -0,0 +1,1114 @@
+# This file was automatically generated by SWIG (http://www.swig.org).
+# Version 2.0.11
+#
+# Do not make changes to this file unless you know what you are doing--modify
+# the SWIG interface file instead.
+
+
+
+
+
+from sys import version_info
+if version_info >= (2,6,0):
+ def swig_import_helper():
+ from os.path import dirname
+ import imp
+ fp = None
+ try:
+ fp, pathname, description = imp.find_module('_qpol', [dirname(__file__)])
+ except ImportError:
+ import _qpol
+ return _qpol
+ if fp is not None:
+ try:
+ _mod = imp.load_module('_qpol', fp, pathname, description)
+ finally:
+ fp.close()
+ return _mod
+ _qpol = swig_import_helper()
+ del swig_import_helper
+else:
+ import _qpol
+del version_info
+try:
+ _swig_property = property
+except NameError:
+ pass # Python < 2.2 doesn't have 'property'.
+def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
+ if (name == "thisown"): return self.this.own(value)
+ if (name == "this"):
+ if type(value).__name__ == 'SwigPyObject':
+ self.__dict__[name] = value
+ return
+ method = class_type.__swig_setmethods__.get(name,None)
+ if method: return method(self,value)
+ if (not static):
+ self.__dict__[name] = value
+ else:
+ raise AttributeError("You cannot add attributes to %s" % self)
+
+def _swig_setattr(self,class_type,name,value):
+ return _swig_setattr_nondynamic(self,class_type,name,value,0)
+
+def _swig_getattr(self,class_type,name):
+ if (name == "thisown"): return self.this.own()
+ method = class_type.__swig_getmethods__.get(name,None)
+ if method: return method(self)
+ raise AttributeError(name)
+
+def _swig_repr(self):
+ try: strthis = "proxy of " + self.this.__repr__()
+ except: strthis = ""
+ return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
+
+try:
+ _object = object
+ _newclass = 1
+except AttributeError:
+ class _object : pass
+ _newclass = 0
+
+
+
+def to_str(*args):
+ return _qpol.to_str(*args)
+to_str = _qpol.to_str
+import logging
+from functools import wraps
+
+def QpolGenerator(cast):
+ """
+ A decorator which converts qpol iterators into Python generators.
+
+ Qpol iterators use void* to be generic about their contents.
+ The purpose of the _from_void functions below is to wrap
+ the pointer casting, hence the "cast" variable name here.
+
+ Decorator parameter:
+ cast A wrapper function which casts the qpol iterator return pointer
+            to the proper C data type pointer.  Pass the function
+            reference exported by the C Python extension, for example:
+
+ @QpolGenerator(_qpol.qpol_type_from_void)
+ """
+
+ def decorate(func):
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ qpol_iter = func(*args)
+ while not qpol_iter.isend():
+ yield cast(qpol_iter.item())
+ qpol_iter.next_()
+
+ return wrapper
+ return decorate
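+
+# Illustrative use of a decorated iterator (see qpol_policy_t below; "p" is a
+# hypothetical qpol_policy_t instance):
+#
+#   for t in p.type_iter():   # yields qpol_type_t objects cast from void*
+#       print(t.name(p))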
+
+def qpol_logger(level, msg):
+ """Log qpol messages via Python logging."""
+ logging.getLogger("libqpol").debug(msg)
+
+def qpol_policy_factory(path):
+ """Factory function for qpol policy objects."""
+ # The main purpose here is to hook in the
+ # above logger callback.
+ return qpol_policy_t(path, 0, qpol_logger)
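+
+# e.g., to surface libqpol messages while loading (path is illustrative):
+#
+#   logging.basicConfig(level=logging.DEBUG)
+#   p = qpol_policy_factory("/etc/selinux/targeted/policy/policy.29")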
+
+QPOL_POLICY_OPTION_NO_NEVERALLOWS = _qpol.QPOL_POLICY_OPTION_NO_NEVERALLOWS
+QPOL_POLICY_OPTION_NO_RULES = _qpol.QPOL_POLICY_OPTION_NO_RULES
+QPOL_POLICY_OPTION_MATCH_SYSTEM = _qpol.QPOL_POLICY_OPTION_MATCH_SYSTEM
+QPOL_POLICY_MAX_VERSION = _qpol.QPOL_POLICY_MAX_VERSION
+QPOL_POLICY_MIN_VERSION = _qpol.QPOL_POLICY_MIN_VERSION
+class qpol_policy_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_policy_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_policy_t, name)
+ __repr__ = _swig_repr
+ def __init__(self, *args):
+ this = _qpol.new_qpol_policy_t(*args)
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_policy_t
+ __del__ = lambda self : None;
+ def version(self): return _qpol.qpol_policy_t_version(self)
+ def handle_unknown(self): return _qpol.qpol_policy_t_handle_unknown(self)
+ def capability(self, *args): return _qpol.qpol_policy_t_capability(self, *args)
+ @QpolGenerator(_qpol.qpol_type_from_void)
+ def type_iter(self): return _qpol.qpol_policy_t_type_iter(self)
+ def type_count(self): return _qpol.qpol_policy_t_type_count(self)
+ @QpolGenerator(_qpol.qpol_role_from_void)
+ def role_iter(self): return _qpol.qpol_policy_t_role_iter(self)
+ def role_count(self): return _qpol.qpol_policy_t_role_count(self)
+ @QpolGenerator(_qpol.qpol_level_from_void)
+ def level_iter(self): return _qpol.qpol_policy_t_level_iter(self)
+ def level_count(self): return _qpol.qpol_policy_t_level_count(self)
+ @QpolGenerator(_qpol.qpol_cat_from_void)
+ def cat_iter(self): return _qpol.qpol_policy_t_cat_iter(self)
+ def cat_count(self): return _qpol.qpol_policy_t_cat_count(self)
+ @QpolGenerator(_qpol.qpol_user_from_void)
+ def user_iter(self): return _qpol.qpol_policy_t_user_iter(self)
+ def user_count(self): return _qpol.qpol_policy_t_user_count(self)
+ @QpolGenerator(_qpol.qpol_bool_from_void)
+ def bool_iter(self): return _qpol.qpol_policy_t_bool_iter(self)
+ def bool_count(self): return _qpol.qpol_policy_t_bool_count(self)
+ @QpolGenerator(_qpol.qpol_class_from_void)
+ def class_iter(self, perm=None): return _qpol.qpol_policy_t_class_iter(self, perm)
+ def class_count(self): return _qpol.qpol_policy_t_class_count(self)
+ @QpolGenerator(_qpol.qpol_common_from_void)
+ def common_iter(self, perm=None): return _qpol.qpol_policy_t_common_iter(self, perm)
+ def common_count(self): return _qpol.qpol_policy_t_common_count(self)
+ @QpolGenerator(_qpol.qpol_fs_use_from_void)
+ def fs_use_iter(self): return _qpol.qpol_policy_t_fs_use_iter(self)
+ def fs_use_count(self): return _qpol.qpol_policy_t_fs_use_count(self)
+ @QpolGenerator(_qpol.qpol_genfscon_from_void)
+ def genfscon_iter(self): return _qpol.qpol_policy_t_genfscon_iter(self)
+ def genfscon_count(self): return _qpol.qpol_policy_t_genfscon_count(self)
+ @QpolGenerator(_qpol.qpol_isid_from_void)
+ def isid_iter(self): return _qpol.qpol_policy_t_isid_iter(self)
+ def isid_count(self): return _qpol.qpol_policy_t_isid_count(self)
+ @QpolGenerator(_qpol.qpol_netifcon_from_void)
+ def netifcon_iter(self): return _qpol.qpol_policy_t_netifcon_iter(self)
+ def netifcon_count(self): return _qpol.qpol_policy_t_netifcon_count(self)
+ @QpolGenerator(_qpol.qpol_nodecon_from_void)
+ def nodecon_iter(self): return _qpol.qpol_policy_t_nodecon_iter(self)
+ def nodecon_count(self): return _qpol.qpol_policy_t_nodecon_count(self)
+ @QpolGenerator(_qpol.qpol_portcon_from_void)
+ def portcon_iter(self): return _qpol.qpol_policy_t_portcon_iter(self)
+ def portcon_count(self): return _qpol.qpol_policy_t_portcon_count(self)
+ @QpolGenerator(_qpol.qpol_constraint_from_void)
+ def constraint_iter(self): return _qpol.qpol_policy_t_constraint_iter(self)
+ def constraint_count(self): return _qpol.qpol_policy_t_constraint_count(self)
+ @QpolGenerator(_qpol.qpol_validatetrans_from_void)
+ def validatetrans_iter(self): return _qpol.qpol_policy_t_validatetrans_iter(self)
+ def validatetrans_count(self): return _qpol.qpol_policy_t_validatetrans_count(self)
+ @QpolGenerator(_qpol.qpol_role_allow_from_void)
+ def role_allow_iter(self): return _qpol.qpol_policy_t_role_allow_iter(self)
+ def role_allow_count(self): return _qpol.qpol_policy_t_role_allow_count(self)
+ @QpolGenerator(_qpol.qpol_role_trans_from_void)
+ def role_trans_iter(self): return _qpol.qpol_policy_t_role_trans_iter(self)
+ def role_trans_count(self): return _qpol.qpol_policy_t_role_trans_count(self)
+ @QpolGenerator(_qpol.qpol_range_trans_from_void)
+ def range_trans_iter(self): return _qpol.qpol_policy_t_range_trans_iter(self)
+ def range_trans_count(self): return _qpol.qpol_policy_t_range_trans_count(self)
+ @QpolGenerator(_qpol.qpol_avrule_from_void)
+ def avrule_iter(self): return _qpol.qpol_policy_t_avrule_iter(self)
+ def avrule_allow_count(self): return _qpol.qpol_policy_t_avrule_allow_count(self)
+ def avrule_auditallow_count(self): return _qpol.qpol_policy_t_avrule_auditallow_count(self)
+ def avrule_neverallow_count(self): return _qpol.qpol_policy_t_avrule_neverallow_count(self)
+ def avrule_dontaudit_count(self): return _qpol.qpol_policy_t_avrule_dontaudit_count(self)
+ @QpolGenerator(_qpol.qpol_terule_from_void)
+ def terule_iter(self): return _qpol.qpol_policy_t_terule_iter(self)
+ def terule_trans_count(self): return _qpol.qpol_policy_t_terule_trans_count(self)
+ def terule_change_count(self): return _qpol.qpol_policy_t_terule_change_count(self)
+ def terule_member_count(self): return _qpol.qpol_policy_t_terule_member_count(self)
+ def cond_iter(self): return _qpol.qpol_policy_t_cond_iter(self)
+ def cond_count(self): return _qpol.qpol_policy_t_cond_count(self)
+ @QpolGenerator(_qpol.qpol_filename_trans_from_void)
+ def filename_trans_iter(self): return _qpol.qpol_policy_t_filename_trans_iter(self)
+ def filename_trans_count(self): return _qpol.qpol_policy_t_filename_trans_count(self)
+ @QpolGenerator(_qpol.qpol_type_from_void)
+ def permissive_iter(self): return _qpol.qpol_policy_t_permissive_iter(self)
+ def permissive_count(self): return _qpol.qpol_policy_t_permissive_count(self)
+ def typebounds_iter(self): return _qpol.qpol_policy_t_typebounds_iter(self)
+ def typebounds_count(self): return _qpol.qpol_policy_t_typebounds_count(self)
+ @QpolGenerator(_qpol.qpol_polcap_from_void)
+ def polcap_iter(self): return _qpol.qpol_policy_t_polcap_iter(self)
+ def polcap_count(self): return _qpol.qpol_policy_t_polcap_count(self)
+ @QpolGenerator(_qpol.qpol_default_object_from_void)
+ def default_iter(self): return _qpol.qpol_policy_t_default_iter(self)
+qpol_policy_t_swigregister = _qpol.qpol_policy_t_swigregister
+qpol_policy_t_swigregister(qpol_policy_t)
+
+QPOL_CAP_ATTRIB_NAMES = _qpol.QPOL_CAP_ATTRIB_NAMES
+QPOL_CAP_SYN_RULES = _qpol.QPOL_CAP_SYN_RULES
+QPOL_CAP_LINE_NUMBERS = _qpol.QPOL_CAP_LINE_NUMBERS
+QPOL_CAP_CONDITIONALS = _qpol.QPOL_CAP_CONDITIONALS
+QPOL_CAP_MLS = _qpol.QPOL_CAP_MLS
+QPOL_CAP_MODULES = _qpol.QPOL_CAP_MODULES
+QPOL_CAP_RULES_LOADED = _qpol.QPOL_CAP_RULES_LOADED
+QPOL_CAP_SOURCE = _qpol.QPOL_CAP_SOURCE
+QPOL_CAP_NEVERALLOW = _qpol.QPOL_CAP_NEVERALLOW
+QPOL_CAP_POLCAPS = _qpol.QPOL_CAP_POLCAPS
+QPOL_CAP_BOUNDS = _qpol.QPOL_CAP_BOUNDS
+QPOL_CAP_DEFAULT_OBJECTS = _qpol.QPOL_CAP_DEFAULT_OBJECTS
+QPOL_CAP_DEFAULT_TYPE = _qpol.QPOL_CAP_DEFAULT_TYPE
+QPOL_CAP_PERMISSIVE = _qpol.QPOL_CAP_PERMISSIVE
+QPOL_CAP_FILENAME_TRANS = _qpol.QPOL_CAP_FILENAME_TRANS
+QPOL_CAP_ROLETRANS = _qpol.QPOL_CAP_ROLETRANS
+class qpol_iterator_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_iterator_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_iterator_t, name)
+ __repr__ = _swig_repr
+ def __init__(self):
+ this = _qpol.new_qpol_iterator_t()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_iterator_t
+ __del__ = lambda self : None;
+ def item(self): return _qpol.qpol_iterator_t_item(self)
+ def next_(self): return _qpol.qpol_iterator_t_next_(self)
+ def isend(self): return _qpol.qpol_iterator_t_isend(self)
+ def size(self): return _qpol.qpol_iterator_t_size(self)
+qpol_iterator_t_swigregister = _qpol.qpol_iterator_t_swigregister
+qpol_iterator_t_swigregister(qpol_iterator_t)
+
+class qpol_type_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_type_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_type_t, name)
+ __repr__ = _swig_repr
+ def __init__(self, *args):
+ this = _qpol.new_qpol_type_t(*args)
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_type_t
+ __del__ = lambda self : None;
+ def name(self, *args): return _qpol.qpol_type_t_name(self, *args)
+ def value(self, *args): return _qpol.qpol_type_t_value(self, *args)
+ def isalias(self, *args): return _qpol.qpol_type_t_isalias(self, *args)
+ def isattr(self, *args): return _qpol.qpol_type_t_isattr(self, *args)
+ def ispermissive(self, *args): return _qpol.qpol_type_t_ispermissive(self, *args)
+ @QpolGenerator(_qpol.qpol_type_from_void)
+ def type_iter(self, *args): return _qpol.qpol_type_t_type_iter(self, *args)
+ @QpolGenerator(_qpol.qpol_type_from_void)
+ def attr_iter(self, *args): return _qpol.qpol_type_t_attr_iter(self, *args)
+ @QpolGenerator(_qpol.to_str)
+ def alias_iter(self, *args): return _qpol.qpol_type_t_alias_iter(self, *args)
+qpol_type_t_swigregister = _qpol.qpol_type_t_swigregister
+qpol_type_t_swigregister(qpol_type_t)
+
+
+def qpol_type_from_void(*args):
+ return _qpol.qpol_type_from_void(*args)
+qpol_type_from_void = _qpol.qpol_type_from_void
+class qpol_role_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_role_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_role_t, name)
+ __repr__ = _swig_repr
+ def __init__(self, *args):
+ this = _qpol.new_qpol_role_t(*args)
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_role_t
+ __del__ = lambda self : None;
+ def value(self, *args): return _qpol.qpol_role_t_value(self, *args)
+ def name(self, *args): return _qpol.qpol_role_t_name(self, *args)
+ @QpolGenerator(_qpol.qpol_type_from_void)
+ def type_iter(self, *args): return _qpol.qpol_role_t_type_iter(self, *args)
+ def dominate_iter(self, *args): return _qpol.qpol_role_t_dominate_iter(self, *args)
+qpol_role_t_swigregister = _qpol.qpol_role_t_swigregister
+qpol_role_t_swigregister(qpol_role_t)
+
+
+def qpol_role_from_void(*args):
+ return _qpol.qpol_role_from_void(*args)
+qpol_role_from_void = _qpol.qpol_role_from_void
+class qpol_level_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_level_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_level_t, name)
+ __repr__ = _swig_repr
+ def __init__(self, *args):
+ this = _qpol.new_qpol_level_t(*args)
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_level_t
+ __del__ = lambda self : None;
+ def isalias(self, *args): return _qpol.qpol_level_t_isalias(self, *args)
+ def value(self, *args): return _qpol.qpol_level_t_value(self, *args)
+ def name(self, *args): return _qpol.qpol_level_t_name(self, *args)
+ @QpolGenerator(_qpol.qpol_cat_from_void)
+ def cat_iter(self, *args): return _qpol.qpol_level_t_cat_iter(self, *args)
+ @QpolGenerator(_qpol.to_str)
+ def alias_iter(self, *args): return _qpol.qpol_level_t_alias_iter(self, *args)
+qpol_level_t_swigregister = _qpol.qpol_level_t_swigregister
+qpol_level_t_swigregister(qpol_level_t)
+
+
+def qpol_level_from_void(*args):
+ return _qpol.qpol_level_from_void(*args)
+qpol_level_from_void = _qpol.qpol_level_from_void
+class qpol_cat_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_cat_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_cat_t, name)
+ __repr__ = _swig_repr
+ def __init__(self, *args):
+ this = _qpol.new_qpol_cat_t(*args)
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_cat_t
+ __del__ = lambda self : None;
+ def isalias(self, *args): return _qpol.qpol_cat_t_isalias(self, *args)
+ def value(self, *args): return _qpol.qpol_cat_t_value(self, *args)
+ def name(self, *args): return _qpol.qpol_cat_t_name(self, *args)
+ @QpolGenerator(_qpol.to_str)
+ def alias_iter(self, *args): return _qpol.qpol_cat_t_alias_iter(self, *args)
+qpol_cat_t_swigregister = _qpol.qpol_cat_t_swigregister
+qpol_cat_t_swigregister(qpol_cat_t)
+
+
+def qpol_cat_from_void(*args):
+ return _qpol.qpol_cat_from_void(*args)
+qpol_cat_from_void = _qpol.qpol_cat_from_void
+class qpol_mls_range_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_mls_range_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_mls_range_t, name)
+ __repr__ = _swig_repr
+ def __init__(self, *args):
+ this = _qpol.new_qpol_mls_range_t(*args)
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_mls_range_t
+ __del__ = lambda self : None;
+ def high_level(self, *args): return _qpol.qpol_mls_range_t_high_level(self, *args)
+ def low_level(self, *args): return _qpol.qpol_mls_range_t_low_level(self, *args)
+qpol_mls_range_t_swigregister = _qpol.qpol_mls_range_t_swigregister
+qpol_mls_range_t_swigregister(qpol_mls_range_t)
+
+
+def qpol_mls_range_from_void(*args):
+ return _qpol.qpol_mls_range_from_void(*args)
+qpol_mls_range_from_void = _qpol.qpol_mls_range_from_void
+class qpol_semantic_level_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_semantic_level_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_semantic_level_t, name)
+ __repr__ = _swig_repr
+ def __init__(self, *args):
+ this = _qpol.new_qpol_semantic_level_t(*args)
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_semantic_level_t
+ __del__ = lambda self : None;
+ def add_cats(self, *args): return _qpol.qpol_semantic_level_t_add_cats(self, *args)
+qpol_semantic_level_t_swigregister = _qpol.qpol_semantic_level_t_swigregister
+qpol_semantic_level_t_swigregister(qpol_semantic_level_t)
+
+class qpol_mls_level_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_mls_level_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_mls_level_t, name)
+ __repr__ = _swig_repr
+ def __init__(self, *args):
+ this = _qpol.new_qpol_mls_level_t(*args)
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_mls_level_t
+ __del__ = lambda self : None;
+ def sens_name(self, *args): return _qpol.qpol_mls_level_t_sens_name(self, *args)
+ @QpolGenerator(_qpol.qpol_cat_from_void)
+ def cat_iter(self, *args): return _qpol.qpol_mls_level_t_cat_iter(self, *args)
+qpol_mls_level_t_swigregister = _qpol.qpol_mls_level_t_swigregister
+qpol_mls_level_t_swigregister(qpol_mls_level_t)
+
+
+def qpol_mls_level_from_void(*args):
+ return _qpol.qpol_mls_level_from_void(*args)
+qpol_mls_level_from_void = _qpol.qpol_mls_level_from_void
+class qpol_user_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_user_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_user_t, name)
+ __repr__ = _swig_repr
+ def __init__(self, *args):
+ this = _qpol.new_qpol_user_t(*args)
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_user_t
+ __del__ = lambda self : None;
+ def value(self, *args): return _qpol.qpol_user_t_value(self, *args)
+ @QpolGenerator(_qpol.qpol_role_from_void)
+ def role_iter(self, *args): return _qpol.qpol_user_t_role_iter(self, *args)
+ def range(self, *args): return _qpol.qpol_user_t_range(self, *args)
+ def name(self, *args): return _qpol.qpol_user_t_name(self, *args)
+ def dfltlevel(self, *args): return _qpol.qpol_user_t_dfltlevel(self, *args)
+qpol_user_t_swigregister = _qpol.qpol_user_t_swigregister
+qpol_user_t_swigregister(qpol_user_t)
+
+
+def qpol_user_from_void(*args):
+ return _qpol.qpol_user_from_void(*args)
+qpol_user_from_void = _qpol.qpol_user_from_void
+class qpol_bool_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_bool_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_bool_t, name)
+ __repr__ = _swig_repr
+ def __init__(self, *args):
+ this = _qpol.new_qpol_bool_t(*args)
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_bool_t
+ __del__ = lambda self : None;
+ def value(self, *args): return _qpol.qpol_bool_t_value(self, *args)
+ def state(self, *args): return _qpol.qpol_bool_t_state(self, *args)
+ def name(self, *args): return _qpol.qpol_bool_t_name(self, *args)
+qpol_bool_t_swigregister = _qpol.qpol_bool_t_swigregister
+qpol_bool_t_swigregister(qpol_bool_t)
+
+
+def qpol_bool_from_void(*args):
+ return _qpol.qpol_bool_from_void(*args)
+qpol_bool_from_void = _qpol.qpol_bool_from_void
+class qpol_context_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_context_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_context_t, name)
+ __repr__ = _swig_repr
+ def __init__(self):
+ this = _qpol.new_qpol_context_t()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_context_t
+ __del__ = lambda self : None;
+ def user(self, *args): return _qpol.qpol_context_t_user(self, *args)
+ def role(self, *args): return _qpol.qpol_context_t_role(self, *args)
+ def type_(self, *args): return _qpol.qpol_context_t_type_(self, *args)
+ def range(self, *args): return _qpol.qpol_context_t_range(self, *args)
+qpol_context_t_swigregister = _qpol.qpol_context_t_swigregister
+qpol_context_t_swigregister(qpol_context_t)
+
+
+def qpol_context_from_void(*args):
+ return _qpol.qpol_context_from_void(*args)
+qpol_context_from_void = _qpol.qpol_context_from_void
+class qpol_class_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_class_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_class_t, name)
+ __repr__ = _swig_repr
+ def __init__(self, *args):
+ this = _qpol.new_qpol_class_t(*args)
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_class_t
+ __del__ = lambda self : None;
+ def value(self, *args): return _qpol.qpol_class_t_value(self, *args)
+ def common(self, *args): return _qpol.qpol_class_t_common(self, *args)
+ @QpolGenerator(_qpol.to_str)
+ def perm_iter(self, *args): return _qpol.qpol_class_t_perm_iter(self, *args)
+ @QpolGenerator(_qpol.qpol_constraint_from_void)
+ def constraint_iter(self, *args): return _qpol.qpol_class_t_constraint_iter(self, *args)
+ @QpolGenerator(_qpol.qpol_validatetrans_from_void)
+ def validatetrans_iter(self, *args): return _qpol.qpol_class_t_validatetrans_iter(self, *args)
+ def name(self, *args): return _qpol.qpol_class_t_name(self, *args)
+qpol_class_t_swigregister = _qpol.qpol_class_t_swigregister
+qpol_class_t_swigregister(qpol_class_t)
+
+
+def qpol_class_from_void(*args):
+ return _qpol.qpol_class_from_void(*args)
+qpol_class_from_void = _qpol.qpol_class_from_void
+class qpol_common_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_common_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_common_t, name)
+ __repr__ = _swig_repr
+ def __init__(self, *args):
+ this = _qpol.new_qpol_common_t(*args)
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_common_t
+ __del__ = lambda self : None;
+ def value(self, *args): return _qpol.qpol_common_t_value(self, *args)
+ @QpolGenerator(_qpol.to_str)
+ def perm_iter(self, *args): return _qpol.qpol_common_t_perm_iter(self, *args)
+ def name(self, *args): return _qpol.qpol_common_t_name(self, *args)
+qpol_common_t_swigregister = _qpol.qpol_common_t_swigregister
+qpol_common_t_swigregister(qpol_common_t)
+
+
+def qpol_common_from_void(*args):
+ return _qpol.qpol_common_from_void(*args)
+qpol_common_from_void = _qpol.qpol_common_from_void
+QPOL_FS_USE_XATTR = _qpol.QPOL_FS_USE_XATTR
+QPOL_FS_USE_TRANS = _qpol.QPOL_FS_USE_TRANS
+QPOL_FS_USE_TASK = _qpol.QPOL_FS_USE_TASK
+QPOL_FS_USE_GENFS = _qpol.QPOL_FS_USE_GENFS
+QPOL_FS_USE_NONE = _qpol.QPOL_FS_USE_NONE
+QPOL_FS_USE_PSID = _qpol.QPOL_FS_USE_PSID
+class qpol_fs_use_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_fs_use_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_fs_use_t, name)
+ __repr__ = _swig_repr
+ def __init__(self, *args):
+ this = _qpol.new_qpol_fs_use_t(*args)
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_fs_use_t
+ __del__ = lambda self : None;
+ def name(self, *args): return _qpol.qpol_fs_use_t_name(self, *args)
+ def behavior(self, *args): return _qpol.qpol_fs_use_t_behavior(self, *args)
+ def context(self, *args): return _qpol.qpol_fs_use_t_context(self, *args)
+qpol_fs_use_t_swigregister = _qpol.qpol_fs_use_t_swigregister
+qpol_fs_use_t_swigregister(qpol_fs_use_t)
+
+
+def qpol_fs_use_from_void(*args):
+ return _qpol.qpol_fs_use_from_void(*args)
+qpol_fs_use_from_void = _qpol.qpol_fs_use_from_void
+QPOL_CLASS_ALL = _qpol.QPOL_CLASS_ALL
+QPOL_CLASS_BLK_FILE = _qpol.QPOL_CLASS_BLK_FILE
+QPOL_CLASS_CHR_FILE = _qpol.QPOL_CLASS_CHR_FILE
+QPOL_CLASS_DIR = _qpol.QPOL_CLASS_DIR
+QPOL_CLASS_FIFO_FILE = _qpol.QPOL_CLASS_FIFO_FILE
+QPOL_CLASS_FILE = _qpol.QPOL_CLASS_FILE
+QPOL_CLASS_LNK_FILE = _qpol.QPOL_CLASS_LNK_FILE
+QPOL_CLASS_SOCK_FILE = _qpol.QPOL_CLASS_SOCK_FILE
+class qpol_genfscon_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_genfscon_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_genfscon_t, name)
+ __repr__ = _swig_repr
+ def __init__(self, *args):
+ this = _qpol.new_qpol_genfscon_t(*args)
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_genfscon_t
+ __del__ = lambda self : None;
+ def name(self, *args): return _qpol.qpol_genfscon_t_name(self, *args)
+ def path(self, *args): return _qpol.qpol_genfscon_t_path(self, *args)
+ def object_class(self, *args): return _qpol.qpol_genfscon_t_object_class(self, *args)
+ def context(self, *args): return _qpol.qpol_genfscon_t_context(self, *args)
+qpol_genfscon_t_swigregister = _qpol.qpol_genfscon_t_swigregister
+qpol_genfscon_t_swigregister(qpol_genfscon_t)
+
+
+def qpol_genfscon_from_void(*args):
+ return _qpol.qpol_genfscon_from_void(*args)
+qpol_genfscon_from_void = _qpol.qpol_genfscon_from_void
+class qpol_isid_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_isid_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_isid_t, name)
+ __repr__ = _swig_repr
+ def __init__(self, *args):
+ this = _qpol.new_qpol_isid_t(*args)
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_isid_t
+ __del__ = lambda self : None;
+ def name(self, *args): return _qpol.qpol_isid_t_name(self, *args)
+ def context(self, *args): return _qpol.qpol_isid_t_context(self, *args)
+qpol_isid_t_swigregister = _qpol.qpol_isid_t_swigregister
+qpol_isid_t_swigregister(qpol_isid_t)
+
+
+def qpol_isid_from_void(*args):
+ return _qpol.qpol_isid_from_void(*args)
+qpol_isid_from_void = _qpol.qpol_isid_from_void
+class qpol_netifcon_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_netifcon_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_netifcon_t, name)
+ __repr__ = _swig_repr
+ def __init__(self, *args):
+ this = _qpol.new_qpol_netifcon_t(*args)
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_netifcon_t
+ __del__ = lambda self : None;
+ def name(self, *args): return _qpol.qpol_netifcon_t_name(self, *args)
+ def msg_con(self, *args): return _qpol.qpol_netifcon_t_msg_con(self, *args)
+ def if_con(self, *args): return _qpol.qpol_netifcon_t_if_con(self, *args)
+qpol_netifcon_t_swigregister = _qpol.qpol_netifcon_t_swigregister
+qpol_netifcon_t_swigregister(qpol_netifcon_t)
+
+
+def qpol_netifcon_from_void(*args):
+ return _qpol.qpol_netifcon_from_void(*args)
+qpol_netifcon_from_void = _qpol.qpol_netifcon_from_void
+QPOL_IPV4 = _qpol.QPOL_IPV4
+QPOL_IPV6 = _qpol.QPOL_IPV6
+class qpol_nodecon_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_nodecon_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_nodecon_t, name)
+ __repr__ = _swig_repr
+ def __init__(self, *args):
+ this = _qpol.new_qpol_nodecon_t(*args)
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_nodecon_t
+ __del__ = lambda self : None;
+ def addr(self, *args): return _qpol.qpol_nodecon_t_addr(self, *args)
+ def mask(self, *args): return _qpol.qpol_nodecon_t_mask(self, *args)
+ def protocol(self, *args): return _qpol.qpol_nodecon_t_protocol(self, *args)
+ def context(self, *args): return _qpol.qpol_nodecon_t_context(self, *args)
+qpol_nodecon_t_swigregister = _qpol.qpol_nodecon_t_swigregister
+qpol_nodecon_t_swigregister(qpol_nodecon_t)
+
+
+def qpol_nodecon_from_void(*args):
+ return _qpol.qpol_nodecon_from_void(*args)
+qpol_nodecon_from_void = _qpol.qpol_nodecon_from_void
+IPPROTO_TCP = _qpol.IPPROTO_TCP
+IPPROTO_UDP = _qpol.IPPROTO_UDP
+class qpol_portcon_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_portcon_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_portcon_t, name)
+ __repr__ = _swig_repr
+ def __init__(self, *args):
+ this = _qpol.new_qpol_portcon_t(*args)
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_portcon_t
+ __del__ = lambda self : None;
+ def low_port(self, *args): return _qpol.qpol_portcon_t_low_port(self, *args)
+ def high_port(self, *args): return _qpol.qpol_portcon_t_high_port(self, *args)
+ def protocol(self, *args): return _qpol.qpol_portcon_t_protocol(self, *args)
+ def context(self, *args): return _qpol.qpol_portcon_t_context(self, *args)
+qpol_portcon_t_swigregister = _qpol.qpol_portcon_t_swigregister
+qpol_portcon_t_swigregister(qpol_portcon_t)
+
+
+def qpol_portcon_from_void(*args):
+ return _qpol.qpol_portcon_from_void(*args)
+qpol_portcon_from_void = _qpol.qpol_portcon_from_void
+class qpol_constraint_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_constraint_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_constraint_t, name)
+ __repr__ = _swig_repr
+ def __init__(self):
+ this = _qpol.new_qpol_constraint_t()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_constraint_t
+ __del__ = lambda self : None;
+ def object_class(self, *args): return _qpol.qpol_constraint_t_object_class(self, *args)
+ @QpolGenerator(_qpol.to_str)
+ def perm_iter(self, *args): return _qpol.qpol_constraint_t_perm_iter(self, *args)
+ @QpolGenerator(_qpol.qpol_constraint_expr_node_from_void)
+ def expr_iter(self, *args): return _qpol.qpol_constraint_t_expr_iter(self, *args)
+qpol_constraint_t_swigregister = _qpol.qpol_constraint_t_swigregister
+qpol_constraint_t_swigregister(qpol_constraint_t)
+
+
+def qpol_constraint_from_void(*args):
+ return _qpol.qpol_constraint_from_void(*args)
+qpol_constraint_from_void = _qpol.qpol_constraint_from_void
+class qpol_validatetrans_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_validatetrans_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_validatetrans_t, name)
+ __repr__ = _swig_repr
+ def __init__(self):
+ this = _qpol.new_qpol_validatetrans_t()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_validatetrans_t
+ __del__ = lambda self : None;
+ def object_class(self, *args): return _qpol.qpol_validatetrans_t_object_class(self, *args)
+ @QpolGenerator(_qpol.qpol_constraint_expr_node_from_void)
+ def expr_iter(self, *args): return _qpol.qpol_validatetrans_t_expr_iter(self, *args)
+qpol_validatetrans_t_swigregister = _qpol.qpol_validatetrans_t_swigregister
+qpol_validatetrans_t_swigregister(qpol_validatetrans_t)
+
+
+def qpol_validatetrans_from_void(*args):
+ return _qpol.qpol_validatetrans_from_void(*args)
+qpol_validatetrans_from_void = _qpol.qpol_validatetrans_from_void
+QPOL_CEXPR_TYPE_NOT = _qpol.QPOL_CEXPR_TYPE_NOT
+QPOL_CEXPR_TYPE_AND = _qpol.QPOL_CEXPR_TYPE_AND
+QPOL_CEXPR_TYPE_OR = _qpol.QPOL_CEXPR_TYPE_OR
+QPOL_CEXPR_TYPE_ATTR = _qpol.QPOL_CEXPR_TYPE_ATTR
+QPOL_CEXPR_TYPE_NAMES = _qpol.QPOL_CEXPR_TYPE_NAMES
+QPOL_CEXPR_SYM_USER = _qpol.QPOL_CEXPR_SYM_USER
+QPOL_CEXPR_SYM_ROLE = _qpol.QPOL_CEXPR_SYM_ROLE
+QPOL_CEXPR_SYM_TYPE = _qpol.QPOL_CEXPR_SYM_TYPE
+QPOL_CEXPR_SYM_TARGET = _qpol.QPOL_CEXPR_SYM_TARGET
+QPOL_CEXPR_SYM_XTARGET = _qpol.QPOL_CEXPR_SYM_XTARGET
+QPOL_CEXPR_SYM_L1L2 = _qpol.QPOL_CEXPR_SYM_L1L2
+QPOL_CEXPR_SYM_L1H2 = _qpol.QPOL_CEXPR_SYM_L1H2
+QPOL_CEXPR_SYM_H1L2 = _qpol.QPOL_CEXPR_SYM_H1L2
+QPOL_CEXPR_SYM_H1H2 = _qpol.QPOL_CEXPR_SYM_H1H2
+QPOL_CEXPR_SYM_L1H1 = _qpol.QPOL_CEXPR_SYM_L1H1
+QPOL_CEXPR_SYM_L2H2 = _qpol.QPOL_CEXPR_SYM_L2H2
+QPOL_CEXPR_OP_EQ = _qpol.QPOL_CEXPR_OP_EQ
+QPOL_CEXPR_OP_NEQ = _qpol.QPOL_CEXPR_OP_NEQ
+QPOL_CEXPR_OP_DOM = _qpol.QPOL_CEXPR_OP_DOM
+QPOL_CEXPR_OP_DOMBY = _qpol.QPOL_CEXPR_OP_DOMBY
+QPOL_CEXPR_OP_INCOMP = _qpol.QPOL_CEXPR_OP_INCOMP
+class qpol_constraint_expr_node_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_constraint_expr_node_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_constraint_expr_node_t, name)
+ __repr__ = _swig_repr
+ def __init__(self):
+ this = _qpol.new_qpol_constraint_expr_node_t()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_constraint_expr_node_t
+ __del__ = lambda self : None;
+ def expr_type(self, *args): return _qpol.qpol_constraint_expr_node_t_expr_type(self, *args)
+ def sym_type(self, *args): return _qpol.qpol_constraint_expr_node_t_sym_type(self, *args)
+ def op(self, *args): return _qpol.qpol_constraint_expr_node_t_op(self, *args)
+ @QpolGenerator(_qpol.to_str)
+ def names_iter(self, *args): return _qpol.qpol_constraint_expr_node_t_names_iter(self, *args)
+qpol_constraint_expr_node_t_swigregister = _qpol.qpol_constraint_expr_node_t_swigregister
+qpol_constraint_expr_node_t_swigregister(qpol_constraint_expr_node_t)
+
+
+def qpol_constraint_expr_node_from_void(*args):
+ return _qpol.qpol_constraint_expr_node_from_void(*args)
+qpol_constraint_expr_node_from_void = _qpol.qpol_constraint_expr_node_from_void
+class qpol_role_allow_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_role_allow_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_role_allow_t, name)
+ __repr__ = _swig_repr
+ def __init__(self):
+ this = _qpol.new_qpol_role_allow_t()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_role_allow_t
+ __del__ = lambda self : None;
+ def rule_type(self,policy):
+ return "allow"
+
+ def source_role(self, *args): return _qpol.qpol_role_allow_t_source_role(self, *args)
+ def target_role(self, *args): return _qpol.qpol_role_allow_t_target_role(self, *args)
+qpol_role_allow_t_swigregister = _qpol.qpol_role_allow_t_swigregister
+qpol_role_allow_t_swigregister(qpol_role_allow_t)
+
+
+def qpol_role_allow_from_void(*args):
+ return _qpol.qpol_role_allow_from_void(*args)
+qpol_role_allow_from_void = _qpol.qpol_role_allow_from_void
+class qpol_role_trans_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_role_trans_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_role_trans_t, name)
+ __repr__ = _swig_repr
+ def __init__(self):
+ this = _qpol.new_qpol_role_trans_t()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_role_trans_t
+ __del__ = lambda self : None;
+ def rule_type(self,policy):
+ return "role_transition"
+
+ def source_role(self, *args): return _qpol.qpol_role_trans_t_source_role(self, *args)
+ def target_type(self, *args): return _qpol.qpol_role_trans_t_target_type(self, *args)
+ def object_class(self, *args): return _qpol.qpol_role_trans_t_object_class(self, *args)
+ def default_role(self, *args): return _qpol.qpol_role_trans_t_default_role(self, *args)
+qpol_role_trans_t_swigregister = _qpol.qpol_role_trans_t_swigregister
+qpol_role_trans_t_swigregister(qpol_role_trans_t)
+
+
+def qpol_role_trans_from_void(*args):
+ return _qpol.qpol_role_trans_from_void(*args)
+qpol_role_trans_from_void = _qpol.qpol_role_trans_from_void
+class qpol_range_trans_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_range_trans_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_range_trans_t, name)
+ __repr__ = _swig_repr
+ def __init__(self):
+ this = _qpol.new_qpol_range_trans_t()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_range_trans_t
+ __del__ = lambda self : None;
+ def rule_type(self,policy):
+ return "range_transition"
+
+ def source_type(self, *args): return _qpol.qpol_range_trans_t_source_type(self, *args)
+ def target_type(self, *args): return _qpol.qpol_range_trans_t_target_type(self, *args)
+ def object_class(self, *args): return _qpol.qpol_range_trans_t_object_class(self, *args)
+ def range(self, *args): return _qpol.qpol_range_trans_t_range(self, *args)
+qpol_range_trans_t_swigregister = _qpol.qpol_range_trans_t_swigregister
+qpol_range_trans_t_swigregister(qpol_range_trans_t)
+
+
+def qpol_range_trans_from_void(*args):
+ return _qpol.qpol_range_trans_from_void(*args)
+qpol_range_trans_from_void = _qpol.qpol_range_trans_from_void
+QPOL_RULE_ALLOW = _qpol.QPOL_RULE_ALLOW
+QPOL_RULE_NEVERALLOW = _qpol.QPOL_RULE_NEVERALLOW
+QPOL_RULE_AUDITALLOW = _qpol.QPOL_RULE_AUDITALLOW
+QPOL_RULE_DONTAUDIT = _qpol.QPOL_RULE_DONTAUDIT
+class qpol_avrule_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_avrule_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_avrule_t, name)
+ __repr__ = _swig_repr
+ def __init__(self):
+ this = _qpol.new_qpol_avrule_t()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_avrule_t
+ __del__ = lambda self : None;
+ def rule_type(self, *args): return _qpol.qpol_avrule_t_rule_type(self, *args)
+ def source_type(self, *args): return _qpol.qpol_avrule_t_source_type(self, *args)
+ def target_type(self, *args): return _qpol.qpol_avrule_t_target_type(self, *args)
+ def object_class(self, *args): return _qpol.qpol_avrule_t_object_class(self, *args)
+ @QpolGenerator(_qpol.to_str)
+ def perm_iter(self, *args): return _qpol.qpol_avrule_t_perm_iter(self, *args)
+ def cond(self, *args): return _qpol.qpol_avrule_t_cond(self, *args)
+ def is_enabled(self, *args): return _qpol.qpol_avrule_t_is_enabled(self, *args)
+ def which_list(self, *args): return _qpol.qpol_avrule_t_which_list(self, *args)
+ def syn_avrule_iter(self, *args): return _qpol.qpol_avrule_t_syn_avrule_iter(self, *args)
+qpol_avrule_t_swigregister = _qpol.qpol_avrule_t_swigregister
+qpol_avrule_t_swigregister(qpol_avrule_t)
+
+
+def qpol_avrule_from_void(*args):
+ return _qpol.qpol_avrule_from_void(*args)
+qpol_avrule_from_void = _qpol.qpol_avrule_from_void
+QPOL_RULE_TYPE_TRANS = _qpol.QPOL_RULE_TYPE_TRANS
+QPOL_RULE_TYPE_CHANGE = _qpol.QPOL_RULE_TYPE_CHANGE
+QPOL_RULE_TYPE_MEMBER = _qpol.QPOL_RULE_TYPE_MEMBER
+class qpol_terule_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_terule_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_terule_t, name)
+ __repr__ = _swig_repr
+ def __init__(self):
+ this = _qpol.new_qpol_terule_t()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_terule_t
+ __del__ = lambda self : None;
+ def rule_type(self, *args): return _qpol.qpol_terule_t_rule_type(self, *args)
+ def source_type(self, *args): return _qpol.qpol_terule_t_source_type(self, *args)
+ def target_type(self, *args): return _qpol.qpol_terule_t_target_type(self, *args)
+ def object_class(self, *args): return _qpol.qpol_terule_t_object_class(self, *args)
+ def default_type(self, *args): return _qpol.qpol_terule_t_default_type(self, *args)
+ def cond(self, *args): return _qpol.qpol_terule_t_cond(self, *args)
+ def is_enabled(self, *args): return _qpol.qpol_terule_t_is_enabled(self, *args)
+ def which_list(self, *args): return _qpol.qpol_terule_t_which_list(self, *args)
+ def syn_terule_iter(self, *args): return _qpol.qpol_terule_t_syn_terule_iter(self, *args)
+qpol_terule_t_swigregister = _qpol.qpol_terule_t_swigregister
+qpol_terule_t_swigregister(qpol_terule_t)
+
+
+def qpol_terule_from_void(*args):
+ return _qpol.qpol_terule_from_void(*args)
+qpol_terule_from_void = _qpol.qpol_terule_from_void
+class qpol_cond_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_cond_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_cond_t, name)
+ __repr__ = _swig_repr
+ def __init__(self):
+ this = _qpol.new_qpol_cond_t()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_cond_t
+ __del__ = lambda self : None;
+ @QpolGenerator(_qpol.qpol_cond_expr_node_from_void)
+ def expr_node_iter(self, *args): return _qpol.qpol_cond_t_expr_node_iter(self, *args)
+ def av_true_iter(self, *args): return _qpol.qpol_cond_t_av_true_iter(self, *args)
+ def av_false_iter(self, *args): return _qpol.qpol_cond_t_av_false_iter(self, *args)
+ def te_true_iter(self, *args): return _qpol.qpol_cond_t_te_true_iter(self, *args)
+ def te_false_iter(self, *args): return _qpol.qpol_cond_t_te_false_iter(self, *args)
+ def evaluate(self, *args): return _qpol.qpol_cond_t_evaluate(self, *args)
+qpol_cond_t_swigregister = _qpol.qpol_cond_t_swigregister
+qpol_cond_t_swigregister(qpol_cond_t)
+
+
+def qpol_cond_from_void(*args):
+ return _qpol.qpol_cond_from_void(*args)
+qpol_cond_from_void = _qpol.qpol_cond_from_void
+QPOL_COND_EXPR_BOOL = _qpol.QPOL_COND_EXPR_BOOL
+QPOL_COND_EXPR_NOT = _qpol.QPOL_COND_EXPR_NOT
+QPOL_COND_EXPR_OR = _qpol.QPOL_COND_EXPR_OR
+QPOL_COND_EXPR_AND = _qpol.QPOL_COND_EXPR_AND
+QPOL_COND_EXPR_XOR = _qpol.QPOL_COND_EXPR_XOR
+QPOL_COND_EXPR_EQ = _qpol.QPOL_COND_EXPR_EQ
+QPOL_COND_EXPR_NEQ = _qpol.QPOL_COND_EXPR_NEQ
+class qpol_cond_expr_node_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_cond_expr_node_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_cond_expr_node_t, name)
+ __repr__ = _swig_repr
+ def __init__(self):
+ this = _qpol.new_qpol_cond_expr_node_t()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_cond_expr_node_t
+ __del__ = lambda self : None;
+ def expr_type(self, *args): return _qpol.qpol_cond_expr_node_t_expr_type(self, *args)
+ def get_boolean(self, *args): return _qpol.qpol_cond_expr_node_t_get_boolean(self, *args)
+qpol_cond_expr_node_t_swigregister = _qpol.qpol_cond_expr_node_t_swigregister
+qpol_cond_expr_node_t_swigregister(qpol_cond_expr_node_t)
+
+
+def qpol_cond_expr_node_from_void(*args):
+ return _qpol.qpol_cond_expr_node_from_void(*args)
+qpol_cond_expr_node_from_void = _qpol.qpol_cond_expr_node_from_void
+class qpol_filename_trans_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_filename_trans_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_filename_trans_t, name)
+ __repr__ = _swig_repr
+ def __init__(self):
+ this = _qpol.new_qpol_filename_trans_t()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_filename_trans_t
+ __del__ = lambda self : None;
+ def rule_type(self,policy):
+ return "type_transition"
+
+ def source_type(self, *args): return _qpol.qpol_filename_trans_t_source_type(self, *args)
+ def target_type(self, *args): return _qpol.qpol_filename_trans_t_target_type(self, *args)
+ def object_class(self, *args): return _qpol.qpol_filename_trans_t_object_class(self, *args)
+ def default_type(self, *args): return _qpol.qpol_filename_trans_t_default_type(self, *args)
+ def filename(self, *args): return _qpol.qpol_filename_trans_t_filename(self, *args)
+qpol_filename_trans_t_swigregister = _qpol.qpol_filename_trans_t_swigregister
+qpol_filename_trans_t_swigregister(qpol_filename_trans_t)
+
+
+def qpol_filename_trans_from_void(*args):
+ return _qpol.qpol_filename_trans_from_void(*args)
+qpol_filename_trans_from_void = _qpol.qpol_filename_trans_from_void
+class qpol_polcap_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_polcap_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_polcap_t, name)
+ __repr__ = _swig_repr
+ def __init__(self):
+ this = _qpol.new_qpol_polcap_t()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_polcap_t
+ __del__ = lambda self : None;
+ def name(self, *args): return _qpol.qpol_polcap_t_name(self, *args)
+qpol_polcap_t_swigregister = _qpol.qpol_polcap_t_swigregister
+qpol_polcap_t_swigregister(qpol_polcap_t)
+
+
+def qpol_polcap_from_void(*args):
+ return _qpol.qpol_polcap_from_void(*args)
+qpol_polcap_from_void = _qpol.qpol_polcap_from_void
+class qpol_typebounds_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_typebounds_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_typebounds_t, name)
+ __repr__ = _swig_repr
+ def __init__(self):
+ this = _qpol.new_qpol_typebounds_t()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_typebounds_t
+ __del__ = lambda self : None;
+ def parent_name(self, *args): return _qpol.qpol_typebounds_t_parent_name(self, *args)
+ def child_name(self, *args): return _qpol.qpol_typebounds_t_child_name(self, *args)
+qpol_typebounds_t_swigregister = _qpol.qpol_typebounds_t_swigregister
+qpol_typebounds_t_swigregister(qpol_typebounds_t)
+
+
+def qpol_typebounds_from_void(*args):
+ return _qpol.qpol_typebounds_from_void(*args)
+qpol_typebounds_from_void = _qpol.qpol_typebounds_from_void
+class qpol_rolebounds_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_rolebounds_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_rolebounds_t, name)
+ __repr__ = _swig_repr
+ def __init__(self):
+ this = _qpol.new_qpol_rolebounds_t()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_rolebounds_t
+ __del__ = lambda self : None;
+ def parent_name(self, *args): return _qpol.qpol_rolebounds_t_parent_name(self, *args)
+ def child_name(self, *args): return _qpol.qpol_rolebounds_t_child_name(self, *args)
+qpol_rolebounds_t_swigregister = _qpol.qpol_rolebounds_t_swigregister
+qpol_rolebounds_t_swigregister(qpol_rolebounds_t)
+
+
+def qpol_rolebounds_from_void(*args):
+ return _qpol.qpol_rolebounds_from_void(*args)
+qpol_rolebounds_from_void = _qpol.qpol_rolebounds_from_void
+class qpol_userbounds_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_userbounds_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_userbounds_t, name)
+ __repr__ = _swig_repr
+ def __init__(self):
+ this = _qpol.new_qpol_userbounds_t()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_userbounds_t
+ __del__ = lambda self : None;
+ def parent_name(self, *args): return _qpol.qpol_userbounds_t_parent_name(self, *args)
+ def child_name(self, *args): return _qpol.qpol_userbounds_t_child_name(self, *args)
+qpol_userbounds_t_swigregister = _qpol.qpol_userbounds_t_swigregister
+qpol_userbounds_t_swigregister(qpol_userbounds_t)
+
+
+def qpol_userbounds_from_void(*args):
+ return _qpol.qpol_userbounds_from_void(*args)
+qpol_userbounds_from_void = _qpol.qpol_userbounds_from_void
+class qpol_default_object_t(_object):
+ __swig_setmethods__ = {}
+ __setattr__ = lambda self, name, value: _swig_setattr(self, qpol_default_object_t, name, value)
+ __swig_getmethods__ = {}
+ __getattr__ = lambda self, name: _swig_getattr(self, qpol_default_object_t, name)
+ __repr__ = _swig_repr
+ def __init__(self):
+ this = _qpol.new_qpol_default_object_t()
+ try: self.this.append(this)
+ except: self.this = this
+ __swig_destroy__ = _qpol.delete_qpol_default_object_t
+ __del__ = lambda self : None;
+ def object_class(self, *args): return _qpol.qpol_default_object_t_object_class(self, *args)
+ def user_default(self, *args): return _qpol.qpol_default_object_t_user_default(self, *args)
+ def role_default(self, *args): return _qpol.qpol_default_object_t_role_default(self, *args)
+ def type_default(self, *args): return _qpol.qpol_default_object_t_type_default(self, *args)
+ def range_default(self, *args): return _qpol.qpol_default_object_t_range_default(self, *args)
+qpol_default_object_t_swigregister = _qpol.qpol_default_object_t_swigregister
+qpol_default_object_t_swigregister(qpol_default_object_t)
+
+
+def qpol_default_object_from_void(*args):
+ return _qpol.qpol_default_object_from_void(*args)
+qpol_default_object_from_void = _qpol.qpol_default_object_from_void
+# This file is compatible with both classic and new-style classes.
+
+
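A short usage sketch for the bounds wrappers above; this is an illustration, not part of the diff. Assumptions: `p` is an open low-level qpol policy handle and `raw` is an opaque pointer produced by a qpol iterator (the iterator itself is defined elsewhere in this generated module).

    from setoolsgui.setools.policyrep import qpol

    bounds = qpol.qpol_typebounds_from_void(raw)   # rebind the void* to the wrapper class
    print(bounds.parent_name(p), bounds.child_name(p))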
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/policyrep/rbacrule.py b/lib/python2.7/site-packages/setoolsgui/setools/policyrep/rbacrule.py
new file mode 100644
index 0000000..aa6a0d0
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/policyrep/rbacrule.py
@@ -0,0 +1,92 @@
+# Copyright 2014, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+from . import exception
+from . import qpol
+from . import rule
+from . import role
+from . import typeattr
+
+
+def rbac_rule_factory(policy, name):
+ """Factory function for creating RBAC rule objects."""
+
+ if isinstance(name, qpol.qpol_role_allow_t):
+ return RoleAllow(policy, name)
+ elif isinstance(name, qpol.qpol_role_trans_t):
+ return RoleTransition(policy, name)
+ else:
+ raise TypeError("RBAC rules cannot be looked up.")
+
+
+def validate_ruletype(types):
+ """Validate RBAC rule types."""
+ for t in types:
+ if t not in ["allow", "role_transition"]:
+ raise exception.InvalidRBACRuleType("{0} is not a valid RBAC rule type.".format(t))
+
+
+class RoleAllow(rule.PolicyRule):
+
+ """A role allow rule."""
+
+ def __str__(self):
+ return "allow {0.source} {0.target};".format(self)
+
+ @property
+ def source(self):
+ """The rule's source role."""
+ return role.role_factory(self.policy, self.qpol_symbol.source_role(self.policy))
+
+ @property
+ def target(self):
+ """The rule's target role."""
+ return role.role_factory(self.policy, self.qpol_symbol.target_role(self.policy))
+
+ @property
+ def tclass(self):
+ """The rule's object class."""
+ raise exception.RuleUseError("Role allow rules do not have an object class.")
+
+ @property
+ def default(self):
+ """The rule's default role."""
+ raise exception.RuleUseError("Role allow rules do not have a default role.")
+
+
+class RoleTransition(rule.PolicyRule):
+
+ """A role_transition rule."""
+
+ def __str__(self):
+ return "role_transition {0.source} {0.target}:{0.tclass} {0.default};".format(self)
+
+ @property
+ def source(self):
+ """The rule's source role."""
+ return role.role_factory(self.policy, self.qpol_symbol.source_role(self.policy))
+
+ @property
+ def target(self):
+ """The rule's target type/attribute."""
+ return typeattr.type_or_attr_factory(self.policy, self.qpol_symbol.target_type(self.policy))
+
+ @property
+ def default(self):
+ """The rule's default role."""
+ return role.role_factory(self.policy, self.qpol_symbol.default_role(self.policy))
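A brief usage sketch for the factory and rule classes above. Assumption: `p` is a loaded policy object exposing an rbacrules() iterator, as the query classes later in this change expect.

    from setoolsgui.setools.policyrep import exception

    for r in p.rbacrules():
        print(r)                       # "allow a b;" or "role_transition a b:c d;"
        try:
            print("  class:", r.tclass)
        except exception.RuleUseError:
            pass                       # role allow rules have no object class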
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/policyrep/role.py b/lib/python2.7/site-packages/setoolsgui/setools/policyrep/role.py
new file mode 100644
index 0000000..1d9fbe1
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/policyrep/role.py
@@ -0,0 +1,81 @@
+# Copyright 2014, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+from . import exception
+from . import qpol
+from . import symbol
+from . import typeattr
+
+
+def role_factory(qpol_policy, name):
+ """Factory function for creating Role objects."""
+
+ if isinstance(name, Role):
+ assert name.policy == qpol_policy
+ return name
+ elif isinstance(name, qpol.qpol_role_t):
+ return Role(qpol_policy, name)
+
+ try:
+ return Role(qpol_policy, qpol.qpol_role_t(qpol_policy, str(name)))
+ except ValueError:
+ raise exception.InvalidRole("{0} is not a valid role".format(name))
+
+
+class BaseRole(symbol.PolicySymbol):
+
+ """Role/role attribute base class."""
+
+ def expand(self):
+ raise NotImplementedError
+
+ def types(self):
+ raise NotImplementedError
+
+
+class Role(BaseRole):
+
+ """A role."""
+
+ def expand(self):
+ """Generator that expands this into its member roles."""
+ yield self
+
+ def types(self):
+ """Generator which yields the role's set of types."""
+
+ for type_ in self.qpol_symbol.type_iter(self.policy):
+ yield typeattr.type_or_attr_factory(self.policy, type_)
+
+ def statement(self):
+ types = list(str(t) for t in self.types())
+ stmt = "role {0}".format(self)
+ if types:
+ if len(types) > 1:
+ stmt += " types {{ {0} }}".format(' '.join(types))
+ else:
+ stmt += " types {0}".format(types[0])
+ stmt += ";"
+ return stmt
+
+
+class RoleAttribute(BaseRole):
+
+ """A role attribute."""
+
+ pass
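A minimal sketch of role_factory and Role.statement(), assuming `p` is a loaded policy object containing a role named "staff_r":

    r = role_factory(p, "staff_r")
    print(r.statement())                      # e.g. role staff_r types { t1 t2 };
    print(sorted(str(t) for t in r.types()))  # the role's type set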
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/policyrep/rule.py b/lib/python2.7/site-packages/setoolsgui/setools/policyrep/rule.py
new file mode 100644
index 0000000..73fc812
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/policyrep/rule.py
@@ -0,0 +1,72 @@
+# Copyright 2014, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+from . import exception
+from . import symbol
+from . import objclass
+
+
+class PolicyRule(symbol.PolicySymbol):
+
+ """This is base class for policy rules."""
+
+ def __str__(self):
+ raise NotImplementedError
+
+ @property
+ def ruletype(self):
+ """The rule type for the rule."""
+ return self.qpol_symbol.rule_type(self.policy)
+
+ @property
+ def source(self):
+ """
+ The source for the rule. This should be overridden by
+ subclasses.
+ """
+ raise NotImplementedError
+
+ @property
+ def target(self):
+ """
+ The target for the rule. This should be overridden by
+ subclasses.
+ """
+ raise NotImplementedError
+
+ @property
+ def tclass(self):
+ """The object class for the rule."""
+ return objclass.class_factory(self.policy, self.qpol_symbol.object_class(self.policy))
+
+ @property
+ def default(self):
+ """
+ The default for the rule. This should be overridden by
+ subclasses.
+ """
+ raise NotImplementedError
+
+ @property
+ def conditional(self):
+ """The conditional expression for this rule."""
+ # Most rules cannot be conditional.
+ raise exception.RuleNotConditional
+
+ def statement(self):
+ return str(self)
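PolicyRule is a contract: subclasses such as the RBAC and TE rules in this change override __str__, source, target, and default, and inherit ruletype, tclass, conditional, and statement(). A hypothetical minimal subclass, for illustration only:

    class ExampleRule(PolicyRule):
        """Hypothetical rule class; not part of this change."""

        def __str__(self):
            return "{0.ruletype} {0.source} {0.target}:{0.tclass};".format(self)

        @property
        def source(self):
            # the real subclasses wrap this low-level lookup in a factory call
            return self.qpol_symbol.source_type(self.policy)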
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/policyrep/symbol.py b/lib/python2.7/site-packages/setoolsgui/setools/policyrep/symbol.py
new file mode 100644
index 0000000..4712d7f
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/policyrep/symbol.py
@@ -0,0 +1,74 @@
+# Copyright 2014, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+
+
+class PolicySymbol(object):
+
+ """This is a base class for all policy objects."""
+
+ def __init__(self, policy, qpol_symbol):
+ """
+ Parameters:
+ policy The low-level policy object.
+ qpol_symbol The low-level policy symbol object.
+ """
+
+ assert qpol_symbol
+
+ self.policy = policy
+ self.qpol_symbol = qpol_symbol
+
+ def __str__(self):
+ return self.qpol_symbol.name(self.policy)
+
+ def __hash__(self):
+ return hash(self.qpol_symbol.name(self.policy))
+
+ def __eq__(self, other):
+ try:
+ return self.qpol_symbol.this == other.qpol_symbol.this
+ except AttributeError:
+ return str(self) == str(other)
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __lt__(self, other):
+ """Comparison used by Python sorting functions."""
+ return str(self) < str(other)
+
+ def __repr__(self):
+ return "<{0.__class__.__name__}(<qpol_policy_t id={1}>,\"{0}\")>".format(
+ self, id(self.policy))
+
+ def __deepcopy__(self, memo):
+ # shallow copy as all of the members are immutable
+ cls = self.__class__
+ newobj = cls.__new__(cls)
+ newobj.policy = self.policy
+ newobj.qpol_symbol = self.qpol_symbol
+ memo[id(self)] = newobj
+ return newobj
+
+ def statement(self):
+ """
+ A rendering of the policy statement. This should be
+ overridden by subclasses.
+ """
+ raise NotImplementedError
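One consequence of the comparison methods above is worth noting: because __hash__ and the __eq__ fallback both reduce to the symbol's name, a PolicySymbol can be looked up in sets and dicts by its plain string name. Assuming `p` is a loaded policy object:

    names = set(p.types())   # a set of Type objects
    "init" in names          # True if a type named "init" exists (name-based hash/eq)
    sorted(names)            # orders by name via __lt__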
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/policyrep/terule.py b/lib/python2.7/site-packages/setoolsgui/setools/policyrep/terule.py
new file mode 100644
index 0000000..d8a9e94
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/policyrep/terule.py
@@ -0,0 +1,155 @@
+# Copyright 2014, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+from . import exception
+from . import qpol
+from . import rule
+from . import typeattr
+from . import boolcond
+
+
+def te_rule_factory(policy, symbol):
+ """Factory function for creating TE rule objects."""
+
+ if isinstance(symbol, qpol.qpol_avrule_t):
+ return AVRule(policy, symbol)
+ elif isinstance(symbol, (qpol.qpol_terule_t, qpol.qpol_filename_trans_t)):
+ return TERule(policy, symbol)
+ else:
+ raise TypeError("TE rules cannot be looked-up.")
+
+
+def validate_ruletype(types):
+ """Validate TE Rule types."""
+ for t in types:
+ if t not in ["allow", "auditallow", "dontaudit", "neverallow",
+ "type_transition", "type_member", "type_change"]:
+ raise exception.InvalidTERuleType("{0} is not a valid TE rule type.".format(t))
+
+
+class BaseTERule(rule.PolicyRule):
+
+ """A type enforcement rule."""
+
+ @property
+ def source(self):
+ """The rule's source type/attribute."""
+ return typeattr.type_or_attr_factory(self.policy, self.qpol_symbol.source_type(self.policy))
+
+ @property
+ def target(self):
+ """The rule's target type/attribute."""
+ return typeattr.type_or_attr_factory(self.policy, self.qpol_symbol.target_type(self.policy))
+
+ @property
+ def filename(self):
+ raise NotImplementedError
+
+ @property
+ def conditional(self):
+ """The rule's conditional expression."""
+ try:
+ return boolcond.condexpr_factory(self.policy, self.qpol_symbol.cond(self.policy))
+ except (AttributeError, ValueError):
+ # AttributeError: named file transition rules cannot be
+ # conditional, so the member function does not exist
+ # ValueError: The rule is not conditional
+ raise exception.RuleNotConditional
+
+
+class AVRule(BaseTERule):
+
+ """An access vector type enforcement rule."""
+
+ def __str__(self):
+ rule_string = "{0.ruletype} {0.source} {0.target}:{0.tclass} ".format(
+ self)
+
+ perms = self.perms
+
+ # allow/dontaudit/auditallow/neverallow rules
+ if len(perms) > 1:
+ rule_string += "{{ {0} }};".format(' '.join(perms))
+ else:
+ # convert to list since sets cannot be indexed
+ rule_string += "{0};".format(list(perms)[0])
+
+ try:
+ rule_string += " [ {0} ]".format(self.conditional)
+ except exception.RuleNotConditional:
+ pass
+
+ return rule_string
+
+ @property
+ def perms(self):
+ """The rule's permission set."""
+ return set(self.qpol_symbol.perm_iter(self.policy))
+
+ @property
+ def default(self):
+ """The rule's default type."""
+ raise exception.RuleUseError("{0} rules do not have a default type.".format(self.ruletype))
+
+ @property
+ def filename(self):
+ raise exception.RuleUseError("{0} rules do not have file names".format(self.ruletype))
+
+
+class TERule(BaseTERule):
+
+ """A type_* type enforcement rule."""
+
+ def __str__(self):
+ rule_string = "{0.ruletype} {0.source} {0.target}:{0.tclass} {0.default}".format(self)
+
+ try:
+ rule_string += " \"{0}\";".format(self.filename)
+ except (exception.TERuleNoFilename, exception.RuleUseError):
+ # invalid use for type_change/member
+ rule_string += ";"
+
+ try:
+ rule_string += " [ {0} ]".format(self.conditional)
+ except exception.RuleNotConditional:
+ pass
+
+ return rule_string
+
+ @property
+ def perms(self):
+ """The rule's permission set."""
+ raise exception.RuleUseError(
+ "{0} rules do not have a permission set.".format(self.ruletype))
+
+ @property
+ def default(self):
+ """The rule's default type."""
+ return typeattr.type_factory(self.policy, self.qpol_symbol.default_type(self.policy))
+
+ @property
+ def filename(self):
+ """The type_transition rule's file name."""
+ try:
+ return self.qpol_symbol.filename(self.policy)
+ except AttributeError:
+ if self.ruletype == "type_transition":
+ raise exception.TERuleNoFilename
+ else:
+ raise exception.RuleUseError("{0} rules do not have file names".
+ format(self.ruletype))
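A short sketch of how these rules render, assuming `p` is a loaded policy object:

    from setoolsgui.setools.policyrep import exception

    for r in p.terules():
        print(r)   # e.g. allow a b:file { read write }; [ bool_expr ]
        if r.ruletype == "type_transition":
            try:
                print("  filename:", r.filename)
            except exception.TERuleNoFilename:
                pass   # an unnamed type_transition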
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/policyrep/typeattr.py b/lib/python2.7/site-packages/setoolsgui/setools/policyrep/typeattr.py
new file mode 100644
index 0000000..a52c69a
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/policyrep/typeattr.py
@@ -0,0 +1,174 @@
+# Copyright 2014, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+from . import exception
+from . import qpol
+from . import symbol
+
+
+def _symbol_lookup(qpol_policy, name):
+ """Look up the low-level qpol policy reference"""
+ if isinstance(name, qpol.qpol_type_t):
+ return name
+
+ try:
+ return qpol.qpol_type_t(qpol_policy, str(name))
+ except ValueError:
+ raise exception.InvalidType("{0} is not a valid type/attribute".format(name))
+
+
+def attribute_factory(qpol_policy, name):
+ """Factory function for creating attribute objects."""
+
+ if isinstance(name, TypeAttribute):
+ assert name.policy == qpol_policy
+ return name
+
+ qpol_symbol = _symbol_lookup(qpol_policy, name)
+
+ if not qpol_symbol.isattr(qpol_policy):
+ raise TypeError("{0} is a type".format(qpol_symbol.name(qpol_policy)))
+
+ return TypeAttribute(qpol_policy, qpol_symbol)
+
+
+def type_factory(qpol_policy, name, deref=False):
+ """Factory function for creating type objects."""
+
+ if isinstance(name, Type):
+ assert name.policy == qpol_policy
+ return name
+
+ qpol_symbol = _symbol_lookup(qpol_policy, name)
+
+ if qpol_symbol.isattr(qpol_policy):
+ raise TypeError("{0} is an attribute".format(qpol_symbol.name(qpol_policy)))
+ elif qpol_symbol.isalias(qpol_policy) and not deref:
+ raise TypeError("{0} is an alias.".format(qpol_symbol.name(qpol_policy)))
+
+ return Type(qpol_policy, qpol_symbol)
+
+
+def type_or_attr_factory(qpol_policy, name, deref=False):
+ """Factory function for creating type or attribute objects."""
+
+ if isinstance(name, (Type, TypeAttribute)):
+ assert name.policy == qpol_policy
+ return name
+
+ qpol_symbol = _symbol_lookup(qpol_policy, name)
+
+ if qpol_symbol.isalias(qpol_policy) and not deref:
+ raise TypeError("{0} is an alias.".format(qpol_symbol.name(qpol_policy)))
+
+ if qpol_symbol.isattr(qpol_policy):
+ return TypeAttribute(qpol_policy, qpol_symbol)
+ else:
+ return Type(qpol_policy, qpol_symbol)
+
+
+class BaseType(symbol.PolicySymbol):
+
+ """Type/attribute base class."""
+
+ @property
+ def ispermissive(self):
+ raise NotImplementedError
+
+ def expand(self):
+ """Generator that expands this attribute into its member types."""
+ raise NotImplementedError
+
+ def attributes(self):
+ """Generator that yields all attributes for this type."""
+ raise NotImplementedError
+
+ def aliases(self):
+ """Generator that yields all aliases for this type."""
+ raise NotImplementedError
+
+
+class Type(BaseType):
+
+ """A type."""
+
+ @property
+ def ispermissive(self):
+ """(T/F) the type is permissive."""
+ return self.qpol_symbol.ispermissive(self.policy)
+
+ def expand(self):
+ """Generator that expands this into its member types."""
+ yield self
+
+ def attributes(self):
+ """Generator that yields all attributes for this type."""
+ for attr in self.qpol_symbol.attr_iter(self.policy):
+ yield attribute_factory(self.policy, attr)
+
+ def aliases(self):
+ """Generator that yields all aliases for this type."""
+ for alias in self.qpol_symbol.alias_iter(self.policy):
+ yield alias
+
+ def statement(self):
+ attrs = list(self.attributes())
+ aliases = list(self.aliases())
+ stmt = "type {0}".format(self)
+ if aliases:
+ if len(aliases) > 1:
+ stmt += " alias {{ {0} }}".format(' '.join(aliases))
+ else:
+ stmt += " alias {0}".format(aliases[0])
+ for attr in attrs:
+ stmt += ", {0}".format(attr)
+ stmt += ";"
+ return stmt
+
+
+class TypeAttribute(BaseType):
+
+ """An attribute."""
+
+ def __contains__(self, other):
+ for type_ in self.expand():
+ if other == type_:
+ return True
+
+ return False
+
+ def expand(self):
+ """Generator that expands this attribute into its member types."""
+ for type_ in self.qpol_symbol.type_iter(self.policy):
+ yield type_factory(self.policy, type_)
+
+ def attributes(self):
+ """Generator that yields all attributes for this type."""
+ raise TypeError("{0} is an attribute, thus does not have attributes.".format(self))
+
+ def aliases(self):
+ """Generator that yields all aliases for this type."""
+ raise TypeError("{0} is an attribute, thus does not have aliases.".format(self))
+
+ @property
+ def ispermissive(self):
+ """(T/F) the type is permissive."""
+ raise TypeError("{0} is an attribute, thus cannot be permissive.".format(self))
+
+ def statement(self):
+ return "attribute {0};".format(self)
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/policyrep/user.py b/lib/python2.7/site-packages/setoolsgui/setools/policyrep/user.py
new file mode 100644
index 0000000..94f81bc
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/policyrep/user.py
@@ -0,0 +1,86 @@
+# Copyright 2014, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+from . import exception
+from . import qpol
+from . import role
+from . import mls
+from . import symbol
+
+
+def user_factory(qpol_policy, name):
+ """Factory function for creating User objects."""
+
+ if isinstance(name, User):
+ assert name.policy == qpol_policy
+ return name
+ elif isinstance(name, qpol.qpol_user_t):
+ return User(qpol_policy, name)
+
+ try:
+ return User(qpol_policy, qpol.qpol_user_t(qpol_policy, str(name)))
+ except ValueError:
+ raise exception.InvalidUser("{0} is not a valid user".format(name))
+
+
+class User(symbol.PolicySymbol):
+
+ """A user."""
+
+ @property
+ def roles(self):
+ """The user's set of roles."""
+
+ roleset = set()
+
+ for role_ in self.qpol_symbol.role_iter(self.policy):
+ item = role.role_factory(self.policy, role_)
+
+ # object_r is implicitly added to all roles by the compiler.
+ # Technically it is incorrect to skip it, but policy writers
+ # and analysts don't expect to see it in results, and it would
+ # cause confusion, especially in role set equality user queries.
+ if item != "object_r":
+ roleset.add(item)
+
+ return roleset
+
+ @property
+ def mls_level(self):
+ """The user's default MLS level."""
+ return mls.level_factory(self.policy, self.qpol_symbol.dfltlevel(self.policy))
+
+ @property
+ def mls_range(self):
+ """The user's MLS range."""
+ return mls.range_factory(self.policy, self.qpol_symbol.range(self.policy))
+
+ def statement(self):
+ roles = list(str(r) for r in self.roles)
+ stmt = "user {0} roles ".format(self)
+ if len(roles) > 1:
+ stmt += "{{ {0} }}".format(' '.join(roles))
+ else:
+ stmt += roles[0]
+
+ try:
+ stmt += " level {0.mls_level} range {0.mls_range};".format(self)
+ except exception.MLSDisabled:
+ stmt += ";"
+
+ return stmt
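A minimal sketch, assuming `p` is a loaded policy object containing a user "staff_u":

    u = user_factory(p, "staff_u")
    print(sorted(str(r) for r in u.roles))   # object_r is filtered out
    print(u.statement())   # level/range is appended only if MLS is enabled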
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/portconquery.py b/lib/python2.7/site-packages/setoolsgui/setools/portconquery.py
new file mode 100644
index 0000000..798a828
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/portconquery.py
@@ -0,0 +1,146 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import logging
+from socket import IPPROTO_TCP, IPPROTO_UDP
+
+from . import contextquery
+from .policyrep.netcontext import port_range
+
+
+class PortconQuery(contextquery.ContextQuery):
+
+ """
+ Port context query.
+
+ Parameter:
+ policy The policy to query.
+
+ Keyword Parameters/Class attributes:
+ protocol The protocol to match (socket.IPPROTO_TCP for
+ TCP or socket.IPPROTO_UDP for UDP)
+
+ ports A 2-tuple of the port range to match. (Set both to
+ the same value for a single port)
+ ports_subset If true, the criteria will match if it is a subset
+ of the portcon's range.
+ ports_overlap If true, the criteria will match if it overlaps
+ any of the portcon's range.
+ ports_superset If true, the criteria will match if it is a superset
+ of the portcon's range.
+ ports_proper If true, use proper superset/subset operations.
+ No effect if not using set operations.
+
+ user The criteria to match the context's user.
+ user_regex If true, regular expression matching
+ will be used on the user.
+
+ role The criteria to match the context's role.
+ role_regex If true, regular expression matching
+ will be used on the role.
+
+ type_ The criteria to match the context's type.
+ type_regex If true, regular expression matching
+ will be used on the type.
+
+ range_ The criteria to match the context's range.
+ range_subset If true, the criteria will match if it is a subset
+ of the context's range.
+ range_overlap If true, the criteria will match if it overlaps
+ any of the context's range.
+ range_superset If true, the criteria will match if it is a superset
+ of the context's range.
+ range_proper If true, use proper superset/subset operations.
+ No effect if not using set operations.
+ """
+
+ _protocol = None
+ _ports = None
+ ports_subset = False
+ ports_overlap = False
+ ports_superset = False
+ ports_proper = False
+
+ @property
+ def ports(self):
+ return self._ports
+
+ @ports.setter
+ def ports(self, value):
+ pending_ports = port_range(*value)
+
+ if all(pending_ports):
+ if pending_ports.low < 1 or pending_ports.high < 1:
+ raise ValueError("Port numbers must be positive: {0.low}-{0.high}".
+ format(pending_ports))
+
+ if pending_ports.low > pending_ports.high:
+ raise ValueError(
+ "The low port must be smaller than the high port: {0.low}-{0.high}".
+ format(pending_ports))
+
+ self._ports = pending_ports
+ else:
+ self._ports = None
+
+ @property
+ def protocol(self):
+ return self._protocol
+
+ @protocol.setter
+ def protocol(self, value):
+ if value:
+ if not (value == IPPROTO_TCP or value == IPPROTO_UDP):
+ raise ValueError(
+ "The protocol must be {0} for TCP or {1} for UDP.".
+ format(IPPROTO_TCP, IPPROTO_UDP))
+
+ self._protocol = value
+ else:
+ self._protocol = None
+
+ def results(self):
+ """Generator which yields all matching portcons."""
+ self.log.info("Generating results from {0.policy}".format(self))
+ self.log.debug("Ports: {0.ports}, overlap: {0.ports_overlap}, "
+ "subset: {0.ports_subset}, superset: {0.ports_superset}, "
+ "proper: {0.ports_proper}".format(self))
+ self.log.debug("User: {0.user!r}, regex: {0.user_regex}".format(self))
+ self.log.debug("Role: {0.role!r}, regex: {0.role_regex}".format(self))
+ self.log.debug("Type: {0.type_!r}, regex: {0.type_regex}".format(self))
+ self.log.debug("Range: {0.range_!r}, subset: {0.range_subset}, overlap: {0.range_overlap}, "
+ "superset: {0.range_superset}, proper: {0.range_proper}".format(self))
+
+ for portcon in self.policy.portcons():
+
+ if self.ports and not self._match_range(
+ portcon.ports,
+ self.ports,
+ self.ports_subset,
+ self.ports_overlap,
+ self.ports_superset,
+ self.ports_proper):
+ continue
+
+ if self.protocol and self.protocol != portcon.protocol:
+ continue
+
+ if not self._match_context(portcon.context):
+ continue
+
+ yield portcon
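A usage sketch for the query above, assuming `p` is a loaded policy object:

    from socket import IPPROTO_TCP

    q = PortconQuery(p, protocol=IPPROTO_TCP,
                     ports=(80, 80), ports_overlap=True)
    for pc in q.results():
        print(pc)   # every portcon whose range covers TCP port 80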
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/query.py b/lib/python2.7/site-packages/setoolsgui/setools/query.py
new file mode 100644
index 0000000..358a095
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/query.py
@@ -0,0 +1,192 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import logging
+
+
+class PolicyQuery(object):
+
+ """Base class for SELinux policy queries."""
+
+ def __init__(self, policy, **kwargs):
+ self.log = logging.getLogger(self.__class__.__name__)
+
+ self.policy = policy
+
+ # keys are sorted in reverse order so regex settings
+ # are set before the criteria, e.g. name_regex
+ # is set before name. This ensures correct behavior
+ # since the criteria descriptors are sensitive to
+ # regex settings.
+ for name in sorted(kwargs.keys(), reverse=True):
+ attr = getattr(self, name, None) # None is not callable
+ if callable(attr):
+ raise ValueError("Keyword parameter {0} conflicts with a callable.".format(name))
+
+ setattr(self, name, kwargs[name])
+
+ @staticmethod
+ def _match_regex(obj, criteria, regex):
+ """
+ Match the object with optional regular expression.
+
+ Parameters:
+ obj The object to match.
+ criteria The criteria to match.
+ regex If regular expression matching should be used.
+ """
+
+ if regex:
+ return bool(criteria.search(str(obj)))
+ else:
+ return obj == criteria
+
+ @staticmethod
+ def _match_set(obj, criteria, equal):
+ """
+ Match the object (a set) with optional set equality.
+
+ Parameters:
+ obj The object to match. (a set)
+ criteria The criteria to match. (a set)
+ equal If set equality should be used. Otherwise
+ any set intersection will match.
+ """
+
+ if equal:
+ return obj == criteria
+ else:
+ return bool(obj.intersection(criteria))
+
+ @staticmethod
+ def _match_in_set(obj, criteria, regex):
+ """
+ Match if the criteria is in the list, with optional
+ regular expression matching.
+
+ Parameters:
+ obj The object to match.
+ criteria The criteria to match.
+ regex If regular expression matching should be used.
+ """
+
+ if regex:
+ return [m for m in obj if criteria.search(str(m))]
+ else:
+ return criteria in obj
+
+ @staticmethod
+ def _match_indirect_regex(obj, criteria, indirect, regex):
+ """
+ Match the object with optional regular expression and indirection.
+
+ Parameters:
+ obj The object to match.
+ criteria The criteria to match.
+ regex If regular expression matching should be used.
+ indirect If object indirection should be used, e.g.
+ expanding an attribute.
+ """
+
+ if indirect:
+ return PolicyQuery._match_in_set(obj.expand(), criteria, regex)
+ else:
+ return PolicyQuery._match_regex(obj, criteria, regex)
+
+ @staticmethod
+ def _match_regex_or_set(obj, criteria, equal, regex):
+ """
+ Match the object (a set) with either set comparisons
+ (equality or intersection) or by regex matching of the
+ set members. Regular expression matching will override
+ the set equality option.
+
+ Parameters:
+ obj The object to match. (a set)
+ criteria The criteria to match.
+ equal If set equality should be used. Otherwise
+ any set intersection will match. Ignored
+ if regular expression matching is used.
+ regex If regular expression matching should be used.
+ """
+
+ if regex:
+ return [m for m in obj if criteria.search(str(m))]
+ else:
+ return PolicyQuery._match_set(obj, set(criteria), equal)
+
+ @staticmethod
+ def _match_range(obj, criteria, subset, overlap, superset, proper):
+ """
+ Match ranges of objects.
+
+ obj An object with attributes named "low" and "high", representing the range.
+ criteria An object with attributes named "low" and "high", representing the criteria.
+ subset If true, the criteria will match if it is a subset of obj's range.
+ overlap If true, the criteria will match if it overlaps any of the obj's range.
+ superset If true, the criteria will match if it is a superset of the obj's range.
+ proper If true, use proper superset/subset operations.
+ No effect if not using set operations.
+ """
+
+ if overlap:
+ return ((obj.low <= criteria.low <= obj.high) or (
+ obj.low <= criteria.high <= obj.high) or (
+ criteria.low <= obj.low and obj.high <= criteria.high))
+ elif subset:
+ if proper:
+ return ((obj.low < criteria.low and criteria.high <= obj.high) or (
+ obj.low <= criteria.low and criteria.high < obj.high))
+ else:
+ return obj.low <= criteria.low and criteria.high <= obj.high
+ elif superset:
+ if proper:
+ return ((criteria.low < obj.low and obj.high <= criteria.high) or (
+ criteria.low <= obj.low and obj.high < criteria.high))
+ else:
+ return (criteria.low <= obj.low and obj.high <= criteria.high)
+ else:
+ return criteria.low == obj.low and obj.high == criteria.high
+
+ @staticmethod
+ def _match_level(obj, criteria, dom, domby, incomp):
+ """
+ Match an MLS level.
+
+ obj The level to match.
+ criteria The criteria to match. (a level)
+ dom If true, the criteria will match if it dominates obj.
+ domby If true, the criteria will match if it is dominated by obj.
+ incomp If true, the criteria will match if it is incomparable to obj.
+ """
+
+ if dom:
+ return (criteria >= obj)
+ elif domby:
+ return (criteria <= obj)
+ elif incomp:
+ return (criteria ^ obj)
+ else:
+ return (criteria == obj)
+
+ def results(self):
+ """
+ Generator which returns the matches for the query. This method
+ should be overridden by subclasses.
+ """
+ raise NotImplementedError
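Worked examples for the range matcher above, using a tiny stand-in for the low/high objects (the real callers pass port_range and MLS range objects):

    from collections import namedtuple
    R = namedtuple("R", ["low", "high"])

    obj = R(1000, 2000)
    # argument order: (obj, criteria, subset, overlap, superset, proper)
    PolicyQuery._match_range(obj, R(1500, 2500), False, True, False, False)   # True: ranges overlap
    PolicyQuery._match_range(obj, R(1200, 1800), True, False, False, False)   # True: criteria is a subset
    PolicyQuery._match_range(obj, R(1000, 2000), True, False, False, True)    # False: equal, so not a proper subset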
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/rbacrulequery.py b/lib/python2.7/site-packages/setoolsgui/setools/rbacrulequery.py
new file mode 100644
index 0000000..240b921
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/rbacrulequery.py
@@ -0,0 +1,147 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import logging
+import re
+
+from . import mixins, query
+from .descriptors import CriteriaDescriptor, CriteriaSetDescriptor, RuletypeDescriptor
+from .policyrep.exception import InvalidType, RuleUseError
+
+
+class RBACRuleQuery(mixins.MatchObjClass, query.PolicyQuery):
+
+ """
+ Query the RBAC rules.
+
+ Parameter:
+ policy The policy to query.
+
+ Keyword Parameters/Class attributes:
+ ruletype The list of rule type(s) to match.
+ source The name of the source role/attribute to match.
+ source_indirect If true, members of an attribute will be
+ matched rather than the attribute itself.
+ source_regex If true, regular expression matching will
+ be used on the source role/attribute.
+ Obeys the source_indirect option.
+ target The name of the target role/attribute to match.
+ target_indirect If true, members of an attribute will be
+ matched rather than the attribute itself.
+ target_regex If true, regular expression matching will
+ be used on the target role/attribute.
+ Obeys target_indirect option.
+ tclass The object class(es) to match.
+ tclass_regex If true, use a regular expression for
+ matching the rule's object class.
+ default The name of the default role to match.
+ default_regex If true, regular expression matching will
+ be used on the default role.
+ """
+
+ ruletype = RuletypeDescriptor("validate_rbac_ruletype")
+ source = CriteriaDescriptor("source_regex", "lookup_role")
+ source_regex = False
+ source_indirect = True
+ _target = None
+ target_regex = False
+ target_indirect = True
+ tclass = CriteriaSetDescriptor("tclass_regex", "lookup_class")
+ tclass_regex = False
+ default = CriteriaDescriptor("default_regex", "lookup_role")
+ default_regex = False
+
+ @property
+ def target(self):
+ return self._target
+
+ @target.setter
+ def target(self, value):
+ if not value:
+ self._target = None
+ elif self.target_regex:
+ self._target = re.compile(value)
+ else:
+ try:
+ self._target = self.policy.lookup_type_or_attr(value)
+ except InvalidType:
+ self._target = self.policy.lookup_role(value)
+
+ def results(self):
+ """Generator which yields all matching RBAC rules."""
+ self.log.info("Generating results from {0.policy}".format(self))
+ self.log.debug("Ruletypes: {0.ruletype}".format(self))
+ self.log.debug("Source: {0.source!r}, indirect: {0.source_indirect}, "
+ "regex: {0.source_regex}".format(self))
+ self.log.debug("Target: {0.target!r}, indirect: {0.target_indirect}, "
+ "regex: {0.target_regex}".format(self))
+ self.log.debug("Class: {0.tclass!r}, regex: {0.tclass_regex}".format(self))
+ self.log.debug("Default: {0.default!r}, regex: {0.default_regex}".format(self))
+
+ for rule in self.policy.rbacrules():
+ #
+ # Matching on rule type
+ #
+ if self.ruletype:
+ if rule.ruletype not in self.ruletype:
+ continue
+
+ #
+ # Matching on source role
+ #
+ if self.source and not self._match_indirect_regex(
+ rule.source,
+ self.source,
+ self.source_indirect,
+ self.source_regex):
+ continue
+
+ #
+ # Matching on target type (role_transition)/role(allow)
+ #
+ if self.target and not self._match_indirect_regex(
+ rule.target,
+ self.target,
+ self.target_indirect,
+ self.target_regex):
+ continue
+
+ #
+ # Matching on object class
+ #
+ try:
+ if not self._match_object_class(rule):
+ continue
+ except RuleUseError:
+ continue
+
+ #
+ # Matching on default role
+ #
+ if self.default:
+ try:
+ if not self._match_regex(
+ rule.default,
+ self.default,
+ self.default_regex):
+ continue
+ except RuleUseError:
+ continue
+
+ # if we get here, we have matched all available criteria
+ yield rule
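A usage sketch, assuming `p` is a loaded policy object:

    q = RBACRuleQuery(p, ruletype=["allow"],
                      source="staff_r", source_indirect=False)
    for rule in q.results():
        print(rule)   # e.g. allow staff_r sysadm_r;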
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/rolequery.py b/lib/python2.7/site-packages/setoolsgui/setools/rolequery.py
new file mode 100644
index 0000000..e95dfa6
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/rolequery.py
@@ -0,0 +1,77 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import logging
+import re
+
+from . import compquery
+from .descriptors import CriteriaSetDescriptor
+
+
+class RoleQuery(compquery.ComponentQuery):
+
+ """
+ Query SELinux policy roles.
+
+ Parameter:
+ policy The policy to query.
+
+ Keyword Parameters/Class attributes:
+ name The role name to match.
+ name_regex If true, regular expression matching
+ will be used on the role names.
+ types The type to match.
+ types_equal If true, only roles with type sets
+ that are equal to the criteria will
+ match. Otherwise, any intersection
+ will match.
+ types_regex If true, regular expression matching
+ will be used on the type names instead
+ of set logic.
+ """
+
+ types = CriteriaSetDescriptor("types_regex", "lookup_type")
+ types_equal = False
+ types_regex = False
+
+ def results(self):
+ """Generator which yields all matching roles."""
+ self.log.info("Generating results from {0.policy}".format(self))
+ self.log.debug("Name: {0.name!r}, regex: {0.name_regex}".format(self))
+ self.log.debug("Types: {0.types!r}, regex: {0.types_regex}, "
+ "eq: {0.types_equal}".format(self))
+
+ for r in self.policy.roles():
+ if r == "object_r":
+ # all types are implicitly added to object_r by the compiler.
+ # Technically it is incorrect to skip it, but policy writers
+ # and analysts don't expect to see it in results, and it would
+ # cause confusion, especially in set equality type queries.
+ continue
+
+ if not self._match_name(r):
+ continue
+
+ if self.types and not self._match_regex_or_set(
+ set(r.types()),
+ self.types,
+ self.types_equal,
+ self.types_regex):
+ continue
+
+ yield r
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/sensitivityquery.py b/lib/python2.7/site-packages/setoolsgui/setools/sensitivityquery.py
new file mode 100644
index 0000000..a102836
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/sensitivityquery.py
@@ -0,0 +1,74 @@
+# Copyright 2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import logging
+
+from . import compquery
+from . import mixins
+from .descriptors import CriteriaDescriptor
+
+
+class SensitivityQuery(mixins.MatchAlias, compquery.ComponentQuery):
+
+ """
+ Query MLS sensitivities.
+
+ Parameter:
+ policy The policy to query.
+
+ Keyword Parameters/Class attributes:
+ name The name of the sensitivity to match.
+ name_regex If true, regular expression matching will
+ be used for matching the name.
+ alias The alias name to match.
+ alias_regex If true, regular expression matching
+ will be used on the alias names.
+ sens The criteria to match the sensitivity by dominance.
+ sens_dom If true, the criteria will match if it dominates
+ the sensitivity.
+ sens_domby If true, the criteria will match if it is dominated
+ by the sensitivity.
+ """
+
+ sens = CriteriaDescriptor(lookup_function="lookup_sensitivity")
+ sens_dom = False
+ sens_domby = False
+
+ def results(self):
+ """Generator which yields all matching sensitivities."""
+ self.log.info("Generating results from {0.policy}".format(self))
+ self.log.debug("Name: {0.name!r}, regex: {0.name_regex}".format(self))
+ self.log.debug("Alias: {0.alias}, regex: {0.alias_regex}".format(self))
+ self.log.debug("Sens: {0.sens!r}, dom: {0.sens_dom}, domby: {0.sens_domby}".format(self))
+
+ for s in self.policy.sensitivities():
+ if not self._match_name(s):
+ continue
+
+ if not self._match_alias(s):
+ continue
+
+ if self.sens and not self._match_level(
+ s,
+ self.sens,
+ self.sens_dom,
+ self.sens_domby,
+ False):
+ continue
+
+ yield s
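A usage sketch of dominance matching, assuming `p` is a loaded MLS policy: with sens="s1" and sens_dom=True, the criteria must dominate the result, so this yields the sensitivities that s1 dominates (e.g. s0 and s1):

    q = SensitivityQuery(p, sens="s1", sens_dom=True)
    print(sorted(str(s) for s in q.results()))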
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/terulequery.py b/lib/python2.7/site-packages/setoolsgui/setools/terulequery.py
new file mode 100644
index 0000000..7f3eccf
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/terulequery.py
@@ -0,0 +1,178 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import logging
+import re
+
+from . import mixins, query
+from .descriptors import CriteriaDescriptor, CriteriaSetDescriptor, RuletypeDescriptor
+from .policyrep.exception import RuleUseError, RuleNotConditional
+
+
+class TERuleQuery(mixins.MatchObjClass, mixins.MatchPermission, query.PolicyQuery):
+
+ """
+ Query the Type Enforcement rules.
+
+ Parameter:
+ policy The policy to query.
+
+ Keyword Parameters/Class attributes:
+ ruletype The list of rule type(s) to match.
+ source The name of the source type/attribute to match.
+ source_indirect If true, members of an attribute will be
+ matched rather than the attribute itself.
+ Default is true.
+ source_regex If true, regular expression matching will
+ be used on the source type/attribute.
+ Obeys the source_indirect option.
+ Default is false.
+ target The name of the target type/attribute to match.
+ target_indirect If true, members of an attribute will be
+ matched rather than the attribute itself.
+ Default is true.
+ target_regex If true, regular expression matching will
+ be used on the target type/attribute.
+ Obeys target_indirect option.
+ Default is false.
+ tclass The object class(es) to match.
+ tclass_regex If true, use a regular expression for
+ matching the rule's object class.
+ Default is false.
+ perms The set of permission(s) to match.
+ perms_equal If true, the permission set of the rule
+ must exactly match the permissions
+ criteria. If false, any set intersection
+ will match.
+ Default is false.
+ perms_regex If true, regular expression matching will be used
+ on the permission names instead of set logic.
+ default The name of the default type to match.
+ default_regex If true, regular expression matching will be
+ used on the default type.
+ Default is false.
+ boolean The set of boolean(s) to match.
+ boolean_regex If true, regular expression matching will be
+ used on the booleans.
+ Default is false.
+ boolean_equal If true, the booleans in the conditional
+ expression of the rule must exactly match the
+ criteria. If false, any set intersection
+ will match. Default is false.
+ """
+
+ ruletype = RuletypeDescriptor("validate_te_ruletype")
+ source = CriteriaDescriptor("source_regex", "lookup_type_or_attr")
+ source_regex = False
+ source_indirect = True
+ target = CriteriaDescriptor("target_regex", "lookup_type_or_attr")
+ target_regex = False
+ target_indirect = True
+ default = CriteriaDescriptor("default_regex", "lookup_type")
+ default_regex = False
+ boolean = CriteriaSetDescriptor("boolean_regex", "lookup_boolean")
+ boolean_regex = False
+ boolean_equal = False
+
+ def results(self):
+ """Generator which yields all matching TE rules."""
+ self.log.info("Generating results from {0.policy}".format(self))
+ self.log.debug("Ruletypes: {0.ruletype}".format(self))
+ self.log.debug("Source: {0.source!r}, indirect: {0.source_indirect}, "
+ "regex: {0.source_regex}".format(self))
+ self.log.debug("Target: {0.target!r}, indirect: {0.target_indirect}, "
+ "regex: {0.target_regex}".format(self))
+ self.log.debug("Class: {0.tclass!r}, regex: {0.tclass_regex}".format(self))
+ self.log.debug("Perms: {0.perms!r}, regex: {0.perms_regex}, eq: {0.perms_equal}".
+ format(self))
+ self.log.debug("Default: {0.default!r}, regex: {0.default_regex}".format(self))
+ self.log.debug("Boolean: {0.boolean!r}, eq: {0.boolean_equal}, "
+ "regex: {0.boolean_regex}".format(self))
+
+ for rule in self.policy.terules():
+ #
+ # Matching on rule type
+ #
+ if self.ruletype:
+ if rule.ruletype not in self.ruletype:
+ continue
+
+ #
+ # Matching on source type
+ #
+ if self.source and not self._match_indirect_regex(
+ rule.source,
+ self.source,
+ self.source_indirect,
+ self.source_regex):
+ continue
+
+ #
+ # Matching on target type
+ #
+ if self.target and not self._match_indirect_regex(
+ rule.target,
+ self.target,
+ self.target_indirect,
+ self.target_regex):
+ continue
+
+ #
+ # Matching on object class
+ #
+ if not self._match_object_class(rule):
+ continue
+
+ #
+ # Matching on permission set
+ #
+ try:
+ if not self._match_perms(rule):
+ continue
+ except RuleUseError:
+ continue
+
+ #
+ # Matching on default type
+ #
+ if self.default:
+ try:
+ if not self._match_regex(
+ rule.default,
+ self.default,
+ self.default_regex):
+ continue
+ except RuleUseError:
+ continue
+
+ #
+ # Match on Boolean in conditional expression
+ #
+ if self.boolean:
+ try:
+ if not self._match_regex_or_set(
+ rule.conditional.booleans,
+ self.boolean,
+ self.boolean_equal,
+ self.boolean_regex):
+ continue
+ except RuleNotConditional:
+ continue
+
+ # if we get here, we have matched all available criteria
+ yield rule
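A usage sketch, assuming `p` is a loaded policy object (the type, class, and permission names here are placeholders):

    q = TERuleQuery(p, ruletype=["allow"],
                    source="init_t", target="bin_t",
                    tclass=["file"], perms=["execute"])
    for rule in q.results():
        print(rule)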
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/typeattrquery.py b/lib/python2.7/site-packages/setoolsgui/setools/typeattrquery.py
new file mode 100644
index 0000000..a91026c
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/typeattrquery.py
@@ -0,0 +1,70 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import logging
+import re
+
+from . import compquery
+from .descriptors import CriteriaSetDescriptor
+
+
+class TypeAttributeQuery(compquery.ComponentQuery):
+
+ """
+ Query SELinux policy type attributes.
+
+ Parameter:
+ policy The policy to query.
+
+ Keyword Parameters/Class attributes:
+ name The type name to match.
+ name_regex If true, regular expression matching
+ will be used on the type names.
+ types The type to match.
+ types_equal If true, only attributes with type sets
+ that are equal to the criteria will
+ match. Otherwise, any intersection
+ will match.
+ types_regex If true, regular expression matching
+ will be used on the type names instead
+ of set logic.
+ """
+
+ types = CriteriaSetDescriptor("types_regex", "lookup_type")
+ types_equal = False
+ types_regex = False
+
+ def results(self):
+ """Generator which yields all matching types."""
+ self.log.info("Generating results from {0.policy}".format(self))
+ self.log.debug("Name: {0.name!r}, regex: {0.name_regex}".format(self))
+ self.log.debug("Types: {0.types!r}, regex: {0.types_regex}, "
+ "eq: {0.types_equal}".format(self))
+
+ for attr in self.policy.typeattributes():
+ if not self._match_name(attr):
+ continue
+
+ if self.types and not self._match_regex_or_set(
+ set(attr.expand()),
+ self.types,
+ self.types_equal,
+ self.types_regex):
+ continue
+
+ yield attr
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/typequery.py b/lib/python2.7/site-packages/setoolsgui/setools/typequery.py
new file mode 100644
index 0000000..6634f76
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/typequery.py
@@ -0,0 +1,96 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import logging
+import re
+
+from . import compquery
+from . import mixins
+from .descriptors import CriteriaSetDescriptor
+
+
+class TypeQuery(mixins.MatchAlias, compquery.ComponentQuery):
+
+ """
+ Query SELinux policy types.
+
+ Parameter:
+ policy The policy to query.
+
+ Keyword Parameters/Class attributes:
+ name The type name to match.
+ name_regex If true, regular expression matching
+ will be used on the type names.
+ alias The alias name to match.
+ alias_regex If true, regular expression matching
+ will be used on the alias names.
+ attrs The attribute to match.
+ attrs_equal If true, only types with attribute sets
+ that are equal to the criteria will
+ match. Otherwise, any intersection
+ will match.
+ attrs_regex If true, regular expression matching
+ will be used on the attribute names instead
+ of set logic.
+ permissive The permissive state to match. If this
+ is None, the state is not matched.
+ """
+
+ attrs = CriteriaSetDescriptor("attrs_regex", "lookup_typeattr")
+ attrs_regex = False
+ attrs_equal = False
+ _permissive = None
+
+ @property
+ def permissive(self):
+ return self._permissive
+
+ @permissive.setter
+ def permissive(self, value):
+ if value is None:
+ self._permissive = None
+ else:
+ self._permissive = bool(value)
+
+ def results(self):
+ """Generator which yields all matching types."""
+ self.log.info("Generating results from {0.policy}".format(self))
+ self.log.debug("Name: {0.name!r}, regex: {0.name_regex}".format(self))
+ self.log.debug("Alias: {0.alias}, regex: {0.alias_regex}".format(self))
+ self.log.debug("Attrs: {0.attrs!r}, regex: {0.attrs_regex}, "
+ "eq: {0.attrs_equal}".format(self))
+ self.log.debug("Permissive: {0.permissive}".format(self))
+
+ for t in self.policy.types():
+ if not self._match_name(t):
+ continue
+
+ if not self._match_alias(t):
+ continue
+
+ if self.attrs and not self._match_regex_or_set(
+ set(t.attributes()),
+ self.attrs,
+ self.attrs_equal,
+ self.attrs_regex):
+ continue
+
+ if self.permissive is not None and t.ispermissive != self.permissive:
+ continue
+
+ yield t
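A usage sketch of the permissive criterion above (a tri-state: None skips the check entirely), assuming `p` is a loaded policy object:

    q = TypeQuery(p, permissive=True)
    print(sorted(str(t) for t in q.results()))   # all permissive types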
diff --git a/lib/python2.7/site-packages/setoolsgui/setools/userquery.py b/lib/python2.7/site-packages/setoolsgui/setools/userquery.py
new file mode 100644
index 0000000..00910cf
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/setools/userquery.py
@@ -0,0 +1,116 @@
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import logging
+import re
+
+from . import compquery
+from .descriptors import CriteriaDescriptor, CriteriaSetDescriptor
+
+
+class UserQuery(compquery.ComponentQuery):
+
+ """
+ Query SELinux policy users.
+
+ Parameter:
+ policy The policy to query.
+
+ Keyword Parameters/Class attributes:
+ name The user name to match.
+ name_regex If true, regular expression matching
+ will be used on the user names.
+ roles The role to match.
+ roles_equal If true, only users with role sets
+ that are equal to the criteria will
+ match. Otherwise, any intersection
+ will match.
+ roles_regex If true, regular expression matching
+ will be used on the role names instead
+ of set logic.
+ level The criteria to match the user's default level.
+ level_dom If true, the criteria will match if it dominates
+ the user's default level.
+ level_domby If true, the criteria will match if it is dominated
+ by the user's default level.
+ level_incomp If true, the criteria will match if it is incomparable
+ to the user's default level.
+ range_ The criteria to match the user's range.
+ range_subset If true, the criteria will match if it is a subset
+ of the user's range.
+ range_overlap If true, the criteria will match if it overlaps
+ any part of the user's range.
+ range_superset If true, the criteria will match if it is a superset
+ of the user's range.
+ range_proper If true, use proper superset/subset operations.
+ No effect if not using set operations.
+ """
+
+ level = CriteriaDescriptor(lookup_function="lookup_level")
+ level_dom = False
+ level_domby = False
+ level_incomp = False
+ range_ = CriteriaDescriptor(lookup_function="lookup_range")
+ range_overlap = False
+ range_subset = False
+ range_superset = False
+ range_proper = False
+ roles = CriteriaSetDescriptor("roles_regex", "lookup_role")
+ roles_equal = False
+ roles_regex = False
+
+ def results(self):
+ """Generator which yields all matching users."""
+ self.log.info("Generating results from {0.policy}".format(self))
+ self.log.debug("Name: {0.name!r}, regex: {0.name_regex}".format(self))
+ self.log.debug("Roles: {0.roles!r}, regex: {0.roles_regex}, "
+ "eq: {0.roles_equal}".format(self))
+ self.log.debug("Level: {0.level!r}, dom: {0.level_dom}, domby: {0.level_domby}, "
+ "incomp: {0.level_incomp}".format(self))
+ self.log.debug("Range: {0.range_!r}, subset: {0.range_subset}, overlap: {0.range_overlap}, "
+ "superset: {0.range_superset}, proper: {0.range_proper}".format(self))
+
+ for user in self.policy.users():
+ if not self._match_name(user):
+ continue
+
+ if self.roles and not self._match_regex_or_set(
+ user.roles,
+ self.roles,
+ self.roles_equal,
+ self.roles_regex):
+ continue
+
+ if self.level and not self._match_level(
+ user.mls_level,
+ self.level,
+ self.level_dom,
+ self.level_domby,
+ self.level_incomp):
+ continue
+
+ if self.range_ and not self._match_range(
+ user.mls_range,
+ self.range_,
+ self.range_subset,
+ self.range_overlap,
+ self.range_superset,
+ self.range_proper):
+ continue
+
+ yield user
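
As with TypeQuery above, a minimal usage sketch (hypothetical; the role names must exist in the loaded policy, since they are resolved through lookup_role):

    from setools import SELinuxPolicy, UserQuery

    p = SELinuxPolicy("/sys/fs/selinux/policy")  # hypothetical policy path

    # Users whose role set intersects {"staff_r", "sysadm_r"};
    # pass roles_equal=True to require exact set equality instead.
    q = UserQuery(p, roles=set(["staff_r", "sysadm_r"]))
    for user in q.results():
        print(user)
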
diff --git a/lib/python2.7/site-packages/setoolsgui/widget.py b/lib/python2.7/site-packages/setoolsgui/widget.py
new file mode 100644
index 0000000..25067a2
--- /dev/null
+++ b/lib/python2.7/site-packages/setoolsgui/widget.py
@@ -0,0 +1,37 @@
+# Copyright 2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 2.1 of
+# the License, or (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SETools. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+import sys
+from errno import ENOENT
+
+from PyQt5.uic import loadUi
+
+
+class SEToolsWidget(object):
+ def load_ui(self, filename):
+ # If we are running from the source tree, use the local
+ # UI file; otherwise fall back to the installed copy.
+ for path in ["data/", sys.prefix + "/share/setools/"]:
+ try:
+ loadUi(path + filename, self)
+ break
+ except (IOError, OSError) as err:
+ if err.errno != ENOENT:
+ raise
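+ # for/else: this else clause runs only if the loop finished without
+ # a break, i.e. the UI file was not found in any search path.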
+ else:
+ raise RuntimeError("Unable to load Qt UI file \"{0}\"".format(filename))
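
A minimal sketch of how this mixin is meant to be used, mixed into a Qt widget class (the class name and .ui filename below are hypothetical):

    import sys
    from PyQt5.QtWidgets import QApplication, QWidget

    class ExampleDialog(SEToolsWidget, QWidget):
        def __init__(self, parent=None):
            super(ExampleDialog, self).__init__(parent)
            # Resolves "data/example.ui" in a source checkout, then
            # sys.prefix + "/share/setools/example.ui" when installed.
            self.load_ui("example.ui")

    app = QApplication(sys.argv)  # a QApplication must exist before creating widgets
    dialog = ExampleDialog()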