This file is indexed.

/usr/share/pyshared/ZEO/tests/IterationTests.py is in python-zodb 1:3.10.5-0ubuntu3.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.

##############################################################################
#
# Copyright (c) 2008 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""ZEO iterator protocol tests."""

import transaction


class IterationTests:

    def checkIteratorGCProtocol(self):
        # Test garbage collection on protocol level.
        server = self._storage._server

        iid = server.iterator_start(None, None)
        # None signals the end of iteration.
        self.assertEquals(None, server.iterator_next(iid))
        # The server has disposed the iterator already.
        self.assertRaises(KeyError, server.iterator_next, iid)

        iid = server.iterator_start(None, None)
        # This time, we tell the server to throw the iterator away.
        server.iterator_gc([iid])
        self.assertRaises(KeyError, server.iterator_next, iid)

    def checkIteratorExhaustionStorage(self):
        # Test the storage's garbage collection mechanism.
        self._dostore()
        iterator = self._storage.iterator()

        # At this point, a wrapping iterator might not have called the CS
        # iterator yet. We'll consume one item to make sure this happens.
        iterator.next()
        self.assertEquals(1, len(self._storage._iterator_ids))
        iid = list(self._storage._iterator_ids)[0]
        self.assertEquals([], list(iterator))
        self.assertEquals(0, len(self._storage._iterator_ids))

        # The iterator has run through, so the server has already disposed it.
        self.assertRaises(KeyError, self._storage._server.iterator_next, iid)

    def checkIteratorGCSpanTransactions(self):
        # Keep a hard reference to the iterator so it won't be automatically
        # garbage collected at the transaction boundary.
        self._dostore()
        iterator = self._storage.iterator()
        self._dostore()
        # As the iterator was not garbage collected, we can still use it. (We
        # don't see the transaction we just wrote being picked up, because
        # iterators only see the state from the point in time when they were
        # created.)
        self.assert_(list(iterator))

    def checkIteratorGCStorageCommitting(self):
        # We want the iterator to be garbage-collected, so we don't keep any
        # hard references to it. The storage tracks its ID, though.

        # The odd little jig we do below arises from the fact that the
        # CS iterator may not be constructed right away if the CS is wrapped.
        # We need to actually do some iteration to get the iterator created.
        # We do a store to make sure the iterator isn't exhausted right away.
        self._dostore()
        self._storage.iterator().next()

        self.assertEquals(1, len(self._storage._iterator_ids))
        iid = list(self._storage._iterator_ids)[0]

        # GC happens at the transaction boundary. After that, both the storage
        # and the server have forgotten the iterator.
        self._dostore()
        self.assertEquals(0, len(self._storage._iterator_ids))
        self.assertRaises(KeyError, self._storage._server.iterator_next, iid)

    def checkIteratorGCStorageTPCAborting(self):
        # The odd little jig we do below arises from the fact that the
        # CS iterator may not be constructed right away if the CS is wrapped.
        # We need to actually do some iteration to get the iterator created.
        # We do a store to make sure the iterator isn't exhausted right away.
        self._dostore()
        self._storage.iterator().next()

        iid = list(self._storage._iterator_ids)[0]

        t = transaction.Transaction()
        self._storage.tpc_begin(t)
        self._storage.tpc_abort(t)
        self.assertEquals(0, len(self._storage._iterator_ids))
        self.assertRaises(KeyError, self._storage._server.iterator_next, iid)

    def checkIteratorGCStorageDisconnect(self):

        # The odd little jig we do below arises from the fact that the
        # CS iterator may not be constructed right away if the CS is wrapped.
        # We need to actually do some iteration to get the iterator created.
        # We do a store to make sure the iterator isn't exhausted right away.
        self._dostore()
        self._storage.iterator().next()

        iid = list(self._storage._iterator_ids)[0]
        t = transaction.Transaction()
        self._storage.tpc_begin(t)
        # Show that after disconnecting, the client side GCs the iterators
        # as well. I'm calling this directly to avoid accidentally
        # calling tpc_abort implicitly.
        self._storage.notifyDisconnected()
        self.assertEquals(0, len(self._storage._iterator_ids))

    def checkIteratorParallel(self):
        self._dostore()
        self._dostore()
        iter1 = self._storage.iterator()
        iter2 = self._storage.iterator()
        txn_info1 = iter1.next()
        txn_info2 = iter2.next()
        self.assertEquals(txn_info1.tid, txn_info2.tid)
        txn_info1 = iter1.next()
        txn_info2 = iter2.next()
        self.assertEquals(txn_info1.tid, txn_info2.tid)
        self.assertRaises(StopIteration, iter1.next)
        self.assertRaises(StopIteration, iter2.next)


def iterator_sane_after_reconnect():
    r"""Make sure that iterators are invalidated on disconnect.

Start a server:

    >>> addr, adminaddr = start_server(
    ...     '<filestorage>\npath fs\n</filestorage>', keep=1)

Open a client storage to it and commit some transactions:

    >>> import ZEO, transaction
    >>> db = ZEO.DB(addr)
    >>> conn = db.open()
    >>> for i in range(10):
    ...     conn.root().i = i
    ...     transaction.commit()

Create an iterator:

    >>> it = conn._storage.iterator()
    >>> tid1 = it.next().tid

Restart the storage:

    >>> stop_server(adminaddr)
    >>> wait_disconnected(conn._storage)
    >>> _ = start_server('<filestorage>\npath fs\n</filestorage>', addr=addr)
    >>> wait_connected(conn._storage)

Now, we'll create a second iterator:

    >>> it2 = conn._storage.iterator()

If we try to advance the first iterator, we should get an error:

    >>> it.next().tid > tid1
    Traceback (most recent call last):
    ...
    ClientDisconnected: Disconnected iterator

The second iterator should be peachy:

    >>> it2.next().tid == tid1
    True

Cleanup:

    >>> db.close()
    """